From 5d4b803914d256a86f53a9a95e127026ae64caa6 Mon Sep 17 00:00:00 2001 From: Stella Laurenzo Date: Sat, 27 Apr 2024 14:08:09 -0700 Subject: [PATCH] [NFC reformat] Run pre-commit on all files and format misc. This is part 1 of ~3, formatting all miscellaneous text files and CPP files matched by a first run of pre-commit. These tend to be low change-traffic and are likely not disruptive. Subsequent patches will format Python files and remaining CPP files. --- CMakeLists.txt | 2 +- build_tools/ci/test_posix.sh | 2 +- .../python_deploy/build_linux_packages.sh | 2 +- docs/importers/onnx_importer.md | 1 - docs/roadmap.md | 1 - include/CMakeLists.txt | 2 +- .../torch-mlir/Dialect/Torch/IR/TorchOps.td | 18 ++--- .../Dialect/Torch/Transforms/Passes.td | 2 +- .../TorchOnnxToTorch/DefaultDomainGtoP.cpp | 67 +++++++++---------- .../StablehloLegalizeUtils.cpp | 5 +- .../TorchToTosa/TosaLegalizeUtils.cpp | 8 +-- .../TMTensor/Transforms/CMakeLists.txt | 2 +- .../Transforms/LowerToBackendContract.cpp | 3 +- .../TorchConversion/Transforms/CMakeLists.txt | 2 +- projects/CMakeLists.txt | 8 +-- .../mlir_native_functions.cpp | 2 +- .../csrc/base_lazy_backend/ops/unbind_int.h | 2 +- projects/pt1/python/CMakeLists.txt | 4 +- .../CMakeLists.txt | 1 - projects/pt1/test/CMakeLists.txt | 2 +- test/CAPI/lit.local.cfg | 2 +- test/CAPI/torch.c | 14 ++-- test/CMakeLists.txt | 2 +- .../TorchOnnxToTorch/simple_ops_a_to_f.mlir | 18 ++--- .../TorchOnnxToTorch/simple_ops_g_to_p.mlir | 8 +-- .../TorchOnnxToTorch/simple_ops_q_to_z.mlir | 8 +-- .../unsupported_fb_opt_ops.mlir | 2 +- test/Conversion/TorchToLinalg/flatten.mlir | 2 - .../Conversion/TorchToLinalg/gridsampler.mlir | 1 - test/Conversion/TorchToLinalg/view.mlir | 1 - .../TorchToStablehlo/elementwise.mlir | 2 +- test/Conversion/TorchToStablehlo/gather.mlir | 1 - test/Conversion/TorchToStablehlo/scatter.mlir | 2 +- .../TorchToStablehlo/view_like.mlir | 1 - .../TorchToTosa/cast_fp32_to_fp16.mlir | 4 +- .../TorchToTosa/conv2d_transpose.mlir | 1 - test/Dialect/Torch/canonicalize.mlir | 4 +- test/Dialect/Torch/ops.mlir | 1 - test/python/fx_importer/v2.3/lit.local.cfg | 2 +- utils/bazel/torch-mlir-overlay/.bazelignore | 2 +- 40 files changed, 100 insertions(+), 114 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 44f02ac6af38..0c562fbe31c0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -247,4 +247,4 @@ add_subdirectory(projects) # Finish with top-level Python bindings so it can handle additional deps. if(MLIR_ENABLE_BINDINGS_PYTHON) add_subdirectory(python) -endif() \ No newline at end of file +endif() diff --git a/build_tools/ci/test_posix.sh b/build_tools/ci/test_posix.sh index a5387602ae44..accdc41990c3 100755 --- a/build_tools/ci/test_posix.sh +++ b/build_tools/ci/test_posix.sh @@ -30,7 +30,7 @@ echo "::endgroup::" case $torch_version in nightly) - # Failing with: NotImplementedError: + # Failing with: NotImplementedError: # Could not run 'aten::empty.memory_format' with arguments from the 'Lazy' backend. 
# As of 2024-01-07 # echo "::group::Run Lazy Tensor Core e2e integration tests" diff --git a/build_tools/python_deploy/build_linux_packages.sh b/build_tools/python_deploy/build_linux_packages.sh index 6c5daf15ff02..4feccdd64029 100755 --- a/build_tools/python_deploy/build_linux_packages.sh +++ b/build_tools/python_deploy/build_linux_packages.sh @@ -282,7 +282,7 @@ function _check_file_not_changed_by() { function test_in_tree() { local torch_version="$1" - + echo ":::: Test in-tree" cmake --build /main_checkout/torch-mlir/build --target check-torch-mlir-all diff --git a/docs/importers/onnx_importer.md b/docs/importers/onnx_importer.md index a0b861d6d9cb..386c012d9786 100644 --- a/docs/importers/onnx_importer.md +++ b/docs/importers/onnx_importer.md @@ -140,4 +140,3 @@ torch-mlir's representation: * `ConstantOfShape`: Mapped to `torch.vtensor.literal` with a corresponding `value` attribute. - diff --git a/docs/roadmap.md b/docs/roadmap.md index f60502a52423..e5520432657a 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -277,4 +277,3 @@ directly provided a way to plug into this. Additionally, we can leverage the [`pytorch-jit-paritybench`](https://github.com/jansel/pytorch-jit-paritybench) to verify our end-to-end correctness on real models. - diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt index cba1f75a101e..7814043049e0 100644 --- a/include/CMakeLists.txt +++ b/include/CMakeLists.txt @@ -1,2 +1,2 @@ add_subdirectory(torch-mlir) -add_subdirectory(torch-mlir-dialects) \ No newline at end of file +add_subdirectory(torch-mlir-dialects) diff --git a/include/torch-mlir/Dialect/Torch/IR/TorchOps.td b/include/torch-mlir/Dialect/Torch/IR/TorchOps.td index f5214db58f19..f578cefe0297 100644 --- a/include/torch-mlir/Dialect/Torch/IR/TorchOps.td +++ b/include/torch-mlir/Dialect/Torch/IR/TorchOps.td @@ -756,12 +756,12 @@ def Torch_ConstantNumberOp : Torch_Op<"constant.number", [ConstantLike, Pure]> { let summary = "Materialize a constant `number` value."; let description = [{ - This op is used as a workaround to the fact that the constant - materialization in MLIR must materialize a constant with a single op. - To materialize ops with a static `!torch.number` type, we must use this op, + This op is used as a workaround to the fact that the constant + materialization in MLIR must materialize a constant with a single op. + To materialize ops with a static `!torch.number` type, we must use this op, even though we statically know if it is an integer or a float. - Note: This op unconditionally canonicalizes to + Note: This op unconditionally canonicalizes to `torch.constant.{float,int}` + `torch.derefine` }]; let arguments = (ins @@ -846,7 +846,7 @@ def Torch_OperatorOp : Torch_Op<"operator", [ let regions = (region VariadicRegion:$regions); let assemblyFormat = [{ - $name `(` $operands `)` attr-dict `:` functional-type($operands, $results) $regions + $name `(` $operands `)` attr-dict `:` functional-type($operands, $results) $regions }]; } @@ -1146,10 +1146,10 @@ def Torch_PromoteDtypesOp: Torch_Op<"promote_dtypes", [ let assemblyFormat = "$ranks `,` $dtypes attr-dict `:` functional-type(operands, results)"; } -// To handle runtime assertions, torchscript provides us `torch._assert` operation. -// But TS compiler introduces control flow for `torch._assert` operation. The -// `torch._assert` would introduce control flow like: -// +// To handle runtime assertions, torchscript provides us `torch._assert` operation. +// But TS compiler introduces control flow for `torch._assert` operation. 
The +// `torch._assert` would introduce control flow like: +// // %cond = "torch.aten.Bool.Tensor"(%0) : (!torch.tensor) -> !torch.bool // "torch.prim.If"(%cond) ({ // "torch.prim.If.yield"() : () -> () diff --git a/include/torch-mlir/Dialect/Torch/Transforms/Passes.td b/include/torch-mlir/Dialect/Torch/Transforms/Passes.td index 52774c30389c..715b2265dbca 100644 --- a/include/torch-mlir/Dialect/Torch/Transforms/Passes.td +++ b/include/torch-mlir/Dialect/Torch/Transforms/Passes.td @@ -369,7 +369,7 @@ def LowerToBackendContract to the backend contract. This pass does not do any global program restructuring -- it works entirely within a single semantic model of a `builtin.module` with `torch.global_slot` ops and `func.func` ops. - + This pass runs a set of simplifications within that semantic model until the backend contract is satisfied, and fails if it cannot be satisfied. In particular, the backend contract consists of: diff --git a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp index 90c64db33b01..7a150794cb4b 100644 --- a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp +++ b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp @@ -628,42 +628,39 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP( binder.op, resultType, operand); return success(); }); - patterns.onOp("Not", 1, - [](OpBinder binder, ConversionPatternRewriter &rewriter) { - Torch::ValueTensorType resultType; - Value operand; - if (binder.tensorOperand(operand) || - binder.tensorResultType(resultType)) { - return failure(); - } + patterns.onOp( + "Not", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) { + Torch::ValueTensorType resultType; + Value operand; + if (binder.tensorOperand(operand) || + binder.tensorResultType(resultType)) { + return failure(); + } - auto loc = binder.getLoc(); - auto operandTy = - cast(operand.getType()); - auto eTy = operandTy.getDtype(); - - if (!eTy.isInteger(1)) { - auto i1ty = rewriter.getI1Type(); - auto ty = rewriter.getType( - operandTy.getSizes(), i1ty); - auto torchqTy = Torch::getScalarTypeForType(i1ty); - Value tyConst = rewriter.create( - binder.getLoc(), rewriter.getType(), - rewriter.getIntegerAttr( - rewriter.getIntegerType(64), - static_cast(torchqTy))); - Value none = rewriter.create(loc); - Value cstFalse = - rewriter.create(loc, false); - operand = rewriter.create( - loc, ty, operand, tyConst, - /*non_blocking=*/cstFalse, /*copy=*/cstFalse, - /*memory_format=*/none); - } - rewriter.replaceOpWithNewOp( - binder.op, resultType, operand); - return success(); - }); + auto loc = binder.getLoc(); + auto operandTy = cast(operand.getType()); + auto eTy = operandTy.getDtype(); + + if (!eTy.isInteger(1)) { + auto i1ty = rewriter.getI1Type(); + auto ty = rewriter.getType( + operandTy.getSizes(), i1ty); + auto torchqTy = Torch::getScalarTypeForType(i1ty); + Value tyConst = rewriter.create( + binder.getLoc(), rewriter.getType(), + rewriter.getIntegerAttr(rewriter.getIntegerType(64), + static_cast(torchqTy))); + Value none = rewriter.create(loc); + Value cstFalse = rewriter.create(loc, false); + operand = rewriter.create( + loc, ty, operand, tyConst, + /*non_blocking=*/cstFalse, /*copy=*/cstFalse, + /*memory_format=*/none); + } + rewriter.replaceOpWithNewOp( + binder.op, resultType, operand); + return success(); + }); patterns.onOp("Or", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) { Torch::ValueTensorType resultType; diff --git a/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp 
b/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp index 5db6ee339b09..40ec715cd62e 100644 --- a/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp +++ b/lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp @@ -189,9 +189,8 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input, do_bcast = true; } else { op->emitError("The size of tensor a (") - << inDim << ")" - << "must match the size of tensor b (" << outDim << ")" - << "at non-singleton dimension " << inPos; + << inDim << ")" << "must match the size of tensor b (" << outDim + << ")" << "at non-singleton dimension " << inPos; } } std::reverse(bcastDims.begin(), bcastDims.end()); diff --git a/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp b/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp index 5c46b8942fdd..ab3db75fa85f 100644 --- a/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp +++ b/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp @@ -287,19 +287,19 @@ static LogicalResult checkValidityOfCast(Type src, Type dest) { (src.isInteger(1) && dest.isInteger(64)) || (src.isInteger(1) && dest.isF32()) || // f64 -> * - (src.isF64() && dest.isF32()) || + (src.isF64() && dest.isF32()) || (src.isF64() && dest.isBF16()) || // f32 -> * - (src.isF32() && dest.isF64()) || + (src.isF32() && dest.isF64()) || (src.isF32() && dest.isBF16()) || - (src.isF32() && dest.isF16()) || + (src.isF32() && dest.isF16()) || (src.isF32() && dest.isInteger(8)) || (src.isF32() && dest.isInteger(64)) || (src.isF32() && dest.isInteger(1)) || // bf16 -> * (src.isBF16() && dest.isInteger(8)) || (src.isBF16() && dest.isInteger(16)) || - (src.isBF16() && dest.isInteger(32)) || + (src.isBF16() && dest.isInteger(32)) || (src.isBF16() && dest.isF32())) { return success(); } diff --git a/lib/Dialect/TMTensor/Transforms/CMakeLists.txt b/lib/Dialect/TMTensor/Transforms/CMakeLists.txt index 0ae89342127e..484b2b059aad 100644 --- a/lib/Dialect/TMTensor/Transforms/CMakeLists.txt +++ b/lib/Dialect/TMTensor/Transforms/CMakeLists.txt @@ -22,4 +22,4 @@ add_mlir_library(TorchMLIRTMTensorPasses MLIRTransforms ) -torch_mlir_target_includes(TorchMLIRTMTensorPasses) \ No newline at end of file +torch_mlir_target_includes(TorchMLIRTMTensorPasses) diff --git a/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp b/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp index b3318c6c1c72..38686f9b6692 100644 --- a/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp +++ b/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp @@ -305,8 +305,7 @@ class LowerToBackendContractPass return signalPassFailure(); } while (!satisfiesBackendContract(module, target)); LLVM_DEBUG({ - llvm::dbgs() << "LowerToBackendContractPass: " - << "succeeded after " << i + llvm::dbgs() << "LowerToBackendContractPass: " << "succeeded after " << i << " iterations of the simplification pipeline\n"; }); } diff --git a/lib/Dialect/TorchConversion/Transforms/CMakeLists.txt b/lib/Dialect/TorchConversion/Transforms/CMakeLists.txt index 79fb0ee4aa3e..d5042926b63c 100644 --- a/lib/Dialect/TorchConversion/Transforms/CMakeLists.txt +++ b/lib/Dialect/TorchConversion/Transforms/CMakeLists.txt @@ -21,7 +21,7 @@ endif() add_mlir_library(TorchMLIRTorchConversionPasses BackendTypeConversion.cpp - BackendTypeConversionPasses.cpp + BackendTypeConversionPasses.cpp Passes.cpp ConvertCustomQuantOp.cpp UnpackQuantTensor.cpp diff --git a/projects/CMakeLists.txt b/projects/CMakeLists.txt index ea7e34593aba..572c4535b7a5 100644 --- a/projects/CMakeLists.txt +++ b/projects/CMakeLists.txt @@ 
-44,16 +44,16 @@ if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER OR TORCH_MLIR_ENABLE_LTC) message(FATAL_ERROR "Without TORCH_MLIR_USE_INSTALLED_PYTORCH, expected to find Torch configuration at ${Torch_DIR}, which does not exist") endif() endif() - + find_package(Torch 1.11 REQUIRED) - + set(TORCHGEN_DIR ${Torch_ROOT}/../../../torchgen) - + include_directories(BEFORE ${TORCH_INCLUDE_DIRS} ${Python3_INCLUDE_DIRS} ) - link_directories("${TORCH_INSTALL_PREFIX}/lib") + link_directories("${TORCH_INSTALL_PREFIX}/lib") message(STATUS "TORCH_CXXFLAGS is = ${TORCH_CXXFLAGS}") if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND NOT TORCH_CXXFLAGS) message(WARNING diff --git a/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp b/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp index ea544e68b580..946430e72503 100644 --- a/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp +++ b/projects/ltc/csrc/base_lazy_backend/mlir_native_functions.cpp @@ -713,4 +713,4 @@ at::Tensor &LazyNativeFunctions::logsumexp_out(const at::Tensor &self, void InitializeAtenBindings() {} } // namespace lazy -} // namespace torch \ No newline at end of file +} // namespace torch diff --git a/projects/ltc/csrc/base_lazy_backend/ops/unbind_int.h b/projects/ltc/csrc/base_lazy_backend/ops/unbind_int.h index 9d6d83842b10..1a6d9f5eb57f 100644 --- a/projects/ltc/csrc/base_lazy_backend/ops/unbind_int.h +++ b/projects/ltc/csrc/base_lazy_backend/ops/unbind_int.h @@ -34,4 +34,4 @@ class UnbindCopyInt : public torch::lazy::TorchMlirNode { }; } // namespace lazy -} // namespace torch \ No newline at end of file +} // namespace torch diff --git a/projects/pt1/python/CMakeLists.txt b/projects/pt1/python/CMakeLists.txt index 443fcc809e2c..c86f8e52c881 100644 --- a/projects/pt1/python/CMakeLists.txt +++ b/projects/pt1/python/CMakeLists.txt @@ -56,7 +56,7 @@ endif() # Can we build the JIT IR importer with `declare_mlir_python_extension`? # Then it would "just work". 
if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER) - add_dependencies(TorchMLIRPythonTorchExtensionsSources + add_dependencies(TorchMLIRPythonTorchExtensionsSources TorchMLIRJITIRImporter TorchMLIRJITIRImporterPybind TorchMLIRE2ETestPythonModules @@ -65,7 +65,7 @@ endif() if(TORCH_MLIR_ENABLE_LTC) # Add Torch-MLIR LTC backend as dependency - add_dependencies(TorchMLIRPythonTorchExtensionsSources + add_dependencies(TorchMLIRPythonTorchExtensionsSources torch_mlir_ltc_backend reference_lazy_backend ) diff --git a/projects/pt1/python/torch_mlir/_torch_mlir_custom_op_example/CMakeLists.txt b/projects/pt1/python/torch_mlir/_torch_mlir_custom_op_example/CMakeLists.txt index 383f9b66b0f9..9c2fea88c555 100644 --- a/projects/pt1/python/torch_mlir/_torch_mlir_custom_op_example/CMakeLists.txt +++ b/projects/pt1/python/torch_mlir/_torch_mlir_custom_op_example/CMakeLists.txt @@ -28,4 +28,3 @@ set_target_properties(torch_mlir_custom_op_example PROPERTIES ) torch_mlir_python_target_compile_options(torch_mlir_custom_op_example) mlir_check_all_link_libraries(torch_mlir_custom_op_example) - diff --git a/projects/pt1/test/CMakeLists.txt b/projects/pt1/test/CMakeLists.txt index b750c9845194..ed1b710871ff 100644 --- a/projects/pt1/test/CMakeLists.txt +++ b/projects/pt1/test/CMakeLists.txt @@ -13,7 +13,7 @@ configure_lit_site_cfg( set(TORCH_MLIR_TEST_DEPENDS FileCheck count not TorchMLIRPythonModules - torch-mlir-opt + torch-mlir-opt torch-mlir-capi-torch-test ) diff --git a/test/CAPI/lit.local.cfg b/test/CAPI/lit.local.cfg index 03902ac9639d..f08a0de488dd 100644 --- a/test/CAPI/lit.local.cfg +++ b/test/CAPI/lit.local.cfg @@ -1 +1 @@ -config.suffixes.add('.c') \ No newline at end of file +config.suffixes.add('.c') diff --git a/test/CAPI/torch.c b/test/CAPI/torch.c index e9c5d23e2438..d42cf96d554c 100644 --- a/test/CAPI/torch.c +++ b/test/CAPI/torch.c @@ -36,7 +36,7 @@ static void testTensor(MlirContext ctx, intptr_t numSizes, int64_t *sizes, fprintf(stderr, #TTT "Type %s rank: %zu\n", testName, \ torchMlirTorch##TTT##TypeGetRank(TTT##Type)); \ int64_t *TTT##Sizes = malloc(sizeof(int64_t) * numSizes); \ - torchMlirTorch##TTT##TypeGetSizes(TTT##Type, TTT##Sizes); \ + torchMlirTorch##TTT##TypeGetSizes(TTT##Type, TTT##Sizes); \ for (int i = 0; i < numSizes; ++i) { \ fprintf(stderr, #TTT "Type %s pos %d size: %ld\n", testName, i, \ TTT##Sizes[i]); \ @@ -157,22 +157,26 @@ static void testTypeMetaDataAccessors(MlirContext ctx) { MlirType dictType1 = torchMlirTorchDictTypeGet(strType, floatType); fprintf(stderr, "dict keyType: "); - mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType1), printToStderr, NULL); + mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType1), printToStderr, + NULL); fprintf(stderr, "\n"); // CHECK: dict keyType: !torch.str fprintf(stderr, "dict valueType: "); - mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType1), printToStderr, NULL); + mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType1), printToStderr, + NULL); fprintf(stderr, "\n"); // CHECK: dict valueType: !torch.float MlirType dictType2 = torchMlirTorchDictTypeGet(floatType, strType); fprintf(stderr, "dict keyType: "); - mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType2), printToStderr, NULL); + mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType2), printToStderr, + NULL); fprintf(stderr, "\n"); // CHECK: dict keyType: !torch.float fprintf(stderr, "dict valueType: "); - mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType2), printToStderr, NULL); + mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType2), 
printToStderr, + NULL); fprintf(stderr, "\n"); // CHECK: dict valueType: !torch.str } diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 8997cfba2f72..dbfa86aa5aa7 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -14,7 +14,7 @@ configure_lit_site_cfg( set(TORCH_MLIR_TEST_DEPENDS FileCheck count not TorchMLIRPythonModules - torch-mlir-opt + torch-mlir-opt torch-mlir-capi-torch-test ) diff --git a/test/Conversion/TorchOnnxToTorch/simple_ops_a_to_f.mlir b/test/Conversion/TorchOnnxToTorch/simple_ops_a_to_f.mlir index 33d8d8f658b2..f53e55a1679b 100644 --- a/test/Conversion/TorchOnnxToTorch/simple_ops_a_to_f.mlir +++ b/test/Conversion/TorchOnnxToTorch/simple_ops_a_to_f.mlir @@ -86,7 +86,7 @@ func.func @test_argmax_negative_axis_keepdims_random_select_last_index(%arg0: !t // CHECK: %[[C1:.*]] = torch.constant.int 1 // CHECK: %[[SUB:.*]] = torch.aten.sub.Scalar %[[ARGMAX]], %[[C3]], %[[C1]] : !torch.vtensor<[2,3,1],si64>, !torch.int, !torch.int -> !torch.vtensor<[2,3,1],si64> // CHECK: %[[ABS:.*]] = torch.aten.abs %[[SUB]] : !torch.vtensor<[2,3,1],si64> -> !torch.vtensor<[2,3,1],si64> - %0 = torch.operator "onnx.ArgMax"(%arg0) {torch.onnx.axis = -1 : si64, torch.onnx.keepdims = 1 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1],si64> + %0 = torch.operator "onnx.ArgMax"(%arg0) {torch.onnx.axis = -1 : si64, torch.onnx.keepdims = 1 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1],si64> return %0 : !torch.vtensor<[2,3,1],si64> } @@ -115,7 +115,7 @@ func.func @test_argmax_no_keepdims_random_select_last_index(%arg0: !torch.vtenso // CHECK: %[[C1_1:.*]] = torch.constant.int 1 // CHECK: %[[SUB:.*]] = torch.aten.sub.Scalar %[[ARGMAX]], %[[C2]], %[[C1_1]] : !torch.vtensor<[2,4],si64>, !torch.int, !torch.int -> !torch.vtensor<[2,4],si64> // CHECK: %[[ABS:.*]] = torch.aten.abs %[[SUB]] : !torch.vtensor<[2,4],si64> -> !torch.vtensor<[2,4],si64> - %0 = torch.operator "onnx.ArgMax"(%arg0) {torch.onnx.axis = 1 : si64, torch.onnx.keepdims = 0 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,4],si64> + %0 = torch.operator "onnx.ArgMax"(%arg0) {torch.onnx.axis = 1 : si64, torch.onnx.keepdims = 0 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,4],si64> return %0 : !torch.vtensor<[2,4],si64> } @@ -155,7 +155,7 @@ func.func @test_argmin_negative_axis_keepdims_random_select_last_index(%arg0: !t // CHECK: %[[C1:.*]] = torch.constant.int 1 // CHECK: %[[SUB:.*]] = torch.aten.sub.Scalar %[[ARGMIN]], %[[C3]], %[[C1]] : !torch.vtensor<[2,3,1],si64>, !torch.int, !torch.int -> !torch.vtensor<[2,3,1],si64> // CHECK: %[[ABS:.*]] = torch.aten.abs %[[SUB]] : !torch.vtensor<[2,3,1],si64> -> !torch.vtensor<[2,3,1],si64> - %0 = torch.operator "onnx.ArgMin"(%arg0) {torch.onnx.axis = -1 : si64, torch.onnx.keepdims = 1 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1],si64> + %0 = torch.operator "onnx.ArgMin"(%arg0) {torch.onnx.axis = -1 : si64, torch.onnx.keepdims = 1 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1],si64> return %0 : !torch.vtensor<[2,3,1],si64> } @@ -851,7 +851,7 @@ func.func @test_dynamicquantizelinear(%arg0: !torch.vtensor<[3,4,5],f32>) -> (!t // CHECK: %[[SCALE:.*]] = torch.aten.item %[[SCALE_T]] : !torch.vtensor<[],f32> -> !torch.float // CHECK: %[[QUANT:.*]] = 
torch.aten.quantize_per_tensor %arg0, %[[SCALE]], %[[ZP]], %[[CI13]] : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[3,4,5],!torch.quint8> // CHECK: %[[INTQUANT:.*]] = torch.aten.int_repr %[[QUANT]] : !torch.vtensor<[3,4,5],!torch.quint8> -> !torch.vtensor<[3,4,5],ui8> - %0:3 = torch.operator "onnx.DynamicQuantizeLinear"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> (!torch.vtensor<[3,4,5],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) + %0:3 = torch.operator "onnx.DynamicQuantizeLinear"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> (!torch.vtensor<[3,4,5],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) // CHECK: return %[[INTQUANT]], %[[SCALE_T]], %[[ZP_T]] : !torch.vtensor<[3,4,5],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8> return %0#0, %0#1, %0#2 : !torch.vtensor<[3,4,5],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8> } @@ -1035,7 +1035,7 @@ func.func @test_convinteger_without_padding(%arg0: !torch.vtensor<[1,1,3,3],ui8> // CHECK: %[[WEIGHT:.*]] = torch.aten._make_per_tensor_quantized_tensor %arg1, %[[SCALE]], %[[WEIGHT_ZP]] : !torch.vtensor<[1,1,2,2],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,2,2],!torch.quint8> // CHECK: torch.aten.convolution %[[INPUT]], %[[WEIGHT]], %[[BIAS]], %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[1,1,3,3],!torch.quint8>, !torch.vtensor<[1,1,2,2],!torch.quint8>, !torch.none, !torch.list, !torch.list, !torch.list, !torch.bool, !torch.list, !torch.int -> !torch.vtensor<[1,1,2,2],si32> %none = torch.constant.none - %0 = torch.operator "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) : (!torch.vtensor<[1,1,3,3],ui8>, !torch.vtensor<[1,1,2,2],ui8>, !torch.vtensor<[],ui8>, !torch.vtensor<[1],ui8>) -> !torch.vtensor<[1,1,2,2],si32> + %0 = torch.operator "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) : (!torch.vtensor<[1,1,3,3],ui8>, !torch.vtensor<[1,1,2,2],ui8>, !torch.vtensor<[],ui8>, !torch.vtensor<[1],ui8>) -> !torch.vtensor<[1,1,2,2],si32> return %0 : !torch.vtensor<[1,1,2,2],si32> } @@ -1066,7 +1066,7 @@ func.func @test_convinteger_with_padding(%arg0: !torch.vtensor<[1,1,3,3],ui8>, % // CHECK: %[[WEIGHT:.*]] = torch.aten._make_per_tensor_quantized_tensor %arg1, %[[SCALE]], %[[WEIGHT_ZP]] : !torch.vtensor<[1,1,2,2],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,2,2],!torch.quint8> // CHECK: torch.aten.convolution %[[INPUT]], %[[WEIGHT]], %[[BIAS]], %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[1,1,3,3],!torch.quint8>, !torch.vtensor<[1,1,2,2],!torch.quint8>, !torch.none, !torch.list, !torch.list, !torch.list, !torch.bool, !torch.list, !torch.int -> !torch.vtensor<[1,1,4,4],si32> %none = torch.constant.none - %0 = torch.operator "onnx.ConvInteger"(%arg0, %arg1, %arg2) {torch.onnx.pads = [1 : si64, 1 : si64, 1 : si64, 1 : si64]} : (!torch.vtensor<[1,1,3,3],ui8>, !torch.vtensor<[1,1,2,2],ui8>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,4,4],si32> + %0 = torch.operator "onnx.ConvInteger"(%arg0, %arg1, %arg2) {torch.onnx.pads = [1 : si64, 1 : si64, 1 : si64, 1 : si64]} : (!torch.vtensor<[1,1,3,3],ui8>, !torch.vtensor<[1,1,2,2],ui8>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,4,4],si32> return %0 : !torch.vtensor<[1,1,4,4],si32> } @@ -1597,9 +1597,9 @@ func.func @dense_constant() -> () attributes {torch.onnx_meta.ir_version = 8 : s // CHECK-LABEL: @dense_constant_i1 func.func @dense_constant_i1() -> !torch.vtensor<[5],i1> attributes 
{torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64} { - // CHECK: %[[CST:.+]] = torch.vtensor.literal(dense<[true, false, false, true, true]> : tensor<5xi1>) : !torch.vtensor<[5],i1> - // CHECK: return %[[CST]] : !torch.vtensor<[5],i1> - %0 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_> : tensor<5xi1>} : () -> !torch.vtensor<[5],i1> + // CHECK: %[[CST:.+]] = torch.vtensor.literal(dense<[true, false, false, true, true]> : tensor<5xi1>) : !torch.vtensor<[5],i1> + // CHECK: return %[[CST]] : !torch.vtensor<[5],i1> + %0 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_> : tensor<5xi1>} : () -> !torch.vtensor<[5],i1> return %0 : !torch.vtensor<[5],i1> } diff --git a/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir b/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir index 76b7e11c2d01..d280d5f6b495 100644 --- a/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir +++ b/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir @@ -782,7 +782,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %arg0, %[[CIM1]], %[[NONE]] : !torch.vtensor<[1,3],f32>, !torch.int, !torch.none -> !torch.vtensor<[1,3],f32> // CHECK: return %[[LSM]] : !torch.vtensor<[1,3],f32> - %0 = torch.operator "onnx.LogSoftmax"(%arg0) : (!torch.vtensor<[1,3],f32>) -> !torch.vtensor<[1,3],f32> + %0 = torch.operator "onnx.LogSoftmax"(%arg0) : (!torch.vtensor<[1,3],f32>) -> !torch.vtensor<[1,3],f32> return %0 : !torch.vtensor<[1,3],f32> } @@ -794,7 +794,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch // CHECK: %[[NONE:.*]] = torch.constant.none // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %arg0, %[[CI2]], %[[NONE]] : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.none -> !torch.vtensor<[3,4,5],f32> // CHECK: return %[[LSM]] : !torch.vtensor<[3,4,5],f32> - %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> + %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> return %0 : !torch.vtensor<[3,4,5],f32> } @@ -812,7 +812,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %[[FLAT_IN]], %[[CI1]], %[[NONE]] : !torch.vtensor<[3,?],f32>, !torch.int, !torch.none -> !torch.vtensor<[3,?],f32> // CHECK: %[[UNFLAT:.*]] = torch.aten.unflatten.int %[[LSM]], %[[CI1]], %[[LIST]] : !torch.vtensor<[3,?],f32>, !torch.int, !torch.list -> !torch.vtensor<[3,4,?],f32> // CHECK: return %[[UNFLAT]] : !torch.vtensor<[3,4,?],f32> - %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4,?],f32>) -> !torch.vtensor<[3,4,?],f32> + %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4,?],f32>) -> !torch.vtensor<[3,4,?],f32> return %0 : !torch.vtensor<[3,4,?],f32> } @@ -830,7 +830,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %[[FLAT_IN]], %[[CI1]], %[[NONE]] : !torch.vtensor<[3,20],f32>, !torch.int, !torch.none -> !torch.vtensor<[3,20],f32> // CHECK: %[[UNFLAT:.*]] = torch.aten.unflatten.int %[[LSM]], %[[CI1]], %[[LIST]] : !torch.vtensor<[3,20],f32>, !torch.int, !torch.list -> 
!torch.vtensor<[3,4,5],f32> // CHECK: return %[[UNFLAT]] : !torch.vtensor<[3,4,5],f32> - %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> + %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32> return %0 : !torch.vtensor<[3,4,5],f32> } diff --git a/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir b/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir index e3519a89a73e..9c0ab351297f 100644 --- a/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir +++ b/test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir @@ -1842,7 +1842,7 @@ func.func @test_random_normal() -> !torch.vtensor<[10],f32> attributes {torch.on // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00 // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00 // CHECK: torch.aten.normal_functional %[[EMPTY_TENSOR]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32> - %0 = torch.operator "onnx.RandomNormal"() {torch.onnx.dtype = 1 : si64, torch.onnx.mean = 0.000000e+00 : f32, torch.onnx.scale = 1.000000e+00 : f32, torch.onnx.shape = [10 : si64]} : () -> !torch.vtensor<[10],f32> + %0 = torch.operator "onnx.RandomNormal"() {torch.onnx.dtype = 1 : si64, torch.onnx.mean = 0.000000e+00 : f32, torch.onnx.scale = 1.000000e+00 : f32, torch.onnx.shape = [10 : si64]} : () -> !torch.vtensor<[10],f32> return %0 : !torch.vtensor<[10],f32> } @@ -1857,7 +1857,7 @@ func.func @test_random_normal_like(%arg0: !torch.vtensor<[10],f32>) -> !torch.vt // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00 // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00 // CHECK: torch.aten.normal_functional %[[CAST]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32> - %0 = torch.operator "onnx.RandomNormalLike"(%arg0) {torch.onnx.dtype = 1 : si64, torch.onnx.mean = 0.000000e+00 : f32, torch.onnx.scale = 1.000000e+00 : f32} : (!torch.vtensor<[10],f32>) -> !torch.vtensor<[10],f32> + %0 = torch.operator "onnx.RandomNormalLike"(%arg0) {torch.onnx.dtype = 1 : si64, torch.onnx.mean = 0.000000e+00 : f32, torch.onnx.scale = 1.000000e+00 : f32} : (!torch.vtensor<[10],f32>) -> !torch.vtensor<[10],f32> return %0 : !torch.vtensor<[10],f32> } @@ -1873,7 +1873,7 @@ func.func @test_random_uniform() -> !torch.vtensor<[10],f32> attributes {torch.o // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00 // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00 // CHECK: torch.aten.uniform %[[EMPTY_TENSOR]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32> - %0 = torch.operator "onnx.RandomUniform"() {torch.onnx.dtype = 1 : si64, torch.onnx.high = 1.000000e+00 : f32, torch.onnx.low = 0.000000e+00 : f32, torch.onnx.shape = [10 : si64]} : () -> !torch.vtensor<[10],f32> + %0 = torch.operator "onnx.RandomUniform"() {torch.onnx.dtype = 1 : si64, torch.onnx.high = 1.000000e+00 : f32, torch.onnx.low = 0.000000e+00 : f32, torch.onnx.shape = [10 : si64]} : () -> !torch.vtensor<[10],f32> return %0 : !torch.vtensor<[10],f32> } @@ -1888,6 +1888,6 @@ func.func @test_random_uniform_like(%arg0: !torch.vtensor<[10],f32>) -> !torch.v // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00 // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00 // CHECK: 
torch.aten.uniform %[[CAST]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32> - %0 = torch.operator "onnx.RandomUniformLike"(%arg0) {torch.onnx.dtype = 1 : si64, torch.onnx.high = 1.000000e+00 : f32, torch.onnx.low = 0.000000e+00 : f32} : (!torch.vtensor<[10],f32>) -> !torch.vtensor<[10],f32> + %0 = torch.operator "onnx.RandomUniformLike"(%arg0) {torch.onnx.dtype = 1 : si64, torch.onnx.high = 1.000000e+00 : f32, torch.onnx.low = 0.000000e+00 : f32} : (!torch.vtensor<[10],f32>) -> !torch.vtensor<[10],f32> return %0 : !torch.vtensor<[10],f32> } diff --git a/test/Conversion/TorchOnnxToTorch/unsupported_fb_opt_ops.mlir b/test/Conversion/TorchOnnxToTorch/unsupported_fb_opt_ops.mlir index 0a8bbfe1a8e3..b4f9dfbb30f2 100644 --- a/test/Conversion/TorchOnnxToTorch/unsupported_fb_opt_ops.mlir +++ b/test/Conversion/TorchOnnxToTorch/unsupported_fb_opt_ops.mlir @@ -45,4 +45,4 @@ func.func @cumsum_operation(%arg0: !torch.vtensor<[2,3],f64>, -> !torch.vtensor<[2,3],f64> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} { %212 = torch.operator "onnx.CumSum"(%arg0, %arg1) : (!torch.vtensor<[2,3],f64>, !torch.vtensor<[],si32>) -> !torch.vtensor<[2,3],f64> return %212 : !torch.vtensor<[2,3],f64> -} \ No newline at end of file +} diff --git a/test/Conversion/TorchToLinalg/flatten.mlir b/test/Conversion/TorchToLinalg/flatten.mlir index e76ada25474e..a2648e2b10f9 100644 --- a/test/Conversion/TorchToLinalg/flatten.mlir +++ b/test/Conversion/TorchToLinalg/flatten.mlir @@ -82,5 +82,3 @@ func.func @torch.aten.flatten.using_ints$rank0(%arg0: !torch.vtensor<[],f32>) -> %0 = torch.aten.flatten.using_ints %arg0, %int0, %int0 : !torch.vtensor<[],f32>, !torch.int, !torch.int -> !torch.vtensor<[1],f32> return %0 : !torch.vtensor<[1],f32> } - - diff --git a/test/Conversion/TorchToLinalg/gridsampler.mlir b/test/Conversion/TorchToLinalg/gridsampler.mlir index 40a2dae45aa9..456cfc934471 100644 --- a/test/Conversion/TorchToLinalg/gridsampler.mlir +++ b/test/Conversion/TorchToLinalg/gridsampler.mlir @@ -86,4 +86,3 @@ func.func @grid_sampler3(%arg0: !torch.vtensor<[?,?,?,?],f32>, %arg1: !torch.vte %4 = torch.aten.grid_sampler %arg0, %arg1, %int0, %int1, %false : !torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[?,?,?,?],f32>, !torch.int, !torch.int, !torch.bool -> !torch.vtensor<[?,?,?,?],f32> return %4 : !torch.vtensor<[?,?,?,?],f32> } - diff --git a/test/Conversion/TorchToLinalg/view.mlir b/test/Conversion/TorchToLinalg/view.mlir index 7cad9ffe33f6..c606328d339b 100644 --- a/test/Conversion/TorchToLinalg/view.mlir +++ b/test/Conversion/TorchToLinalg/view.mlir @@ -254,4 +254,3 @@ func.func @torch.aten.view$dynamicInferredSame(%arg0: !torch.vtensor<[10,?,2,3], %1 = torch.aten.view %arg0, %0 : !torch.vtensor<[10,?,2,3],f32>, !torch.list -> !torch.vtensor<[2,5,?,6],f32> return %1 : !torch.vtensor<[2,5,?,6],f32> } - diff --git a/test/Conversion/TorchToStablehlo/elementwise.mlir b/test/Conversion/TorchToStablehlo/elementwise.mlir index 367985233577..814770bd613c 100644 --- a/test/Conversion/TorchToStablehlo/elementwise.mlir +++ b/test/Conversion/TorchToStablehlo/elementwise.mlir @@ -636,4 +636,4 @@ func.func @torch.aten.div.Tensor_mode$floor(%arg0: !torch.vtensor<[?,?,?,?],f32> func.func @torch.aten.abs(%arg0: !torch.vtensor<[15,15],si64>) -> !torch.vtensor<[15,15],si64>{ %0 = torch.aten.abs %arg0 : !torch.vtensor<[15,15],si64> -> 
!torch.vtensor<[15,15],si64> return %0 : !torch.vtensor<[15,15],si64> -} \ No newline at end of file +} diff --git a/test/Conversion/TorchToStablehlo/gather.mlir b/test/Conversion/TorchToStablehlo/gather.mlir index ea4ca9b8272e..a88b6e375071 100644 --- a/test/Conversion/TorchToStablehlo/gather.mlir +++ b/test/Conversion/TorchToStablehlo/gather.mlir @@ -63,4 +63,3 @@ func.func @torch.aten.embedding$rank_two_indices(%weight: !torch.vtensor<[?,?],f %ret = torch.aten.embedding %weight, %indices, %int-1, %false, %false : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,1], si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[?,1,?],f32> return %ret: !torch.vtensor<[?,1,?],f32> } - diff --git a/test/Conversion/TorchToStablehlo/scatter.mlir b/test/Conversion/TorchToStablehlo/scatter.mlir index a3fb1af6df03..432fc0c86c5f 100644 --- a/test/Conversion/TorchToStablehlo/scatter.mlir +++ b/test/Conversion/TorchToStablehlo/scatter.mlir @@ -32,4 +32,4 @@ func.func @forward(%arg0: !torch.vtensor<[?,?],si64>, %arg1: !torch.vtensor<[?,? %int0 = torch.constant.int 0 %0 = torch.aten.scatter.src %arg0, %int0, %arg1, %arg2 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.vtensor<[?,?],si64>, !torch.vtensor<[?,?],si64> -> !torch.vtensor<[?,?],si64> return %0 : !torch.vtensor<[?,?],si64> -} \ No newline at end of file +} diff --git a/test/Conversion/TorchToStablehlo/view_like.mlir b/test/Conversion/TorchToStablehlo/view_like.mlir index 30f33a4fbcea..ab54d2764b66 100644 --- a/test/Conversion/TorchToStablehlo/view_like.mlir +++ b/test/Conversion/TorchToStablehlo/view_like.mlir @@ -565,4 +565,3 @@ func.func @torch.aten.unsqueeze$from_end(%arg0: !torch.vtensor<[?,?,?,?],f32>) - %0 = torch.aten.unsqueeze %arg0, %int-2 : !torch.vtensor<[?,?,?,?],f32>, !torch.int -> !torch.vtensor<[?,?,?,1,?],f32> return %0 : !torch.vtensor<[?,?,?,1,?],f32> } - diff --git a/test/Conversion/TorchToTosa/cast_fp32_to_fp16.mlir b/test/Conversion/TorchToTosa/cast_fp32_to_fp16.mlir index 5504ac0e4002..9d119a1009d5 100644 --- a/test/Conversion/TorchToTosa/cast_fp32_to_fp16.mlir +++ b/test/Conversion/TorchToTosa/cast_fp32_to_fp16.mlir @@ -1,6 +1,6 @@ // RUN: torch-mlir-opt <%s -convert-torch-to-tosa -split-input-file -// CHECK: %{{.*}} = tosa.cast %{{.*}} : (tensor<1x32x220x220xf32>) -> tensor<1x32x220x220xf16> +// CHECK: %{{.*}} = tosa.cast %{{.*}} : (tensor<1x32x220x220xf32>) -> tensor<1x32x220x220xf16> func.func @forward(%arg0: !torch.vtensor<[1,32,220,220],f32>) -> !torch.vtensor<[1,32,220,220],f16> { %int5 = torch.constant.int 5 %false = torch.constant.bool false @@ -8,5 +8,3 @@ func.func @forward(%arg0: !torch.vtensor<[1,32,220,220],f32>) -> !torch.vtensor< %out = torch.aten.to.dtype %arg0, %int5, %false, %false, %none : !torch.vtensor<[1,32,220,220],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,220,220],f16> return %out : !torch.vtensor<[1,32,220,220],f16> } - - diff --git a/test/Conversion/TorchToTosa/conv2d_transpose.mlir b/test/Conversion/TorchToTosa/conv2d_transpose.mlir index 7f0d5e2ab25b..7c24dc896630 100644 --- a/test/Conversion/TorchToTosa/conv2d_transpose.mlir +++ b/test/Conversion/TorchToTosa/conv2d_transpose.mlir @@ -15,4 +15,3 @@ func.func @forward(%input: !torch.vtensor<[1,64,1,100],f32>) -> !torch.vtensor<[ %output = torch.aten.convolution %input, %weight, %bias, %stride, %int1x1, %int1x1, %true, %int1x1, %int1 : !torch.vtensor<[1,64,1,100],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list, !torch.list, !torch.list, !torch.bool, !torch.list, !torch.int -> 
!torch.vtensor<[1,64,2,200],f32> return %output : !torch.vtensor<[1,64,2,200],f32> } - diff --git a/test/Dialect/Torch/canonicalize.mlir b/test/Dialect/Torch/canonicalize.mlir index a9be9b766a09..7fd4e9832394 100644 --- a/test/Dialect/Torch/canonicalize.mlir +++ b/test/Dialect/Torch/canonicalize.mlir @@ -1524,7 +1524,7 @@ func.func @torch.aten.tensor.float() -> !torch.vtensor<[],f32> { // CHECK-NEXT: torch.vtensor.literal(dense<45> : tensor) : !torch.vtensor<[],si32> func.func @torch.aten.tensor.int() -> !torch.vtensor<[],si32> { %none = torch.constant.none - %false = torch.constant.bool false + %false = torch.constant.bool false %int45 = torch.constant.int 45 %67 = torch.aten.tensor.int %int45, %none, %none, %false : !torch.int, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[],si32> return %67 : !torch.vtensor<[],si32> @@ -2091,7 +2091,7 @@ func.func @torch.aten.broadcast_to$fold(%arg0: !torch.vtensor<[3,4,2],f32>) -> ! // ----- // CHECK-LABEL: func.func @torch.aten.broadcast_to$fold_splat -// CHECK: %[[CST:.+]] = torch.vtensor.literal(dense<3.000000e+00> : tensor<3x4x2xf32>) : !torch.vtensor<[3,4,2],f32> +// CHECK: %[[CST:.+]] = torch.vtensor.literal(dense<3.000000e+00> : tensor<3x4x2xf32>) : !torch.vtensor<[3,4,2],f32> // CHECK: return %[[CST]] func.func @torch.aten.broadcast_to$fold_splat() -> !torch.vtensor<[3,4,2],f32> { %tensor = torch.vtensor.literal(dense<3.0> : tensor<1x4x1xf32>) : !torch.vtensor<[1,4,1],f32> diff --git a/test/Dialect/Torch/ops.mlir b/test/Dialect/Torch/ops.mlir index 623217fd22dc..ecf5e626fb1d 100644 --- a/test/Dialect/Torch/ops.mlir +++ b/test/Dialect/Torch/ops.mlir @@ -186,4 +186,3 @@ func.func @torch.permute$negative_index_valid (%arg0: !torch.vtensor<[1,2,3],f32 %3 = torch.aten.permute %arg0, %perm : !torch.vtensor<[1,2,3],f32>, !torch.list -> !torch.vtensor<[1,2,3],f32> return %3 : !torch.vtensor<[1,2,3],f32> } - diff --git a/test/python/fx_importer/v2.3/lit.local.cfg b/test/python/fx_importer/v2.3/lit.local.cfg index 00c613754f64..312965e07fa7 100644 --- a/test/python/fx_importer/v2.3/lit.local.cfg +++ b/test/python/fx_importer/v2.3/lit.local.cfg @@ -4,6 +4,6 @@ try: import torch if torch.__version__ >= "2.3.0": print("Enabling Torch v2.3+ tests") - config.unsupported = False + config.unsupported = False except ModuleNotFoundError: ... diff --git a/utils/bazel/torch-mlir-overlay/.bazelignore b/utils/bazel/torch-mlir-overlay/.bazelignore index b19530a49158..8bd2e85c3b1c 100644 --- a/utils/bazel/torch-mlir-overlay/.bazelignore +++ b/utils/bazel/torch-mlir-overlay/.bazelignore @@ -2,6 +2,6 @@ # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# Skip the following directories when overlaying +# Skip the following directories when overlaying utils/bazel externals
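---
Note for reviewers: a formatting pass like this one can be reproduced locally with pre-commit. A minimal sketch, assuming the repository's .pre-commit-config.yaml already defines the hooks applied here (the exact hook set is an assumption; it is not shown in this patch):

    # Install the tool and (optionally) register it as a git pre-commit hook.
    pip install pre-commit
    pre-commit install

    # Run every configured hook over the entire tree, not just staged files;
    # this is the "first run of pre-commit" the commit message refers to.
    pre-commit run --all-files

Per the commit message, parts 2 and 3 of this series apply the same treatment to Python files and the remaining C++ files.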