From 60021161b015a556291ef554602f7d3235bbbb0c Mon Sep 17 00:00:00 2001 From: rahul shrivastava Date: Thu, 9 Jan 2025 10:20:44 -0800 Subject: [PATCH 1/6] LTC On Signed-off-by: rahul shrivastava --- build_tools/ci/build_posix.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_tools/ci/build_posix.sh b/build_tools/ci/build_posix.sh index 36e9057c973f..ea3e570c8b7e 100755 --- a/build_tools/ci/build_posix.sh +++ b/build_tools/ci/build_posix.sh @@ -50,7 +50,7 @@ cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \ -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$repo_root" \ -DLLVM_TARGETS_TO_BUILD=host \ -DMLIR_ENABLE_BINDINGS_PYTHON=ON \ - -DTORCH_MLIR_ENABLE_LTC=OFF \ + -DTORCH_MLIR_ENABLE_LTC=ON \ -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON echo "::endgroup::" From f96bb22403f0ea7c081204c098580e83d6ade9dd Mon Sep 17 00:00:00 2001 From: rahul shrivastava Date: Thu, 9 Jan 2025 19:43:08 -0800 Subject: [PATCH 2/6] More changes Signed-off-by: rahul shrivastava --- .../python_deploy/build_linux_packages.sh | 22 +++++++++---------- projects/CMakeLists.txt | 6 +++++ setup.py | 3 +++ 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/build_tools/python_deploy/build_linux_packages.sh b/build_tools/python_deploy/build_linux_packages.sh index aa687bab447c..3107cd626288 100755 --- a/build_tools/python_deploy/build_linux_packages.sh +++ b/build_tools/python_deploy/build_linux_packages.sh @@ -217,10 +217,10 @@ function build_in_tree() { local torch_version="$3" local enable_ltc="ON" - if [[ "${torch_version}" == "stable" ]] - then - enable_ltc="OFF" - fi + #if [[ "${torch_version}" == "stable" ]] + #then + # enable_ltc="OFF" + #fi echo ":::: Build in-tree Torch from binary: $torch_from_bin with Python: $python_version" cmake -GNinja -B/main_checkout/torch-mlir/build \ @@ -369,10 +369,10 @@ function build_out_of_tree() { echo ":::: Build out-of-tree Torch from binary: $torch_from_bin with Python: $python_version ($torch_version)" local enable_ltc="ON" - if [[ "${torch_version}" == "stable" ]] - then - enable_ltc="OFF" - fi + #if [[ "${torch_version}" == "stable" ]] + #then + # enable_ltc="OFF" + #fi if [ ! -d "/main_checkout/torch-mlir/llvm-build/lib/cmake/mlir/" ] then @@ -396,7 +396,7 @@ function build_out_of_tree() { fi # Incremental builds come here directly and can run cmake if required. - cmake -GNinja -B/main_checkout/torch-mlir/build_oot \ + cmake -GNinja --verbose -B/main_checkout/torch-mlir/build_oot \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_COMPILER_LAUNCHER=ccache \ @@ -433,7 +433,7 @@ function clean_build() { function build_torch_mlir_ext() { # Disable LTC build for releases - export TORCH_MLIR_ENABLE_LTC=0 + export TORCH_MLIR_ENABLE_LTC=1 local torch_version="$1" case $torch_version in nightly) @@ -472,7 +472,7 @@ function run_audit_wheel() { function build_torch_mlir() { # Disable LTC build for releases - export TORCH_MLIR_ENABLE_LTC=0 + export TORCH_MLIR_ENABLE_LTC=1 python -m pip install --no-cache-dir -r /main_checkout/torch-mlir/build-requirements.txt CMAKE_GENERATOR=Ninja \ TORCH_MLIR_PYTHON_PACKAGE_VERSION=${TORCH_MLIR_PYTHON_PACKAGE_VERSION} \ diff --git a/projects/CMakeLists.txt b/projects/CMakeLists.txt index 572c4535b7a5..9699b9b662b0 100644 --- a/projects/CMakeLists.txt +++ b/projects/CMakeLists.txt @@ -8,6 +8,12 @@ endif() # PyTorch # Configure PyTorch if we have any features enabled which require it. 
################################################################################
+
+  message(STATUS "CHECK RAHUL LTC ENABLED")
+if(TORCH_MLIR_ENABLE_LTC)
+ message(STATUS "RAHUL LTC ENABLED")
+endif()
+
 if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER OR TORCH_MLIR_ENABLE_LTC)
   if (NOT TORCH_MLIR_USE_INSTALLED_PYTORCH)
diff --git a/setup.py b/setup.py
index d62f08073b58..1bd383fe5d8c 100644
--- a/setup.py
+++ b/setup.py
@@ -74,6 +74,7 @@ def _check_env_flag(name: str, default=None) -> bool:
 # If true, enable LTC build by default
 TORCH_MLIR_ENABLE_LTC = _check_env_flag("TORCH_MLIR_ENABLE_LTC", True)
+print("RAHUL ", TORCH_MLIR_ENABLE_LTC)
 TORCH_MLIR_ENABLE_ONLY_MLIR_PYTHON_BINDINGS = _check_env_flag(
 "TORCH_MLIR_ENABLE_ONLY_MLIR_PYTHON_BINDINGS", True
 )
@@ -122,6 +123,8 @@ def cmake_build(self, cmake_build_dir):
 f"-DTORCH_MLIR_ENABLE_LTC={'ON' if TORCH_MLIR_ENABLE_LTC else 'OFF'}",
 f"-DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS={'OFF' if TORCH_MLIR_ENABLE_ONLY_MLIR_PYTHON_BINDINGS else 'ON'}",
 ]
+
+ print("RAHUL iconfg ", cmake_config_args)
 if LLVM_INSTALL_DIR:
 cmake_config_args += [
 f"-DMLIR_DIR='{LLVM_INSTALL_DIR}/lib/cmake/mlir/'",

From d927e0da2c08c4864c90950b8cb8ede8219271f2 Mon Sep 17 00:00:00 2001
From: rahul shrivastava
Date: Thu, 9 Jan 2025 19:55:05 -0800
Subject: [PATCH 3/6] Hardcode -DTORCH_MLIR_ENABLE_LTC=ON in package builds

Signed-off-by: rahul shrivastava
---
 build_tools/python_deploy/build_linux_packages.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/build_tools/python_deploy/build_linux_packages.sh b/build_tools/python_deploy/build_linux_packages.sh
index 3107cd626288..1f049365bc4e 100755
--- a/build_tools/python_deploy/build_linux_packages.sh
+++ b/build_tools/python_deploy/build_linux_packages.sh
@@ -238,7 +238,7 @@ function build_in_tree() {
 -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="/main_checkout/torch-mlir" \
 -DLLVM_TARGETS_TO_BUILD=host \
 -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
- -DTORCH_MLIR_ENABLE_LTC=${enable_ltc} \
+ -DTORCH_MLIR_ENABLE_LTC=ON \
 -DTORCH_MLIR_USE_INSTALLED_PYTORCH="$torch_from_bin" \
 -DTORCH_MLIR_SRC_PYTORCH_REPO=${TORCH_MLIR_SRC_PYTORCH_REPO} \
 -DTORCH_MLIR_SRC_PYTORCH_BRANCH=${TORCH_MLIR_SRC_PYTORCH_BRANCH} \
@@ -407,7 +407,7 @@ function build_out_of_tree() {
 -DLLVM_DIR="/main_checkout/torch-mlir/llvm-build/lib/cmake/llvm/" \
 -DMLIR_DIR="/main_checkout/torch-mlir/llvm-build/lib/cmake/mlir/" \
 -DMLIR_ENABLE_BINDINGS_PYTHON=OFF \
- -DTORCH_MLIR_ENABLE_LTC=${enable_ltc} \
+ -DTORCH_MLIR_ENABLE_LTC=ON \
 -DTORCH_MLIR_USE_INSTALLED_PYTORCH="$torch_from_bin" \
 -DTORCH_MLIR_SRC_PYTORCH_REPO=${TORCH_MLIR_SRC_PYTORCH_REPO} \
 -DTORCH_MLIR_SRC_PYTORCH_BRANCH=${TORCH_MLIR_SRC_PYTORCH_BRANCH} \

From daa7417af3a267ca70490a0c7611a34460073d4b Mon Sep 17 00:00:00 2001
From: rahul shrivastava
Date: Fri, 10 Jan 2025 00:57:49 -0800
Subject: [PATCH 4/6] Remove rrelu_with_noise ops

Signed-off-by: rahul shrivastava
---
 CMakeLists.txt | 2 +
 .../python_deploy/build_linux_packages.sh | 2 +-
 .../Dialect/Torch/IR/GeneratedTorchOps.td | 111 ------------
 .../Transforms/AbstractInterpLibrary.cpp | 51 ------
 .../Torch/Transforms/DecomposeComplexOps.cpp | 161 ------------------
 .../Transforms/LowerToBackendContract.cpp | 3 -
 projects/CMakeLists.txt | 6 +-
 projects/pt1/e2e_testing/xfail_sets.py | 31 ----
 .../build_tools/abstract_interp_lib_gen.py | 32 ----
 .../build_tools/torch_ods_gen.py | 7 -
 .../test_suite/backprop.py | 160 -----------------
 11 files changed, 8 insertions(+), 558 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 822afa0af17e..770f4e5d3517 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -24,6 +24,8 @@ endif()
project(torch-mlir LANGUAGES CXX C) set(CMAKE_C_STANDARD 11) set(CMAKE_CXX_STANDARD 17) +set(CMAKE_VERBOSE_MAKEFILE ON) + include(CMakeDependentOption) diff --git a/build_tools/python_deploy/build_linux_packages.sh b/build_tools/python_deploy/build_linux_packages.sh index 1f049365bc4e..a20358b73978 100755 --- a/build_tools/python_deploy/build_linux_packages.sh +++ b/build_tools/python_deploy/build_linux_packages.sh @@ -223,7 +223,7 @@ function build_in_tree() { #fi echo ":::: Build in-tree Torch from binary: $torch_from_bin with Python: $python_version" - cmake -GNinja -B/main_checkout/torch-mlir/build \ + cmake -GNinja -B/main_checkout/torch-mlir/build \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ diff --git a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td index 7acf4a5ed948..cd13022941bc 100644 --- a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td +++ b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td @@ -309,59 +309,6 @@ def Torch_AtenRrelu_Op : Torch_Op<"aten.rrelu_", [ }]; } -def Torch_AtenRreluWithNoiseOp : Torch_Op<"aten.rrelu_with_noise", [ - AllowsTypeRefinement - ]> { - let summary = "Generated op for `aten::rrelu_with_noise : (Tensor, Tensor, Scalar, Scalar, bool, Generator?) -> (Tensor)`"; - let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchTensorType:$noise, - AnyTorchScalarType:$lower, - AnyTorchScalarType:$upper, - Torch_BoolType:$training, - AnyTorchOptionalGeneratorType:$generator - ); - let results = (outs - AnyTorchOptionalTensorType:$result - ); - let hasCustomAssemblyFormat = 1; - let extraClassDefinition = [{ - ParseResult AtenRreluWithNoiseOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 6, 1); - } - void AtenRreluWithNoiseOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 6, 1); - } - }]; -} - -def Torch_AtenRreluWithNoise_Op : Torch_Op<"aten.rrelu_with_noise_", [ - IsTrailingUnderscoreInplaceVariant, - AllowsTypeRefinement - ]> { - let summary = "Generated op for `aten::rrelu_with_noise_ : (Tensor, Tensor, Scalar, Scalar, bool, Generator?) 
-> (Tensor)`"; - let arguments = (ins - Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$noise, - AnyTorchScalarType:$lower, - AnyTorchScalarType:$upper, - Torch_BoolType:$training, - AnyTorchOptionalGeneratorType:$generator - ); - let results = (outs - AnyTorchOptionalNonValueTensorType:$result - ); - let hasCustomAssemblyFormat = 1; - let extraClassDefinition = [{ - ParseResult AtenRreluWithNoise_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 6, 1); - } - void AtenRreluWithNoise_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 6, 1); - } - }]; -} - def Torch_AtenCeluOp : Torch_Op<"aten.celu", [ AllowsTypeRefinement, HasValueSemantics, @@ -17512,64 +17459,6 @@ def Torch_AtenLeakyReluBackwardOp : Torch_Op<"aten.leaky_relu_backward", [ }]; } -def Torch_AtenRreluWithNoiseBackwardOp : Torch_Op<"aten.rrelu_with_noise_backward", [ - AllowsTypeRefinement, - HasValueSemantics, - ReadOnly - ]> { - let summary = "Generated op for `aten::rrelu_with_noise_backward : (Tensor, Tensor, Tensor, Scalar, Scalar, bool, bool) -> (Tensor)`"; - let arguments = (ins - AnyTorchTensorType:$grad_output, - AnyTorchTensorType:$self, - AnyTorchTensorType:$noise, - AnyTorchScalarType:$lower, - AnyTorchScalarType:$upper, - Torch_BoolType:$training, - Torch_BoolType:$self_is_result - ); - let results = (outs - AnyTorchOptionalTensorType:$result - ); - let hasCustomAssemblyFormat = 1; - let extraClassDefinition = [{ - ParseResult AtenRreluWithNoiseBackwardOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 7, 1); - } - void AtenRreluWithNoiseBackwardOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 7, 1); - } - }]; -} - -def Torch_AtenRreluWithNoiseFunctionalOp : Torch_Op<"aten.rrelu_with_noise_functional", [ - AllowsTypeRefinement, - HasValueSemantics, - ReadOnly - ]> { - let summary = "Generated op for `aten::rrelu_with_noise_functional : (Tensor, Tensor, Scalar, Scalar, bool, Generator?) 
-> (Tensor, Tensor)`";
- let arguments = (ins
- AnyTorchTensorType:$self,
- AnyTorchTensorType:$noise,
- AnyTorchScalarType:$lower,
- AnyTorchScalarType:$upper,
- Torch_BoolType:$training,
- AnyTorchOptionalGeneratorType:$generator
- );
- let results = (outs
- AnyTorchOptionalTensorType:$result0,
- AnyTorchOptionalTensorType:$noise_out
- );
- let hasCustomAssemblyFormat = 1;
- let extraClassDefinition = [{
- ParseResult AtenRreluWithNoiseFunctionalOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseDefaultTorchOp(parser, result, 6, 2);
- }
- void AtenRreluWithNoiseFunctionalOp::print(OpAsmPrinter &printer) {
- printDefaultTorchOp(printer, *this, 6, 2);
- }
- }];
-}
-
 def Torch_AtenQuantizePerChannelOp : Torch_Op<"aten.quantize_per_channel", [
 AllowsTypeRefinement,
 HasValueSemantics,
diff --git a/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp b/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
index ae164e00ab2b..601650987d55 100644
--- a/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
+++ b/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
@@ -6694,10 +6694,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
" return %0 : !torch.list<int>\n"
" }\n"
-" func.func @\"__torch_mlir_shape_fn.aten.rrelu_with_noise_backward\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.list<int>, %arg3: !torch.float, %arg4: !torch.float, %arg5: !torch.bool, %arg6: !torch.bool) -> !torch.list<int> {\n"
-" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
-" return %0 : !torch.list<int>\n"
-" }\n"
" func.func @\"__torch_mlir_shape_fn.aten.hardtanh_backward\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.float, %arg3: !torch.float) -> !torch.list<int> {\n"
" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
" return %0 : !torch.list<int>\n"
" }\n"
@@ -7300,16 +7296,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
" return %0 : !torch.list<int>\n"
" }\n"
-" func.func @\"__torch_mlir_shape_fn.aten.rrelu_with_noise\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.float, %arg3: !torch.float, %arg4: !torch.bool, %arg5: !torch.any) -> !torch.list<int> {\n"
-" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
-" return %0 : !torch.list<int>\n"
-" }\n"
-" func.func @\"__torch_mlir_shape_fn.aten.rrelu_with_noise_functional\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.float, %arg3: !torch.float, %arg4: !torch.bool, %arg5: !torch.any) -> !torch.tuple<list<int>, list<int>> {\n"
-" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
-" %1 = call @__torch__.torch.jit._shape_functions.unary(%arg1) : (!torch.list<int>) -> !torch.list<int>\n"
-" %2 = torch.prim.TupleConstruct %0, %1 : !torch.list<int>, !torch.list<int> -> !torch.tuple<list<int>, list<int>>\n"
-" return %2 : !torch.tuple<list<int>, list<int>>\n"
-" }\n"
" func.func @\"__torch_mlir_shape_fn.aten.selu\"(%arg0: !torch.list<int>) -> !torch.list<int> {\n"
" %0 = call @__torch__.torch.jit._shape_functions.unary(%arg0) : (!torch.list<int>) -> !torch.list<int>\n"
" return %0 : !torch.list<int>\n"
" }\n"
@@ -12428,14 +12414,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %4 = call @__torch__.torch_mlir.jit_ir_importer.build_tools.library_generator.promote_dtypes(%2, %3) : (!torch.list<optional<int>>, !torch.list<int>) -> !torch.int\n"
" return %4 : !torch.int\n"
" }\n"
-" func.func @\"__torch_mlir_dtype_fn.aten.rrelu_with_noise_backward\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.tuple<int, int>, %arg3: !torch.number, %arg4: !torch.number, %arg5: !torch.bool, %arg6: !torch.bool) -> !torch.int {\n"
-" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
-" %1:2 = torch.prim.TupleUnpack %arg1 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
-" %2 = torch.prim.ListConstruct %0#0, %1#0 : (!torch.int, !torch.int) -> !torch.list<optional<int>>\n"
-" %3 = torch.prim.ListConstruct %0#1, %1#1 : (!torch.int, !torch.int) -> !torch.list<int>\n"
-" %4 = call @__torch__.torch_mlir.jit_ir_importer.build_tools.library_generator.promote_dtypes(%2, %3) : (!torch.list<optional<int>>, !torch.list<int>) -> !torch.int\n"
-" return %4 : !torch.int\n"
-" }\n"
" func.func @\"__torch_mlir_dtype_fn.aten.lift_fresh_copy\"(%arg0: !torch.tuple<int, int>) -> !torch.int {\n"
" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
" return %0#1 : !torch.int\n"
" }\n"
@@ -12612,35 +12590,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
" return %0#1 : !torch.int\n"
" }\n"
-" func.func @\"__torch_mlir_dtype_fn.aten.rrelu_with_noise\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.number, %arg3: !torch.number, %arg4: !torch.bool, %arg5: !torch.any) -> !torch.int {\n"
-" %none = torch.constant.none\n"
-" %str = torch.constant.str \"AssertionError: \"\n"
-" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
-" %1:2 = torch.prim.TupleUnpack %arg1 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
-" %2 = torch.aten.eq.int %0#0, %1#0 : !torch.int, !torch.int -> !torch.bool\n"
-" torch.prim.If %2 -> () {\n"
-" torch.prim.If.yield\n"
-" } else {\n"
-" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
-" torch.prim.If.yield\n"
-" }\n"
-" return %0#1 : !torch.int\n"
-" }\n"
-" func.func @\"__torch_mlir_dtype_fn.aten.rrelu_with_noise_functional\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.number, %arg3: !torch.number, %arg4: !torch.bool, %arg5: !torch.any) -> !torch.tuple<int, int> {\n"
-" %none = torch.constant.none\n"
-" %str = torch.constant.str \"AssertionError: \"\n"
-" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
-" %1:2 = torch.prim.TupleUnpack %arg1 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
-" %2 = torch.aten.eq.int %0#0, %1#0 : !torch.int, !torch.int -> !torch.bool\n"
-" torch.prim.If %2 -> () {\n"
-" torch.prim.If.yield\n"
-" } else {\n"
-" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
-" torch.prim.If.yield\n"
-" }\n"
-" %3 = torch.prim.TupleConstruct %0#1, %1#1 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
-" return %3 : !torch.tuple<int, int>\n"
-" }\n"
" func.func @\"__torch_mlir_dtype_fn.aten.relu6\"(%arg0: !torch.tuple<int, int>) -> !torch.int {\n"
" %none = torch.constant.none\n"
" %str = torch.constant.str \"AssertionError: \"\n"
diff --git a/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp b/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
index 91d6b5eb17fc..d1429e809e77 100644
--- a/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
+++ b/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
@@ -3675,59 +3675,6 @@ class DecomposeAtenLeakyReluBackwardOp
 };
 } // namespace
 
-namespace {
-class DecomposeAtenRreluWithNoiseBackwardOp
- : public OpRewritePattern<AtenRreluWithNoiseBackwardOp> {
-public:
- using OpRewritePattern<AtenRreluWithNoiseBackwardOp>::OpRewritePattern;
- LogicalResult matchAndRewrite(AtenRreluWithNoiseBackwardOp op,
- PatternRewriter &rewriter) const override {
- Location loc = op.getLoc();
- Value gradOutput = op.getGradOutput();
- Value self = op.getSelf();
- Value noise = op.getNoise();
- auto resType = cast<BaseTensorType>(op.getType());
- if (!resType.hasDtype()) {
- return rewriter.notifyMatchFailure(op, "result should have dtype");
- }
-
- bool training;
- if (!matchPattern(op.getTraining(), m_TorchConstantBool(&training))) {
- return rewriter.notifyMatchFailure(op,
- "training should be a bool constant");
- }
-
- bool selfIsResult = false;
- if (!matchPattern(op.getSelfIsResult(),
- m_TorchConstantBool(&selfIsResult)) ||
- selfIsResult)
- return rewriter.notifyMatchFailure(
- op, "unimplemented: self_is_result should be false");
-
- double lower, upper;
- if (!matchPattern(op.getLower(), m_TorchConstantFloat(&lower)) ||
- !matchPattern(op.getUpper(), m_TorchConstantFloat(&upper))) {
- return rewriter.notifyMatchFailure(
- op, "lower and upper should be float constants");
- }
-
- if (training && (upper - lower > 0.000001)) {
- Value rreluWithNoiseBackwardOutput =
- rewriter.create<AtenMulTensorOp>(loc, resType, gradOutput, noise);
- rewriter.replaceOp(op, rreluWithNoiseBackwardOutput);
- } else {
- double negative_slope = (upper + lower) / 2;
- Value cstNegativeSlope = rewriter.create<ConstantFloatOp>(
- loc, rewriter.getF64FloatAttr(negative_slope));
- rewriter.replaceOpWithNewOp<AtenLeakyReluBackwardOp>(
- op, resType, gradOutput, self, cstNegativeSlope,
- op.getSelfIsResult());
- }
- return success();
- }
-};
-} // namespace
-
 namespace {
 class DecomposeAtenPreluOp : public OpRewritePattern<AtenPreluOp> {
 public:
@@ -3823,109 +3770,6 @@ class DecomposeAtenRreluOp : public OpRewritePattern<AtenRreluOp> {
 };
 } // namespace
 
-namespace {
-class DecomposeAtenRreluWithNoiseOp
- : public OpRewritePattern<AtenRreluWithNoiseOp> {
-public:
- using OpRewritePattern<AtenRreluWithNoiseOp>::OpRewritePattern;
- LogicalResult matchAndRewrite(AtenRreluWithNoiseOp op,
- PatternRewriter &rewriter) const override {
- Location loc = op.getLoc();
- Value self = op.getSelf();
- Value noise = op.getNoise();
- Value lower = op.getLower();
- Value upper = op.getUpper();
- auto resType = cast<BaseTensorType>(op.getType());
- Value cstNone = rewriter.create<ConstantNoneOp>(loc);
- Value cstFalse =
- rewriter.create<ConstantBoolOp>(loc, rewriter.getBoolAttr(false));
- Value result =
- rewriter
- .create<AtenRreluWithNoiseFunctionalOp>(
- loc, resType, self, noise, lower, upper, cstFalse, cstNone)
- ->getResult(0);
- rewriter.replaceOp(op, result);
- return success();
- }
-};
-} // namespace
-
-namespace {
-class DecomposeAtenRreluWithNoiseFunctionalOp
- : public OpRewritePattern<AtenRreluWithNoiseFunctionalOp> {
-public:
- using OpRewritePattern<AtenRreluWithNoiseFunctionalOp>::OpRewritePattern;
- LogicalResult matchAndRewrite(AtenRreluWithNoiseFunctionalOp op,
- PatternRewriter &rewriter) const override {
- Location loc = op.getLoc();
- Value self = op.getSelf();
- Value noise = op.getNoise();
- Value lower = op.getLower();
- Value upper = op.getUpper();
- auto resType = cast<BaseTensorType>(op.getResultTypes()[0]);
- if (!resType.hasDtype()) {
- return rewriter.notifyMatchFailure(op, "result should have dtype");
- }
-
- bool training;
- if (!matchPattern(op.getTraining(), m_TorchConstantBool(&training))) {
- return rewriter.notifyMatchFailure(op, "training should be a constant");
- }
-
- Value constantZeroFloat =
- rewriter.create<ConstantFloatOp>(loc, rewriter.getF64FloatAttr(0.0));
- Value constantOneFloat =
- rewriter.create<ConstantFloatOp>(loc, rewriter.getF64FloatAttr(1.0));
- Value constantTwoFloat =
- rewriter.create<ConstantFloatOp>(loc, rewriter.getF64FloatAttr(2.0));
-
- Value alpha;
- if (training) {
- Value none = rewriter.create<ConstantNoneOp>(loc);
- Value emptyTensor = rewriter.create<AtenFullLikeOp>(
- loc, resType, self, constantZeroFloat, /*dtype=*/none,
- /*layout=*/none,
- /*device=*/none, /*pin_memoty=*/none, /*memory_format=*/none);
- alpha = rewriter.create<AtenUniformOp>(loc, resType, emptyTensor,
- /*from=*/lower, /*to=*/upper,
- /*generator=*/none);
- } else {
- Value half = rewriter.create<AtenAddOp>(loc, constantTwoFloat.getType(),
- lower, upper);
- alpha = rewriter.create<AtenDivOp>(loc, constantTwoFloat.getType(), half,
- constantTwoFloat);
- }
-
- Value zeroTensor =
- createRank0Tensor(rewriter, loc, resType, constantZeroFloat);
- Value positiveOutput =
- rewriter.create<AtenMaximumOp>(loc, resType, zeroTensor, self);
-
- Value scaledSelf;
- if (training) {
- scaledSelf = rewriter.create<AtenMulTensorOp>(loc, resType, self, alpha);
- auto boolResType = resType.getWithSizesAndDtype(resType.getSizes(),
- rewriter.getI1Type());
- Value oneTensor =
- createRank0Tensor(rewriter, loc, resType, constantOneFloat);
- Value not_positive = rewriter.create<AtenLeScalarOp>(
- loc, boolResType, self, constantZeroFloat);
- noise = rewriter.create<AtenWhereSelfOp>(loc, resType, not_positive,
- alpha, oneTensor);
- } else {
- scaledSelf = rewriter.create<AtenMulScalarOp>(loc, resType, self, alpha);
- }
-
- Value negativeOutput =
- rewriter.create<AtenMinimumOp>(loc, resType, zeroTensor, scaledSelf);
- Value rreluOutput = rewriter.create<AtenAddTensorOp>(
- loc, resType, positiveOutput, negativeOutput, constantOneFloat);
- rewriter.replaceOp(op, {rreluOutput, noise});
- return success();
- }
-};
-} // namespace
-
 // CELU(x)=max(0,x)+min(0,alpha∗(exp(x/alpha)−1))
 namespace {
 class DecomposeAtenCeluOp : public OpRewritePattern<AtenCeluOp> {
 public:
@@ -11598,11 +11442,6 @@ class DecomposeComplexOpsPass
 addPatternIfTargetOpIsIllegal(patterns);
 addPatternIfTargetOpIsIllegal(patterns);
 addPatternIfTargetOpIsIllegal(patterns);
- addPatternIfTargetOpIsIllegal<DecomposeAtenRreluWithNoiseOp>(patterns);
- addPatternIfTargetOpIsIllegal<DecomposeAtenRreluWithNoiseFunctionalOp>(
- patterns);
- addPatternIfTargetOpIsIllegal<DecomposeAtenRreluWithNoiseBackwardOp>(
- patterns);
 addPatternIfTargetOpIsIllegal(patterns);
 addPatternIfTargetOpIsIllegal(patterns);
 addPatternIfTargetOpIsIllegal(patterns);
diff --git a/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp b/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp
index f15911e2b5ba..ea9c2d014aca 100644
--- a/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp
+++ b/lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp
@@ -500,9 +500,6 @@ static void markDecomposedOpsAsIllegal(MLIRContext *context,
 target.addIllegalOp();
 target.addIllegalOp();
 target.addIllegalOp();
- target.addIllegalOp<AtenRreluWithNoiseOp>();
- target.addIllegalOp<AtenRreluWithNoiseFunctionalOp>();
- target.addIllegalOp<AtenRreluWithNoiseBackwardOp>();
 target.addIllegalOp();
 target.addIllegalOp();
 target.addIllegalOp();
diff --git a/projects/CMakeLists.txt b/projects/CMakeLists.txt
index 9699b9b662b0..dc0b8364f783 100644
--- a/projects/CMakeLists.txt
+++ b/projects/CMakeLists.txt
@@ -1,5 +1,7 @@
 include(AddMLIRPython)
 
+set(CMAKE_VERBOSE_MAKEFILE ON)
+
 if(TORCH_MLIR_ENABLE_ONNX_C_IMPORTER)
 add_subdirectory(onnx_c_importer)
 endif()
@@ -9,11 +11,13 @@ endif()
 # Configure PyTorch if we have any features enabled which require it.
################################################################################ - message(STATUS "CHECK RAHUL LTC ENABLED") +message(STATUS "CHECK RAHUL LTC ENABLED") +set(TORCH_MLIR_ENABLE_LTC 1) if(TORCH_MLIR_ENABLE_LTC) message(STATUS "RAHUL LTC ENABLED") endif() + if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER OR TORCH_MLIR_ENABLE_LTC) if (NOT TORCH_MLIR_USE_INSTALLED_PYTORCH) diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py index 38eb1f573362..3405efbf5478 100644 --- a/projects/pt1/e2e_testing/xfail_sets.py +++ b/projects/pt1/e2e_testing/xfail_sets.py @@ -1190,10 +1190,6 @@ "ElementwisePreluStaticModule_basic", "ElementwiseReciprocalModule_basic", "ElementwiseReluModule_basic", - "ElementwiseRreluWithNoiseEvalStaticModule_basic", - "ElementwiseRreluWithNoiseTrainStaticModule_basic", - "RreluWithNoiseBackwardEvalStaticModule_basic", - "RreluWithNoiseBackwardTrainStaticModule_basic", "ElementwiseRemainderTensorModule_Float_basic", "ElementwiseRemainderTensorModule_Float_NegativeDividend_basic", "ElementwiseRemainderTensorModule_Int_Float_basic", @@ -2197,7 +2193,6 @@ "ElementwiseReciprocalModule_basic", "ElementwiseRelu6Module_basic", "ElementwiseReluModule_basic", - "ElementwiseRreluWithNoiseEvalStaticModule_basic", "ElementwiseRemainderScalarModule_Float_NegativeDividend_basic", "ElementwiseRemainderScalarModule_Float_NegativeDivisor_basic", "ElementwiseRemainderScalarModule_Int_Float_NegativeDividend_basic", @@ -2334,10 +2329,6 @@ "ReduceSumFloatModule_basic", "ReduceSumSignedIntModule_basic", "ReduceSumUnsignedIntModule_basic", - "RreluWithNoiseBackwardEvalModule_basic", - "RreluWithNoiseBackwardEvalStaticModule_basic", - "RreluWithNoiseBackwardTrainModule_basic", - "RreluWithNoiseBackwardTrainStaticModule_basic", "RepeatModule_basic", "RepeatInterleaveSelfIntNoDimModule_basic", "ReshapeAliasCollapseModule_basic", @@ -2534,10 +2525,6 @@ "ViewSizeFromOtherTensor_basic", "RenormModuleFloat32NegativeDim_basic", "RenormModuleFloat32_basic", - "RreluWithNoiseBackwardEvalModule_basic", - "RreluWithNoiseBackwardEvalStaticModule_basic", - "RreluWithNoiseBackwardTrainModule_basic", - "RreluWithNoiseBackwardTrainStaticModule_basic", } ) - { ### Test failing in make_fx_tosa but not in tosa @@ -2970,10 +2957,6 @@ "ElementwiseRemainderTensorModule_Int_basic", "ElementwiseRemainderTensorModule_Int_NegativeDividend_basic", "ElementwiseRemainderTensorModule_Int_NegativeDivisor_basic", - "ElementwiseRreluWithNoiseEvalModule_basic", - "ElementwiseRreluWithNoiseEvalStaticModule_basic", - "ElementwiseRreluWithNoiseTrainModule_basic", - "ElementwiseRreluWithNoiseTrainStaticModule_basic", "ElementwiseSgnModule_basic", "EmptyStridedModule_basic", "EmptyStridedSizeIntStrideModule_basic", @@ -3131,11 +3114,6 @@ "ReflectionPad3dModule_basic", "ReflectionPad3dModuleFront_basic", "ReflectionPad3dModuleBack_basic", - "RreluWithNoiseBackwardEvalModule_basic", - "RreluWithNoiseBackwardEvalStaticModule_basic", - "RreluWithNoiseBackwardTrainModule_basic", - "RreluWithNoiseBackwardTrainStaticModule_basic", - "RreluWithNoiseForwardBackwardModule_basic", "ReshapeAliasCollapseModule_basic", "ReshapeAliasExpandModule_basic", "ReshapeExpandModule_basic", @@ -3949,15 +3927,6 @@ "NllLossStaticModule_sum_basic", "NllLossStaticModule_weight_basic", "Exp2StaticModule_basic", - "ElementwiseRreluWithNoiseEvalModule_basic", - "ElementwiseRreluWithNoiseEvalStaticModule_basic", - "ElementwiseRreluWithNoiseTrainModule_basic", - "ElementwiseRreluWithNoiseTrainStaticModule_basic", - 
"RreluWithNoiseBackwardEvalModule_basic", - "RreluWithNoiseBackwardEvalStaticModule_basic", - "RreluWithNoiseBackwardTrainModule_basic", - "RreluWithNoiseBackwardTrainStaticModule_basic", - "RreluWithNoiseForwardBackwardModule_basic", "Unfold_Module_Dynamic_basic", "Unfold_Module_Rank_4", "Unfold_Module_Rank_Zero_Size_Zero_basic", diff --git a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py index d0170b1bf9b0..2e23e9d3e5c5 100644 --- a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py +++ b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py @@ -307,9 +307,6 @@ def aten〇gelu_backward〡shape(grad_output: List[int], self: List[int], approx def aten〇leaky_relu_backward〡shape(grad_output: List[int], self: List[int], negative_slope: float, self_is_result: bool) -> List[int]: return upstream_shape_functions.unary(grad_output) -def aten〇rrelu_with_noise_backward〡shape(grad_output: List[int], self: List[int], noise: List[int], lower: float, upper: float, training: bool, self_is_result: bool) -> List[int]: - return upstream_shape_functions.unary(grad_output) - def aten〇hardtanh_backward〡shape(grad_output: List[int], self: List[int], min_val: float, max_val: float) -> List[int]: return upstream_shape_functions.unary(grad_output) @@ -646,12 +643,6 @@ def aten〇celu〡shape(self: List[int], alpha: float = 1.) -> List[int]: def aten〇rrelu〡shape(self: List[int], lower: float = 0.125, upper: float = 0.33333333333333331, training: bool = False, generator: Any = None) -> List[int]: return upstream_shape_functions.unary(self) -def aten〇rrelu_with_noise〡shape(self: List[int], noise: List[int], lower: float = 0.125, upper: float = 0.33333333333333331, training: bool = False, generator: Any = None) -> List[int]: - return upstream_shape_functions.unary(self) - -def aten〇rrelu_with_noise_functional〡shape(self: List[int], noise: List[int], lower: float = 0.125, upper: float = 0.33333333333333331, training: bool = False, generator: Any = None) -> Tuple[List[int], List[int]]: - return upstream_shape_functions.unary(self), upstream_shape_functions.unary(noise) - def aten〇selu〡shape(self: List[int]) -> List[int]: return upstream_shape_functions.unary(self) @@ -3306,15 +3297,6 @@ def aten〇leaky_relu_backward〡dtype(grad_output_rank_dtype: Tuple[int, int], promoted_dtype = promote_dtypes(ranks, dtypes) return promoted_dtype -@check_dtype_function([Invocation(TensorOfShape(3, 3, dtype=dtype), TensorOfShape(3, 3, dtype=dtype), TensorOfShape(3, 3, dtype=dtype), 0.1, 0.9, False, False) for dtype in _SORTED_TORCH_TYPES]) -def aten〇rrelu_with_noise_backward〡dtype(grad_output_rank_dtype: Tuple[int, int], self_rank_dtype: Tuple[int, int], noise_rank_dtype: Tuple[int, int], lower: Union[int, float, complex], upper: Union[int, float, complex], training: bool, self_is_result: bool) -> int: - grad_output_rank, grad_output_dtype = grad_output_rank_dtype - self_rank, self_dtype = self_rank_dtype - ranks: List[Optional[int]] = [grad_output_rank, self_rank] - dtypes = [grad_output_dtype, self_dtype] - promoted_dtype = promote_dtypes(ranks, dtypes) - return promoted_dtype - @check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1)) def aten〇lift_fresh_copy〡dtype(self_rank_dtype: Tuple[int, int]) -> int: self_rank, self_dtype = self_rank_dtype @@ -3481,20 +3463,6 @@ def aten〇rrelu〡dtype(self_rank_dtype: Tuple[int, int], lower: 
Union[int, flo self_rank, self_dtype = self_rank_dtype return self_dtype -@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=2)) -def aten〇rrelu_with_noise〡dtype(self_rank_dtype: Tuple[int, int], noise_rank_dtype: Tuple[int, int], lower: Union[int, float, complex] = 0.125, upper: Union[int, float, complex] = 0.33333333333333331, training: bool = False, generator: Any = None) -> int: - self_rank, self_dtype = self_rank_dtype - noise_rank, noise_dtype = noise_rank_dtype - assert self_rank == noise_rank - return self_dtype - -@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=2)) -def aten〇rrelu_with_noise_functional〡dtype(self_rank_dtype: Tuple[int, int], noise_rank_dtype: Tuple[int, int], lower: Union[int, float, complex] = 0.125, upper: Union[int, float, complex] = 0.33333333333333331, training: bool = False, generator: Any = None) -> Tuple[int, int]: - self_rank, self_dtype = self_rank_dtype - noise_rank, noise_dtype = noise_rank_dtype - assert self_rank == noise_rank - return self_dtype, noise_dtype - @check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, error_types={torch.bool})) def aten〇relu6〡dtype(self_rank_dtype: Tuple[int, int]) -> int: self_rank, self_dtype = self_rank_dtype diff --git a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py index 8a9c990de9a0..9d7b697f3de0 100644 --- a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py +++ b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py @@ -302,7 +302,6 @@ def emit_with_mutating_variants(key, **kwargs): "aten::relu6 : (Tensor) -> (Tensor)", "aten::leaky_relu : (Tensor, Scalar) -> (Tensor)", "aten::rrelu : (Tensor, Scalar, Scalar, bool, Generator?) -> (Tensor)", - "aten::rrelu_with_noise : (Tensor, Tensor, Scalar, Scalar, bool, Generator?) -> (Tensor)", "aten::celu : (Tensor, Scalar) -> (Tensor)", "aten::selu : (Tensor) -> (Tensor)", "aten::sigmoid : (Tensor) -> (Tensor)", @@ -1210,12 +1209,6 @@ def emit_with_mutating_variants(key, **kwargs): "aten::elu_backward : (Tensor, Scalar, Scalar, Scalar, bool, Tensor) -> (Tensor)" ) emit("aten::leaky_relu_backward : (Tensor, Tensor, Scalar, bool) -> (Tensor)") - emit( - "aten::rrelu_with_noise_backward : (Tensor, Tensor, Tensor, Scalar, Scalar, bool, bool) -> (Tensor)" - ) - emit( - "aten::rrelu_with_noise_functional : (Tensor, Tensor, Scalar, Scalar, bool, Generator?) 
-> (Tensor, Tensor)" - ) # quantized ops emit("aten::quantize_per_channel : (Tensor, Tensor, Tensor, int, int) -> (Tensor)") diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/backprop.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/backprop.py index 5e6e093902c4..a5de0391f9eb 100644 --- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/backprop.py +++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/backprop.py @@ -323,163 +323,3 @@ def forward(self, grad, input): def LeakyReluBackwardStaticModule_basic(module, tu: TestUtils): module.forward(tu.rand(3, 4, 5), tu.rand(3, 4, 5)) - -# ============================================================================== - - -class RreluWithNoiseBackwardTrainModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, grad, input, noise): - return torch.ops.aten.rrelu_with_noise_backward( - grad, - input, - noise, - lower=0.1, - upper=0.9, - training=True, - self_is_result=False, - ) - - -@register_test_case(module_factory=lambda: RreluWithNoiseBackwardTrainModule()) -def RreluWithNoiseBackwardTrainModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 4, 5), tu.rand(3, 4, 5), tu.rand(3, 4, 5)) - - -class RreluWithNoiseBackwardTrainStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([3, 4, 5], torch.float32, True), - ([3, 4, 5], torch.float32, True), - ([3, 4, 5], torch.float32, True), - ] - ) - def forward(self, grad, input, noise): - return torch.ops.aten.rrelu_with_noise_backward( - grad, - input, - noise, - lower=0.1, - upper=0.9, - training=True, - self_is_result=False, - ) - - -@register_test_case(module_factory=lambda: RreluWithNoiseBackwardTrainStaticModule()) -def RreluWithNoiseBackwardTrainStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 4, 5), tu.rand(3, 4, 5), tu.rand(3, 4, 5)) - - -# ============================================================================== - - -class RreluWithNoiseBackwardEvalModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, grad, input, noise): - return torch.ops.aten.rrelu_with_noise_backward( - grad, - input, - noise, - lower=0.1, - upper=0.9, - training=False, - self_is_result=False, - ) - - -@register_test_case(module_factory=lambda: RreluWithNoiseBackwardEvalModule()) -def RreluWithNoiseBackwardEvalModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 4, 5), tu.rand(3, 4, 5), tu.rand(3, 4, 5)) - - -class RreluWithNoiseBackwardEvalStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([3, 4, 5], torch.float32, True), - ([3, 4, 5], torch.float32, True), - ([3, 4, 5], torch.float32, True), - ] - ) - def forward(self, grad, input, noise): - return torch.ops.aten.rrelu_with_noise_backward( - grad, - input, - noise, - lower=0.1, - upper=0.9, - training=False, - self_is_result=False, - ) - - -@register_test_case(module_factory=lambda: RreluWithNoiseBackwardEvalStaticModule()) -def RreluWithNoiseBackwardEvalStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 4, 5), tu.rand(3, 4, 5), tu.rand(3, 4, 5)) - - 
-class RreluWithNoiseForwardBackwardModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1], torch.float32, True), - ([-1, -1], torch.float32, True), - ([-1, -1], torch.float32, True), - ] - ) - def forward(self, grad, input, noise): - res = torch.ops.aten.rrelu_with_noise_backward( - grad, - input, - noise, - lower=0.4, - upper=0.6, - training=True, - self_is_result=False, - ) - return torch.mean(res), torch.std(res) - - -@register_test_case(module_factory=lambda: RreluWithNoiseForwardBackwardModule()) -def RreluWithNoiseForwardBackwardModule_basic(module, tu: TestUtils): - grad = tu.rand(256, 244) - input = tu.rand(256, 244, low=-1.0, high=1.0) - noise = tu.rand(256, 244) - torch.ops.aten.rrelu_with_noise(input, noise, lower=0.4, upper=0.6, training=True) - module.forward(grad, input, noise) From 38f59fac8cb9095391c0d0cceaa52ea54858f837 Mon Sep 17 00:00:00 2001 From: rahul shrivastava Date: Fri, 10 Jan 2025 02:13:33 -0800 Subject: [PATCH 5/6] Verbose output Signed-off-by: rahul shrivastava --- CMakeLists.txt | 36 +++++++++++++++++++ .../python_deploy/build_linux_packages.sh | 3 +- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 770f4e5d3517..7540221665c0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -80,6 +80,42 @@ endif() #------------------------------------------------------------------------------- # Configure out-of-tree vs in-tree build #------------------------------------------------------------------------------- +execute_process( + COMMAND nm -D /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/lib/libtorch_cpu.so + OUTPUT_VARIABLE OUTPUT + RESULT_VARIABLE RESULT + ERROR_VARIABLE ERR + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Print the command output +message("Command output:\n${OUTPUT}") + +# Print the result status +if(RESULT EQUAL 0) + message("Command executed successfully.") +else() + message("Command failed with error: ${ERR}") +endif() + +execute_process( + COMMAND ldd /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/lib/libtorch_cpu.so + OUTPUT_VARIABLE OUTPUT1 + RESULT_VARIABLE RESULT1 + ERROR_VARIABLE ERR1 + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Print the command output +message("Command output:\n${OUTPUT1}") + +# Print the result status +if(RESULT1 EQUAL 0) + message("Command executed successfully.") +else() + message("Command failed with error: ${ERR1}") +endif() + if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR TORCH_MLIR_OUT_OF_TREE_BUILD) message(STATUS "Torch-MLIR out-of-tree build.") diff --git a/build_tools/python_deploy/build_linux_packages.sh b/build_tools/python_deploy/build_linux_packages.sh index a20358b73978..80ccef46978b 100755 --- a/build_tools/python_deploy/build_linux_packages.sh +++ b/build_tools/python_deploy/build_linux_packages.sh @@ -223,7 +223,7 @@ function build_in_tree() { #fi echo ":::: Build in-tree Torch from binary: $torch_from_bin with Python: $python_version" - cmake -GNinja -B/main_checkout/torch-mlir/build \ + cmake -GNinja -B/main_checkout/torch-mlir/build \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ @@ -244,6 +244,7 @@ function build_in_tree() { -DTORCH_MLIR_SRC_PYTORCH_BRANCH=${TORCH_MLIR_SRC_PYTORCH_BRANCH} \ -DTM_PYTORCH_INSTALL_WITHOUT_REBUILD=${TM_PYTORCH_INSTALL_WITHOUT_REBUILD} \ -DPython3_EXECUTABLE="$(which python3)" \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ /main_checkout/torch-mlir/externals/llvm-project/llvm cmake --build 
/main_checkout/torch-mlir/build --target tools/torch-mlir/all ccache -s From 71f62e15177a321ca359ea5fdd238ff8d49868cd Mon Sep 17 00:00:00 2001 From: rahul shrivastava Date: Fri, 10 Jan 2025 09:34:23 -0800 Subject: [PATCH 6/6] More commits Signed-off-by: rahul shrivastava --- CMakeLists.txt | 60 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 20 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7540221665c0..984e9bdc6f42 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -80,43 +80,63 @@ endif() #------------------------------------------------------------------------------- # Configure out-of-tree vs in-tree build #------------------------------------------------------------------------------- -execute_process( - COMMAND nm -D /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/lib/libtorch_cpu.so - OUTPUT_VARIABLE OUTPUT - RESULT_VARIABLE RESULT - ERROR_VARIABLE ERR - OUTPUT_STRIP_TRAILING_WHITESPACE -) +#execute_process( +# COMMAND nm -D /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/lib/libtorch_cpu.so +# OUTPUT_VARIABLE OUTPUT +# RESULT_VARIABLE RESULT +# ERROR_VARIABLE ERR +# OUTPUT_STRIP_TRAILING_WHITESPACE +#) # Print the command output -message("Command output:\n${OUTPUT}") +#message("Command output:\n${OUTPUT}") # Print the result status -if(RESULT EQUAL 0) - message("Command executed successfully.") -else() - message("Command failed with error: ${ERR}") -endif() +#if(RESULT EQUAL 0) +# message("Command executed successfully.") +#else() +# message("Command failed with error: ${ERR}") +#endif() + +#execute_process( +# COMMAND ldd /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/lib/libtorch_cpu.so +# OUTPUT_VARIABLE OUTPUT1 +# RESULT_VARIABLE RESULT1 +# ERROR_VARIABLE ERR1 +# OUTPUT_STRIP_TRAILING_WHITESPACE +#) + +# Print the command output +#message("Command output:\n${OUTPUT1}") + +# Print the result status +#if(RESULT1 EQUAL 0) +# message("Command executed successfully.") +#else() +# message("Command failed with error: ${ERR1}") +#endif() execute_process( - COMMAND ldd /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/lib/libtorch_cpu.so - OUTPUT_VARIABLE OUTPUT1 - RESULT_VARIABLE RESULT1 - ERROR_VARIABLE ERR1 + COMMAND file /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/lib/libtorch_cpu.so + OUTPUT_VARIABLE OUTPUT2 + RESULT_VARIABLE RESULT2 + ERROR_VARIABLE ERR2 OUTPUT_STRIP_TRAILING_WHITESPACE ) # Print the command output -message("Command output:\n${OUTPUT1}") +message("Command output:\n${OUTPUT2}") # Print the result status -if(RESULT1 EQUAL 0) +if(RESULT2 EQUAL 0) message("Command executed successfully.") else() - message("Command failed with error: ${ERR1}") + message("Command failed with error: ${ERR2}") endif() + + if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR TORCH_MLIR_OUT_OF_TREE_BUILD) message(STATUS "Torch-MLIR out-of-tree build.") # Out-of-tree build
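For local verification of the LTC-enabled configuration this series forces on, the CI configure from PATCH 1/6 can be reproduced outside the packaging containers. A minimal sketch, assuming a torch-mlir checkout with the LLVM submodule at externals/llvm-project and torch plus build-requirements.txt already installed; the -GNinja generator, Release build type, and LLVM_ENABLE_PROJECTS/LLVM_EXTERNAL_PROJECTS settings are illustrative assumptions, while the torch-mlir flags and the build target are taken verbatim from build_posix.sh and build_linux_packages.sh above:

# Sketch: configure an in-tree build with LTC enabled (assumed local paths).
repo_root="$PWD"             # assumption: run from the torch-mlir checkout root
build_dir="$repo_root/build"
export TORCH_MLIR_ENABLE_LTC=1   # mirrors the export these patches set for setup.py builds
cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \
  -GNinja \
  -DCMAKE_BUILD_TYPE=Release \
  -DLLVM_ENABLE_PROJECTS=mlir \
  -DLLVM_EXTERNAL_PROJECTS=torch-mlir \
  -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$repo_root" \
  -DLLVM_TARGETS_TO_BUILD=host \
  -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
  -DTORCH_MLIR_ENABLE_LTC=ON \
  -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON
# Build the same umbrella target the packaging script uses.
cmake --build "$build_dir" --target tools/torch-mlir/all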