[NFC reformat] Run pre-commit on all files and format misc.
This is part 1 of ~3, formatting all miscellaneous text files and CPP files matched by a first run of pre-commit. These files see low change traffic, so reformatting them is unlikely to be disruptive.

Subsequent patches will format Python files and remaining CPP files.
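
For reference, a sweep like this is typically produced by running pre-commit's hooks over the whole tree rather than only against staged files. A minimal sketch of that workflow, assuming the repository's existing `.pre-commit-config.yaml` (the exact hook set is not reproduced here):

    # Install the pre-commit tool (one-time setup).
    pip install pre-commit

    # Run every hook configured in .pre-commit-config.yaml against all
    # files in the repository, not only the ones staged for commit.
    pre-commit run --all-files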
stellaraccident committed Apr 27, 2024
1 parent 6679728 commit 5d4b803
Showing 40 changed files with 100 additions and 114 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -247,4 +247,4 @@ add_subdirectory(projects)
 # Finish with top-level Python bindings so it can handle additional deps.
 if(MLIR_ENABLE_BINDINGS_PYTHON)
   add_subdirectory(python)
-endif()
\ No newline at end of file
+endif()
2 changes: 1 addition & 1 deletion build_tools/ci/test_posix.sh
@@ -30,7 +30,7 @@ echo "::endgroup::"
 
 case $torch_version in
   nightly)
-    # Failing with: NotImplementedError: 
+    # Failing with: NotImplementedError:
     # Could not run 'aten::empty.memory_format' with arguments from the 'Lazy' backend.
     # As of 2024-01-07
     # echo "::group::Run Lazy Tensor Core e2e integration tests"
2 changes: 1 addition & 1 deletion build_tools/python_deploy/build_linux_packages.sh
@@ -282,7 +282,7 @@ function _check_file_not_changed_by() {
 
 function test_in_tree() {
   local torch_version="$1"
-  
+
   echo ":::: Test in-tree"
   cmake --build /main_checkout/torch-mlir/build --target check-torch-mlir-all
 
1 change: 0 additions & 1 deletion docs/importers/onnx_importer.md
@@ -140,4 +140,3 @@ torch-mlir's representation:
 * `ConstantOfShape`: Mapped to `torch.vtensor.literal` with
   a corresponding `value` attribute.
-
1 change: 0 additions & 1 deletion docs/roadmap.md
@@ -277,4 +277,3 @@ directly provided a way to plug into this.
 
 Additionally, we can leverage the [`pytorch-jit-paritybench`](https://github.com/jansel/pytorch-jit-paritybench)
 to verify our end-to-end correctness on real models.
-
2 changes: 1 addition & 1 deletion include/CMakeLists.txt
@@ -1,2 +1,2 @@
 add_subdirectory(torch-mlir)
-add_subdirectory(torch-mlir-dialects)
\ No newline at end of file
+add_subdirectory(torch-mlir-dialects)
18 changes: 9 additions & 9 deletions include/torch-mlir/Dialect/Torch/IR/TorchOps.td
@@ -756,12 +756,12 @@ def Torch_ConstantNumberOp : Torch_Op<"constant.number",
     [ConstantLike, Pure]> {
   let summary = "Materialize a constant `number` value.";
   let description = [{
-    This op is used as a workaround to the fact that the constant 
-    materialization in MLIR must materialize a constant with a single op. 
-    To materialize ops with a static `!torch.number` type, we must use this op, 
+    This op is used as a workaround to the fact that the constant
+    materialization in MLIR must materialize a constant with a single op.
+    To materialize ops with a static `!torch.number` type, we must use this op,
     even though we statically know if it is an integer or a float.
 
-    Note: This op unconditionally canonicalizes to 
+    Note: This op unconditionally canonicalizes to
     `torch.constant.{float,int}` + `torch.derefine`
   }];
   let arguments = (ins
@@ -846,7 +846,7 @@ def Torch_OperatorOp : Torch_Op<"operator", [
   let regions = (region VariadicRegion<AnyRegion>:$regions);
 
   let assemblyFormat = [{
-    $name `(` $operands `)` attr-dict `:` functional-type($operands, $results) $regions 
+    $name `(` $operands `)` attr-dict `:` functional-type($operands, $results) $regions
   }];
 }
 
@@ -1146,10 +1146,10 @@ def Torch_PromoteDtypesOp: Torch_Op<"promote_dtypes", [
   let assemblyFormat = "$ranks `,` $dtypes attr-dict `:` functional-type(operands, results)";
 }
 
-// To handle runtime assertions, torchscript provides us `torch._assert` operation. 
-// But TS compiler introduces control flow for `torch._assert` operation. The 
-// `torch._assert` would introduce control flow like: 
-// 
+// To handle runtime assertions, torchscript provides us `torch._assert` operation.
+// But TS compiler introduces control flow for `torch._assert` operation. The
+// `torch._assert` would introduce control flow like:
+//
 // %cond = "torch.aten.Bool.Tensor"(%0) : (!torch.tensor) -> !torch.bool
 // "torch.prim.If"(%cond) ({
 //   "torch.prim.If.yield"() : () -> ()
2 changes: 1 addition & 1 deletion include/torch-mlir/Dialect/Torch/Transforms/Passes.td
@@ -369,7 +369,7 @@ def LowerToBackendContract
     to the backend contract. This pass does not do any global program
     restructuring -- it works entirely within a single semantic model
     of a `builtin.module` with `torch.global_slot` ops and `func.func` ops.
-    
+
     This pass runs a set of simplifications within that semantic model until
     the backend contract is satisfied, and fails if it cannot be satisfied.
     In particular, the backend contract consists of:
67 changes: 32 additions & 35 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
@@ -628,42 +628,39 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
             binder.op, resultType, operand);
         return success();
       });
-  patterns.onOp("Not", 1,
-                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
-                  Torch::ValueTensorType resultType;
-                  Value operand;
-                  if (binder.tensorOperand(operand) ||
-                      binder.tensorResultType(resultType)) {
-                    return failure();
-                  }
+  patterns.onOp(
+      "Not", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+        Torch::ValueTensorType resultType;
+        Value operand;
+        if (binder.tensorOperand(operand) ||
+            binder.tensorResultType(resultType)) {
+          return failure();
+        }
 
-                  auto loc = binder.getLoc();
-                  auto operandTy =
-                      cast<Torch::ValueTensorType>(operand.getType());
-                  auto eTy = operandTy.getDtype();
-
-                  if (!eTy.isInteger(1)) {
-                    auto i1ty = rewriter.getI1Type();
-                    auto ty = rewriter.getType<Torch::ValueTensorType>(
-                        operandTy.getSizes(), i1ty);
-                    auto torchqTy = Torch::getScalarTypeForType(i1ty);
-                    Value tyConst = rewriter.create<Torch::ConstantIntOp>(
-                        binder.getLoc(), rewriter.getType<Torch::IntType>(),
-                        rewriter.getIntegerAttr(
-                            rewriter.getIntegerType(64),
-                            static_cast<int64_t>(torchqTy)));
-                    Value none = rewriter.create<Torch::ConstantNoneOp>(loc);
-                    Value cstFalse =
-                        rewriter.create<Torch::ConstantBoolOp>(loc, false);
-                    operand = rewriter.create<Torch::AtenToDtypeOp>(
-                        loc, ty, operand, tyConst,
-                        /*non_blocking=*/cstFalse, /*copy=*/cstFalse,
-                        /*memory_format=*/none);
-                  }
-                  rewriter.replaceOpWithNewOp<Torch::AtenBitwiseNotOp>(
-                      binder.op, resultType, operand);
-                  return success();
-                });
+        auto loc = binder.getLoc();
+        auto operandTy = cast<Torch::ValueTensorType>(operand.getType());
+        auto eTy = operandTy.getDtype();
+
+        if (!eTy.isInteger(1)) {
+          auto i1ty = rewriter.getI1Type();
+          auto ty = rewriter.getType<Torch::ValueTensorType>(
+              operandTy.getSizes(), i1ty);
+          auto torchqTy = Torch::getScalarTypeForType(i1ty);
+          Value tyConst = rewriter.create<Torch::ConstantIntOp>(
+              binder.getLoc(), rewriter.getType<Torch::IntType>(),
+              rewriter.getIntegerAttr(rewriter.getIntegerType(64),
+                                      static_cast<int64_t>(torchqTy)));
+          Value none = rewriter.create<Torch::ConstantNoneOp>(loc);
+          Value cstFalse = rewriter.create<Torch::ConstantBoolOp>(loc, false);
+          operand = rewriter.create<Torch::AtenToDtypeOp>(
+              loc, ty, operand, tyConst,
+              /*non_blocking=*/cstFalse, /*copy=*/cstFalse,
+              /*memory_format=*/none);
+        }
+        rewriter.replaceOpWithNewOp<Torch::AtenBitwiseNotOp>(
+            binder.op, resultType, operand);
+        return success();
+      });
   patterns.onOp("Or", 1,
                 [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                   Torch::ValueTensorType resultType;
5 changes: 2 additions & 3 deletions lib/Conversion/TorchToStablehlo/StablehloLegalizeUtils.cpp
@@ -189,9 +189,8 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
       do_bcast = true;
     } else {
       op->emitError("The size of tensor a (")
-          << inDim << ")"
-          << "must match the size of tensor b (" << outDim << ")"
-          << "at non-singleton dimension " << inPos;
+          << inDim << ")" << "must match the size of tensor b (" << outDim
+          << ")" << "at non-singleton dimension " << inPos;
     }
   }
   std::reverse(bcastDims.begin(), bcastDims.end());
8 changes: 4 additions & 4 deletions lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
@@ -287,19 +287,19 @@ static LogicalResult checkValidityOfCast(Type src, Type dest) {
       (src.isInteger(1) && dest.isInteger(64)) ||
       (src.isInteger(1) && dest.isF32()) ||
       // f64 -> *
-      (src.isF64() && dest.isF32()) || 
+      (src.isF64() && dest.isF32()) ||
       (src.isF64() && dest.isBF16()) ||
       // f32 -> *
-      (src.isF32() && dest.isF64()) || 
+      (src.isF32() && dest.isF64()) ||
       (src.isF32() && dest.isBF16()) ||
-      (src.isF32() && dest.isF16()) || 
+      (src.isF32() && dest.isF16()) ||
       (src.isF32() && dest.isInteger(8)) ||
       (src.isF32() && dest.isInteger(64)) ||
       (src.isF32() && dest.isInteger(1)) ||
       // bf16 -> *
       (src.isBF16() && dest.isInteger(8)) ||
       (src.isBF16() && dest.isInteger(16)) ||
-      (src.isBF16() && dest.isInteger(32)) || 
+      (src.isBF16() && dest.isInteger(32)) ||
       (src.isBF16() && dest.isF32())) {
     return success();
   }
2 changes: 1 addition & 1 deletion lib/Dialect/TMTensor/Transforms/CMakeLists.txt
@@ -22,4 +22,4 @@ add_mlir_library(TorchMLIRTMTensorPasses
   MLIRTransforms
 )
 
-torch_mlir_target_includes(TorchMLIRTMTensorPasses)
\ No newline at end of file
+torch_mlir_target_includes(TorchMLIRTMTensorPasses)
3 changes: 1 addition & 2 deletions lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp
@@ -305,8 +305,7 @@ class LowerToBackendContractPass
       return signalPassFailure();
     } while (!satisfiesBackendContract(module, target));
     LLVM_DEBUG({
-      llvm::dbgs() << "LowerToBackendContractPass: "
-                   << "succeeded after " << i
+      llvm::dbgs() << "LowerToBackendContractPass: " << "succeeded after " << i
                    << " iterations of the simplification pipeline\n";
     });
   }
2 changes: 1 addition & 1 deletion lib/Dialect/TorchConversion/Transforms/CMakeLists.txt
@@ -21,7 +21,7 @@ endif()
 
 add_mlir_library(TorchMLIRTorchConversionPasses
   BackendTypeConversion.cpp
-  BackendTypeConversionPasses.cpp 
+  BackendTypeConversionPasses.cpp
   Passes.cpp
   ConvertCustomQuantOp.cpp
   UnpackQuantTensor.cpp
8 changes: 4 additions & 4 deletions projects/CMakeLists.txt
@@ -44,16 +44,16 @@ if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER OR TORCH_MLIR_ENABLE_LTC)
     message(FATAL_ERROR "Without TORCH_MLIR_USE_INSTALLED_PYTORCH, expected to find Torch configuration at ${Torch_DIR}, which does not exist")
   endif()
 endif()
-  
+
 find_package(Torch 1.11 REQUIRED)
-  
+
 set(TORCHGEN_DIR ${Torch_ROOT}/../../../torchgen)
-  
+
 include_directories(BEFORE
   ${TORCH_INCLUDE_DIRS}
   ${Python3_INCLUDE_DIRS}
 )
-link_directories("${TORCH_INSTALL_PREFIX}/lib") 
+link_directories("${TORCH_INSTALL_PREFIX}/lib")
 message(STATUS "TORCH_CXXFLAGS is = ${TORCH_CXXFLAGS}")
 if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND NOT TORCH_CXXFLAGS)
   message(WARNING
@@ -713,4 +713,4 @@ at::Tensor &LazyNativeFunctions::logsumexp_out(const at::Tensor &self,
 void InitializeAtenBindings() {}
 
 } // namespace lazy
-} // namespace torch
\ No newline at end of file
+} // namespace torch
2 changes: 1 addition & 1 deletion projects/ltc/csrc/base_lazy_backend/ops/unbind_int.h
@@ -34,4 +34,4 @@ class UnbindCopyInt : public torch::lazy::TorchMlirNode {
 };
 
 } // namespace lazy
-} // namespace torch
\ No newline at end of file
+} // namespace torch
4 changes: 2 additions & 2 deletions projects/pt1/python/CMakeLists.txt
@@ -56,7 +56,7 @@ endif()
 # Can we build the JIT IR importer with `declare_mlir_python_extension`?
 # Then it would "just work".
 if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER)
-  add_dependencies(TorchMLIRPythonTorchExtensionsSources 
+  add_dependencies(TorchMLIRPythonTorchExtensionsSources
     TorchMLIRJITIRImporter
     TorchMLIRJITIRImporterPybind
     TorchMLIRE2ETestPythonModules
@@ -65,7 +65,7 @@ endif()
 
 if(TORCH_MLIR_ENABLE_LTC)
   # Add Torch-MLIR LTC backend as dependency
-  add_dependencies(TorchMLIRPythonTorchExtensionsSources 
+  add_dependencies(TorchMLIRPythonTorchExtensionsSources
     torch_mlir_ltc_backend
     reference_lazy_backend
   )
@@ -28,4 +28,3 @@ set_target_properties(torch_mlir_custom_op_example PROPERTIES
 )
 torch_mlir_python_target_compile_options(torch_mlir_custom_op_example)
 mlir_check_all_link_libraries(torch_mlir_custom_op_example)
-
2 changes: 1 addition & 1 deletion projects/pt1/test/CMakeLists.txt
@@ -13,7 +13,7 @@ configure_lit_site_cfg(
 set(TORCH_MLIR_TEST_DEPENDS
   FileCheck count not
   TorchMLIRPythonModules
-  torch-mlir-opt 
+  torch-mlir-opt
   torch-mlir-capi-torch-test
 )
 
2 changes: 1 addition & 1 deletion test/CAPI/lit.local.cfg
@@ -1 +1 @@
config.suffixes.add('.c')
config.suffixes.add('.c')
14 changes: 9 additions & 5 deletions test/CAPI/torch.c
@@ -36,7 +36,7 @@ static void testTensor(MlirContext ctx, intptr_t numSizes, int64_t *sizes,
     fprintf(stderr, #TTT "Type %s rank: %zu\n", testName,                     \
             torchMlirTorch##TTT##TypeGetRank(TTT##Type));                     \
     int64_t *TTT##Sizes = malloc(sizeof(int64_t) * numSizes);                 \
-    torchMlirTorch##TTT##TypeGetSizes(TTT##Type, TTT##Sizes);                  \
+    torchMlirTorch##TTT##TypeGetSizes(TTT##Type, TTT##Sizes);                 \
     for (int i = 0; i < numSizes; ++i) {                                      \
       fprintf(stderr, #TTT "Type %s pos %d size: %ld\n", testName, i,         \
               TTT##Sizes[i]);                                                 \
@@ -157,22 +157,26 @@ static void testTypeMetaDataAccessors(MlirContext ctx) {
   MlirType dictType1 = torchMlirTorchDictTypeGet(strType, floatType);
 
   fprintf(stderr, "dict keyType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType1), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType1), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict keyType: !torch.str
   fprintf(stderr, "dict valueType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType1), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType1), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict valueType: !torch.float
 
   MlirType dictType2 = torchMlirTorchDictTypeGet(floatType, strType);
 
   fprintf(stderr, "dict keyType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType2), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType2), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict keyType: !torch.float
   fprintf(stderr, "dict valueType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType2), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType2), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict valueType: !torch.str
 }
2 changes: 1 addition & 1 deletion test/CMakeLists.txt
@@ -14,7 +14,7 @@ configure_lit_site_cfg(
 set(TORCH_MLIR_TEST_DEPENDS
   FileCheck count not
   TorchMLIRPythonModules
-  torch-mlir-opt 
+  torch-mlir-opt
   torch-mlir-capi-torch-test
 )
 