Bump llvm-project to f66cd9e. (llvm#2466)
Picks up DenseResourceElementsAttr Python support and fixes the minf/maxf C++ rename.
stellaraccident authored Sep 19, 2023
1 parent b03efdf · commit 278c41e
Showing 5 changed files with 9 additions and 9 deletions.
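For context on the minf/maxf change: the bumped llvm-project renames the floating-point min/max arith ops, so arith::MaxFOp/arith::MinFOp become arith::MaximumFOp/arith::MinimumFOp (the NaN-propagating IEEE-754 maximum/minimum variants, matching the old ops' behavior), as distinct from arith::MaxNumFOp/arith::MinNumFOp, which prefer the non-NaN operand. A minimal before/after sketch of the builder calls, assuming MLIR headers from the bumped llvm-project; the helper names buildFloatMax/buildFloatMin are illustrative only and not part of this commit:

```cpp
// Illustrative only: shows the renamed float min/max builders used throughout
// this commit. Helper names are hypothetical, not torch-mlir code.
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

static Value buildFloatMax(OpBuilder &b, Location loc, Value lhs, Value rhs) {
  // Before the bump: b.create<arith::MaxFOp>(loc, lhs, rhs);
  return b.create<arith::MaximumFOp>(loc, lhs, rhs);
}

static Value buildFloatMin(OpBuilder &b, Location loc, Value lhs, Value rhs) {
  // Before the bump: b.create<arith::MinFOp>(loc, lhs, rhs);
  return b.create<arith::MinimumFOp>(loc, lhs, rhs);
}
```

The diffs below apply this one-for-one rename at each call site.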
2 changes: 1 addition & 1 deletion .gitmodules
@@ -3,4 +3,4 @@
url = https://github.com/llvm/llvm-project.git
[submodule "externals/stablehlo"]
path = externals/stablehlo
-	url = https://github.com/openxla/stablehlo.git
+	url = https://github.com/shark-infra/stablehlo.git
@@ -233,7 +233,7 @@ LogicalResult AttentionOp::generateScalarImplementation(OpBuilder &b,
loc, init,
[&](OpBuilder &b, Location loc, Value elem, Value acc) {
Value x = b.create<memref::LoadOp>(loc, weight, localIVs);
-Value max = b.create<arith::MaxFOp>(loc, x, acc);
+Value max = b.create<arith::MaximumFOp>(loc, x, acc);
b.create<scf::ReduceReturnOp>(loc, max);
});
})
2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 4670 files
8 changes: 4 additions & 4 deletions lib/Conversion/TorchToLinalg/Reduction.cpp
@@ -176,8 +176,8 @@ class ConvertAtenMaxDimOp : public OpConversionPattern<AtenMaxDimOp> {

Value resultMax, predicate;
if (inElementType.isa<mlir::FloatType>()) {
-resultMax =
-    rewriter.create<arith::MaxFOp>(nestedLoc, newValue, oldValue);
+resultMax = rewriter.create<arith::MaximumFOp>(nestedLoc, newValue,
+                                               oldValue);
predicate = rewriter.create<arith::CmpFOp>(
nestedLoc, arith::CmpFPredicate::OGT, newValue, oldValue);
} else {
@@ -280,7 +280,7 @@ static Value createLinalgPayloadForReduceOp(OpBuilder &b, Location loc,
convertScalarToDtype(b, loc, payloadArgs[0], resultElementType);
Value result = payloadArgs[1];
if (resultElementType.isa<mlir::FloatType>())
-return b.create<arith::MaxFOp>(loc, self, result);
+return b.create<arith::MaximumFOp>(loc, self, result);
else if (resultElementType.isa<mlir::IntegerType>()) {
IntegerType intType = max.getSelf()
.getType()
@@ -297,7 +297,7 @@ static Value createLinalgPayloadForReduceOp(OpBuilder &b, Location loc,
convertScalarToDtype(b, loc, payloadArgs[0], resultElementType);
Value result = payloadArgs[1];
if (resultElementType.isa<mlir::FloatType>())
-return b.create<arith::MinFOp>(loc, self, result);
+return b.create<arith::MinimumFOp>(loc, self, result);
else if (resultElementType.isa<mlir::IntegerType>()) {
IntegerType intType = min.getSelf()
.getType()
4 changes: 2 additions & 2 deletions lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp
Expand Up @@ -1332,15 +1332,15 @@ class ConvertAtenScatterReduceTwoOp
if (update.getType().isa<mlir::IntegerType>()) {
result = b.create<arith::MaxSIOp>(loc, update, current);
} else if (update.getType().isa<mlir::FloatType>()) {
-result = b.create<arith::MaxFOp>(loc, update, current);
+result = b.create<arith::MaximumFOp>(loc, update, current);
} else {
llvm_unreachable("Only integer/float types supported!");
}
} else if (reduceEnum == torch_upstream::ReductionType::MIN) {
if (update.getType().isa<mlir::IntegerType>()) {
result = b.create<arith::MinSIOp>(loc, update, current);
} else if (update.getType().isa<mlir::FloatType>()) {
-result = b.create<arith::MinFOp>(loc, update, current);
+result = b.create<arith::MinimumFOp>(loc, update, current);
} else {
llvm_unreachable("Only integer/float types supported!");
}
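For reference, the dispatch pattern that recurs in the hunks above (signed-integer ops for integer element types, the renamed NaN-propagating ops for floats) condenses to something like the following sketch; the helper name buildMaxReduce is illustrative, not code from this commit:

```cpp
// Condensed sketch of the int/float dispatch used in the reduce payloads
// above, updated to the renamed float op. Helper name is hypothetical.
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "llvm/Support/ErrorHandling.h"

using namespace mlir;

static Value buildMaxReduce(OpBuilder &b, Location loc, Value update,
                            Value current) {
  if (update.getType().isa<IntegerType>())
    return b.create<arith::MaxSIOp>(loc, update, current);    // signed int max
  if (update.getType().isa<FloatType>())
    return b.create<arith::MaximumFOp>(loc, update, current); // NaN-propagating
  llvm_unreachable("Only integer/float types supported!");
}
```

The MIN branch is symmetric, using arith::MinSIOp and arith::MinimumFOp as in the diff.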
