Skip to content

Commit 8d563af

Browse files
authored
Drop revert on LLVM c0b42ec05344707d94805ec795a7bc8d33a09594 (#4368)
This is a follow-up to #4358. Remove the revert by fixing all deprecated usages of `OpBuilder::create`, as requested in llvm/llvm-project@c0b42ec --------- Signed-off-by: Yu-Zhewen <[email protected]>
1 parent e530dca commit 8d563af

File tree

60 files changed

+10445
-10172
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

60 files changed

+10445
-10172
lines changed

.gitmodules

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[submodule "externals/llvm-project"]
22
path = externals/llvm-project
3-
url = https://github.com/iree-org/llvm-project.git
3+
url = https://github.com/llvm/llvm-project.git
44
[submodule "externals/stablehlo"]
55
path = externals/stablehlo
66
url = https://github.com/openxla/stablehlo.git

include/torch-mlir/Conversion/TorchOnnxToTorch/Utils.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,8 +44,8 @@ Torch::ValueTensorType getQTorchTypeFromTorchIntType(Type ty);
4444
template <typename T>
4545
Value getItemOp(OpBinder binder, ConversionPatternRewriter &rewriter,
4646
Value &ofItem) {
47-
return rewriter.create<Torch::AtenItemOp>(binder.getLoc(),
48-
rewriter.getType<T>(), ofItem);
47+
return Torch::AtenItemOp::create(rewriter, binder.getLoc(),
48+
rewriter.getType<T>(), ofItem);
4949
}
5050

5151
LogicalResult OnnxLstmExpander(OpBinder binder,

lib/Conversion/TorchConversionToMLProgram/TorchConversionToMLProgram.cpp

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,8 @@ static LogicalResult getOrCreateGlobalVariableForSeed(OpBuilder &b,
4040
}
4141

4242
b.setInsertionPointToStart(module.getBody());
43-
b.create<ml_program::GlobalOp>(
44-
UnknownLoc::get(b.getContext()),
43+
ml_program::GlobalOp::create(
44+
b, UnknownLoc::get(b.getContext()),
4545
/*sym_name=*/getSeedGobalVarName(),
4646
/*type=*/tensorType,
4747
/*is_mutable=*/true,
@@ -71,25 +71,25 @@ class ConvertGetNextSeedOp : public OpConversionPattern<GetNextSeedOp> {
7171
// Refer to https://en.wikipedia.org/wiki/Linear_congruential_generator.
7272
// Get the current seed value.
7373
auto tensorType = RankedTensorType::get({}, rewriter.getI64Type());
74-
Value globalVar = rewriter.create<ml_program::GlobalLoadOp>(
75-
loc, tensorType,
74+
Value globalVar = ml_program::GlobalLoadOp::create(
75+
rewriter, loc, tensorType,
7676
SymbolRefAttr::get(op->getContext(), getSeedGobalVarName()));
77-
Value currentSeed = rewriter.create<tensor::ExtractOp>(loc, globalVar);
77+
Value currentSeed = tensor::ExtractOp::create(rewriter, loc, globalVar);
7878

7979
// The value of multiplier and incrementStep are referenced from
8080
// https://en.wikipedia.org/wiki/Linear_congruential_generator for 2^64.
81-
Value multiplier = rewriter.create<arith::ConstantOp>(
82-
loc, rewriter.getI64IntegerAttr(6364136223846793005));
83-
Value incrementStep = rewriter.create<arith::ConstantOp>(
84-
loc, rewriter.getI64IntegerAttr(1442695040888963407));
81+
Value multiplier = arith::ConstantOp::create(
82+
rewriter, loc, rewriter.getI64IntegerAttr(6364136223846793005));
83+
Value incrementStep = arith::ConstantOp::create(
84+
rewriter, loc, rewriter.getI64IntegerAttr(1442695040888963407));
8585
// temp = multiplier * currentSeed + incrementStep
86-
Value mul = rewriter.create<arith::MulIOp>(loc, currentSeed, multiplier);
87-
Value seed = rewriter.create<arith::AddIOp>(loc, mul, incrementStep);
86+
Value mul = arith::MulIOp::create(rewriter, loc, currentSeed, multiplier);
87+
Value seed = arith::AddIOp::create(rewriter, loc, mul, incrementStep);
8888
globalVar =
89-
rewriter.create<tensor::InsertOp>(loc, seed, globalVar, ValueRange());
90-
rewriter.create<ml_program::GlobalStoreOp>(
91-
loc, SymbolRefAttr::get(op->getContext(), getSeedGobalVarName()),
92-
globalVar);
89+
tensor::InsertOp::create(rewriter, loc, seed, globalVar, ValueRange());
90+
ml_program::GlobalStoreOp::create(
91+
rewriter, loc,
92+
SymbolRefAttr::get(op->getContext(), getSeedGobalVarName()), globalVar);
9393
rewriter.replaceOp(op, seed);
9494
return success();
9595
}

lib/Conversion/TorchOnnxToTorch/ComMicrosoftDomain.cpp

Lines changed: 218 additions & 217 deletions
Large diffs are not rendered by default.

lib/Conversion/TorchOnnxToTorch/DefaultDomainAtoF.cpp

Lines changed: 683 additions & 645 deletions
Large diffs are not rendered by default.

lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp

Lines changed: 927 additions & 892 deletions
Large diffs are not rendered by default.

lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp

Lines changed: 1086 additions & 1008 deletions
Large diffs are not rendered by default.

lib/Conversion/TorchOnnxToTorch/OnnxRecurrentLayerOpExpanders.cpp

Lines changed: 252 additions & 247 deletions
Large diffs are not rendered by default.

lib/Conversion/TorchOnnxToTorch/Utils.cpp

Lines changed: 23 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,11 @@ Value mlir::torch::onnx_c::createConstantIntList(
2020
ArrayRef<int64_t> cstInput) {
2121
SmallVector<Value> cstValue;
2222
for (int64_t i : cstInput) {
23-
cstValue.push_back(rewriter.create<Torch::ConstantIntOp>(
24-
binder.getLoc(), rewriter.getI64IntegerAttr(i)));
23+
cstValue.push_back(Torch::ConstantIntOp::create(
24+
rewriter, binder.getLoc(), rewriter.getI64IntegerAttr(i)));
2525
}
26-
return rewriter.create<Torch::PrimListConstructOp>(
27-
binder.getLoc(),
26+
return Torch::PrimListConstructOp::create(
27+
rewriter, binder.getLoc(),
2828
Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
2929
cstValue);
3030
}
@@ -109,12 +109,12 @@ LogicalResult mlir::torch::onnx_c::createTorchTransposeOp(
109109
if (failed(getTransposedType(cast<Torch::BaseTensorType>(input.getType()),
110110
dimA, dimB, transposedType)))
111111
return failure();
112-
Value cstDimA = rewriter.create<Torch::ConstantIntOp>(
113-
loc, rewriter.getI64IntegerAttr(dimA));
114-
Value cstDimB = rewriter.create<Torch::ConstantIntOp>(
115-
loc, rewriter.getI64IntegerAttr(dimB));
116-
transposed = rewriter.create<Torch::AtenTransposeIntOp>(
117-
loc, transposedType, input, cstDimA, cstDimB);
112+
Value cstDimA = Torch::ConstantIntOp::create(
113+
rewriter, loc, rewriter.getI64IntegerAttr(dimA));
114+
Value cstDimB = Torch::ConstantIntOp::create(
115+
rewriter, loc, rewriter.getI64IntegerAttr(dimB));
116+
transposed = Torch::AtenTransposeIntOp::create(rewriter, loc, transposedType,
117+
input, cstDimA, cstDimB);
118118
return success();
119119
}
120120

@@ -127,19 +127,19 @@ LogicalResult mlir::torch::onnx_c::createTorchPermuteOp(
127127
permuteDims, permutedType)))
128128
return failure();
129129
Value permuteDimsList = createConstantIntList(binder, rewriter, permuteDims);
130-
permuted = rewriter.create<Torch::AtenPermuteOp>(loc, permutedType, input,
131-
permuteDimsList);
130+
permuted = Torch::AtenPermuteOp::create(rewriter, loc, permutedType, input,
131+
permuteDimsList);
132132
return success();
133133
}
134134

135135
Value mlir::torch::onnx_c::createActivationByName(ImplicitLocOpBuilder &b,
136136
StringRef name, Value input) {
137137
if (name == "Sigmoid")
138-
return b.create<Torch::AtenSigmoidOp>(input.getType(), input);
138+
return Torch::AtenSigmoidOp::create(b, input.getType(), input);
139139
if (name == "Tanh")
140-
return b.create<Torch::AtenTanhOp>(input.getType(), input);
140+
return Torch::AtenTanhOp::create(b, input.getType(), input);
141141
if (name == "Relu")
142-
return b.create<Torch::AtenReluOp>(input.getType(), input);
142+
return Torch::AtenReluOp::create(b, input.getType(), input);
143143
llvm_unreachable("Unsupported activation function");
144144
}
145145

@@ -158,23 +158,23 @@ LogicalResult mlir::torch::onnx_c::extractPerTensorQuantizationArguments(
158158
if (!check(inScale) || !check(inZeroPoint))
159159
return failure();
160160

161-
Value emptyList = rewriter.create<Torch::PrimListConstructOp>(
162-
loc,
161+
Value emptyList = Torch::PrimListConstructOp::create(
162+
rewriter, loc,
163163
rewriter.getType<Torch::ListType>(rewriter.getType<Torch::IntType>()),
164164
ValueRange{});
165165
auto extract = [&rewriter, &loc, &emptyList](Value v) {
166166
auto vTy = cast<Torch::ValueTensorType>(v.getType());
167167
if (!vTy.getSizes().empty()) {
168168
vTy = rewriter.getType<Torch::ValueTensorType>(ArrayRef<int64_t>({}),
169169
vTy.getOptionalDtype());
170-
v = rewriter.create<Torch::AtenReshapeOp>(loc, vTy, v, emptyList);
170+
v = Torch::AtenReshapeOp::create(rewriter, loc, vTy, v, emptyList);
171171
}
172172

173173
Type extractTy = rewriter.getType<Torch::FloatType>();
174174
if (isa<IntegerType>(vTy.getDtype()))
175175
extractTy = rewriter.getType<Torch::IntType>();
176176

177-
return rewriter.create<Torch::AtenItemOp>(loc, extractTy, v);
177+
return Torch::AtenItemOp::create(rewriter, loc, extractTy, v);
178178
};
179179

180180
outScale = extract(inScale);
@@ -191,14 +191,13 @@ LogicalResult mlir::torch::onnx_c::createDequantizeTensor(
191191
return failure();
192192

193193
Torch::ValueTensorType makeTensorTy = getQTorchTypeFromTorchIntType(inputTy);
194-
Value quantizedInput =
195-
rewriter.create<Torch::Aten_MakePerTensorQuantizedTensorOp>(
196-
loc, makeTensorTy, input, scale, zeroPoint);
194+
Value quantizedInput = Torch::Aten_MakePerTensorQuantizedTensorOp::create(
195+
rewriter, loc, makeTensorTy, input, scale, zeroPoint);
197196

198197
Torch::ValueTensorType resultTy = rewriter.getType<Torch::ValueTensorType>(
199198
inputTy.getSizes(), rewriter.getF32Type());
200-
output = rewriter.create<Torch::AtenDequantizeSelfOp>(loc, resultTy,
201-
quantizedInput);
199+
output = Torch::AtenDequantizeSelfOp::create(rewriter, loc, resultTy,
200+
quantizedInput);
202201

203202
return success();
204203
}

0 commit comments

Comments
 (0)