@@ -20,11 +20,11 @@ Value mlir::torch::onnx_c::createConstantIntList(
     ArrayRef<int64_t> cstInput) {
   SmallVector<Value> cstValue;
   for (int64_t i : cstInput) {
-    cstValue.push_back(rewriter.create<Torch::ConstantIntOp>(
-        binder.getLoc(), rewriter.getI64IntegerAttr(i)));
+    cstValue.push_back(Torch::ConstantIntOp::create(
+        rewriter, binder.getLoc(), rewriter.getI64IntegerAttr(i)));
   }
-  return rewriter.create<Torch::PrimListConstructOp>(
-      binder.getLoc(),
+  return Torch::PrimListConstructOp::create(
+      rewriter, binder.getLoc(),
       Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
       cstValue);
 }
@@ -109,12 +109,12 @@ LogicalResult mlir::torch::onnx_c::createTorchTransposeOp(
   if (failed(getTransposedType(cast<Torch::BaseTensorType>(input.getType()),
                                dimA, dimB, transposedType)))
     return failure();
-  Value cstDimA = rewriter.create<Torch::ConstantIntOp>(
-      loc, rewriter.getI64IntegerAttr(dimA));
-  Value cstDimB = rewriter.create<Torch::ConstantIntOp>(
-      loc, rewriter.getI64IntegerAttr(dimB));
-  transposed = rewriter.create<Torch::AtenTransposeIntOp>(
-      loc, transposedType, input, cstDimA, cstDimB);
+  Value cstDimA = Torch::ConstantIntOp::create(
+      rewriter, loc, rewriter.getI64IntegerAttr(dimA));
+  Value cstDimB = Torch::ConstantIntOp::create(
+      rewriter, loc, rewriter.getI64IntegerAttr(dimB));
+  transposed = Torch::AtenTransposeIntOp::create(rewriter, loc, transposedType,
+                                                 input, cstDimA, cstDimB);
   return success();
 }
 
@@ -127,19 +127,19 @@ LogicalResult mlir::torch::onnx_c::createTorchPermuteOp(
                                  permuteDims, permutedType)))
     return failure();
   Value permuteDimsList = createConstantIntList(binder, rewriter, permuteDims);
-  permuted = rewriter.create<Torch::AtenPermuteOp>(loc, permutedType, input,
-                                                   permuteDimsList);
+  permuted = Torch::AtenPermuteOp::create(rewriter, loc, permutedType, input,
+                                          permuteDimsList);
   return success();
 }
 
 Value mlir::torch::onnx_c::createActivationByName(ImplicitLocOpBuilder &b,
                                                   StringRef name, Value input) {
   if (name == "Sigmoid")
-    return b.create<Torch::AtenSigmoidOp>(input.getType(), input);
+    return Torch::AtenSigmoidOp::create(b, input.getType(), input);
   if (name == "Tanh")
-    return b.create<Torch::AtenTanhOp>(input.getType(), input);
+    return Torch::AtenTanhOp::create(b, input.getType(), input);
   if (name == "Relu")
-    return b.create<Torch::AtenReluOp>(input.getType(), input);
+    return Torch::AtenReluOp::create(b, input.getType(), input);
   llvm_unreachable("Unsupported activation function");
 }
 
@@ -158,23 +158,23 @@ LogicalResult mlir::torch::onnx_c::extractPerTensorQuantizationArguments(
   if (!check(inScale) || !check(inZeroPoint))
     return failure();
 
-  Value emptyList = rewriter.create<Torch::PrimListConstructOp>(
-      loc,
+  Value emptyList = Torch::PrimListConstructOp::create(
+      rewriter, loc,
       rewriter.getType<Torch::ListType>(rewriter.getType<Torch::IntType>()),
       ValueRange{});
   auto extract = [&rewriter, &loc, &emptyList](Value v) {
     auto vTy = cast<Torch::ValueTensorType>(v.getType());
     if (!vTy.getSizes().empty()) {
       vTy = rewriter.getType<Torch::ValueTensorType>(ArrayRef<int64_t>({}),
                                                      vTy.getOptionalDtype());
-      v = rewriter.create<Torch::AtenReshapeOp>(loc, vTy, v, emptyList);
+      v = Torch::AtenReshapeOp::create(rewriter, loc, vTy, v, emptyList);
     }
 
     Type extractTy = rewriter.getType<Torch::FloatType>();
     if (isa<IntegerType>(vTy.getDtype()))
       extractTy = rewriter.getType<Torch::IntType>();
 
-    return rewriter.create<Torch::AtenItemOp>(loc, extractTy, v);
+    return Torch::AtenItemOp::create(rewriter, loc, extractTy, v);
   };
 
   outScale = extract(inScale);
@@ -191,14 +191,13 @@ LogicalResult mlir::torch::onnx_c::createDequantizeTensor(
     return failure();
 
   Torch::ValueTensorType makeTensorTy = getQTorchTypeFromTorchIntType(inputTy);
-  Value quantizedInput =
-      rewriter.create<Torch::Aten_MakePerTensorQuantizedTensorOp>(
-          loc, makeTensorTy, input, scale, zeroPoint);
+  Value quantizedInput = Torch::Aten_MakePerTensorQuantizedTensorOp::create(
+      rewriter, loc, makeTensorTy, input, scale, zeroPoint);
 
   Torch::ValueTensorType resultTy = rewriter.getType<Torch::ValueTensorType>(
       inputTy.getSizes(), rewriter.getF32Type());
-  output = rewriter.create<Torch::AtenDequantizeSelfOp>(loc, resultTy,
-                                                        quantizedInput);
+  output = Torch::AtenDequantizeSelfOp::create(rewriter, loc, resultTy,
+                                               quantizedInput);
 
   return success();
 }