Skip to content

Commit af6366a

Browse files
authored
[plugins][NFC] Upgrade plugins/ to free create functions. (#21973)
The builder create methods are deprecated: https://mlir.llvm.org/deprecation/. See https://discourse.llvm.org/t/psa-opty-create-now-with-100-more-tab-complete/87339. The main benefit of free functions is better tab completion with LSP/IDE. Signed-off-by: hanhanW <[email protected]>
1 parent 25a0b88 commit af6366a

File tree

8 files changed: +265 additions, −269 deletions

8 files changed

+265
-269
lines changed

compiler/plugins/input/StableHLO/Conversion/MapStableHLOToScalarOp.h

Lines changed: 194 additions & 192 deletions
Large diffs are not rendered by default.

compiler/plugins/input/StableHLO/Conversion/Preprocessing/StableHLOToStableHLO.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,8 @@ DenseI64ArrayAttr make1DElementsAttr(OpBuilder &b, ArrayRef<int64_t> integers) {
4747
Value getF32Const(ImplicitLocOpBuilder b, ArrayRef<int64_t> shapes,
4848
ArrayRef<float> values) {
4949
RankedTensorType ty = RankedTensorType::get(shapes, b.getF32Type());
50-
return b
51-
.create<mlir::stablehlo::ConstantOp>(DenseFPElementsAttr::get(ty, values))
50+
return mlir::stablehlo::ConstantOp::create(
51+
b, DenseFPElementsAttr::get(ty, values))
5252
.getResult();
5353
}
5454

compiler/plugins/input/StableHLO/Conversion/StableHLOCustomCalls.cpp

Lines changed: 39 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -68,45 +68,45 @@ static Value computeHouseholder(Value v, Value tau, Value k,
6868
Value one =
6969
arith::ConstantOp::create(b, b.getFloatAttr(vTy.getElementType(), 1.0));
7070

71-
return b
72-
.create<linalg::GenericOp>(
73-
hTy, ValueRange{v, v}, empty, affineMaps, iterTypes,
74-
[&](OpBuilder &bb, Location loc, ValueRange args) {
75-
ImplicitLocOpBuilder b(loc, bb);
76-
SmallVector<Value> indices;
77-
for (int i = 0, s = hTy.getRank(); i < s; ++i) {
78-
indices.push_back(linalg::IndexOp::create(b, loc, i));
79-
}
80-
81-
SmallVector<Value> tauIndices(indices.begin(), indices.end() - 2);
82-
tauIndices.push_back(k);
83-
Value t = tensor::ExtractOp::create(b, tau, tauIndices);
84-
85-
// Generates the lower triangularization of the matrix with
86-
// one values on the diagonal.
87-
auto tri = [&](Value v, Value i) {
88-
Value eq =
89-
arith::CmpIOp::create(b, arith::CmpIPredicate::eq, i, k);
90-
Value lt =
91-
arith::CmpIOp::create(b, arith::CmpIPredicate::ult, i, k);
92-
Value sel = arith::SelectOp::create(b, eq, one, v);
93-
return arith::SelectOp::create(b, lt, zero, sel);
94-
};
95-
96-
Value v = tri(args[0], indices[indices.size() - 2]);
97-
Value vT = tri(args[1], indices[indices.size() - 1]);
98-
99-
Value h = arith::MulFOp::create(b, v, vT);
100-
h = arith::MulFOp::create(b, h, t);
101-
102-
Value isDiag = arith::CmpIOp::create(b, arith::CmpIPredicate::eq,
103-
indices[indices.size() - 2],
104-
indices[indices.size() - 1]);
105-
Value diag = arith::SelectOp::create(b, isDiag, one, zero);
106-
Value sub = arith::SubFOp::create(b, diag, h);
107-
108-
linalg::YieldOp::create(b, sub);
109-
})
71+
return linalg::GenericOp::create(
72+
b, hTy, ValueRange{v, v}, empty, affineMaps, iterTypes,
73+
[&](OpBuilder &bb, Location loc, ValueRange args) {
74+
ImplicitLocOpBuilder b(loc, bb);
75+
SmallVector<Value> indices;
76+
for (int i = 0, s = hTy.getRank(); i < s; ++i) {
77+
indices.push_back(linalg::IndexOp::create(b, loc, i));
78+
}
79+
80+
SmallVector<Value> tauIndices(indices.begin(),
81+
indices.end() - 2);
82+
tauIndices.push_back(k);
83+
Value t = tensor::ExtractOp::create(b, tau, tauIndices);
84+
85+
// Generates the lower triangularization of the matrix with
86+
// one values on the diagonal.
87+
auto tri = [&](Value v, Value i) {
88+
Value eq =
89+
arith::CmpIOp::create(b, arith::CmpIPredicate::eq, i, k);
90+
Value lt =
91+
arith::CmpIOp::create(b, arith::CmpIPredicate::ult, i, k);
92+
Value sel = arith::SelectOp::create(b, eq, one, v);
93+
return arith::SelectOp::create(b, lt, zero, sel);
94+
};
95+
96+
Value v = tri(args[0], indices[indices.size() - 2]);
97+
Value vT = tri(args[1], indices[indices.size() - 1]);
98+
99+
Value h = arith::MulFOp::create(b, v, vT);
100+
h = arith::MulFOp::create(b, h, t);
101+
102+
Value isDiag = arith::CmpIOp::create(
103+
b, arith::CmpIPredicate::eq, indices[indices.size() - 2],
104+
indices[indices.size() - 1]);
105+
Value diag = arith::SelectOp::create(b, isDiag, one, zero);
106+
Value sub = arith::SubFOp::create(b, diag, h);
107+
108+
linalg::YieldOp::create(b, sub);
109+
})
110110
.getResult(0);
111111
}
112112

compiler/plugins/input/StableHLO/Conversion/StableHLOToIREEInputDialects.cpp

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -149,14 +149,14 @@ Value createLinalgMatmulOnTensors(OpBuilder b, Location loc,
149149

150150
switch (llvm::cast<RankedTensorType>(lhs.getType()).getRank()) {
151151
case 1:
152-
return b
153-
.create<linalg::VecmatOp>(loc, TypeRange{resultType},
154-
ValueRange{lhs, rhs}, ValueRange{zeroTensor})
152+
return linalg::VecmatOp::create(b, loc, TypeRange{resultType},
153+
ValueRange{lhs, rhs},
154+
ValueRange{zeroTensor})
155155
.getResult(0);
156156
case 2:
157-
return b
158-
.create<linalg::MatmulOp>(loc, TypeRange{resultType},
159-
ValueRange{lhs, rhs}, ValueRange{zeroTensor})
157+
return linalg::MatmulOp::create(b, loc, TypeRange{resultType},
158+
ValueRange{lhs, rhs},
159+
ValueRange{zeroTensor})
160160
.getResult(0);
161161
default:
162162
assert(false && "unhandled matmul type");
@@ -218,10 +218,9 @@ struct OptimizationBarrierOpConversion final
218218
ConversionPatternRewriter &rewriter) const override {
219219
SmallVector<Value> outputs;
220220
for (Value operand : adaptor.getOperands()) {
221-
outputs.push_back(
222-
rewriter
223-
.create<IREE::Util::OptimizationBarrierOp>(op.getLoc(), operand)
224-
.getResult(0));
221+
outputs.push_back(IREE::Util::OptimizationBarrierOp::create(
222+
rewriter, op.getLoc(), operand)
223+
.getResult(0));
225224
}
226225
rewriter.replaceOp(op, outputs);
227226
return success();
@@ -434,10 +433,9 @@ Value scalarToTensor(OpBuilder &builder, Type /*type*/, ValueRange inputs,
434433
if (isa<ShapedType>(inputs.front().getType())) {
435434
return Value();
436435
}
437-
return builder
438-
.create<tensor::FromElementsOp>(
439-
loc, RankedTensorType::get({}, inputs.front().getType()),
440-
inputs.front())
436+
return tensor::FromElementsOp::create(
437+
builder, loc, RankedTensorType::get({}, inputs.front().getType()),
438+
inputs.front())
441439
.getResult();
442440
}
443441

compiler/plugins/input/StableHLO/Conversion/StableHLOToLinalgExt.cpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -507,14 +507,14 @@ struct ScanOpConversion final
507507
SmallVector<utils::IteratorType> iterators(initDims.size(),
508508
utils::IteratorType::parallel);
509509

510-
newInit = rewriter
511-
.create<linalg::GenericOp>(
512-
op.getLoc(), init0Ty.clone(initDims), ValueRange{init0},
513-
ValueRange{newInit}, indexingMaps, iterators,
514-
[&](OpBuilder &b, Location loc, ValueRange args) {
515-
linalg::YieldOp::create(b, loc, args[0]);
516-
})
517-
.getResult(0);
510+
newInit =
511+
linalg::GenericOp::create(
512+
rewriter, op.getLoc(), init0Ty.clone(initDims), ValueRange{init0},
513+
ValueRange{newInit}, indexingMaps, iterators,
514+
[&](OpBuilder &b, Location loc, ValueRange args) {
515+
linalg::YieldOp::create(b, loc, args[0]);
516+
})
517+
.getResult(0);
518518
outputs.push_back(newInit);
519519

520520
llvm::SmallVector<Type> outputTys;

compiler/plugins/input/TOSA/InputConversion/TosaToLinalgExt.cpp

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -95,11 +95,10 @@ class ScatterConversion : public OpRewritePattern<tosa::ScatterOp> {
9595
arith::IndexCastOp::create(b, indicesTy.getElementType(), index);
9696
linalg::YieldOp::create(b, cast.getResult());
9797
};
98-
batchIdx = builder
99-
.create<linalg::GenericOp>(indicesTy, indices, empty,
100-
indexingMaps, iterators,
101-
blockBuilder)
102-
.getResult(0);
98+
batchIdx =
99+
linalg::GenericOp::create(builder, indicesTy, indices, empty,
100+
indexingMaps, iterators, blockBuilder)
101+
.getResult(0);
103102
}
104103

105104
indicesTy = llvm::cast<RankedTensorType>(indicesTy.clone(

compiler/plugins/input/Torch/InputConversion/BindSymbolicShapes.cpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -358,9 +358,8 @@ class BindSymbolicShapesPass final
358358
/*umin=*/optionalUmin,
359359
/*umax=*/optionalUmax,
360360
/*divisor=*/optionalDivisor);
361-
dimValue = builder
362-
.create<IREE::Util::AssumeIntOp>(bindOp->getLoc(),
363-
dimValue, assumption)
361+
dimValue = IREE::Util::AssumeIntOp::create(builder, bindOp->getLoc(),
362+
dimValue, assumption)
364363
.getResult(0);
365364
}
366365

compiler/plugins/input/Torch/InputConversion/FuncConversion.cpp

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -531,12 +531,10 @@ void createCoarseFencesSyncWrapper(StringRef syncFunctionName,
531531
callOperands.push_back(waitFence);
532532
callOperands.push_back(signalFence);
533533
std::optional<ArrayAttr> targetTiedOperands = asyncFuncOp.getTiedOperands();
534-
auto callResults =
535-
rewriter
536-
.create<IREE::Util::CallOp>(loc, asyncFuncOp, callOperands,
537-
targetTiedOperands ? *targetTiedOperands
538-
: ArrayAttr{})
539-
.getResults();
534+
auto callResults = IREE::Util::CallOp::create(
535+
rewriter, loc, asyncFuncOp, callOperands,
536+
targetTiedOperands ? *targetTiedOperands : ArrayAttr{})
537+
.getResults();
540538

541539
// Wait forever for signal.
542540
IREE::HAL::FenceAwaitOp::create(

Comments (0)