10 changes: 5 additions & 5 deletions mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp
@@ -74,12 +74,12 @@ struct CreatorOpShardingInterface
         if (!oldType.isDynamicDim(i) && shardType.isDynamicDim(i)) {
           if (!newSharding) {
             newSharding =
-                builder.create<ShardingOp>(op->getLoc(), resultShardings[0]);
+                ShardingOp::create(builder, op->getLoc(), resultShardings[0]);
             device =
-                builder.create<mesh::ProcessMultiIndexOp>(op->getLoc(), mesh)
+                mesh::ProcessMultiIndexOp::create(builder, op->getLoc(), mesh)
                     .getResults();
-            shapeForDevice = builder.create<mesh::ShardShapeOp>(
-                op->getLoc(), oldType.getShape(), spmdizedOperands,
+            shapeForDevice = mesh::ShardShapeOp::create(
+                builder, op->getLoc(), oldType.getShape(), spmdizedOperands,
                 newSharding->getResult(0), device);
           }
           newOperands.emplace_back(shapeForDevice.getResult()[i]);
@@ -88,7 +88,7 @@ struct CreatorOpShardingInterface
           newOperands.emplace_back(spmdizedOperands[++currOldOprndNum]);
         }
       }
-      newOp = builder.create<OpTy>(op->getLoc(), shardType, newOperands);
+      newOp = OpTy::create(builder, op->getLoc(), shardType, newOperands);
       spmdizationMap.map(op->getResult(0), newOp->getResult(0));
     } else {
       // `clone` will populate the mapping of old to new results.
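
The change applied throughout this PR is mechanical: every `builder.create<SomeOp>(loc, ...)` call becomes the static `SomeOp::create(builder, loc, ...)` form, with the builder passed as the first argument and all remaining arguments unchanged. A minimal standalone sketch of the two spellings, modeled on the `tensor::CastOp` materializations further down (the wrapper function names are illustrative, not part of the patch):

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Old spelling: the OpBuilder member template.
static Value castOldStyle(OpBuilder &builder, Location loc, Type resultType,
                          Value input) {
  return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
}

// New spelling: the static create hook on the op class; the builder comes
// first, the remaining arguments are the same as before.
static Value castNewStyle(OpBuilder &builder, Location loc, Type resultType,
                          Value input) {
  return tensor::CastOp::create(builder, loc, resultType, input).getResult();
}
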
138 changes: 71 additions & 67 deletions mlir/lib/Dialect/Tensor/IR/TensorOps.cpp

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -207,13 +207,13 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     if (isZeroInteger(newLength)) {
       hasZeroLen = true;
     } else if (!hasZeroLen) {
-      Value check = b.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::eq,
+      Value check = arith::CmpIOp::create(
+          b, loc, arith::CmpIPredicate::eq,
           getValueOrCreateConstantIndexOp(b, loc, newLength),
           getValueOrCreateConstantIndexOp(b, loc, zero));
       dynHasZeroLenCond =
           dynHasZeroLenCond
-              ? b.create<arith::OrIOp>(loc, check, dynHasZeroLenCond)
+              ? arith::OrIOp::create(b, loc, check, dynHasZeroLenCond)
               : check;
     }
 
@@ -237,18 +237,18 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   auto castResult = [&](Value val) -> Value {
     if (resultType == val.getType())
       return val;
-    return b.create<tensor::CastOp>(loc, resultType, val);
+    return tensor::CastOp::create(b, loc, resultType, val);
   };
 
   // In cases where the original data source is unused: Emit a GenerateOp and
   // do not generate a SliceOp. (The result shape of the SliceOp would
   // have a dimension of size 0, the semantics of which is unclear.)
   auto createGenerateOp = [&]() {
     // Create GenerateOp.
-    auto generateOp = b.create<tensor::GenerateOp>(
-        loc, resultType, dynDims,
+    auto generateOp = tensor::GenerateOp::create(
+        b, loc, resultType, dynDims,
         [&](OpBuilder &builder, Location gLoc, ValueRange indices) {
-          builder.create<tensor::YieldOp>(gLoc, padValue);
+          tensor::YieldOp::create(builder, gLoc, padValue);
         });
     return generateOp;
   };
@@ -257,10 +257,10 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   // the result shape of the new SliceOp has a zero dimension.
   auto createPadOfExtractSlice = [&]() {
     // Create pad(extract_slice(x)).
-    auto newSliceOp = b.create<tensor::ExtractSliceOp>(
-        loc, padOp.getSource(), newOffsets, newLengths, newStrides);
-    auto newPadOp = b.create<PadOp>(
-        loc, Type(), newSliceOp, newLows, newHighs,
+    auto newSliceOp = tensor::ExtractSliceOp::create(
+        b, loc, padOp.getSource(), newOffsets, newLengths, newStrides);
+    auto newPadOp = PadOp::create(
+        b, loc, Type(), newSliceOp, newLows, newHighs,
         /*nofold=*/padOp.getNofold(),
         getPrunedAttributeList(padOp, PadOp::getAttributeNames()));
 
@@ -287,17 +287,17 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
     Operation *thenOp;
     Operation *elseOp;
     Operation *sliceOp;
-    auto result = b.create<scf::IfOp>(
-        loc, dynHasZeroLenCond,
+    auto result = scf::IfOp::create(
+        b, loc, dynHasZeroLenCond,
         /*thenBuilder=*/
         [&](OpBuilder &b, Location loc) {
           thenOp = createGenerateOp();
-          b.create<scf::YieldOp>(loc, castResult(thenOp->getResult(0)));
+          scf::YieldOp::create(b, loc, castResult(thenOp->getResult(0)));
         },
         /*elseBuilder=*/
         [&](OpBuilder &b, Location loc) {
           std::tie(elseOp, sliceOp) = createPadOfExtractSlice();
-          b.create<scf::YieldOp>(loc, castResult(elseOp->getResult(0)));
+          scf::YieldOp::create(b, loc, castResult(elseOp->getResult(0)));
         });
     return TilingResult{
         {elseOp}, SmallVector<Value>(result->getResults()), {sliceOp}};
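
The same rewrite carries into region-builder callbacks, as in the scf.if then/else builders above: the nested lambdas now build their terminators with the static form as well. A self-contained sketch under that assumption (the helper name and yielded values are illustrative):

#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Builds `scf.if %cond { yield %thenVal } else { yield %elseVal }` using the
// static create form both for the scf.if itself and for the yields created
// inside its region builders.
static scf::IfOp buildSelect(OpBuilder &b, Location loc, Value cond,
                             Value thenVal, Value elseVal) {
  return scf::IfOp::create(
      b, loc, cond,
      /*thenBuilder=*/
      [&](OpBuilder &b, Location loc) {
        scf::YieldOp::create(b, loc, thenVal);
      },
      /*elseBuilder=*/
      [&](OpBuilder &b, Location loc) {
        scf::YieldOp::create(b, loc, elseVal);
      });
}
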
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Tensor/TransformOps/TensorTransformOps.cpp
@@ -165,7 +165,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
   converter.addTargetMaterialization([](OpBuilder &builder, Type resultType,
                                         ValueRange inputs,
@@ -177,7 +177,7 @@ void transform::TypeConversionCastShapeDynamicDimsOp::
     if (!tensor::CastOp::areCastCompatible(input.getType(), resultType)) {
       return Value();
     }
-    return builder.create<tensor::CastOp>(loc, resultType, input).getResult();
+    return tensor::CastOp::create(builder, loc, resultType, input).getResult();
   });
 }
 
@@ -222,8 +222,8 @@ struct CollapseShapeOpInterface
           MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                           collapseShapeOp.getSrcType().getElementType(),
                           AffineMap(), bufferType.getMemorySpace());
-      buffer = rewriter.create<bufferization::ToBufferOp>(
-          op->getLoc(), memrefType, *tensorAlloc);
+      buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                 memrefType, *tensorAlloc);
     }
 
     // Result type is inferred by the builder.
@@ -349,8 +349,8 @@ struct ExpandShapeOpInterface
     if (failed(buffer))
       return failure();
 
-    auto memrefExpandShape = rewriter.create<memref::ExpandShapeOp>(
-        op->getLoc(), tensorResultType.getShape(), *buffer,
+    auto memrefExpandShape = memref::ExpandShapeOp::create(
+        rewriter, op->getLoc(), tensorResultType.getShape(), *buffer,
         expandShapeOp.getReassociationIndices(),
         expandShapeOp.getMixedOutputShape());
     replaceOpWithBufferizedValues(rewriter, op,
@@ -398,8 +398,8 @@ struct ExtractSliceOpInterface
         extractSliceOp.getResult(), options, state);
     if (failed(resultMemrefType))
       return failure();
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
+    Value subView = memref::SubViewOp::create(
+        rewriter, loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
         mixedOffsets, mixedSizes, mixedStrides);
 
     replaceOpWithBufferizedValues(rewriter, op, subView);
@@ -469,7 +469,7 @@ static void createStores(RewriterBase &rewriter, Location loc, int dim,
   if (dim == static_cast<int>(shape.size()) - 1) {
     for (int i = 0; i < shape.back(); ++i) {
       indices.back() = constants[i];
-      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
+      memref::StoreOp::create(rewriter, loc, *elementIt, buffer, indices);
       ++elementIt;
     }
     return;
@@ -507,8 +507,8 @@ struct FromElementsOpInterface
         bufferization::getBufferType(*tensorAlloc, options, state);
     if (failed(memrefType))
       return failure();
-    Value buffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), *memrefType, *tensorAlloc);
+    Value buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+                                                     *memrefType, *tensorAlloc);
 
     // Case: tensor<0xelem_type>.
     if (fromElementsOp.getElements().empty()) {
@@ -518,8 +518,8 @@
 
     // Case: tensor<elem_type>.
     if (shape.empty()) {
-      rewriter.create<memref::StoreOp>(
-          loc, fromElementsOp.getElements().front(), buffer);
+      memref::StoreOp::create(rewriter, loc,
+                              fromElementsOp.getElements().front(), buffer);
       replaceOpWithBufferizedValues(rewriter, op, buffer);
       return success();
     }
@@ -529,7 +529,7 @@
     SmallVector<Value, 2> constants;
     constants.reserve(maxDim);
     for (int i = 0; i < maxDim; ++i)
-      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));
+      constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));
 
     // Traverse all `elements` and create `memref.store` ops.
     auto elementIt = fromElementsOp.getElements().begin();
@@ -576,15 +576,15 @@ static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
   // Create linalg::MapOp.
   OpBuilder::InsertionGuard g(rewriter);
   auto linalgOp =
-      rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                     /*init=*/tensorDestination);
+      linalg::MapOp::create(rewriter, loc, tensorType, /*inputs=*/ValueRange(),
+                            /*init=*/tensorDestination);
   Block &linalgBody = linalgOp.getMapper().emplaceBlock();
 
   // Create linalg::IndexOps.
   rewriter.setInsertionPointToStart(&linalgBody);
   SmallVector<Value> indices;
   for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
-    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));
+    indices.push_back(linalg::IndexOp::create(rewriter, loc, dim));
 
   // Move over body.
   rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
@@ -644,8 +644,8 @@ struct InsertOpInterface
         getBuffer(rewriter, insertOp.getDest(), options, state);
     if (failed(destMemref))
       return failure();
-    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
-                                     *destMemref, insertOp.getIndices());
+    memref::StoreOp::create(rewriter, insertOp.getLoc(), insertOp.getScalar(),
+                            *destMemref, insertOp.getIndices());
     replaceOpWithBufferizedValues(rewriter, op, *destMemref);
     return success();
   }
@@ -713,9 +713,9 @@ struct InsertSliceOpInterface
         memref::SubViewOp::inferRankReducedResultType(
             insertSliceOp.getSourceType().getShape(), dstMemrefType,
             mixedOffsets, mixedSizes, mixedStrides);
-    Value subView = rewriter.create<memref::SubViewOp>(
-        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
-        mixedStrides);
+    Value subView =
+        memref::SubViewOp::create(rewriter, loc, subviewMemRefType, *dstMemref,
+                                  mixedOffsets, mixedSizes, mixedStrides);
 
     // Copy tensor. If this tensor.insert_slice has a matching
     // tensor.extract_slice, the copy operation will eventually fold away.
@@ -796,14 +796,14 @@ struct PadOpInterface
     for (int64_t i = 0; i < resultType.getRank(); ++i) {
       if (!resultType.isDynamicDim(i))
         continue;
-      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
+      Value srcDim = tensor::DimOp::create(rewriter, loc, padOp.getSource(), i);
       Value lowPad = toValue(mixedLowPad[i]);
       Value highPad = toValue(mixedHighPad[i]);
       AffineExpr s0, s1, s2;
       bindSymbols(op->getContext(), s0, s1, s2);
       AffineExpr sumExpr = s0 + s1 + s2;
-      Value sum = rewriter.create<affine::AffineApplyOp>(
-          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
+      Value sum = affine::AffineApplyOp::create(
+          rewriter, loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
       dynamicSizes.push_back(sum);
     }
 
@@ -995,9 +995,9 @@ struct ParallelInsertSliceOpInterface
             parallelInsertSliceOp.getMixedOffsets(),
             parallelInsertSliceOp.getMixedSizes(),
             parallelInsertSliceOp.getMixedStrides());
-    Value subview = rewriter.create<memref::SubViewOp>(
-        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
-        parallelInsertSliceOp.getMixedOffsets(),
+    Value subview = memref::SubViewOp::create(
+        rewriter, parallelInsertSliceOp.getLoc(), subviewMemRefType,
+        *destBuffer, parallelInsertSliceOp.getMixedOffsets(),
         parallelInsertSliceOp.getMixedSizes(),
         parallelInsertSliceOp.getMixedStrides());
 
@@ -1065,14 +1065,14 @@ struct SplatOpInterface
     if (options.defaultMemorySpaceFn(tensorType) != Attribute())
       return op->emitError("memory space not implemented yet");
 
-    auto linalgOp =
-        rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
-                                       /*init=*/*tensorAlloc);
+    auto linalgOp = linalg::MapOp::create(rewriter, loc, tensorType,
+                                          /*inputs=*/ValueRange(),
+                                          /*init=*/*tensorAlloc);
     Block &linalgBody = linalgOp.getMapper().emplaceBlock();
 
     // Create linalg::IndexOps.
     rewriter.setInsertionPointToStart(&linalgBody);
-    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
+    linalg::YieldOp::create(rewriter, loc, splatOp.getInput());
     rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);
 
     return success();
@@ -1126,8 +1126,8 @@ struct ConcatOpInterface
     MemRefType memrefType =
         MemRefType::get(concatOp.getResultType().getShape(),
                         concatOp.getResultType().getElementType(), layout);
-    Value dstBuffer = rewriter.create<bufferization::ToBufferOp>(
-        op->getLoc(), memrefType, *tensorAlloc);
+    Value dstBuffer = bufferization::ToBufferOp::create(
+        rewriter, op->getLoc(), memrefType, *tensorAlloc);
 
     // Extract the dimension for the concat op
     uint64_t concatDim = concatOp.getDim();
@@ -1142,7 +1142,7 @@ struct ConcatOpInterface
     for (const auto &[dimIdx, dimSize] :
          llvm::enumerate(tensorType.getShape())) {
       if (dimSize == ShapedType::kDynamic) {
-        auto dimOp = rewriter.create<memref::DimOp>(loc, dstBuffer, dimIdx);
+        auto dimOp = memref::DimOp::create(rewriter, loc, dstBuffer, dimIdx);
         sizes.push_back(dimOp.getResult());
         if (dimIdx == concatDim)
           dynamicConcatDim = true;
@@ -1157,7 +1157,7 @@ struct ConcatOpInterface
     if (dynamicConcatDim) {
       // One or more operands have dynamic size, so we must accumulate the
       // offset with arith ops.
-      dynamicOffset = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+      dynamicOffset = arith::ConstantIndexOp::create(rewriter, loc, 0);
     }
 
     for (auto operand : concatOp.getInputs()) {
@@ -1174,8 +1174,9 @@
 
       if (dynamicConcatDim) {
         offsets[concatDim] = dynamicOffset.value();
-        dynamicSize = rewriter.create<memref::DimOp>(loc, *srcBuffer, concatDim)
-                          .getResult();
+        dynamicSize =
+            memref::DimOp::create(rewriter, loc, *srcBuffer, concatDim)
+                .getResult();
         sizes[concatDim] = dynamicSize.value();
       } else {
         sizes[concatDim] = rewriter.getIndexAttr(operandConcatDimSize);
@@ -1188,16 +1189,16 @@ struct ConcatOpInterface
           memref::SubViewOp::inferRankReducedResultType(
               operandTensorType.getShape(), dstMemrefType, offsets, sizes,
               strides);
-      Value subview = rewriter.create<memref::SubViewOp>(
-          loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);
+      Value subview = memref::SubViewOp::create(
+          rewriter, loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);
 
       // Copy the source buffer into the destination subview.
       if (failed(options.createMemCpy(rewriter, loc, *srcBuffer, subview)))
         return failure();
 
       if (dynamicConcatDim) {
-        dynamicOffset = rewriter.create<arith::AddIOp>(
-            loc, dynamicOffset.value(), dynamicSize.value());
+        dynamicOffset = arith::AddIOp::create(
+            rewriter, loc, dynamicOffset.value(), dynamicSize.value());
       } else {
         concatDimOffset += operandConcatDimSize;
       }
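
The ConcatOpInterface hunks above also show how the lowering accumulates its dynamic insertion offset with arith ops in the new spelling: start from a constant index 0 and add each dynamic size. A minimal sketch of that accumulation pattern (the helper name is illustrative, not part of the patch):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Starts at constant index 0 and adds each dynamic dimension size in turn,
// mirroring how the concat lowering advances its insertion offset.
static Value accumulateOffset(OpBuilder &rewriter, Location loc,
                              ArrayRef<Value> dynamicSizes) {
  Value offset = arith::ConstantIndexOp::create(rewriter, loc, 0);
  for (Value size : dynamicSizes)
    offset = arith::AddIOp::create(rewriter, loc, offset, size);
  return offset;
}
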
5 changes: 3 additions & 2 deletions mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
@@ -42,8 +42,9 @@ struct FoldEmptyTensorWithReshapeOp : public OpRewritePattern<ReshapeOp> {
 
     // Create new tensor.empty op.
     // TODO: Do not drop tensor type encoding.
-    Value emptyTensor = rewriter.create<EmptyOp>(
-        loc, resultShapes[0], reshapeOp.getResultType().getElementType());
+    Value emptyTensor =
+        EmptyOp::create(rewriter, loc, resultShapes[0],
+                        reshapeOp.getResultType().getElementType());
     if (emptyTensor.getType() != reshapeOp.getResultType()) {
       rewriter.replaceOpWithNewOp<tensor::CastOp>(
           reshapeOp, reshapeOp.getResultType(), emptyTensor);