
Commit 81ca5aa

[mlir][tensor][NFC] Rename linalg.init_tensor to tensor.empty
tensor.empty/linalg.init_tensor produces an uninitialized tensor that can be used as a destination operand for destination-style ops (ops that implement `DestinationStyleOpInterface`). This change makes it possible to implement `TilingInterface` for non-destination-style ops without depending on the Linalg dialect.

RFC: https://discourse.llvm.org/t/rfc-add-tensor-from-shape-operation/65101

Differential Revision: https://reviews.llvm.org/D135129
1 parent b20e34b commit 81ca5aa
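To make the destination-operand role concrete, here is a minimal sketch of `tensor.empty` feeding a destination-style op (hypothetical SSA names; assumes `%cst : f32` is defined earlier; `linalg.fill` is one of the destination-style ops touched by this diff):

```mlir
// tensor.empty only materializes a shape; its contents are unspecified.
%empty = tensor.empty() : tensor<4x8xf32>
// A destination-style op consumes it as an `outs` operand and defines
// the actual element values of its result.
%filled = linalg.fill ins(%cst : f32) outs(%empty : tensor<4x8xf32>) -> tensor<4x8xf32>
```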


94 files changed (+1235, -1300 lines changed)

mlir/docs/Dialects/Linalg/OpDSL.md

Lines changed: 2 additions & 2 deletions
@@ -318,8 +318,8 @@ extends to a multi-dimensional pointwise computation. As a result, we may use
 `fill` with arbitrary ranked output tensors:
 
 ```python
-tensor_2d = linalg.InitTensorOp([4, 8], f32)
-tensor_3d = linalg.InitTensorOp([4, 8, 16], f32)
+tensor_2d = tensor.EmptyOp([4, 8], f32)
+tensor_3d = tensor.EmptyOp([4, 8, 16], f32)
 fill(value, outs=[tensor_2d])
 fill(value, outs=[tensor_3d])
 ```

mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td

Lines changed: 0 additions & 104 deletions
@@ -24,110 +24,6 @@ include "mlir/Interfaces/ViewLikeInterface.td"
 class Linalg_Op<string mnemonic, list<Trait> traits = []> :
     Op<Linalg_Dialect, mnemonic, traits>;
 
-def Linalg_InitTensorOp : Linalg_Op<"init_tensor",
-    [NoSideEffect,
-     DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
-  let summary = "operation to define a tensor of particular shape";
-
-  let description = [{
-    `linalg.init_tensor` is an operation that defines a tensor of a particular
-    shape. The shape could be dynamic or static. The contents of the tensor are
-    unspecified and the only purpose of the op result is to materialize the
-    specified shape in IR and make it available to other transformations.
-
-    Note: This op can be lowered to a `bufferization.alloc_tensor`, at which
-    point it turns into an explicit buffer allocation.
-  }];
-
-  let arguments =
-      (ins Variadic<Index>:$sizes, I64ArrayAttr:$static_sizes);
-
-  let results = (outs AnyTensor:$result);
-
-  let assemblyFormat = [{
-    custom<DynamicIndexList>($sizes, $static_sizes,
-                             "ShapedType::kDynamicSize")
-    attr-dict `:` type($result)
-  }];
-
-  let extraClassDeclaration = [{
-    static StringRef getStaticSizesAttrStrName() {
-      return "static_sizes";
-    }
-
-    RankedTensorType getType() {
-      return getResult().getType().cast<RankedTensorType>(); }
-
-    // Infer the shape of the result tensor given the static shapes
-    // and element type of the result tensor.
-    static Type inferResultType(ArrayRef<int64_t> staticSizes, Type elementType,
-                                Attribute encoding = {});
-
-    // Return true if the size of the tensor is dynamic at `idx`
-    bool isDynamicSize(unsigned idx) {
-      APInt v = *(getStaticSizes().getAsValueRange<IntegerAttr>().begin() + idx);
-      return ShapedType::isDynamic(v.getSExtValue());
-    }
-
-    // Assert that the size of the result tensor is static at `idx`
-    // and return the shape.
-    int64_t getStaticSize(unsigned idx) {
-      assert(!isDynamicSize(idx) && "expected static size");
-      APInt v = *(getStaticSizes().
-          template getAsValueRange<IntegerAttr>().begin() + idx);
-      return v.getSExtValue();
-    }
-
-    // Return the argument position that contains the dynamic size of
-    // the tensor at dimension `idx`. Asserts that the shape is
-    // dynamic at that `idx`.
-    unsigned getIndexOfDynamicSize(unsigned idx) {
-      assert(isDynamicSize(idx) && "expected dynamic size");
-      return std::count_if(
-          getStaticSizes().getValue().begin(),
-          getStaticSizes().getValue().begin() + idx,
-          [&](Attribute attr) {
-            return ShapedType::isDynamic(attr.cast<IntegerAttr>().getInt());
-          });
-    }
-
-    // Return both static and dynamic sizes as a list of `OpFoldResult`.
-    SmallVector<OpFoldResult> getMixedSizes();
-
-    // Return the Value of the dynamic size of the tensor at dimension
-    // `idx`. Asserts that the shape is dynamic at that `idx`.
-    Value getDynamicSize(unsigned idx) {
-      return getOperand(getIndexOfDynamicSize(idx));
-    }
-  }];
-
-  let builders = [
-    OpBuilder<(ins "ValueRange":$shape,
-                   "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
-    [{
-      build($_builder, $_state,
-            InitTensorOp::inferResultType(staticShape, elementType),
-            shape, $_builder.getI64ArrayAttr(staticShape));
-    }]>,
-    OpBuilder<(ins "ValueRange":$shape, "Type":$elementType),
-    [{
-      SmallVector<int64_t, 4> staticShape(
-          shape.size(), ShapedType::kDynamicSize);
-      build($_builder, $_state, shape, staticShape, elementType);
-    }]>,
-    OpBuilder<(ins "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
-    [{
-      build($_builder, $_state, ValueRange{}, staticShape, elementType);
-    }]>,
-    OpBuilder<(ins "ArrayRef<OpFoldResult>":$sizes, "Type":$elementType,
-               CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
-  ];
-
-  let hasCanonicalizer = 1;
-  let hasCustomAssemblyFormat = 1;
-  let hasVerifier = 1;
-}
-
 def Linalg_YieldOp : Linalg_Op<"yield", [NoSideEffect, ReturnLike, Terminator]>,
     Arguments<(ins Variadic<AnyType>:$values)> {
   let summary = "Linalg yield operation";
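Note that the rename is also a syntax change: `linalg.init_tensor` printed its sizes in square brackets via the `custom<DynamicIndexList>` directive above, whereas `tensor.empty` takes only dynamic sizes as operands and encodes static extents in the result type. A before/after sketch with hypothetical values:

```mlir
// Before this commit:
%s = linalg.init_tensor [4, 8] : tensor<4x8xf32>
%d = linalg.init_tensor [%size, 8] : tensor<?x8xf32>
// After this commit:
%s = tensor.empty() : tensor<4x8xf32>
%d = tensor.empty(%size) : tensor<?x8xf32>
```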

mlir/include/mlir/Dialect/Linalg/Passes.h

Lines changed: 2 additions & 2 deletions
@@ -61,8 +61,8 @@ createConvertLinalgToParallelLoopsPass();
 std::unique_ptr<OperationPass<func::FuncOp>>
 createConvertLinalgToAffineLoopsPass();
 
-/// Create a pass that rewrites init_tensor to alloc_tensor.
-std::unique_ptr<Pass> createLinalgInitTensorToAllocTensorPass();
+/// Create a pass that rewrites tensor.empty to bufferization.alloc_tensor.
+std::unique_ptr<Pass> createEmptyTensorToAllocTensorPass();
 
 /// Create a pass to convert Linalg operations which work on tensors to use
 /// buffers instead.

mlir/include/mlir/Dialect/Linalg/Passes.td

Lines changed: 4 additions & 4 deletions
@@ -24,14 +24,14 @@ def ConvertElementwiseToLinalg : Pass<"convert-elementwise-to-linalg", ""> {
   let dependentDialects = ["linalg::LinalgDialect", "memref::MemRefDialect"];
 }
 
-def LinalgInitTensorToAllocTensor : Pass<"linalg-init-tensor-to-alloc-tensor"> {
-  let summary = "Replace all init_tensor ops by alloc_tensor ops.";
+def EmptyTensorToAllocTensor : Pass<"empty-tensor-to-alloc-tensor"> {
+  let summary = "Replace all tensor.empty ops by alloc_tensor ops.";
   let description = [{
-    init_tensor ops return a tensor of unspecified contents who's only purpose
+    tensor.empty ops return a tensor of unspecified contents whose only purpose
     is to carry the tensor shape. This pass converts such ops to
     bufferization.alloc_tensor ops, which bufferize to buffer allocations.
   }];
-  let constructor = "mlir::createLinalgInitTensorToAllocTensorPass()";
+  let constructor = "mlir::createEmptyTensorToAllocTensorPass()";
 }
 
 def LinalgFoldUnitExtentDims : Pass<"linalg-fold-unit-extent-dims", ""> {
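For illustration, a minimal sketch of the renamed pass's effect (hypothetical IR, following the pass description above):

```mlir
// Before -empty-tensor-to-alloc-tensor:
%0 = tensor.empty(%sz) : tensor<?x32xf32>
// After the pass; alloc_tensor later bufferizes to a real allocation:
%0 = bufferization.alloc_tensor(%sz) : tensor<?x32xf32>
```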

mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td

Lines changed: 3 additions & 3 deletions
@@ -506,7 +506,7 @@ def SplitReductionOp : Op<Transform_Dialect, "structured.split_reduction",
 ```
 %cst = arith.constant 0.000000e+00 : f32
 %0 = tensor.expand_shape %in [[0, 1]] : tensor<32xf32> into tensor<4x8xf32>
-%1 = linalg.init_tensor [4] : tensor<4xf32>
+%1 = tensor.empty() : tensor<4xf32>
 %2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<4xf32>) -> tensor<4xf32>
 %3 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
   affine_map<(d0, d1) -> (d0)>],
@@ -557,11 +557,11 @@ def SplitReductionOp : Op<Transform_Dialect, "structured.split_reduction",
 #map3 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
 #map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 #map5 = affine_map<(d0, d1, d2) -> (d0, d1)>
-%0 = linalg.init_tensor [16, 32, 64] : tensor<16x32x64xf32>
+%0 = tensor.empty() : tensor<16x32x64xf32>
 %cst = arith.constant 0.000000e+00 : f32
 %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<16x32x64xf32>) ->
   tensor<16x32x64xf32>
-%2 = linalg.init_tensor [64, 4] : tensor<64x4xi1>
+%2 = tensor.empty() : tensor<64x4xi1>
 
 %3 = linalg.generic {indexing_maps = [#map0, #map1, #map2, #map3],
   iterator_types = ["parallel", "parallel", "parallel", "reduction"]}

mlir/include/mlir/Dialect/Linalg/Transforms/HoistPadding.h

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ class GenericOp;
 ///
 /// ```
 ///    scf.for (%i) {
-///      %packed_init = linalg.init_tensor range(%j) : tensor<?x4x8xf32>
+///      %packed_init = tensor.empty range(%j) : tensor<?x4x8xf32>
 ///      %packed = scf.for (%k) iter_args(%p : %packed_init) {
 ///        %st0 = tensor.extract_slice f(%i, %k) : ... to tensor<?x?xf32>
 ///        %0 = tensor.pad %st0 low[0, 0] high[...] {

mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h

Lines changed: 4 additions & 4 deletions
@@ -1196,7 +1196,7 @@ rewriteAsPaddedOp(OpBuilder &b, LinalgOp opToPad,
 using OptimizeCopyFn =
     std::function<LogicalResult(PatternRewriter &, tensor::PadOp, Value)>;
 
-/// Rewrite a tensor::PadOp into a sequence of InitTensorOp, FillOp and
+/// Rewrite a tensor::PadOp into a sequence of EmptyOp, FillOp and
 /// InsertSliceOp. For now, only constant padding values are supported.
 /// `OptimizeCopyFn` can be used to customize copying step optimization.
 struct GeneralizePadOpPattern : public OpRewritePattern<tensor::PadOp> {
@@ -1407,7 +1407,7 @@ void populateSplitReductionPattern(
 /// ```
 /// %cst = arith.constant 0.000000e+00 : f32
 /// %0 = tensor.expand_shape %in [[0, 1]] : tensor<32xf32> into tensor<4x8xf32>
-/// %1 = linalg.init_tensor [4] : tensor<4xf32>
+/// %1 = tensor.empty() : tensor<4xf32>
 /// %2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<4xf32>) -> tensor<4xf32>
 /// %3 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
 ///   affine_map<(d0, d1) -> (d0)>],
@@ -1464,11 +1464,11 @@ splitReduction(PatternRewriter &b, LinalgOp op,
 /// #map3 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
 /// #map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 /// #map5 = affine_map<(d0, d1, d2) -> (d0, d1)>
-/// %0 = linalg.init_tensor [16, 32, 64] : tensor<16x32x64xf32>
+/// %0 = tensor.empty() : tensor<16x32x64xf32>
 /// %cst = arith.constant 0.000000e+00 : f32
 /// %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<16x32x64xf32>) ->
 ///   tensor<16x32x64xf32>
-/// %2 = linalg.init_tensor [64, 4] : tensor<64x4xi1>
+/// %2 = tensor.empty() : tensor<64x4xi1>
 ///
 /// %3 = linalg.generic {indexing_maps = [#map0, #map1, #map2, #map3],
 ///   iterator_types = ["parallel", "parallel", "parallel", "reduction"]}

mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ def Tensor_Dialect : Dialect {
 
   let hasConstantMaterializer = 1;
   let dependentDialects = [
+    "AffineDialect",
     "arith::ArithDialect",
     "complex::ComplexDialect",
   ];

mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td

Lines changed: 59 additions & 0 deletions
@@ -136,6 +136,65 @@ def Tensor_DimOp : Tensor_Op<"dim", [NoSideEffect, ShapedDimOpInterface]> {
   let hasVerifier = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// EmptyOp
+//===----------------------------------------------------------------------===//
+
+def Tensor_EmptyOp : Tensor_Op<"empty",
+    [NoSideEffect,
+     DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
+  let summary = "empty tensor operation";
+
+  let description = [{
+    `tensor.empty` is an operation that defines a tensor of a particular shape.
+    The shape could be dynamic or static. The contents of the tensor are
+    unspecified and the only purpose of the op result is to materialize the
+    specified shape in IR and make it available to other transformations.
+
+    `tensor.empty` is useful in transformations that expect destination-style
+    ops, i.e., ops that implement `DestinationStyleOpInterface`. Ops that are
+    not in destination style can be made compatible with such transformations
+    with a `tensor.empty` destination.
+
+    Note: This op can be lowered to a `bufferization.alloc_tensor`, at which
+    point it turns into an explicit buffer allocation.
+  }];
+
+  let arguments = (ins Variadic<Index>:$dynamicSizes);
+
+  let results = (outs AnyRankedTensor:$result);
+
+  let assemblyFormat = "`(`$dynamicSizes`)` attr-dict `:` type($result)";
+
+  let extraClassDeclaration = [{
+    RankedTensorType getType() {
+      return getResult().getType().cast<RankedTensorType>();
+    }
+
+    // Return both static and dynamic sizes as a list of `OpFoldResult`.
+    SmallVector<OpFoldResult> getMixedSizes();
+
+    // Return the Value of the dynamic size of the tensor at dimension `idx`.
+    // Asserts that the shape is dynamic at that `idx`.
+    Value getDynamicSize(unsigned idx);
+  }];
+
+  let builders = [
+    // Build with fully static sizes.
+    OpBuilder<(ins "ArrayRef<int64_t>":$staticShape, "Type":$elementType)>,
+
+    // Build with mixed static/dynamic sizes.
+    OpBuilder<(ins "ArrayRef<int64_t>":$staticShape, "Type":$elementType,
+                   "ValueRange":$dynamicSizes)>,
+
+    // Build with mixed static/dynamic sizes.
+    OpBuilder<(ins "ArrayRef<OpFoldResult>":$sizes, "Type":$elementType)>
+  ];
+
+  let hasCanonicalizer = 1;
+  let hasVerifier = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // ExtractOp
 //===----------------------------------------------------------------------===//
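Per the `assemblyFormat` above, `tensor.empty` prints one operand per dynamic (`?`) dimension, in order, and `getMixedSizes()` merges those operands with the static extents taken from the result type. A small sketch with hypothetical names:

```mlir
// Fully static: no operands; the shape lives entirely in the result type.
%a = tensor.empty() : tensor<4x8xf32>
// Mixed: %d0 and %d2 supply the extents of the two dynamic dimensions.
%b = tensor.empty(%d0, %d2) : tensor<?x8x?xf32>
```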

mlir/include/mlir/Dialect/Tensor/Transforms/TransformUtils.h

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ namespace tensor {
 /// We can construct %2 by generating the following, which only uses `%0`:
 ///
 /// ```
-///   %dest = linalg.init_tensor [%size0, %size1] : tensor<?x?xf32>
+///   %dest = tensor.empty(%size0, %size1) : tensor<?x?xf32>
 ///   %1 = tensor.dim %0, %c1 : tensor<3x?x?x11x?xf32>
 ///   %2 = tensor.dim %0, %c2 : tensor<3x?x?x11x?xf32>
 ///   %3 = tensor.dim %0, %c4 : tensor<3x?x?x11x?xf32>
