Skip to content

Commit 42444d0

Browse files
author
MaheshRavishankar
committed
[mlir][Linalg] NFC: Verify tiling on linalg.generic operation on tensors.
With the recent changes to linalg-on-tensor semantics, the tiling operation works out-of-the-box for generic operations. Add a test to verify that, along with some minor refactoring. Differential Revision: https://reviews.llvm.org/D93077
1 parent 774c9c6 commit 42444d0

File tree

3 files changed

+116
-9
lines changed

3 files changed

+116
-9
lines changed

mlir/include/mlir/IR/AffineMap.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -327,6 +327,21 @@ AffineMap inversePermutation(AffineMap map);
327327
/// ```
328328
AffineMap concatAffineMaps(ArrayRef<AffineMap> maps);
329329

330+
/// Returns the map that results from projecting out the dimensions specified in
331+
/// `projectedDimensions`. The projected dimensions are set to 0.
332+
///
333+
/// Example:
334+
/// 1) map : affine_map<(d0, d1, d2) -> (d0, d1)>
335+
/// projected_dimensions : {2}
336+
/// result : affine_map<(d0, d1) -> (d0, d1)>
337+
///
338+
/// 2) map : affine_map<(d0, d1) -> (d0 + d1)>
339+
/// projected_dimensions : {1}
340+
/// result : affine_map<(d0) -> (d0)>
341+
///
342+
/// 3) map : affine_map<(d0, d1, d2) -> (d0, d1)>
343+
/// projected_dimensions : {1}
344+
/// result : affine_map<(d0, d1) -> (d0, 0)>
330345
AffineMap getProjectedMap(AffineMap map,
331346
ArrayRef<unsigned> projectedDimensions);
332347

mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -221,9 +221,8 @@ static bool isTiled(AffineMap map, ValueRange tileSizes) {
221221

222222
static SmallVector<Value, 4>
223223
makeTiledShapes(OpBuilder &b, Location loc, LinalgOp linalgOp,
224-
ValueRange operands, AffineMap map, ValueRange ivs,
224+
ArrayRef<Value> tiledOperands, AffineMap map, ValueRange ivs,
225225
ValueRange tileSizes, ValueRange allShapeSizes) {
226-
assert(operands.size() == linalgOp.getShapedOperands().size());
227226
assert(ivs.size() == static_cast<size_t>(llvm::count_if(
228227
llvm::make_range(tileSizes.begin(), tileSizes.end()),
229228
[](Value v) { return !isZero(v); })) &&
@@ -243,11 +242,9 @@ makeTiledShapes(OpBuilder &b, Location loc, LinalgOp linalgOp,
243242
subShapeSizes.push_back(size - std_constant_index(1));
244243
}
245244

246-
auto *op = linalgOp.getOperation();
247-
248245
SmallVector<Value, 4> res;
249-
res.reserve(op->getNumOperands());
250-
for (auto en : llvm::enumerate(operands)) {
246+
res.reserve(tiledOperands.size());
247+
for (auto en : llvm::enumerate(tiledOperands)) {
251248
Value shapedOp = en.value();
252249
ShapedType shapedType = shapedOp.getType().cast<ShapedType>();
253250
unsigned rank = shapedType.getRank();
@@ -342,6 +339,7 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
342339
LoopIndexToRangeIndexMap loopIndexToRangeIndex;
343340
std::tie(loopRanges, loopIndexToRangeIndex) = makeTiledLoopRanges(
344341
b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);
342+
345343
SmallVector<Attribute, 4> iteratorTypes;
346344
for (auto attr :
347345
enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
@@ -574,10 +572,10 @@ void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
574572
static void insertTilingPatterns(OwningRewritePatternList &patterns,
575573
const LinalgTilingOptions &options,
576574
MLIRContext *ctx) {
577-
RewritePatternList<
575+
RewritePatternList<GenericOp, IndexedGenericOp,
578576
#define GET_OP_LIST
579577
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
580-
>::insert(patterns, options, ctx);
578+
>::insert(patterns, options, ctx);
581579
}
582580

583581
static void applyTilingToLoopPatterns(LinalgTilingLoopType loopType,

mlir/test/Dialect/Linalg/tile-tensors.mlir

Lines changed: 95 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" | FileCheck %s
1+
// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -split-input-file | FileCheck %s
22

33
// CHECK-LABEL: func @matmul_tensors(
44
// CHECK-SAME: %[[TA:[0-9a-z]+]]: tensor<?x?xf32>
@@ -26,3 +26,97 @@ func @matmul_tensors(
2626
// CHECK: return %[[TD0]] : tensor<?x?xf32>
2727
return %0 : tensor<?x?xf32>
2828
}
29+
30+
// -----
31+
32+
func @generic_op_tensors(
33+
%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
34+
%c0 = constant 0 : index
35+
%c1 = constant 1 : index
36+
%c2 = constant 2 : index
37+
%0 = dim %arg0, %c0 : tensor<?x?x?xf32>
38+
%1 = dim %arg0, %c1 : tensor<?x?x?xf32>
39+
%2 = dim %arg0, %c2 : tensor<?x?x?xf32>
40+
%3 = linalg.init_tensor [%0, %1, %2] : tensor<?x?x?xf32>
41+
%4 = linalg.generic
42+
{indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
43+
affine_map<(d0, d1, d2) -> (d0, d2, d1)>,
44+
affine_map<(d0, d1, d2) -> (d2, d1, d0)>],
45+
iterator_types = ["parallel", "parallel", "parallel"]}
46+
ins(%arg0, %arg1 : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
47+
outs(%3 : tensor<?x?x?xf32>) {
48+
^bb0(%arg2 : f32, %arg3: f32, %arg4: f32):
49+
%5 = addf %arg2, %arg3 : f32
50+
linalg.yield %5 : f32
51+
} -> tensor<?x?x?xf32>
52+
return %4 : tensor<?x?x?xf32>
53+
}
54+
55+
// CHECK-LABEL: func @generic_op_tensors
56+
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
57+
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
58+
// CHECK: %[[INIT:.+]] = linalg.init_tensor
59+
// CHECK: %[[TD0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC0:.+]] = %[[INIT]]) -> (tensor<?x?x?xf32>) {
60+
// CHECK: %[[TD1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC1:.+]] = %[[TC0]]) -> (tensor<?x?x?xf32>) {
61+
// CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
62+
// CHECK: %[[STARG0:.+]] = subtensor %[[ARG0]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
63+
// CHECK: %[[STARG1:.+]] = subtensor %[[ARG1]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
64+
// CHECK: %[[STARG2:.+]] = subtensor %[[TC2]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
65+
// CHECK: %[[STRETURN:.+]] = linalg.generic
66+
// CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
67+
// CHECK-SAME: outs(%[[STARG2]] : tensor<?x?x?xf32>)
68+
// CHECK: %[[TD:.+]] = subtensor_insert %[[STRETURN]] into %[[TC2]]
69+
// CHECK: scf.yield %[[TD]]
70+
// CHECK: }
71+
// CHECK: scf.yield %[[TD2]]
72+
// CHECK: }
73+
// CHECK: scf.yield %[[TD1]]
74+
// CHECK: }
75+
// CHECK: return %[[TD0]]
76+
77+
// -----
78+
79+
func @indexed_generic_op_tensors(
80+
%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
81+
%c0 = constant 0 : index
82+
%c1 = constant 1 : index
83+
%c2 = constant 2 : index
84+
%0 = dim %arg0, %c0 : tensor<?x?x?xf32>
85+
%1 = dim %arg0, %c1 : tensor<?x?x?xf32>
86+
%2 = dim %arg0, %c2 : tensor<?x?x?xf32>
87+
%3 = linalg.init_tensor [%0, %1, %2] : tensor<?x?x?xf32>
88+
%4 = linalg.indexed_generic
89+
{indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
90+
affine_map<(d0, d1, d2) -> (d0, d2, d1)>,
91+
affine_map<(d0, d1, d2) -> (d2, d1, d0)>],
92+
iterator_types = ["parallel", "parallel", "parallel"]}
93+
ins(%arg0, %arg1 : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
94+
outs(%3 : tensor<?x?x?xf32>) {
95+
^bb0(%arg2 : index, %arg3 : index, %arg4 : index, %arg5 : f32, %arg6: f32, %arg7: f32):
96+
%5 = addf %arg5, %arg6 : f32
97+
linalg.yield %5 : f32
98+
} -> tensor<?x?x?xf32>
99+
return %4 : tensor<?x?x?xf32>
100+
}
101+
102+
// CHECK-LABEL: func @indexed_generic_op_tensors
103+
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
104+
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
105+
// CHECK: %[[INIT:.+]] = linalg.init_tensor
106+
// CHECK: %[[TD0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC0:.+]] = %[[INIT]]) -> (tensor<?x?x?xf32>) {
107+
// CHECK: %[[TD1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC1:.+]] = %[[TC0]]) -> (tensor<?x?x?xf32>) {
108+
// CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
109+
// CHECK: %[[STARG0:.+]] = subtensor %[[ARG0]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
110+
// CHECK: %[[STARG1:.+]] = subtensor %[[ARG1]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
111+
// CHECK: %[[STARG2:.+]] = subtensor %[[TC2]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
112+
// CHECK: %[[STRETURN:.+]] = linalg.indexed_generic
113+
// CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
114+
// CHECK-SAME: outs(%[[STARG2]] : tensor<?x?x?xf32>)
115+
// CHECK: %[[TD:.+]] = subtensor_insert %[[STRETURN]] into %[[TC2]]
116+
// CHECK: scf.yield %[[TD]]
117+
// CHECK: }
118+
// CHECK: scf.yield %[[TD2]]
119+
// CHECK: }
120+
// CHECK: scf.yield %[[TD1]]
121+
// CHECK: }
122+
// CHECK: return %[[TD0]]

0 commit comments

Comments
 (0)