Commit 695b945

Add debug-payload-root-tag to transform.named_sequence tests
1 parent eb2f888 commit 695b945
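Every test touched by this commit follows the same pattern: the payload IR is wrapped in a module carrying a transform.target_tag = "payload" attribute, and the RUN line invokes the interpreter with debug-payload-root-tag=payload so it picks the tagged module as the payload root instead of the whole top-level module. Below is a minimal sketch of that layout; the trivial @identity payload function and the empty @__transform_main sequence are illustrative placeholders, not code from this commit.

// RUN: mlir-opt --transform-interpreter="debug-payload-root-tag=payload" %s | FileCheck %s

// Payload IR lives in a module tagged so the interpreter can find it.
module @payload attributes { transform.target_tag = "payload" } {
  // CHECK-LABEL: func @identity
  func.func @identity(%arg0: tensor<4xf32>) -> tensor<4xf32> {
    return %arg0 : tensor<4xf32>
  }
}

// The transform script sits next to the payload in its own module.
module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.readonly}) {
    // A real test would schedule transformations on %root here.
    transform.yield
  }
}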

5 files changed: +241 -207 lines changed

mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir

Lines changed: 80 additions & 62 deletions
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --transform-interpreter %s -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt --transform-interpreter="debug-payload-root-tag=payload" %s -split-input-file -verify-diagnostics | FileCheck %s
 
 // Test One-Shot Bufferize.
 
@@ -12,19 +12,21 @@ module attributes {transform.with_named_sequence} {
 
 // CHECK-LABEL: func @test_function(
 // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
-func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
-  %c0 = arith.constant 0 : index
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
+    %c0 = arith.constant 0 : index
 
-  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
-  // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
-  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
-  // CHECK: memref.copy %[[A_memref]], %[[alloc]]
-  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
-  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
-  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
+    // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
+    // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
+    // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
+    // CHECK: memref.copy %[[A_memref]], %[[alloc]]
+    // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
+    // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
+    %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
 
-  // CHECK: return %[[res_tensor]]
-  return %0 : tensor<?xf32>
+    // CHECK: return %[[res_tensor]]
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -42,19 +44,21 @@ module attributes {transform.with_named_sequence} {
 // CHECK-LABEL: func @test_function(
 // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
 // CHECK-NOT: memref.copy
-func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
-  %c0 = arith.constant 0 : index
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
+    %c0 = arith.constant 0 : index
 
-  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
-  // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
-  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
-  // CHECK: linalg.copy ins(%[[A_memref]] : memref<{{.*}}>) outs(%[[alloc]]
-  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
-  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
-  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
+    // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
+    // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
+    // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
+    // CHECK: linalg.copy ins(%[[A_memref]] : memref<{{.*}}>) outs(%[[alloc]]
+    // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
+    // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
+    %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
 
-  // CHECK: return %[[res_tensor]]
-  return %0 : tensor<?xf32>
+    // CHECK: return %[[res_tensor]]
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -72,13 +76,15 @@ module attributes {transform.with_named_sequence} {
 
 // CHECK-LABEL: func @test_function_analysis(
 // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
-func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
-  %c0 = arith.constant 0 : index
-  // CHECK: vector.transfer_write
-  // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false", "none"]}
-  // CHECK-SAME: tensor<?xf32>
-  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
-  return %0 : tensor<?xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
+    %c0 = arith.constant 0 : index
+    // CHECK: vector.transfer_write
+    // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false", "none"]}
+    // CHECK-SAME: tensor<?xf32>
+    %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -95,10 +101,12 @@ module attributes {transform.with_named_sequence} {
   }
 }
 
-func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
-  // expected-error @+1 {{op was not bufferized}}
-  %0 = "test.dummy_op"() : () -> (tensor<?xf32>)
-  return %0 : tensor<?xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
+    // expected-error @+1 {{op was not bufferized}}
+    %0 = "test.dummy_op"() : () -> (tensor<?xf32>)
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -111,7 +119,7 @@ module attributes {transform.with_named_sequence} {
   }
 }
 
-module {
+module @payload attributes { transform.target_tag = "payload" } {
   // CHECK-LABEL: func @test_function(
   // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
   func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
@@ -146,11 +154,13 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME: %[[A:.*]]: memref<12x9xf32>,
 // CHECK-SAME: %[[B:.*]]: memref<9x6xf32>,
 // CHECK-SAME: %[[C:.*]]: memref<12x6xf32>) -> memref<12x6xf32> {
-func.func @matmul(%A: tensor<12x9xf32>, %B: tensor<9x6xf32>, %C: tensor<12x6xf32>) -> tensor<12x6xf32> {
-  // CHECK: linalg.matmul ins(%[[A]], %[[B]] : memref<12x9xf32>, memref<9x6xf32>) outs(%[[C]] : memref<12x6xf32>)
-  %D = linalg.matmul ins(%A, %B: tensor<12x9xf32>, tensor<9x6xf32>) outs(%C: tensor<12x6xf32>) -> tensor<12x6xf32>
-  // CHECK: return %[[C]] : memref<12x6xf32>
-  return %D : tensor<12x6xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @matmul(%A: tensor<12x9xf32>, %B: tensor<9x6xf32>, %C: tensor<12x6xf32>) -> tensor<12x6xf32> {
+    // CHECK: linalg.matmul ins(%[[A]], %[[B]] : memref<12x9xf32>, memref<9x6xf32>) outs(%[[C]] : memref<12x6xf32>)
+    %D = linalg.matmul ins(%A, %B: tensor<12x9xf32>, tensor<9x6xf32>) outs(%C: tensor<12x6xf32>) -> tensor<12x6xf32>
+    // CHECK: return %[[C]] : memref<12x6xf32>
+    return %D : tensor<12x6xf32>
+  }
 }
 
 // -----
@@ -165,10 +175,12 @@ module attributes {transform.with_named_sequence} {
 }
 
 // Expect `bufferization.empty_tensor_to_alloc_tensor` to replace the tensor.empty.
-func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
-  // CHECK: bufferization.alloc_tensor
-  %0 = tensor.empty() : tensor<2x2xf32>
-  return %0 : tensor<2x2xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
+    // CHECK: bufferization.alloc_tensor
+    %0 = tensor.empty() : tensor<2x2xf32>
+    return %0 : tensor<2x2xf32>
+  }
 }
 
 // -----
@@ -185,13 +197,15 @@ module attributes {transform.with_named_sequence} {
 // CHECK: tensor.extract_slice
 // CHECK: linalg.fill
 // CHECK: tensor.insert_slice
-func.func @empty_tensor_elimination(
-    %t: tensor<10xf32>, %f: f32) -> tensor<10xf32> {
-  %0 = tensor.empty() : tensor<5xf32>
-  %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<5xf32>) -> tensor<5xf32>
-  %2 = tensor.insert_slice %1 into %t [1][5][1]
-      : tensor<5xf32> into tensor<10xf32>
-  return %2 : tensor<10xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @empty_tensor_elimination(
+      %t: tensor<10xf32>, %f: f32) -> tensor<10xf32> {
+    %0 = tensor.empty() : tensor<5xf32>
+    %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<5xf32>) -> tensor<5xf32>
+    %2 = tensor.insert_slice %1 into %t [1][5][1]
+        : tensor<5xf32> into tensor<10xf32>
+    return %2 : tensor<10xf32>
+  }
 }
 
 // -----
@@ -208,12 +222,14 @@ module attributes {transform.with_named_sequence} {
 // CHECK: memref.alloca
 // CHECK: scf.for
 // CHECK: memref.store
-func.func @buffer_loop_hoisting(%lb: index, %ub: index, %step: index, %f: f32, %pos: index) {
-  scf.for %iv = %lb to %ub step %step {
-    %0 = memref.alloca() : memref<5xf32>
-    memref.store %f, %0[%pos] : memref<5xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @buffer_loop_hoisting(%lb: index, %ub: index, %step: index, %f: f32, %pos: index) {
+    scf.for %iv = %lb to %ub step %step {
+      %0 = memref.alloca() : memref<5xf32>
+      memref.store %f, %0[%pos] : memref<5xf32>
+    }
+    return
   }
-  return
 }
 
 // -----
@@ -231,10 +247,12 @@ module attributes {transform.with_named_sequence} {
 
 // Expect `bufferization.bufferize_to_allocation` to create an alloc.
 // CHECK-LABEL: func.func @empty_to_tensor_alloc()
-func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
-  // CHECK-NEXT: %[[alloca:.*]] = memref.alloca() : memref<2x2xf32>
-  // CHECK-NEXT: %[[tensor:.*]] = bufferization.to_tensor %[[alloca]] restrict writable : memref<2x2xf32>
-  // CHECK-NEXT: return %[[tensor]] : tensor<2x2xf32>
-  %0 = bufferization.alloc_tensor() : tensor<2x2xf32>
-  return %0 : tensor<2x2xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
+    // CHECK-NEXT: %[[alloca:.*]] = memref.alloca() : memref<2x2xf32>
+    // CHECK-NEXT: %[[tensor:.*]] = bufferization.to_tensor %[[alloca]] restrict writable : memref<2x2xf32>
+    // CHECK-NEXT: return %[[tensor]] : tensor<2x2xf32>
+    %0 = bufferization.alloc_tensor() : tensor<2x2xf32>
+    return %0 : tensor<2x2xf32>
+  }
 }

mlir/test/Dialect/LLVM/transform-e2e.mlir

Lines changed: 12 additions & 10 deletions
@@ -1,15 +1,17 @@
-// RUN: mlir-opt %s --transform-interpreter -test-transform-dialect-erase-schedule --test-lower-to-llvm --split-input-file | FileCheck %s
+// RUN: mlir-opt %s --transform-interpreter="debug-payload-root-tag=payload" -test-transform-dialect-erase-schedule --test-lower-to-llvm --split-input-file | FileCheck %s
 
 // CHECK-LABEL: llvm.func @matmul_tensors
-func.func @matmul_tensors(
-  %arg0: tensor<2x4xf32>, %arg1: tensor<4x6xf32>, %arg2: tensor<2x6xf32>)
-    -> tensor<2x6xf32> {
-  // CHECK-NOT: linalg
-  // CHECK: llvm.intr.fmuladd{{.*}}
-  %0 = linalg.matmul ins(%arg0, %arg1: tensor<2x4xf32>, tensor<4x6xf32>)
-                     outs(%arg2: tensor<2x6xf32>)
-    -> tensor<2x6xf32>
-  return %0 : tensor<2x6xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @matmul_tensors(
+    %arg0: tensor<2x4xf32>, %arg1: tensor<4x6xf32>, %arg2: tensor<2x6xf32>)
+      -> tensor<2x6xf32> {
+    // CHECK-NOT: linalg
+    // CHECK: llvm.intr.fmuladd{{.*}}
+    %0 = linalg.matmul ins(%arg0, %arg1: tensor<2x4xf32>, tensor<4x6xf32>)
+                       outs(%arg2: tensor<2x6xf32>)
+      -> tensor<2x6xf32>
+    return %0 : tensor<2x6xf32>
+  }
 }
 
 module attributes {transform.with_named_sequence} {

mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir

Lines changed: 28 additions & 24 deletions
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --split-input-file --transform-interpreter %s | FileCheck %s
+// RUN: mlir-opt --split-input-file --transform-interpreter="debug-payload-root-tag=payload" %s | FileCheck %s
 
 // CHECK-LABEL: func @matmul_divisible
 // CHECK: scf.forall
@@ -24,19 +24,21 @@
 // CHECK: scf.forall
 // CHECK: vector.transfer_read
 // CHECK: vector.transfer_write
-func.func @matmul_divisible(%A: tensor<1024x1024xf32>,
-                            %B: tensor<1024x1024xf32>,
-                            %C: tensor<1024x1024xf32>)
-    -> tensor<1024x1024xf32>
-{
-  %cst = arith.constant 0.000000e+00 : f32
-  %0 = linalg.fill ins(%cst : f32)
-                   outs(%C : tensor<1024x1024xf32>)
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @matmul_divisible(%A: tensor<1024x1024xf32>,
+                              %B: tensor<1024x1024xf32>,
+                              %C: tensor<1024x1024xf32>)
     -> tensor<1024x1024xf32>
-  %1 = linalg.matmul ins(%A, %B : tensor<1024x1024xf32>, tensor<1024x1024xf32>)
-                     outs(%0 : tensor<1024x1024xf32>)
-    -> tensor<1024x1024xf32>
-  return %1 : tensor<1024x1024xf32>
+  {
+    %cst = arith.constant 0.000000e+00 : f32
+    %0 = linalg.fill ins(%cst : f32)
+                     outs(%C : tensor<1024x1024xf32>)
+      -> tensor<1024x1024xf32>
+    %1 = linalg.matmul ins(%A, %B : tensor<1024x1024xf32>, tensor<1024x1024xf32>)
+                       outs(%0 : tensor<1024x1024xf32>)
+      -> tensor<1024x1024xf32>
+    return %1 : tensor<1024x1024xf32>
+  }
 }
 
 module attributes {transform.with_named_sequence} {
@@ -143,19 +145,21 @@ module attributes {transform.with_named_sequence} {
 // CHECK: linalg.matmul
 // CHECK: vector.transfer_read
 // CHECK: vector.transfer_write
+module @payload attributes { transform.target_tag = "payload" } {
 func.func @matmul_not_divisible(%A: tensor<1023x1023xf32>,
-                                %B: tensor<1023x1023xf32>,
-                                %C: tensor<1023x1023xf32>)
-    -> tensor<1023x1023xf32>
-{
-  %cst = arith.constant 0.000000e+00 : f32
-  %0 = linalg.fill ins(%cst : f32)
-                   outs(%C : tensor<1023x1023xf32>)
+                                  %B: tensor<1023x1023xf32>,
+                                  %C: tensor<1023x1023xf32>)
     -> tensor<1023x1023xf32>
-  %1 = linalg.matmul ins(%A, %B : tensor<1023x1023xf32>, tensor<1023x1023xf32>)
-                     outs(%0 : tensor<1023x1023xf32>)
-    -> tensor<1023x1023xf32>
-  return %1 : tensor<1023x1023xf32>
+  {
+    %cst = arith.constant 0.000000e+00 : f32
+    %0 = linalg.fill ins(%cst : f32)
+                     outs(%C : tensor<1023x1023xf32>)
+      -> tensor<1023x1023xf32>
+    %1 = linalg.matmul ins(%A, %B : tensor<1023x1023xf32>, tensor<1023x1023xf32>)
+                       outs(%0 : tensor<1023x1023xf32>)
+      -> tensor<1023x1023xf32>
+    return %1 : tensor<1023x1023xf32>
+  }
 }
 
 module attributes {transform.with_named_sequence} {
