-// RUN: mlir-opt --transform-interpreter %s -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt --transform-interpreter="debug-payload-root-tag=payload" %s -split-input-file -verify-diagnostics | FileCheck %s
 
 // Test One-Shot Bufferize.
 
@@ -12,19 +12,21 @@ module attributes {transform.with_named_sequence} {
 
 // CHECK-LABEL: func @test_function(
 // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
-func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
-  %c0 = arith.constant 0 : index
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
+    %c0 = arith.constant 0 : index
 
-  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
-  // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
-  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
-  // CHECK: memref.copy %[[A_memref]], %[[alloc]]
-  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
-  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
-  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
+    // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
+    // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
+    // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
+    // CHECK: memref.copy %[[A_memref]], %[[alloc]]
+    // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
+    // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
+    %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
 
-  // CHECK: return %[[res_tensor]]
-  return %0 : tensor<?xf32>
+    // CHECK: return %[[res_tensor]]
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -42,19 +44,21 @@ module attributes {transform.with_named_sequence} {
 // CHECK-LABEL: func @test_function(
 // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
 // CHECK-NOT: memref.copy
-func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
-  %c0 = arith.constant 0 : index
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
+    %c0 = arith.constant 0 : index
 
-  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
-  // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
-  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
-  // CHECK: linalg.copy ins(%[[A_memref]] : memref<{{.*}}>) outs(%[[alloc]]
-  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
-  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
-  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
+    // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
+    // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]]
+    // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
+    // CHECK: linalg.copy ins(%[[A_memref]] : memref<{{.*}}>) outs(%[[alloc]]
+    // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
+    // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
+    %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
 
-  // CHECK: return %[[res_tensor]]
-  return %0 : tensor<?xf32>
+    // CHECK: return %[[res_tensor]]
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -72,13 +76,15 @@ module attributes {transform.with_named_sequence} {
 
 // CHECK-LABEL: func @test_function_analysis(
 // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
-func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
-  %c0 = arith.constant 0 : index
-  // CHECK: vector.transfer_write
-  // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false", "none"]}
-  // CHECK-SAME: tensor<?xf32>
-  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
-  return %0 : tensor<?xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
+    %c0 = arith.constant 0 : index
+    // CHECK: vector.transfer_write
+    // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false", "none"]}
+    // CHECK-SAME: tensor<?xf32>
+    %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -95,10 +101,12 @@ module attributes {transform.with_named_sequence} {
   }
 }
 
-func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
-  // expected-error @+1 {{op was not bufferized}}
-  %0 = "test.dummy_op"() : () -> (tensor<?xf32>)
-  return %0 : tensor<?xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
+    // expected-error @+1 {{op was not bufferized}}
+    %0 = "test.dummy_op"() : () -> (tensor<?xf32>)
+    return %0 : tensor<?xf32>
+  }
 }
 
 // -----
@@ -111,7 +119,7 @@ module attributes {transform.with_named_sequence} {
   }
 }
 
-module {
+module @payload attributes { transform.target_tag = "payload" } {
   // CHECK-LABEL: func @test_function(
   // CHECK-SAME: %[[A:.*]]: tensor<?xf32>
   func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
@@ -146,11 +154,13 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME: %[[A:.*]]: memref<12x9xf32>,
 // CHECK-SAME: %[[B:.*]]: memref<9x6xf32>,
 // CHECK-SAME: %[[C:.*]]: memref<12x6xf32>) -> memref<12x6xf32> {
-func.func @matmul(%A: tensor<12x9xf32>, %B: tensor<9x6xf32>, %C: tensor<12x6xf32>) -> tensor<12x6xf32> {
-  // CHECK: linalg.matmul ins(%[[A]], %[[B]] : memref<12x9xf32>, memref<9x6xf32>) outs(%[[C]] : memref<12x6xf32>)
-  %D = linalg.matmul ins(%A, %B: tensor<12x9xf32>, tensor<9x6xf32>) outs(%C: tensor<12x6xf32>) -> tensor<12x6xf32>
-  // CHECK: return %[[C]] : memref<12x6xf32>
-  return %D : tensor<12x6xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @matmul(%A: tensor<12x9xf32>, %B: tensor<9x6xf32>, %C: tensor<12x6xf32>) -> tensor<12x6xf32> {
+    // CHECK: linalg.matmul ins(%[[A]], %[[B]] : memref<12x9xf32>, memref<9x6xf32>) outs(%[[C]] : memref<12x6xf32>)
+    %D = linalg.matmul ins(%A, %B: tensor<12x9xf32>, tensor<9x6xf32>) outs(%C: tensor<12x6xf32>) -> tensor<12x6xf32>
+    // CHECK: return %[[C]] : memref<12x6xf32>
+    return %D : tensor<12x6xf32>
+  }
 }
 
 // -----
@@ -165,10 +175,12 @@ module attributes {transform.with_named_sequence} {
 }
 
 // Expect `bufferization.empty_tensor_to_alloc_tensor` to replace the tensor.empty.
-func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
-  // CHECK: bufferization.alloc_tensor
-  %0 = tensor.empty() : tensor<2x2xf32>
-  return %0 : tensor<2x2xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
+    // CHECK: bufferization.alloc_tensor
+    %0 = tensor.empty() : tensor<2x2xf32>
+    return %0 : tensor<2x2xf32>
+  }
 }
 
 // -----
@@ -185,13 +197,15 @@ module attributes {transform.with_named_sequence} {
 // CHECK: tensor.extract_slice
 // CHECK: linalg.fill
 // CHECK: tensor.insert_slice
-func.func @empty_tensor_elimination(
-    %t: tensor<10xf32>, %f: f32) -> tensor<10xf32> {
-  %0 = tensor.empty() : tensor<5xf32>
-  %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<5xf32>) -> tensor<5xf32>
-  %2 = tensor.insert_slice %1 into %t[1][5][1]
-      : tensor<5xf32> into tensor<10xf32>
-  return %2 : tensor<10xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @empty_tensor_elimination(
+      %t: tensor<10xf32>, %f: f32) -> tensor<10xf32> {
+    %0 = tensor.empty() : tensor<5xf32>
+    %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<5xf32>) -> tensor<5xf32>
+    %2 = tensor.insert_slice %1 into %t[1][5][1]
+        : tensor<5xf32> into tensor<10xf32>
+    return %2 : tensor<10xf32>
+  }
 }
 
 // -----
@@ -208,12 +222,14 @@ module attributes {transform.with_named_sequence} {
 // CHECK: memref.alloca
 // CHECK: scf.for
 // CHECK: memref.store
-func.func @buffer_loop_hoisting(%lb: index, %ub: index, %step: index, %f: f32, %pos: index) {
-  scf.for %iv = %lb to %ub step %step {
-    %0 = memref.alloca() : memref<5xf32>
-    memref.store %f, %0[%pos] : memref<5xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @buffer_loop_hoisting(%lb: index, %ub: index, %step: index, %f: f32, %pos: index) {
+    scf.for %iv = %lb to %ub step %step {
+      %0 = memref.alloca() : memref<5xf32>
+      memref.store %f, %0[%pos] : memref<5xf32>
+    }
+    return
   }
-  return
 }
 
 // -----
@@ -231,10 +247,12 @@ module attributes {transform.with_named_sequence} {
 
 // Expect `bufferization.bufferize_to_allocation` to create an alloc.
 // CHECK-LABEL: func.func @empty_to_tensor_alloc()
-func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
-  // CHECK-NEXT: %[[alloca:.*]] = memref.alloca() : memref<2x2xf32>
-  // CHECK-NEXT: %[[tensor:.*]] = bufferization.to_tensor %[[alloca]] restrict writable : memref<2x2xf32>
-  // CHECK-NEXT: return %[[tensor]] : tensor<2x2xf32>
-  %0 = bufferization.alloc_tensor() : tensor<2x2xf32>
-  return %0 : tensor<2x2xf32>
+module @payload attributes { transform.target_tag = "payload" } {
+  func.func @empty_to_tensor_alloc() -> tensor<2x2xf32> {
+    // CHECK-NEXT: %[[alloca:.*]] = memref.alloca() : memref<2x2xf32>
+    // CHECK-NEXT: %[[tensor:.*]] = bufferization.to_tensor %[[alloca]] restrict writable : memref<2x2xf32>
+    // CHECK-NEXT: return %[[tensor]] : tensor<2x2xf32>
+    %0 = bufferization.alloc_tensor() : tensor<2x2xf32>
+    return %0 : tensor<2x2xf32>
+  }
 }
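
For reference, every hunk above applies the same pattern: with `--transform-interpreter="debug-payload-root-tag=payload"`, the interpreter pass no longer treats the top-level module as the payload root but instead looks for the op carrying `transform.target_tag = "payload"`, so each test case's payload IR is wrapped in a tagged module that sits next to the transform script. A minimal sketch of that layout follows; the sequence body and payload contents are placeholders, not the test's actual ops.

// Sketch only: transform script and tagged payload module side by side.
module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.readonly}) {
    // Transform ops applied to the tagged payload module would go here.
    transform.yield
  }
}

module @payload attributes { transform.target_tag = "payload" } {
  // Payload IR (e.g. the func.func ops being bufferized) goes here.
}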