@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -scf-bufferize | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="dialect-filter=scf,bufferization copy-before-write unknown-type-conversion=identity-layout-map" -split-input-file | FileCheck %s

 // CHECK-LABEL: func @if(
 // CHECK-SAME: %[[PRED:.*]]: i1,
@@ -23,15 +23,21 @@ func.func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) ->
   return %0 : tensor<?xf32>
 }

+// -----
+
 // CHECK-LABEL: func @for(
 // CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
 // CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index,
 // CHECK-SAME: %[[STEP:.*]]: index) -> tensor<f32> {
 // CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
-// CHECK: %[[RESULT_MEMREF:.*]] = scf.for %[[VAL_6:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] iter_args(%[[ITER:.*]] = %[[MEMREF]]) -> (memref<f32>) {
+// Note: scf.for iter_args always bufferize to a memory write. This could be
+// optimized by analyzing the loop body.
+// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
+// CHECK: memref.copy %[[MEMREF]], %[[MEMREF_COPY]]
+// CHECK: %[[RESULT_MEMREF:.*]] = scf.for %{{.*}} = %[[LB]] to %[[UB]] step %[[STEP]] iter_args(%[[ITER:.*]] = %[[MEMREF_COPY]]) -> (memref<f32>) {
 // CHECK: scf.yield %[[ITER]] : memref<f32>
 // CHECK: } {some_attr}
-// CHECK: %[[VAL_8:.*]] = bufferization.to_tensor %[[VAL_9:.*]] : memref<f32>
+// CHECK: %[[VAL_8:.*]] = bufferization.to_tensor %[[RESULT_MEMREF]] : memref<f32>
 // CHECK: return %[[VAL_8]] : tensor<f32>
 // CHECK: }
 func.func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f32> {
@@ -41,6 +47,8 @@ func.func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tens
   return %ret : tensor<f32>
 }

+// -----
+
 // Check whether this converts at all.
 //
 // It would previously fail altogether.
@@ -57,17 +65,23 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor
   return %0 : tensor<f32>
 }

+// -----
+
 // CHECK-LABEL: func @for_correct_recursive_legalization_behavior(
 // CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
 // CHECK-SAME: %[[INDEX:.*]]: index) -> tensor<f32> {
 // CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
-// CHECK: %[[RESULT:.*]] = scf.for %[[IV:.*]] = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF]]) -> (memref<f32>) {
+// Note: scf.for iter_args always bufferize to a memory write. This could be
+// optimized by analyzing the loop body.
+// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
+// CHECK: memref.copy %[[MEMREF]], %[[MEMREF_COPY]]
+// CHECK: %[[RESULT:.*]] = scf.for %{{.*}} = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF_COPY]]) -> (memref<f32>) {
 // CHECK: %[[TENSOR_ITER:.*]] = bufferization.to_tensor %[[MEMREF_ITER]] : memref<f32>
 // CHECK: %[[TENSOR_MUNGED:.*]] = "test.munge_tensor"(%[[TENSOR_ITER]]) : (tensor<f32>) -> tensor<f32>
 // CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : memref<f32>
 // CHECK: scf.yield %[[MEMREF_MUNGED]] : memref<f32>
 // CHECK: }
-// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT:.*]] : memref<f32>
+// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT]] : memref<f32>
 // CHECK: return %[[TENSOR]] : tensor<f32>
 // CHECK: }
 func.func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %index: index) -> tensor<f32> {
@@ -78,11 +92,17 @@ func.func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %inde
   return %ret : tensor<f32>
 }

+// -----
+
 // CHECK-LABEL: func @bufferize_while(
 // CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: tensor<f32>
 // CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : memref<f32>
-// CHECK: %[[RES1:.*]]:3 = scf.while (%{{.*}} = %[[ARG0]], %{{.*}} = %[[M]]) : (i64, memref<f32>) -> (i64, i64, memref<f32>)
-// CHECK: scf.condition(%{{.*}}) %{{.*}}, %{{.*}}, %{{.*}} : i64, i64, memref<f32>
+// Note: scf.while iter_args always bufferize to a memory write. This could be
+// optimized by analyzing the loop body.
+// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
+// CHECK: memref.copy %[[M]], %[[MEMREF_COPY]]
+// CHECK: %[[RES1:.*]]:3 = scf.while (%{{.*}} = %[[ARG0]], %[[ITER:.*]] = %[[MEMREF_COPY]]) : (i64, memref<f32>) -> (i64, i64, memref<f32>)
+// CHECK: scf.condition(%{{.*}}) %{{.*}}, %{{.*}}, %[[ITER]] : i64, i64, memref<f32>
 // CHECK: ^bb0(%{{.*}}: i64, %{{.*}}: i64, %{{.*}}: memref<f32>):
 // CHECK: scf.yield %{{.*}}, %{{.*}} : i64, memref<f32>
 // CHECK: %[[RES2:.*]] = bufferization.to_tensor %[[RES1]]#2 : memref<f32>
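
With copy-before-write, one-shot-bufferize skips its inplaceability analysis and inserts a buffer copy before any operand that may be written, which is why every loop above now allocates and copies its iter_arg buffer up front. As a sketch of the @for case (reconstructed from the CHECK lines above, not the verbatim test source), the body merely forwards its iter_arg, yet the copy is still materialized because scf.for iter_args are conservatively treated as memory writes:

// Input (tensor form), reconstructed from the CHECK lines:
func.func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f32> {
  %ret = scf.for %iv = %lb to %ub step %step iter_args(%iter = %arg0) -> (tensor<f32>) {
    scf.yield %iter : tensor<f32>
  } {some_attr}
  return %ret : tensor<f32>
}

// Approximate output (memref form): the incoming buffer is copied into a
// fresh allocation before the loop, even though the body never writes it.
func.func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f32> {
  %m = bufferization.to_memref %arg0 : memref<f32>
  %copy = memref.alloc() : memref<f32>
  memref.copy %m, %copy : memref<f32> to memref<f32>
  %res = scf.for %iv = %lb to %ub step %step iter_args(%iter = %copy) -> (memref<f32>) {
    scf.yield %iter : memref<f32>
  } {some_attr}
  %t = bufferization.to_tensor %res : memref<f32>
  return %t : tensor<f32>
}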