// RUN: mlir-query %s -c "m getDefinitionsByPredicate(hasOpName(\"memref.store\"),hasOpName(\"memref.alloc\"),true,false,false).extract(\"backward_slice\")" | FileCheck %s

// CHECK: func.func @backward_slice(%{{.*}}: memref<10xf32>) -> (f32, index, index, f32, index, index, f32) {
// CHECK: %[[CST0:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-NEXT: %[[C0:.*]] = arith.constant 0 : index
// CHECK-NEXT: %[[I0:.*]] = affine.apply affine_map<()[s0] -> (s0)>()[%[[C0]]]
// CHECK-NEXT: memref.store %[[CST0]], %{{.*}}[%[[I0]]] : memref<10xf32>
// CHECK-NEXT: %[[CST2:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-NEXT: %[[I1:.*]] = affine.apply affine_map<() -> (0)>()
// CHECK-NEXT: memref.store %[[CST2]], %{{.*}}[%[[I1]]] : memref<10xf32>
// CHECK-NEXT: %[[C1:.*]] = arith.constant 0 : index
// CHECK-NEXT: %[[LOAD:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<10xf32>
// CHECK-NEXT: memref.store %[[LOAD]], %{{.*}}[%[[C1]]] : memref<10xf32>
// CHECK-NEXT: return %[[CST0]], %[[C0]], %[[I0]], %[[CST2]], %[[I1]], %[[C1]], %[[LOAD]] : f32, index, index, f32, index, index, f32

// Input for the backward-slice query above: three memref.store ops inside an
// affine.for loop, all writing into the memref produced by memref.alloc. The
// query (see RUN line) collects the backward slice of each memref.store,
// stopping at the memref.alloc, and extracts it into a @backward_slice func.
func.func @slicing_memref_store_trivial() {
  %0 = memref.alloc() : memref<10xf32>
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.000000e+00 : f32
  affine.for %i1 = 0 to 10 {
    // Identity map applied to %c0, so %1 == %c0; kept as a distinct op so the
    // slice of the first store includes an affine.apply.
    %1 = affine.apply affine_map<()[s0] -> (s0)>()[%c0]
    memref.store %cst, %0[%1] : memref<10xf32>
    %2 = memref.load %0[%c0] : memref<10xf32>
    // Symbol-free constant map; prints canonically as affine_map<() -> (0)>
    // in the extracted function (see the CHECK for %[[I1]]).
    %3 = affine.apply affine_map<()[] -> (0)>()[]
    memref.store %cst, %0[%3] : memref<10xf32>
    // Store of a loaded value: its backward slice pulls in the memref.load.
    memref.store %2, %0[%c0] : memref<10xf32>
  }
  return
}
0 commit comments