// RUN: mlir-opt --flatten-memref %s --split-input-file --verify-diagnostics | FileCheck %s

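// Scalar load with constant indices from a statically strided source: the memref
// collapses to a rank-1 view of 32 elements and the index folds to 1 * 8 + 2 = 10.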
func.func @load_scalar_from_memref(%input: memref<4x8xf32, strided<[8, 1], offset: 100>>) -> f32 {
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %value = memref.load %input[%c1, %c2] : memref<4x8xf32, strided<[8, 1], offset: 100>>
  return %value : f32
}
// CHECK: func @load_scalar_from_memref
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %arg0 to offset: [100], sizes: [32], strides: [1]
// CHECK-SAME: memref<4x8xf32, strided<[8, 1], offset: 100>> to memref<32xf32, strided<[1], offset: 100>>
// CHECK: memref.load %[[REINT]][%[[C10]]] : memref<32xf32, strided<[1], offset: 100>>

// -----

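// Scalar load with dynamic indices and a non-unit innermost stride: the linearized
// index col * 8 + row * 12 is computed with affine.apply.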
func.func @load_scalar_from_memref_static_dim_2(%input: memref<4x8xf32, strided<[8, 12], offset: 100>>, %row: index, %col: index) -> f32 {
  %value = memref.load %input[%col, %row] : memref<4x8xf32, strided<[8, 12], offset: 100>>
  return %value : f32
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 8 + s1 * 12)>
// CHECK: func @load_scalar_from_memref_static_dim_2
// CHECK-SAME: (%[[ARG0:.*]]: memref<4x8xf32, strided<[8, 12], offset: 100>>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index)
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG2]], %[[ARG1]]]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]] to offset: [100], sizes: [32], strides: [12]
// CHECK-SAME: to memref<32xf32, strided<[12], offset: 100>>
// CHECK: memref.load %[[REINT]][%[[IDX]]]

// -----

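// Fully dynamic sizes, strides, and offset: the layout is recovered at runtime
// with memref.extract_strided_metadata before the index is linearized.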
func.func @load_scalar_from_memref_dynamic_dim(%input: memref<?x?xf32, strided<[?, ?], offset: ?>>, %row: index, %col: index) -> f32 {
  %value = memref.load %input[%col, %row] : memref<?x?xf32, strided<[?, ?], offset: ?>>
  return %value : f32
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1, s2, s3] -> (s0 * s1 + s2 * s3)>
// CHECK: #[[MAP1:.*]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK: func @load_scalar_from_memref_dynamic_dim
// CHECK-SAME: (%[[ARG0:.*]]: memref<?x?xf32, strided<[?, ?], offset: ?>>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index)
// CHECK: %[[BASE:.*]], %[[OFFSET:.*]], %[[SIZES:.*]]:2, %[[STRIDES:.*]]:2 = memref.extract_strided_metadata %[[ARG0]]
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG2]], %[[STRIDES]]#0, %[[ARG1]], %[[STRIDES]]#1]
// CHECK: %[[SIZE:.*]] = affine.apply #[[MAP1]]()[%[[STRIDES]]#0, %[[SIZES]]#0]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]] to offset: [%[[OFFSET]]], sizes: [%[[SIZE]]], strides: [%[[STRIDES]]#1]
// CHECK: memref.load %[[REINT]][%[[IDX]]]

// -----

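// memref.subview produces a memref rather than loading from one; only the
// function label is checked here.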
func.func @load_scalar_from_memref_subview(%input: memref<4x8xf32>, %row: index, %col: index) -> memref<1x1xf32, strided<[8, 1], offset: ?>> {
  %subview = memref.subview %input[%col, %row] [1, 1] [1, 1] : memref<4x8xf32> to memref<1x1xf32, strided<[8, 1], offset: ?>>
  return %subview : memref<1x1xf32, strided<[8, 1], offset: ?>>
}
// CHECK: func @load_scalar_from_memref_subview

// -----

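// Store counterparts of the scalar-load cases above.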
func.func @store_scalar_from_memref_static_dim(%input: memref<4x8xf32, strided<[8, 12], offset: 100>>, %row: index, %col: index, %value: f32) {
  memref.store %value, %input[%col, %row] : memref<4x8xf32, strided<[8, 12], offset: 100>>
  return
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 8 + s1 * 12)>
// CHECK: func @store_scalar_from_memref_static_dim
// CHECK-SAME: (%[[ARG0:.*]]: memref<4x8xf32, strided<[8, 12], offset: 100>>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: f32)
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG2]], %[[ARG1]]]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]]
// CHECK: memref.store %[[ARG3]], %[[REINT]][%[[IDX]]] : memref<32xf32, strided<[12], offset: 100>>

// -----

func.func @store_scalar_from_memref_dynamic_dim(%input: memref<?x?xf32, strided<[?, ?], offset: ?>>, %row: index, %col: index, %value: f32) {
  memref.store %value, %input[%col, %row] : memref<?x?xf32, strided<[?, ?], offset: ?>>
  return
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1, s2, s3] -> (s0 * s1 + s2 * s3)>
// CHECK: #[[MAP1:.*]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK: func @store_scalar_from_memref_dynamic_dim
// CHECK-SAME: (%[[ARG0:.*]]: memref<?x?xf32, strided<[?, ?], offset: ?>>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: f32)
// CHECK: %[[BASE:.*]], %[[OFFSET:.*]], %[[SIZES:.*]]:2, %[[STRIDES:.*]]:2 = memref.extract_strided_metadata %[[ARG0]]
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG2]], %[[STRIDES]]#0, %[[ARG1]], %[[STRIDES]]#1]
// CHECK: %[[SIZE:.*]] = affine.apply #[[MAP1]]()[%[[STRIDES]]#0, %[[SIZES]]#0]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]] to offset: [%[[OFFSET]]], sizes: [%[[SIZE]]], strides: [%[[STRIDES]]#1]
// CHECK: memref.store %[[ARG3]], %[[REINT]][%[[IDX]]]

// -----

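// vector.load with constant indices on a contiguous memref: index 3 * 8 + 6 = 30.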
func.func @load_vector_from_memref(%input: memref<4x8xf32>) -> vector<8xf32> {
  %c3 = arith.constant 3 : index
  %c6 = arith.constant 6 : index
  %value = vector.load %input[%c3, %c6] : memref<4x8xf32>, vector<8xf32>
  return %value : vector<8xf32>
}
// CHECK: func @load_vector_from_memref
// CHECK: %[[C30:.*]] = arith.constant 30
// CHECK-NEXT: %[[REINT:.*]] = memref.reinterpret_cast %arg0 to offset: [0], sizes: [32], strides: [1]
// CHECK-NEXT: vector.load %[[REINT]][%[[C30]]]

// -----

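// Sub-byte (i2) elements with a non-power-of-two inner dimension: the 3x7 memref
// flattens to 21 elements and the index folds to 1 * 7 + 3 = 10.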
func.func @load_vector_from_memref_odd(%input: memref<3x7xi2>) -> vector<3xi2> {
  %c1 = arith.constant 1 : index
  %c3 = arith.constant 3 : index
  %value = vector.load %input[%c1, %c3] : memref<3x7xi2>, vector<3xi2>
  return %value : vector<3xi2>
}
// CHECK: func @load_vector_from_memref_odd
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK-NEXT: %[[REINT:.*]] = memref.reinterpret_cast
// CHECK-NEXT: vector.load %[[REINT]][%[[C10]]]

// -----

func.func @load_vector_from_memref_dynamic(%input: memref<3x7xi2>, %row: index, %col: index) -> vector<3xi2> {
  %value = vector.load %input[%col, %row] : memref<3x7xi2>, vector<3xi2>
  return %value : vector<3xi2>
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 7 + s1)>
// CHECK: func @load_vector_from_memref_dynamic
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast
// CHECK: vector.load %[[REINT]][%[[IDX]]] : memref<21xi2, strided<[1]>>, vector<3xi2>

// -----

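// vector.store counterparts of the two vector.load cases above.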
func.func @store_vector_to_memref_odd(%input: memref<3x7xi2>, %value: vector<3xi2>) {
  %c1 = arith.constant 1 : index
  %c3 = arith.constant 3 : index
  vector.store %value, %input[%c1, %c3] : memref<3x7xi2>, vector<3xi2>
  return
}
// CHECK: func @store_vector_to_memref_odd
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK-NEXT: %[[REINT:.*]] = memref.reinterpret_cast
// CHECK-NEXT: vector.store %arg1, %[[REINT]][%[[C10]]] : memref<21xi2, strided<[1]>>

// -----

func.func @store_vector_to_memref_dynamic(%input: memref<3x7xi2>, %value: vector<3xi2>, %row: index, %col: index) {
  vector.store %value, %input[%col, %row] : memref<3x7xi2>, vector<3xi2>
  return
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 7 + s1)>
// CHECK: func @store_vector_to_memref_dynamic
// CHECK-SAME: (%[[ARG0:.*]]: memref<3x7xi2>, %[[ARG1:.*]]: vector<3xi2>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index)
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG3]], %[[ARG2]]]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]] to offset: [0], sizes: [21], strides: [1]
// CHECK: vector.store %[[ARG1]], %[[REINT]][%[[IDX]]]

// -----

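// Masked variants: the mask and pass-through operands are left untouched while
// the base memref and index are flattened.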
func.func @mask_store_vector_to_memref_odd(%input: memref<3x7xi2>, %value: vector<3xi2>, %mask: vector<3xi1>) {
  %c1 = arith.constant 1 : index
  %c3 = arith.constant 3 : index
  vector.maskedstore %input[%c1, %c3], %mask, %value : memref<3x7xi2>, vector<3xi1>, vector<3xi2>
  return
}
// CHECK: func @mask_store_vector_to_memref_odd
// CHECK-SAME: (%[[ARG0:.*]]: memref<3x7xi2>, %[[ARG1:.*]]: vector<3xi2>, %[[ARG2:.*]]: vector<3xi1>)
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK-NEXT: %[[REINT:.*]] = memref.reinterpret_cast
// CHECK: vector.maskedstore %[[REINT]][%[[C10]]], %[[ARG2]], %[[ARG1]]

// -----

func.func @mask_store_vector_to_memref_dynamic(%input: memref<3x7xi2>, %value: vector<3xi2>, %row: index, %col: index, %mask: vector<3xi1>) {
  vector.maskedstore %input[%col, %row], %mask, %value : memref<3x7xi2>, vector<3xi1>, vector<3xi2>
  return
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 7 + s1)>
// CHECK: func @mask_store_vector_to_memref_dynamic
// CHECK-SAME: (%[[ARG0:.*]]: memref<3x7xi2>, %[[ARG1:.*]]: vector<3xi2>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: vector<3xi1>)
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG3]], %[[ARG2]]]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]]
// CHECK: vector.maskedstore %[[REINT]][%[[IDX]]], %[[ARG4]], %[[ARG1]]

// -----

func.func @mask_load_vector_from_memref_odd(%input: memref<3x7xi2>, %mask: vector<3xi1>, %passthru: vector<3xi2>) -> vector<3xi2> {
  %c1 = arith.constant 1 : index
  %c3 = arith.constant 3 : index
  %result = vector.maskedload %input[%c1, %c3], %mask, %passthru : memref<3x7xi2>, vector<3xi1>, vector<3xi2> into vector<3xi2>
  return %result : vector<3xi2>
}
// CHECK: func @mask_load_vector_from_memref_odd
// CHECK-SAME: (%[[ARG0:.*]]: memref<3x7xi2>, %[[MASK:.*]]: vector<3xi1>, %[[PASSTHRU:.*]]: vector<3xi2>)
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]] to offset: [0], sizes: [21], strides: [1]
// CHECK: vector.maskedload %[[REINT]][%[[C10]]], %[[MASK]], %[[PASSTHRU]]

// -----

func.func @mask_load_vector_from_memref_dynamic(%input: memref<3x7xi2>, %row: index, %col: index, %mask: vector<3xi1>, %passthru: vector<3xi2>) -> vector<3xi2> {
  %result = vector.maskedload %input[%col, %row], %mask, %passthru : memref<3x7xi2>, vector<3xi1>, vector<3xi2> into vector<3xi2>
  return %result : vector<3xi2>
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 7 + s1)>
// CHECK: func @mask_load_vector_from_memref_dynamic
// CHECK-SAME: (%[[ARG0:.*]]: memref<3x7xi2>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index, %[[ARG3:.*]]: vector<3xi1>, %[[ARG4:.*]]: vector<3xi2>)
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG2]], %[[ARG1]]]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]]
// CHECK: vector.maskedload %[[REINT]][%[[IDX]]], %[[ARG3]]

// -----

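// vector.transfer_read / vector.transfer_write: the padding and written value
// stay the same while the source/destination memref and index are flattened.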
func.func @transfer_read_memref(%input: memref<4x8xi2>, %value: vector<8xi2>, %row: index, %col: index) -> vector<8xi2> {
  %c0 = arith.constant 0 : i2
  %0 = vector.transfer_read %input[%col, %row], %c0 : memref<4x8xi2>, vector<8xi2>
  return %0 : vector<8xi2>
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 8 + s1)>
// CHECK: func @transfer_read_memref
// CHECK-SAME: (%[[ARG0:.*]]: memref<4x8xi2>, %[[ARG1:.*]]: vector<8xi2>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index)
// CHECK: %[[C0:.*]] = arith.constant 0 : i2
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG3]], %[[ARG2]]]
// CHECK-NEXT: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]]
// CHECK-NEXT: vector.transfer_read %[[REINT]][%[[IDX]]], %[[C0]]

// -----

func.func @transfer_write_memref(%input: memref<4x8xi2>, %value: vector<8xi2>, %row: index, %col: index) {
  vector.transfer_write %value, %input[%col, %row] : vector<8xi2>, memref<4x8xi2>
  return
}
// CHECK: #[[MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 8 + s1)>
// CHECK: func @transfer_write_memref
// CHECK-SAME: (%[[ARG0:.*]]: memref<4x8xi2>, %[[ARG1:.*]]: vector<8xi2>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index)
// CHECK: %[[IDX:.*]] = affine.apply #[[MAP]]()[%[[ARG3]], %[[ARG2]]]
// CHECK: %[[REINT:.*]] = memref.reinterpret_cast %[[ARG0]]
// CHECK: vector.transfer_write %[[ARG1]], %[[REINT]][%[[IDX]]]