@@ -113,14 +113,14 @@ func.func @transfer_read_dims_mismatch_non_zero_indices(
113113// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
114114
115115// CHECK-LABEL: func.func @transfer_read_dims_mismatch_non_zero_indices(
116- // CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index,
117- // CHECK-SAME: %[[MEM:.*]]: memref<1x43x4x6xi32>
118- // CHECK: %[[C_0:.*]] = arith.constant 0 : i32
119- // CHECK: %[[COLLAPSED_IN:.*]] = memref.collapse_shape %[[MEM]]
116+ // CHECK-SAME: %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index,
117+ // CHECK-SAME: %[[MEM:.+]]: memref<1x43x4x6xi32>
118+ // CHECK: %[[C_0:.+]] = arith.constant 0 : i32
119+ // CHECK: %[[COLLAPSED_IN:.+]] = memref.collapse_shape %[[MEM]]
120120// CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
121121// CHECK-SAME: : memref<1x43x4x6xi32> into memref<1032xi32>
122- // CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
123- // CHECK: %[[READ:.*]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>
122+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
123+ // CHECK: %[[READ:.+]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>
124124
125125// CHECK-128B-LABEL: func @transfer_read_dims_mismatch_non_zero_indices(
126126// CHECK-128B-NOT: memref.collapse_shape
@@ -191,7 +191,7 @@ func.func @transfer_read_leading_dynamic_dims(
191191
192192// -----
193193
194- // One of the dims to be flattened is dynamic - not supported ATM.
194+ // One of the dims to be flattened can be dynamic if it's leftmost
195195
196196func.func @transfer_read_dynamic_dim_to_flatten(
197197 %idx_1: index,
@@ -206,20 +206,21 @@ func.func @transfer_read_dynamic_dim_to_flatten(
206206 return %res : vector<1x2x6xi32>
207207}
208208
209- // CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
209+ // CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
210210
211211// CHECK-LABEL: func.func @transfer_read_dynamic_dim_to_flatten
212212// CHECK-SAME: %[[IDX_1:arg0]]
213213// CHECK-SAME: %[[IDX_2:arg1]]
214214// CHECK-SAME: %[[MEM:arg2]]
215- // CHECK: %[[C0_I32:.*]] = arith.constant 0 : i32
216- // CHECK: %[[COLLAPSED:.*]] = memref.collapse_shape %[[MEM]]
217- // CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
218- // CHECK-SAME: memref<1x?x4x6xi32> into memref<?xi32>
219- // CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
220- // CHECK: %[[VEC_1D:.*]] = vector.transfer_read %[[COLLAPSED]][%[[COLLAPSED_IDX]]],
221- // CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<?xi32>, vector<12xi32>
222- // CHECK: %[[RESULT:.*]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>
215+ // CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32
216+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
217+ // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
218+ // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
219+ // CHECK-SAME: memref<1x?x4x6xi32> into memref<1x?xi32>
220+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
221+ // CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED]][%[[C0]], %[[COLLAPSED_IDX]]],
222+ // CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<1x?xi32>, vector<12xi32>
223+ // CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>
223224// CHECK: return %[[RESULT]] : vector<1x2x6xi32>
224225
225226
@@ -468,7 +469,7 @@ func.func @transfer_write_leading_dynamic_dims(
468469
469470// -----
470471
471- // One of the dims to be flattened is dynamic - not supported ATM.
472+ // One of the dims to be flattened can be dynamic if it's leftmost
472473
473474func.func @transfer_write_dynamic_to_flatten(
474475 %idx_1: index,
@@ -483,21 +484,21 @@ func.func @transfer_write_dynamic_to_flatten(
483484 return
484485}
485486
486- // CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
487+ // CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
487488
488489// CHECK-LABEL: func.func @transfer_write_dynamic_to_flatten
489490// CHECK-SAME: %[[IDX_1:arg0]]: index
490491// CHECK-SAME: %[[IDX_2:arg1]]: index
491492// CHECK-SAME: %[[VEC:arg2]]: vector<1x2x6xi32>
492493// CHECK-SAME: %[[MEM:arg3]]: memref<1x?x4x6xi32>
493-
494- // CHECK: %[[COLLAPSED_MEM:.*]] = memref.collapse_shape %[[MEM]]
495- // CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
496- // CHECK-SAME: : memref<1x?x4x6xi32> into memref<?xi32>
497- // CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
498- // CHECK: %[[VEC_1D:.*]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
499- // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[COLLAPSED_IDX]]]
500- // CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<?xi32>
494+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
495+ // CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
496+ // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
497+ // CHECK-SAME: : memref<1x?x4x6xi32> into memref<1x?xi32>
498+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
499+ // CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
500+ // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[COLLAPSED_IDX]]]
501+ // CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<1x?xi32>
501502
502503// CHECK-128B-LABEL: func @transfer_write_dynamic_to_flatten
503504// CHECK-128B-NOT: memref.collapse_shape
0 commit comments