Commit 99df9ef (parent: 9dcfb2f)

23 -> 22

Signed-off-by: hanhanW <[email protected]>
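The new number follows from how linalg.pack computes its packed type: each outer dimension is the ceiling division of the corresponding source dimension by its inner tile. For a 64x32 source with inner_tiles = [3, 16], that gives ceil(64/3) = 22 and ceil(32/16) = 2, so the packed type is tensor<22x2x3x16xf32>, not tensor<23x2x3x16xf32>. Dimension 0 still needs padding (64 is not a multiple of 3), but the dimension tiled by the loop (dimension 1, tile 16) divides evenly, which is what makes the fusion in this test valid. A minimal arithmetic sketch (illustrative only, not part of the commit):

    import math

    src_shape   = (64, 32)   # tensor<64x32xf32>
    inner_tiles = (3, 16)    # inner_dims_pos = [0, 1]

    # linalg.pack outer sizes: ceil-divide each packed source dim by its tile.
    outer = tuple(math.ceil(s / t) for s, t in zip(src_shape, inner_tiles))
    assert outer == (22, 2)  # packed type: tensor<22x2x3x16xf32>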

1 file changed (+7, -7)
mlir/test/Interfaces/TilingInterface/tile-and-fuse-consumer.mlir

@@ -503,7 +503,7 @@ module attributes {transform.with_named_sequence} {
 // It is valid to fuse the pack op with padding semantics if the tiled
 // dimensions do not need padding.
 
-func.func @fuse_pack_consumer_with_padding_semantics(%arg0: tensor<64x32xf32>, %arg1: tensor<64x32xf32>) -> tensor<23x2x3x16xf32> {
+func.func @fuse_pack_consumer_with_padding_semantics(%arg0: tensor<64x32xf32>, %arg1: tensor<64x32xf32>) -> tensor<22x2x3x16xf32> {
   %0 = scf.forall (%arg2) = (0) to (32) step (16) shared_outs(%arg3 = %arg1) -> (tensor<64x32xf32>) {
     %src = tensor.extract_slice %arg0[0, %arg2] [64, 16] [1, 1] : tensor<64x32xf32> to tensor<64x16xf32>
     %dest = tensor.extract_slice %arg3[0, %arg2] [64, 16] [1, 1] : tensor<64x32xf32> to tensor<64x16xf32>
@@ -512,10 +512,10 @@ func.func @fuse_pack_consumer_with_padding_semantics(%arg0: tensor<64x32xf32>, %
       tensor.parallel_insert_slice %2 into %arg3[0, %arg2] [64, 16] [1, 1] : tensor<64x16xf32> into tensor<64x32xf32>
     }
   }
-  %1 = tensor.empty() : tensor<23x2x3x16xf32>
+  %1 = tensor.empty() : tensor<22x2x3x16xf32>
   %cst = arith.constant 0.000000e+00 : f32
-  %pack = linalg.pack %0 padding_value(%cst : f32) inner_dims_pos = [0, 1] inner_tiles = [3, 16] into %1 : tensor<64x32xf32> -> tensor<23x2x3x16xf32>
-  return %pack : tensor<23x2x3x16xf32>
+  %pack = linalg.pack %0 padding_value(%cst : f32) inner_dims_pos = [0, 1] inner_tiles = [3, 16] into %1 : tensor<64x32xf32> -> tensor<22x2x3x16xf32>
+  return %pack : tensor<22x2x3x16xf32>
 }
 
 module attributes {transform.with_named_sequence} {
@@ -530,7 +530,7 @@ module attributes {transform.with_named_sequence} {
 // CHECK: func.func @fuse_pack_consumer_with_padding_semantics(
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
-// CHECK-DAG: %[[OUT_INIT:.*]] = tensor.empty() : tensor<23x2x3x16xf32>
+// CHECK-DAG: %[[OUT_INIT:.*]] = tensor.empty() : tensor<22x2x3x16xf32>
 // CHECK-DAG: %[[PAD_VAL:.*]] = arith.constant 0.000000e+00 : f32
 // CHECK: %{{.*}}:2 = scf.forall (%[[IV:.*]]) = (0) to (32) step (16)
 // CHECK-SAME: shared_outs(%[[FIRST_OUT_ARG:.*]] = %[[ARG1]], %[[PACK_OUT_ARG:.*]] = %[[OUT_INIT]])
@@ -540,14 +540,14 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME: ins(%[[ELEM_SRC]]
 // CHECK-SAME: outs(%[[ELEM_DEST]]
 // CHECK-DAG: %[[PACK_RESULT_OFFSET:.*]] = affine.apply #[[PACK_RESULT_MAP]](%[[IV]])
-// CHECK-DAG: %[[TILED_PACK_DEST:.*]] = tensor.extract_slice %[[PACK_OUT_ARG]][0, %[[PACK_RESULT_OFFSET]], 0, 0] [23, 1, 3, 16] [1, 1, 1, 1]
+// CHECK-DAG: %[[TILED_PACK_DEST:.*]] = tensor.extract_slice %[[PACK_OUT_ARG]][0, %[[PACK_RESULT_OFFSET]], 0, 0] [22, 1, 3, 16] [1, 1, 1, 1]
 // CHECK: %[[TILED_PACK_OUT:.*]] = linalg.pack %[[ELEM]]
 // CHECK-SAME: padding_value(%[[PAD_VAL]] : f32)
 // CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [3, 16]
 // CHECK-SAME: into %[[TILED_PACK_DEST]]
 // CHECK: scf.forall.in_parallel {
 // CHECK: tensor.parallel_insert_slice %[[GENERIC_OUT]] into %[[FIRST_OUT_ARG]][0, %[[IV]]] [64, 16] [1, 1]
-// CHECK: tensor.parallel_insert_slice %[[TILED_PACK_OUT]] into %[[PACK_OUT_ARG]][0, %[[PACK_RESULT_OFFSET]], 0, 0] [23, 1, 3, 16] [1, 1, 1, 1]
+// CHECK: tensor.parallel_insert_slice %[[TILED_PACK_OUT]] into %[[PACK_OUT_ARG]][0, %[[PACK_RESULT_OFFSET]], 0, 0] [22, 1, 3, 16] [1, 1, 1, 1]
 
 // -----
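For context on what the updated CHECK lines assert: the fused pack runs inside the forall, and each of the two iterations (%arg2 in {0, 16}) packs a 64x16 slice into a [22, 1, 3, 16] tile of the 22x2x3x16 output. A small illustrative sketch, assuming #PACK_RESULT_MAP maps the induction variable to its outer packed index (iv // 16; the map's definition sits outside these hunks):

    # Illustrative only; mirrors the slice offsets the CHECK lines verify.
    for iv in range(0, 32, 16):   # scf.forall (0) to (32) step (16)
        offset = iv // 16         # assumed #PACK_RESULT_MAP behavior
        print(f"insert [22, 1, 3, 16] tile at [0, {offset}, 0, 0]")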