- // RUN: triton-opt %s -split-input-file --allocate-shared-memory --tritonintelgpu-optimize-block-io-encoding | FileCheck %s
+ // RUN: triton-opt %s -split-input-file --tritonintelgpu-optimize-block-io-encoding | FileCheck %s

#blocked = #ttg.blocked<{sizePerThread = [4, 4], threadsPerWarp = [1, 16], warpsPerCTA = [8, 4], order = [1, 0]}>
#blocked1 = #ttg.blocked<{sizePerThread = [1, 8], threadsPerWarp = [4, 4], warpsPerCTA = [32, 1], order = [1, 0]}>
// CHECK: #mma2 = #ttig.dpas<{repeatCount = 8, systolicDepth = 8, executionSize = 16, opsPerChan = 2, threadsPerWarp = 16, warpsPerCTA = [8, 4], repCluster = [4, 2], A = [32, 16], B = [16, 32], C = [32, 32]}>
#mma = #ttig.dpas<{repeatCount = 8, systolicDepth = 8, executionSize = 16, opsPerChan = 2, threadsPerWarp = 16, warpsPerCTA = [8, 4], repCluster = [4, 2], A = [32, 16], B = [16, 32], C = [32, 32]}>
module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 32 : i32, ttg.target = "xpu", "ttg.threads-per-warp" = 16 : i32, ttig.min_sg_size = 16 : i32, ttig.support_bf16_conversion, ttig.support_dpas, ttig.support_sg_2d_block, ttig.target_arch = "spir64"} {
- tt.func public @matmul_kernel_with_block_pointers(%arg0: !tt.ptr<f16>, %arg1: !tt.ptr<f16>, %arg2: !tt.ptr<f16>) attributes {noinline = false} {
+ tt.func public @matmul_kernel_with_block_pointers(%arg0: !tt.ptr<f16>, %arg1: !tt.ptr<f16>, %arg2: !tt.ptr<f16>) {
%c4_i32 = arith.constant 4 : i32
%c256_i32 = arith.constant 256 : i32
%c1024_i64 = arith.constant 1024 : i64
@@ -30,17 +30,18 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 32 : i32, ttg.tar
%7 = arith.remsi %0, %c64_i32 : i32
%8 = arith.divsi %7, %4 : i32
%9 = arith.muli %6, %c256_i32 : i32
- // CHECK: tt.make_tensor_ptr {{.*}} : <tensor<256x32xf16, #mma>>
+ // CHECK: %[[MAKE_TENSOR_PTR_A:.*]] = tt.make_tensor_ptr {{.*}} : <tensor<256x32xf16, #mma>>
%10 = tt.make_tensor_ptr %arg0, [%c1024_i64, %c5120_i64], [%c5120_i64, %c1_i64], [%9, %c0_i32] {order = array<i32: 1, 0>} : <tensor<256x32xf16, #blocked1>>
%11 = arith.muli %8, %c256_i32 : i32
- // CHECK: tt.make_tensor_ptr {{.*}} : <tensor<32x256xf16, #mma1>>
+ // CHECK: %[[MAKE_TENSOR_PTR_B:.*]] = tt.make_tensor_ptr {{.*}} : <tensor<32x256xf16, #mma1>>
%12 = tt.make_tensor_ptr %arg1, [%c5120_i64, %c4096_i64], [%c4096_i64, %c1_i64], [%c0_i32, %11] {order = array<i32: 1, 0>} : <tensor<32x256xf16, #blocked2>>
+ // CHECK: scf.for {{.*}} iter_args({{.*}} = {{.*}}, %[[ARG5:.*]] = %[[MAKE_TENSOR_PTR_A]], %[[ARG6:.*]] = %[[MAKE_TENSOR_PTR_B]])
%13:3 = scf.for %arg3 = %c0_i32 to %c5120_i32 step %c32_i32 iter_args(%arg4 = %cst, %arg5 = %10, %arg6 = %12) -> (tensor<256x256xf32, #blocked>, !tt.ptr<tensor<256x32xf16, #blocked1>>, !tt.ptr<tensor<32x256xf16, #blocked2>>) : i32 {
%17 = tt.load %arg5 {boundaryCheck = array<i32: 0, 1>, ttig.block_io = "row_major"} : !tt.ptr<tensor<256x32xf16, #blocked1>>
- // CHECK: %[[A_LOAD:.*]] = tt.load %arg5 {boundaryCheck = array<i32: 0, 1>, ttig.block_io = "row_major"} : !tt.ptr<tensor<256x32xf16, #mma>>
+ // CHECK: %[[A_LOAD:.*]] = tt.load %[[ARG5]] {boundaryCheck = array<i32: 0, 1>, ttig.block_io = "row_major"} : !tt.ptr<tensor<256x32xf16, #mma>>
// CHECK: {{.*}} = ttg.convert_layout %[[A_LOAD]] : tensor<256x32xf16, #mma> -> tensor<256x32xf16, #blocked1>
%18 = tt.load %arg6 {boundaryCheck = array<i32: 0, 1>, ttig.block_io = "row_major"} : !tt.ptr<tensor<32x256xf16, #blocked2>>
- // CHECK: %[[B_LOAD:.*]] = tt.load %arg6 {boundaryCheck = array<i32: 0, 1>, ttig.block_io = "row_major"} : !tt.ptr<tensor<32x256xf16, #mma1>>
+ // CHECK: %[[B_LOAD:.*]] = tt.load %[[ARG6]] {boundaryCheck = array<i32: 0, 1>, ttig.block_io = "row_major"} : !tt.ptr<tensor<32x256xf16, #mma1>>
// CHECK: {{.*}} = ttg.convert_layout %[[B_LOAD]] : tensor<32x256xf16, #mma1> -> tensor<32x256xf16, #blocked2>
%19 = ttg.convert_layout %17 : tensor<256x32xf16, #blocked1> -> tensor<256x32xf16, #ttg.dot_op<{opIdx = 0, parent = #blocked}>>
%20 = ttg.convert_layout %18 : tensor<32x256xf16, #blocked2> -> tensor<32x256xf16, #ttg.dot_op<{opIdx = 1, parent = #blocked}>>
@@ -50,10 +51,11 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 32 : i32, ttg.tar
// CHECK: tt.dot {{.*}} : tensor<256x32xf16, #ttg.dot_op<{opIdx = 0, parent = #mma2, kWidth = 1}>> * tensor<32x256xf16, #ttg.dot_op<{opIdx = 1, parent = #mma2, kWidth = 2}>> -> tensor<256x256xf32, #mma2>
%24 = tt.dot %22, %23, %21, inputPrecision = tf32 : tensor<256x32xf16, #ttg.dot_op<{opIdx = 0, parent = #mma, kWidth = 1}>> * tensor<32x256xf16, #ttg.dot_op<{opIdx = 1, parent = #mma, kWidth = 2}>> -> tensor<256x256xf32, #mma>
%25 = ttg.convert_layout %24 : tensor<256x256xf32, #mma> -> tensor<256x256xf32, #blocked>
- // CHECK: tt.advance {{.*}} : <tensor<256x32xf16, #mma>>
+ // CHECK: %[[ADVANCE_A:.*]] = tt.advance {{.*}} : <tensor<256x32xf16, #mma>>
%26 = tt.advance %arg5, [%c0_i32, %c32_i32] : <tensor<256x32xf16, #blocked1>>
- // CHECK: tt.advance {{.*}} : <tensor<32x256xf16, #mma1>>
+ // CHECK: %[[ADVANCE_B:.*]] = tt.advance {{.*}} : <tensor<32x256xf16, #mma1>>
%27 = tt.advance %arg6, [%c32_i32, %c0_i32] : <tensor<32x256xf16, #blocked2>>
+ // CHECK: scf.yield {{.*}}, %[[ADVANCE_A]], %[[ADVANCE_B]]
scf.yield %25, %26, %27 : tensor<256x256xf32, #blocked>, !tt.ptr<tensor<256x32xf16, #blocked1>>, !tt.ptr<tensor<32x256xf16, #blocked2>>
}
%14 = tt.make_tensor_ptr %arg2, [%c1024_i64, %c4096_i64], [%c4096_i64, %c1_i64], [%9, %11] {order = array<i32: 1, 0>} : <tensor<256x256xf16, #blocked2>>