// RUN: triton-opt %s -split-input-file --allocate-shared-memory --convert-triton-amdgpu-to-llvm=arch=gfx942 --cse | FileCheck %s

#blocked0 = #ttg.blocked<{sizePerThread = [1, 4], threadsPerWarp = [16, 4], warpsPerCTA = [2, 2], order = [1, 0], CTAsPerCGA = [1, 1], CTASplitNum = [1, 1], CTAOrder = [1, 0]}>
#blocked1 = #ttg.blocked<{sizePerThread = [4, 1], threadsPerWarp = [4, 16], warpsPerCTA = [2, 2], order = [0, 1], CTAsPerCGA = [1, 1], CTASplitNum = [1, 1], CTAOrder = [1, 0]}>
module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.target = "hip:gfx942", "ttg.threads-per-warp" = 64 : i32} {
  // CHECK: llvm.mlir.global external @global_smem
  // CHECK-LABEL: convert_layout_general_swizzling
  tt.func @convert_layout_general_swizzling(%arg0: tensor<64x64xf32, #blocked0>, %arg1: tensor<64x64x!tt.ptr<f32>, #blocked1>) {

    // Verify that the following convert_layout takes the general swizzling path.
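    // Background note (not checked by this test): the general swizzling path
    // XORs selected index bits into the shared-memory offset so that the
    // threads of a warp hit distinct LDS banks when the conversion is staged
    // through shared memory.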

    // CHECK: [[CST_128:%.*]] = llvm.mlir.constant(128 : i32) : i32

    // Part of the offset computation generated by the applyLinearLayout function
    // CHECK: [[SEL:%.*]] = llvm.select {{.*}}, {{.*}}, [[CST_128]]
    // CHECK: [[OFFSET_0:%.*]] = llvm.xor {{.*}}, [[SEL]]
    // CHECK: [[OFFSET_1:%.*]] = llvm.xor {{.*}}, [[OFFSET_0]] : i32
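    // Reading of the pattern above (an interpretation, not an extra check):
    // the select chooses between another operand and the 128 constant, and the
    // two xors fold the chosen term into the thread's linear offset, leaving
    // the swizzled intra-tile offset in [[OFFSET_1]].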

    // Part of the offset computation generated by the lowerLdSt function after applyLinearLayout
    // CHECK: [[OFFSET_2:%.*]] = llvm.xor [[OFFSET_1]], {{.*}} : i32
    // CHECK: [[OFFSET_3:%.*]] = llvm.xor [[OFFSET_2]], {{.*}} : i32
    // CHECK: [[OFFSET_4:%.*]] = llvm.add [[OFFSET_3]], {{.*}} : i32
    // CHECK: llvm.getelementptr inbounds {{.*}}{{\[}}[[OFFSET_4]]{{\]}}
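    // The final add and getelementptr index the shared-memory buffer with the
    // fully swizzled element offset (presumably folding in a per-iteration
    // base term; the exact operand is deliberately left unconstrained here).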

    %0 = ttg.convert_layout %arg0 : tensor<64x64xf32, #blocked0> -> tensor<64x64xf32, #blocked1>
    tt.store %arg1, %0 : tensor<64x64x!tt.ptr<f32>, #blocked1>
    tt.return
  }
}

// -----

#blocked0 = #ttg.blocked<{sizePerThread = [1, 4], threadsPerWarp = [16, 4], warpsPerCTA = [2, 2], order = [1, 0], CTAsPerCGA = [1, 1], CTASplitNum = [1, 1], CTAOrder = [1, 0]}>
#blocked1 = #ttg.blocked<{sizePerThread = [4, 1], threadsPerWarp = [4, 16], warpsPerCTA = [2, 2], order = [0, 1], CTAsPerCGA = [1, 1], CTASplitNum = [1, 1], CTAOrder = [1, 0]}>
module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.target = "hip:gfx942", "ttg.threads-per-warp" = 64 : i32} {
  // CHECK-LABEL: convert_layout_padding_swizzling
  tt.func @convert_layout_padding_swizzling(%arg0: tensor<64x64xf32, #blocked0>, %arg1: tensor<64x64x!tt.ptr<f32>, #blocked1>) {

    // Verify that the following convert_layout takes the padded path;
    // see the getVecAddr lambda in the transferWithinBlockImpl function.
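    // Worked reading of the checks below (assuming a 32-element pad interval
    // and a 1-element pad, as implied by the constants 5 and 0):
    //   paddedOffset = offset + ((offset >> 5) << 0)
    // i.e. one extra element is inserted after every 32 elements.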

    // CHECK-DAG: [[CST_0:%.*]] = llvm.mlir.constant(0 : i32) : i32
    // CHECK-DAG: [[CST_5:%.*]] = llvm.mlir.constant(5 : i32) : i32
    // CHECK-DAG: [[OFFSET_0:%.*]] = llvm.lshr {{.*}}, [[CST_5]] : i32
    // CHECK: [[OFFSET_1:%.*]] = llvm.shl [[OFFSET_0]], [[CST_0]] : i32
    // CHECK: [[OFFSET_2:%.*]] = llvm.add [[OFFSET_1]], {{.*}} : i32
    // CHECK: llvm.getelementptr inbounds {{.*}}{{\[}}[[OFFSET_2]]{{\]}}

    %0 = ttg.convert_layout %arg0 {amdgpu.use_padded_scratch_shmem} : tensor<64x64xf32, #blocked0> -> tensor<64x64xf32, #blocked1>
    tt.store %arg1, %0 : tensor<64x64x!tt.ptr<f32>, #blocked1>
    tt.return
  }
}