Commit 166e348

Add test cases for SG (subgroup) and WG (workgroup).
1 parent 3072c1c commit 166e348

3 files changed: +269 −2 lines changed
Lines changed: 120 additions & 0 deletions
@@ -0,0 +1,120 @@
// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=subgroup" \
// RUN: | mlir-runner \
// RUN:   --shared-libs=%mlir_levelzero_runtime \
// RUN:   --shared-libs=%mlir_runner_utils \
// RUN:   --entry-point-result=void \
// RUN: | FileCheck %s

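// The pipeline lowers the XeGPU subgroup-level ops to XeVM, and mlir-runner
// executes the result through the Level Zero runtime; FileCheck verifies the
// printed output.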
module @gemm attributes {gpu.container_module} {
  gpu.module @kernel {
    gpu.func @simple_gemm(%a: memref<256x256xf16>, %b: memref<256x256xf16>, %c: memref<256x256xf32>) kernel {
      %c0 = arith.constant 0 : index
      %c1 = arith.constant 1 : index
      %c8 = arith.constant 8 : index
      %c16 = arith.constant 16 : index
      %c32 = arith.constant 32 : index
      %c256 = arith.constant 256 : index
      %block_x = gpu.block_id x
      %block_y = gpu.block_id y
      %x_block_offset = arith.muli %block_x, %c8 : index
      %y_block_offset = arith.muli %block_y, %c16 : index
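      // Each workgroup launches 16 work-items (one subgroup) and computes one
      // 8x16 tile of C; the 32x16 grid of workgroups covers the full 256x256
      // output (32 * 8 = 256 rows, 16 * 16 = 256 columns).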

      %c_tdesc = xegpu.create_nd_tdesc %c : memref<256x256xf32> -> !xegpu.tensor_desc<8x16xf32>
      %c_init_value = xegpu.load_nd %c_tdesc[%x_block_offset, %y_block_offset] : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32>
      %a_tdesc = xegpu.create_nd_tdesc %a : memref<256x256xf16> -> !xegpu.tensor_desc<8x16xf16>
      %b_tdesc = xegpu.create_nd_tdesc %b : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>

      %r = scf.for %k = %c0 to %c256 step %c16 iter_args(%arg_c = %c_init_value) -> (vector<8x16xf32>) {
        %a_val = xegpu.load_nd %a_tdesc[%x_block_offset, %k] : !xegpu.tensor_desc<8x16xf16> -> vector<8x16xf16>
        %b_val = xegpu.load_nd %b_tdesc[%k, %y_block_offset] : !xegpu.tensor_desc<16x16xf16> -> vector<16x16xf16>
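        // One K step: multiply the 8x16 A tile by the 16x16 B tile and
        // accumulate into the 8x16 C accumulator carried by the loop.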
        %dpas = xegpu.dpas %a_val, %b_val, %arg_c : vector<8x16xf16>, vector<16x16xf16>, vector<8x16xf32> -> vector<8x16xf32>
        scf.yield %dpas : vector<8x16xf32>
      }
      xegpu.store_nd %r, %c_tdesc[%x_block_offset, %y_block_offset] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}> : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32>
      gpu.return
    }
  }

  func.func @test(%a : memref<256x256xf16>, %b : memref<256x256xf16>, %c : memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
    %c1 = arith.constant 1 : index
    %c16 = arith.constant 16 : index
    %c32 = arith.constant 32 : index
    %memref_a = gpu.alloc () : memref<256x256xf16>
    gpu.memcpy %memref_a, %a : memref<256x256xf16>, memref<256x256xf16>
    %memref_b = gpu.alloc () : memref<256x256xf16>
    gpu.memcpy %memref_b, %b : memref<256x256xf16>, memref<256x256xf16>
    %memref_c = gpu.alloc () : memref<256x256xf32>
    gpu.memcpy %memref_c, %c : memref<256x256xf32>, memref<256x256xf32>
    gpu.launch_func @kernel::@simple_gemm blocks in (%c32, %c16, %c1) threads in (%c16, %c1, %c1) args(%memref_a : memref<256x256xf16>, %memref_b : memref<256x256xf16>, %memref_c : memref<256x256xf32>)
    gpu.wait // Wait for the kernel to finish.
    gpu.memcpy %c, %memref_c : memref<256x256xf32>, memref<256x256xf32>
    gpu.dealloc %memref_a : memref<256x256xf16>
    gpu.dealloc %memref_b : memref<256x256xf16>
    gpu.dealloc %memref_c : memref<256x256xf32>
    return %c : memref<256x256xf32>
  }

  func.func @main() attributes {llvm.emit_c_interface} {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c1_f16 = arith.constant 1.0 : f16
    %c2_f16 = arith.constant 2.0 : f16
    %c256 = arith.constant 256 : index
    %cf_0 = arith.constant 0.0 : f16
    %cf_1 = arith.constant 1.0 : f16
    %A = memref.alloc() : memref<256x256xf16>
    %B = memref.alloc() : memref<256x256xf16>
    %C = memref.alloc() : memref<256x256xf32>
    %C_ref = memref.alloc() : memref<256x256xf32>
    %c_gen_int = arith.constant 0 : i1
    %cf_lower = arith.constant -0.5 : f32
    %cf_upper = arith.constant 0.5 : f32
    // Initialize matrix A ; A[i, j] = j
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        %t = index.castu %j : index to i16
        %val = arith.uitofp %t : i16 to f16
        memref.store %val, %A[%i, %j] : memref<256x256xf16>
      }
    }

    // Initialize B as the identity matrix.
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        %i_i32 = index.castu %i : index to i32
        %j_i32 = index.castu %j : index to i32
        %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
        scf.if %i_j_same {
          memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
        } else {
          memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
        }
      }
    }
    // Initialize matrices C and C_ref ; C[i, j] = 0
    %c0_f32 = arith.constant 0.0 : f32
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
        memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
      }
    }

    // Run the GPU version.
    %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
    %cast_C = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
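    // Since B is the identity matrix and C starts at zero, the result equals
    // A, so every printed row is [0, 1, ..., 255].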
    // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
    // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
    call @printMemrefF32(%cast_C) : (memref<*xf32>) -> ()

    memref.dealloc %A : memref<256x256xf16>
    memref.dealloc %B : memref<256x256xf16>
    memref.dealloc %C : memref<256x256xf32>
    memref.dealloc %C_ref : memref<256x256xf32>
    return
  }
  func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
}

mlir/test/Integration/Dialect/XeGPU/SIMT/simple_gemm.mlir

Lines changed: 0 additions & 2 deletions
@@ -25,7 +25,6 @@ module @gemm attributes {gpu.container_module}
       %b_tdesc = xegpu.create_nd_tdesc %b : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16>

       %r = scf.for %k = %c0 to %c256 step %c16 iter_args(%arg_c = %c_init_value) -> (vector<8xf32>) {
-
         %a_val = xegpu.load_nd %a_tdesc[%x_block_offset, %k] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16>
         %b_val = xegpu.load_nd %b_tdesc[%k, %y_block_offset] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16>
         %dpas = xegpu.dpas %a_val, %b_val, %arg_c : vector<8xf16>, vector<16xf16>, vector<8xf32> -> vector<8xf32>
@@ -118,6 +117,5 @@ module @gemm attributes {gpu.container_module}
     memref.dealloc %C_ref : memref<256x256xf32>
     return
   }
-  func.func private @printMemrefF16(memref<*xf16>) attributes {llvm.emit_c_interface}
   func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
 }
Lines changed: 149 additions & 0 deletions
@@ -0,0 +1,149 @@
// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=workgroup" \
// RUN: | mlir-runner \
// RUN:   --shared-libs=%mlir_levelzero_runtime \
// RUN:   --shared-libs=%mlir_runner_utils \
// RUN:   --entry-point-result=void \
// RUN: | FileCheck %s

#a = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32], inst_data = [8, 16]>
#b = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [16, 16]>
#c = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>
#a_prefetch = #xegpu.layout<sg_layout = [32, 1], sg_data = [8, 32], inst_data = [8, 16]>
#b_prefetch = #xegpu.layout<sg_layout = [4, 8], sg_data = [8, 32], inst_data = [8, 16]>
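// The workgroup is an 8x4 grid of subgroups. For #c, each subgroup owns a
// 32x64 fragment, so the grid covers 8 * 32 = 256 rows and 4 * 64 = 256
// columns of the C tile; inst_data gives the per-instruction tile shape.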
module @gemm attributes {gpu.container_module} {
  func.func @test(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
    %c1 = arith.constant 1 : index
    %c4 = arith.constant 4 : index
    %c8 = arith.constant 8 : index
    %c16 = arith.constant 16 : index
    %c32 = arith.constant 32 : index
    %c64 = arith.constant 64 : index
    %c128 = arith.constant 128 : index
    %c512 = arith.constant 512 : index
    %A_gpu = gpu.alloc () : memref<256x256xf16>
    gpu.memcpy %A_gpu, %A : memref<256x256xf16>, memref<256x256xf16>
    %B_gpu = gpu.alloc () : memref<256x256xf16>
    gpu.memcpy %B_gpu, %B : memref<256x256xf16>, memref<256x256xf16>
    %C_gpu = gpu.alloc () : memref<256x256xf32>
    gpu.memcpy %C_gpu, %C : memref<256x256xf32>, memref<256x256xf32>
    // NOTE: We cannot launch an [8, 64] work-item thread layout to mirror the
    // [8, 4] subgroup layout, because the runtime linearizes the x dimension
    // first while we need the y dimension linearized first. Instead, launch a
    // linearized layout of [512, 1] work-items.
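    // With 512 work-items and a subgroup size of 16, the workgroup holds
    // 512 / 16 = 32 subgroups, exactly filling the 8x4 sg_layout.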
    gpu.launch_func @test_kernel::@test_kernel blocks in (%c1, %c1, %c1) threads in (%c512, %c1, %c1) args(%A_gpu : memref<256x256xf16>, %B_gpu : memref<256x256xf16>, %C_gpu : memref<256x256xf32>)
    gpu.wait // Wait for the kernel to finish.
    gpu.memcpy %C, %C_gpu : memref<256x256xf32>, memref<256x256xf32>
    gpu.dealloc %A_gpu : memref<256x256xf16>
    gpu.dealloc %B_gpu : memref<256x256xf16>
    gpu.dealloc %C_gpu : memref<256x256xf32>
    return %C : memref<256x256xf32>
  }

  gpu.module @test_kernel {
    gpu.func @test_kernel(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) kernel {
      %c0 = arith.constant 0 : index
      %c1 = arith.constant 1 : index
      %c32 = arith.constant 32 : index
      %c64 = arith.constant 64 : index
      %c96 = arith.constant 96 : index
      %c256 = arith.constant 256 : index
      %c4096 = arith.constant 4096 : index
      %block_id_x = gpu.block_id x
      %block_id_y = gpu.block_id y
      %m = arith.muli %block_id_x, %c256 : index
      %n = arith.muli %block_id_y, %c256 : index
      %c_tdesc = xegpu.create_nd_tdesc %C : memref<256x256xf32> -> !xegpu.tensor_desc<256x256xf32, #c>
      %c_init_value = xegpu.load_nd %c_tdesc[%m, %n] : !xegpu.tensor_desc<256x256xf32, #c> -> vector<256x256xf32>
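      // At workgroup level the IR operates on the full 256x256 tile; the
      // layout attributes above tell the lowering how to distribute each op
      // across the 8x4 grid of subgroups.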
      %a_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a>
      %b_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b>
      // Prefetch the first three A tiles.
      %a_prefetch_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c0] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c32] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c64] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      // Prefetch the first three B tiles.
      %b_prefetch_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b_prefetch>
      xegpu.prefetch_nd %b_prefetch_tdesc[%c0, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
      xegpu.prefetch_nd %b_prefetch_tdesc[%c32, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
      xegpu.prefetch_nd %b_prefetch_tdesc[%c64, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
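      // Prefetching three 32-wide K tiles ahead keeps a software pipeline of
      // depth 3: inside the loop the next prefetch lands at k + 96 (3 * 32).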

      %out = scf.for %k = %c0 to %c256 step %c32
          iter_args(%c_value = %c_init_value) -> (vector<256x256xf32>) {
        %a_value = xegpu.load_nd %a_tdesc[%m, %k] : !xegpu.tensor_desc<256x32xf16, #a> -> vector<256x32xf16>
        %b_value = xegpu.load_nd %b_tdesc[%k, %n] : !xegpu.tensor_desc<32x256xf16, #b> -> vector<32x256xf16>
        // Prefetch next tiles.
        %prefetch_offset = arith.addi %k, %c96 : index
        xegpu.prefetch_nd %a_prefetch_tdesc[%m, %prefetch_offset] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
        xegpu.prefetch_nd %b_prefetch_tdesc[%prefetch_offset, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
        %c_new_value = xegpu.dpas %a_value, %b_value, %c_value {layout_result_0 = #c}
            : vector<256x32xf16>, vector<32x256xf16>, vector<256x256xf32> -> vector<256x256xf32>
        scf.yield %c_new_value : vector<256x256xf32>
      }
      xegpu.store_nd %out, %c_tdesc[%m, %n] : vector<256x256xf32>, !xegpu.tensor_desc<256x256xf32, #c>
      gpu.return
    }
  }

  func.func @main() attributes {llvm.emit_c_interface} {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c1_f16 = arith.constant 1.0 : f16
    %c2_f16 = arith.constant 2.0 : f16
    %c256 = arith.constant 256 : index
    %cf_0 = arith.constant 0.0 : f16
    %cf_1 = arith.constant 1.0 : f16
    %A = memref.alloc() : memref<256x256xf16>
    %B = memref.alloc() : memref<256x256xf16>
    %C = memref.alloc() : memref<256x256xf32>
    %C_ref = memref.alloc() : memref<256x256xf32>
    %c_gen_int = arith.constant 0 : i1
    %cf_lower = arith.constant -0.5 : f32
    %cf_upper = arith.constant 0.5 : f32
    // Initialize matrix A ; A[i, j] = j
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        %t = index.castu %j : index to i16
        %val = arith.uitofp %t : i16 to f16
        memref.store %val, %A[%i, %j] : memref<256x256xf16>
      }
    }

    // Initialize B as the identity matrix.
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        %i_i32 = index.castu %i : index to i32
        %j_i32 = index.castu %j : index to i32
        %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32
        scf.if %i_j_same {
          memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
        } else {
          memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
        }
      }
    }

    // Initialize matrices C and C_ref ; C[i, j] = 0
    %c0_f32 = arith.constant 0.0 : f32
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
        memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
      }
    }

    // Run the GPU version.
    %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
    %gpu_result_cast = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
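    // B is the identity matrix and C starts at zero, so the result equals A
    // and every printed row is [0, 1, ..., 255].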
    // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
    // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
    call @printMemrefF32(%gpu_result_cast) : (memref<*xf32>) -> ()

    memref.dealloc %A : memref<256x256xf16>
    memref.dealloc %B : memref<256x256xf16>
    memref.dealloc %C : memref<256x256xf32>
    memref.dealloc %C_ref : memref<256x256xf32>
    return
  }
  func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
}
