// RUN: mlir-opt %s --gpu-lower-to-xevm-pipeline="xegpu-op-level=workgroup" \
// RUN: | mlir-runner \
// RUN: --shared-libs=%mlir_levelzero_runtime \
// RUN: --shared-libs=%mlir_runner_utils \
// RUN: --entry-point-result=void \
// RUN: | FileCheck %s
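
// Computes C = A x B for 256x256 matrices (f16 operands, f32 accumulator) using
// workgroup-level XeGPU operations. The host initializes A[i, j] = j and B as the
// identity matrix, so the kernel's output must equal A widened to f32.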

#a = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32], inst_data = [8, 16]>
#b = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [16, 16]>
#c = #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 64], inst_data = [8, 16]>
#a_prefetch = #xegpu.layout<sg_layout = [32, 1], sg_data = [8, 32], inst_data = [8, 16]>
#b_prefetch = #xegpu.layout<sg_layout = [4, 8], sg_data = [8, 32], inst_data = [8, 16]>
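// The layouts above describe the workgroup-level distribution: sg_layout = [8, 4]
// arranges the 32 subgroups (8 x 4; with the 512-thread launch below this implies
// 16 work-items per subgroup), sg_data is the tile each subgroup owns, and
// inst_data is the per-instruction block shape. #a_prefetch and #b_prefetch spread
// the cooperative prefetch of a 256x32 A tile and a 32x256 B tile across all 32
// subgroups.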
module @gemm attributes {gpu.container_module} {
  func.func @test(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) -> memref<256x256xf32> attributes {llvm.emit_c_interface} {
    %c1 = arith.constant 1 : index
    %c4 = arith.constant 4 : index
    %c8 = arith.constant 8 : index
    %c16 = arith.constant 16 : index
    %c32 = arith.constant 32 : index
    %c64 = arith.constant 64 : index
    %c128 = arith.constant 128 : index
    %c512 = arith.constant 512 : index
    %A_gpu = gpu.alloc () : memref<256x256xf16>
    gpu.memcpy %A_gpu, %A : memref<256x256xf16>, memref<256x256xf16>
    %B_gpu = gpu.alloc () : memref<256x256xf16>
    gpu.memcpy %B_gpu, %B : memref<256x256xf16>, memref<256x256xf16>
    %C_gpu = gpu.alloc () : memref<256x256xf32>
    gpu.memcpy %C_gpu, %C : memref<256x256xf32>, memref<256x256xf32>
    // NOTE: We cannot launch [8, 64] work-item threads to match the subgroup layout of [8, 4],
    // because the runtime linearizes the x dimension first, whereas we need the y dimension
    // linearized first. Instead, use a linearized thread layout of [512, 1] work-items.
    gpu.launch_func @test_kernel::@test_kernel blocks in (%c1, %c1, %c1) threads in (%c512, %c1, %c1) args(%A_gpu : memref<256x256xf16>, %B_gpu : memref<256x256xf16>, %C_gpu : memref<256x256xf32>)
    gpu.wait // Wait for the kernel to finish.
    gpu.memcpy %C, %C_gpu : memref<256x256xf32>, memref<256x256xf32>
    gpu.dealloc %A_gpu : memref<256x256xf16>
    gpu.dealloc %B_gpu : memref<256x256xf16>
    gpu.dealloc %C_gpu : memref<256x256xf32>
    return %C : memref<256x256xf32>
  }

  gpu.module @test_kernel {
    gpu.func @test_kernel(%A: memref<256x256xf16>, %B: memref<256x256xf16>, %C: memref<256x256xf32>) kernel {
      %c0 = arith.constant 0 : index
      %c1 = arith.constant 1 : index
      %c32 = arith.constant 32 : index
      %c64 = arith.constant 64 : index
      %c96 = arith.constant 96 : index
      %c256 = arith.constant 256 : index
      %c4096 = arith.constant 4096 : index
      %block_id_x = gpu.block_id x
      %block_id_y = gpu.block_id y
      %m = arith.muli %block_id_x, %c256 : index
      %n = arith.muli %block_id_y, %c256 : index
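      // Create workgroup-level tensor descriptors for C, A, and B; the tile offsets
      // are supplied later at each load/store/prefetch.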
      %c_tdesc = xegpu.create_nd_tdesc %C : memref<256x256xf32> -> !xegpu.tensor_desc<256x256xf32, #c>
      %c_init_value = xegpu.load_nd %c_tdesc[%m, %n] : !xegpu.tensor_desc<256x256xf32, #c> -> vector<256x256xf32>
      %a_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a>
      %b_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b>
      // Prefetch the first 3 K-tiles of A.
      %a_prefetch_tdesc = xegpu.create_nd_tdesc %A : memref<256x256xf16> -> !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c0] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c32] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      xegpu.prefetch_nd %a_prefetch_tdesc[%m, %c64] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
      // Prefetch the first 3 K-tiles of B.
      %b_prefetch_tdesc = xegpu.create_nd_tdesc %B : memref<256x256xf16> -> !xegpu.tensor_desc<32x256xf16, #b_prefetch>
      xegpu.prefetch_nd %b_prefetch_tdesc[%c0, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
      xegpu.prefetch_nd %b_prefetch_tdesc[%c32, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
      xegpu.prefetch_nd %b_prefetch_tdesc[%c64, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>

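      // K-loop: each iteration loads a 256x32 tile of A and a 32x256 tile of B,
      // prefetches the tiles 3 iterations (96 elements) ahead along K, and
      // accumulates into C with dpas.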
      %out = scf.for %k = %c0 to %c256 step %c32
        iter_args(%c_value = %c_init_value)
        -> (vector<256x256xf32>) {
        %a_value = xegpu.load_nd %a_tdesc[%m, %k] : !xegpu.tensor_desc<256x32xf16, #a> -> vector<256x32xf16>
        %b_value = xegpu.load_nd %b_tdesc[%k, %n] : !xegpu.tensor_desc<32x256xf16, #b> -> vector<32x256xf16>
        // Prefetch next tiles.
        %prefetch_offset = arith.addi %k, %c96 : index
        xegpu.prefetch_nd %a_prefetch_tdesc[%m, %prefetch_offset] : !xegpu.tensor_desc<256x32xf16, #a_prefetch>
        xegpu.prefetch_nd %b_prefetch_tdesc[%prefetch_offset, %n] : !xegpu.tensor_desc<32x256xf16, #b_prefetch>
        %c_new_value = xegpu.dpas %a_value, %b_value, %c_value {layout_result_0 = #c}
        : vector<256x32xf16>, vector<32x256xf16>, vector<256x256xf32> -> vector<256x256xf32>
        scf.yield %c_new_value : vector<256x256xf32>
      }
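      // Write the accumulated 256x256 result tile back to C.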
      xegpu.store_nd %out, %c_tdesc[%m, %n] : vector<256x256xf32>, !xegpu.tensor_desc<256x256xf32, #c>
      gpu.return
    }
  }

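  // Host-side driver: initializes A, B, and C, runs the GPU GEMM, and prints the
  // result for FileCheck.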
  func.func @main() attributes {llvm.emit_c_interface} {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c1_f16 = arith.constant 1.0 : f16
    %c2_f16 = arith.constant 2.0 : f16
    %c256 = arith.constant 256 : index
    %cf_0 = arith.constant 0.0 : f16
    %cf_1 = arith.constant 1.0 : f16
    %A = memref.alloc() : memref<256x256xf16>
    %B = memref.alloc() : memref<256x256xf16>
    %C = memref.alloc() : memref<256x256xf32>
    %C_ref = memref.alloc() : memref<256x256xf32>
    %c_gen_int = arith.constant 0 : i1
    %cf_lower = arith.constant -0.5 : f32
    %cf_upper = arith.constant 0.5 : f32
    // Initialize matrix A: A[i, j] = j.
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        %t = index.castu %j : index to i16
        %val = arith.uitofp %t : i16 to f16
        memref.store %val, %A[%i, %j] : memref<256x256xf16>
      }
    }

    // Initialize B as the identity matrix.
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        %i_i32 = index.castu %i : index to i32
        %j_i32 = index.castu %j : index to i32
        %i_j_same = arith.cmpi eq, %i_i32, %j_i32 : i32

        scf.if %i_j_same {
          memref.store %cf_1, %B[%i, %j] : memref<256x256xf16>
        } else {
          memref.store %cf_0, %B[%i, %j] : memref<256x256xf16>
        }
      }
    }

    // Initialize matrices C and C_ref: C[i, j] = 0.
    %c0_f32 = arith.constant 0.0 : f32
    scf.for %i = %c0 to %c256 step %c1 {
      scf.for %j = %c0 to %c256 step %c1 {
        memref.store %c0_f32, %C[%i, %j] : memref<256x256xf32>
        memref.store %c0_f32, %C_ref[%i, %j] : memref<256x256xf32>
      }
    }

    // Run GPU version.
    %2 = call @test(%A, %B, %C) : (memref<256x256xf16>, memref<256x256xf16>, memref<256x256xf32>) -> memref<256x256xf32>
    %gpu_result_cast = memref.cast %2 : memref<256x256xf32> to memref<*xf32>
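    // B is the identity matrix, so each row of C must print as [0, 1, ..., 255],
    // which the CHECK-COUNT-256 line below verifies.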
    // CHECK: Unranked Memref base@ = 0x{{[0-9a-f]+}}
    // CHECK-COUNT-256: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
    call @printMemrefF32(%gpu_result_cast) : (memref<*xf32>) -> ()

    memref.dealloc %A : memref<256x256xf16>
    memref.dealloc %B : memref<256x256xf16>
    memref.dealloc %C : memref<256x256xf32>
    memref.dealloc %C_ref : memref<256x256xf32>
    return
  }
  func.func private @printMemrefF32(memref<*xf32>) attributes {llvm.emit_c_interface}
}