@@ -7,35 +7,86 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
 
 FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
 
-void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
+// ------------------ calc_superblock (final optimized version) ------------------
+void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i,
+                     const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
+    // Compute starting index in matrix B for this superblock
     const uint y_idx = i * QUANT_K + 32 * ib32;
-
     uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
+
+    // Precompute indices for quantization lookup tables
+    const uint qh_base  = 2 * ib32;
+    const uint qs_base  = 4 * ib32;
+    const uint sc_index = ib32 / 2;
+    const uint sc_shift = 6 * (ib32 & 1);
+
+    // Loop over rows in the superblock
     [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+        // Load per-block scales and reconstruct the superblock scale d
         const uint16_t[4] scales = data_a[ibi].scales;
         const u16vec4 s = u16vec4(scales[0], scales[1], scales[2], scales[3]) >> 12;
         const float d = float(unpackHalf2x16(s.x | (s.y << 4) | (s.z << 8) | (s.w << 12)).x);
-
-        const uint sc = data_a[ibi].scales[ib32 / 2] >> (6 * (ib32 & 1));
+        const uint sc = data_a[ibi].scales[sc_index] >> sc_shift;
+
+        // Temporary caches for decoding
+        FLOAT_TYPE dl_cache[4];
+        int16_t gvf_cache[4]; // signed so bitfieldExtract sign-extends the 2-bit grid values
+        float delta_cache[4];
+
+        // Precompute the multiplier and lookup values for 4 sub-blocks
         [[unroll]] for (uint l = 0; l < 4; ++l) {
-            const uint qh = data_a[ibi].qh[2 * ib32 + l / 2] >> (4 * (l&1));
-            const uint qs = data_a[ibi].qs[4 * ib32 + l];
-            const float delta = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
-            const float dl = d * (2 * bitfieldExtract(sc, 3 * int(l / 2), 3) + 1);
-
-            const int16_t grid = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
-
-            [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
-                vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]);
-                vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]);
-
-                FLOAT_TYPE sum = FLOAT_TYPE(0.0);
-                [[unroll]] for (int k = 0; k < 4; ++k) {
-                    sum = fma(FLOAT_TYPE(b0[k]), bitfieldExtract(grid, 2 * k, 2) + delta,
-                              fma(FLOAT_TYPE(b4[k]), bitfieldExtract(grid, 8 + 2 * k, 2) + delta, sum));
-                }
-                temp[j][n] = fma(dl, sum, temp[j][n]);
+            dl_cache[l] = FLOAT_TYPE(d * (2 * bitfieldExtract(sc, 3 * int(l / 2), 3) + 1));
+            const uint qh = data_a[ibi].qh[qh_base + l / 2] >> (4 * (l & 1));
+            const uint qs = data_a[ibi].qs[qs_base + l];
+            gvf_cache[l] = int16_t(iq1s_grid[qs | ((qh & 7) << 8)]);
+            delta_cache[l] = ((qh & 8) != 0) ? -IQ1M_DELTA : IQ1M_DELTA;
+        }
+
+        // Loop over columns of the output
+        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+            // Compute base index for matrix B
+            const uint base_b_idx = (j * p.batch_stride_b + b_offset + y_idx) / 4;
+            vec4 b_vals[8];
+
+            // Load 8 vec4 values from matrix B
+            [[unroll]] for (int idx = 0; idx < 8; ++idx) {
+                b_vals[idx] = vec4(data_b_v4[base_b_idx + idx]);
+            }
+
+            FLOAT_TYPE col_sum = FLOAT_TYPE(0.0);
+
+            // Loop over sub-blocks
+            [[unroll]] for (uint l = 0; l < 4; ++l) {
+                const int16_t grid = gvf_cache[l];
+                const float dl = dl_cache[l];
+
+                // Decode the eight signed 2-bit grid values
+                float f0 = float(bitfieldExtract(grid, 0, 2));
+                float f1 = float(bitfieldExtract(grid, 2, 2));
+                float f2 = float(bitfieldExtract(grid, 4, 2));
+                float f3 = float(bitfieldExtract(grid, 6, 2));
+                float f4 = float(bitfieldExtract(grid, 8, 2));
+                float f5 = float(bitfieldExtract(grid, 10, 2));
+                float f6 = float(bitfieldExtract(grid, 12, 2));
+                float f7 = float(bitfieldExtract(grid, 14, 2));
+
+                // Pack into vec4 for vectorized FMA
+                const vec4 fbits_v0 = vec4(f0, f1, f2, f3);
+                const vec4 fbits_v1 = vec4(f4, f5, f6, f7);
+                const vec4 delta_v = vec4(delta_cache[l]);
+
+                // Vectorized fused multiply-add
+                vec4 sum_v = fma(b_vals[2*l + 0], fbits_v0 + delta_v, vec4(0.0));
+                sum_v = fma(b_vals[2*l + 1], fbits_v1 + delta_v, sum_v);
+
+                // Horizontal add to get scalar sum
+                FLOAT_TYPE sum = sum_v.x + sum_v.y + sum_v.z + sum_v.w;
+
+                // Accumulate to column sum
+                col_sum = fma(dl, sum, col_sum);
             }
+            // Write result to temporary buffer
+            temp[j][n] += col_sum;
         }
         ibi += num_blocks_per_row;
     }
@@ -44,39 +95,39 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32,
 void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
     uint a_offset, b_offset, d_offset;
     get_offsets(a_offset, b_offset, d_offset);
-
     const uint num_blocks_per_row = p.ncols / QUANT_K;
-
-    // 8 threads are used to process each block
-    const uint blocks_per_wg = gl_WorkGroupSize.x/8;
+    const uint blocks_per_wg = gl_WorkGroupSize.x / 8;
     const uint tid = gl_LocalInvocationID.x;
-    const uint itid = tid % 8; // 0...7
+    const uint itid = tid % 8;
     const uint ix = tid / 8;
 
-    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
-        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
+    // Initialize temporary storage
+    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j)
+        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i)
             temp[j][i] = FLOAT_TYPE(0);
-        }
-    }
 
+    // Loop over blocks assigned to this thread
     [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += blocks_per_wg)
         calc_superblock(a_offset, b_offset, itid, i, num_blocks_per_row, first_row, num_rows);
 
+    // Reduce results from temporary buffer to output
     reduce_result(temp, d_offset, first_row, num_rows, tid);
 }
 
 void main() {
+    // Compute first row for this workgroup
     const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
 
+    // Initialize shared memory for quantization lookups
     init_iq_shmem(gl_WorkGroupSize);
 
-    // do NUM_ROWS at a time, unless there aren't enough remaining rows
-    if (first_row + NUM_ROWS <= p.stride_d) {
-        compute_outputs(first_row, NUM_ROWS);
-    } else {
-        if (first_row >= p.stride_d) {
-            return;
-        }
-        compute_outputs(first_row, p.stride_d - first_row);
-    }
+    // Early exit if out-of-bounds
+    if (first_row >= p.stride_d)
+        return;
+
+    // Number of rows to process for this workgroup
+    const uint rows_to_process = min(NUM_ROWS, p.stride_d - first_row);
+
+    // Compute outputs for assigned rows
+    compute_outputs(first_row, rows_to_process);
 }