
Commit 2c4e113

root authored and shalinib-ibm committed
PowerPC: Sgemm Optimization
This patch improves GEMM for the FP32 data type on PowerPC.

- Implements GEMM on large blocks with configurable block sizes mc, nc, kc (default: 256, 256, 256).
- Optimizes the packing function to access blocks according to the memory layout.
- Optimizes GEMM to work on larger blocks.
- Isolates packing from the GEMM operations for better MMA utilization.

Verified functionality and correctness using llama-cli and a standalone test case (performs a matmul and compares the final matrix C result with the base).

Performance testing: observed a 50% ~ 70% improvement in prompt processing speed, measured using llama-bench with the Meta-Llama3-8B FP32 model. Similar gains were observed with the Mistral-7b-Instruct-v0.3 model.

| model            | size      | params | backend | threads | test   | patch t/s | base t/s |
| ---------------- | --------- | ------ | ------- | ------- | ------ | --------- | -------- |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp512  | 98.58     | 60.30    |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp1024 | 95.88     | 57.36    |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp2048 | 85.46     | 53.26    |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp4096 | 68.66     | 45.78    |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp6144 | 57.35     | 40.44    |

Also observed a 25% ~ 30% improvement in prompt processing speed with llama-batched-bench on Meta-Llama3-8B for large prompts (256, 512, 1024, 2048, 4096 tokens) at various batch sizes (1, 2, 4, 8, 16).

Signed-off-by: root <[email protected]>
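The standalone test itself is not included in the diff; a self-contained scalar model of the same idea (blocked GEMM over mc x nc x kc tiles, checked against a naive reference the way the test compares C against the base) might look like the following sketch. `gemm_blocked` is illustrative, not the MMA kernel from this patch:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Column-major blocked GEMM: C[j*m + i] += A[l*m + i] * B[j*k + l],
// processed in mc x nc x kc tiles (256 by default, as in the patch).
static void gemm_blocked(const float *A, const float *B, float *C,
                         int64_t m, int64_t n, int64_t k,
                         int64_t mc = 256, int64_t nc = 256, int64_t kc = 256) {
    for (int64_t jj = 0; jj < n; jj += nc)
        for (int64_t kk = 0; kk < k; kk += kc)
            for (int64_t ii = 0; ii < m; ii += mc)
                for (int64_t j = jj; j < jj + nc && j < n; j++)
                    for (int64_t l = kk; l < kk + kc && l < k; l++)
                        for (int64_t i = ii; i < ii + mc && i < m; i++)
                            C[j * m + i] += A[l * m + i] * B[j * k + l];
}

int main() {
    const int64_t m = 256, n = 256, k = 256;  // multiples of the block sizes
    std::vector<float> A(m * k), B(k * n), C(m * n, 0.0f), R(m * n, 0.0f);
    for (auto & x : A) x = (float)rand() / RAND_MAX;
    for (auto & x : B) x = (float)rand() / RAND_MAX;

    gemm_blocked(A.data(), B.data(), C.data(), m, n, k);

    // Naive reference, playing the role of the "base" result in the test.
    for (int64_t j = 0; j < n; j++)
        for (int64_t l = 0; l < k; l++)
            for (int64_t i = 0; i < m; i++)
                R[j * m + i] += A[l * m + i] * B[j * k + l];

    float max_diff = 0.0f;
    for (int64_t x = 0; x < m * n; x++)
        max_diff = std::fmax(max_diff, std::fabs(C[x] - R[x]));
    printf("max abs diff vs reference: %g\n", max_diff);
    return max_diff < 1e-3f ? 0 : 1;
}
```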
1 parent ee3a9fc commit 2c4e113

1 file changed: +110 −5 lines changed

ggml/src/ggml-cpu/llamafile/sgemm.cpp

Lines changed: 110 additions & 5 deletions
@@ -2175,14 +2175,39 @@ class tinyBLAS_PPC {
               int ith, int nth)
         : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
     }
-
     void matmul(int64_t m, int64_t n) {
-        mnpack(0, m, 0, n);
+        int64_t mc = 256; int64_t nc = 256; int64_t kc = 256;
+        if (m % mc == 0 && n % nc == 0 && k % kc == 0) {
+            matmul_tiled(m, n, mc, nc, kc);
+        } else {
+            mnpack(0, m, 0, n);
+        }
     }

   private:

     void (tinyBLAS_PPC::*kernel)(int64_t, int64_t);
+
+    inline void save_acc(acc_t* ACC, int64_t ii, int64_t jj) {
+        vec_t vec_C[4];
+        __builtin_mma_disassemble_acc(vec_C, ACC);
+        for (int I = 0; I < 4; I++) {
+            for (int J = 0; J < 4; J++) {
+                *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&vec_C[I]+J);
+            }
+        }
+    }
+
+    inline void add_save_acc(acc_t* ACC, int64_t ii, int64_t jj) {
+        vec_t vec_C[4];
+        __builtin_mma_disassemble_acc(vec_C, ACC);
+        for (int I = 0; I < 4; I++) {
+            for (int J = 0; J < 4; J++) {
+                float* c_ptr = (float*)(C+ii+((jj+J)*ldc)+I);
+                *c_ptr += *((float*)&vec_C[I]+J);
+            }
+        }
+    }

     inline void vector_permute_store_4(vector float *src, float *vecOffset) {
         vector float t1, t2, t3, t4, t5, t6, t7, t8;
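To see why save_acc()/add_save_acc() index vec_C the way they do: each POWER10 MMA accumulator holds a 4x4 FP32 tile, and __builtin_mma_disassemble_acc() yields four 4-float vectors, one per row I, whose lane J is the tile element (I, J). A minimal sketch (not from the patch; assumes GCC with -mcpu=power10, and tile_4x4 is an illustrative name):

```cpp
#include <altivec.h>
#include <stdint.h>

typedef vector unsigned char vec_t;
typedef __vector_quad acc_t;

// One rank-1 update a4 (4 floats) x b4 (4 floats) into a 4x4 accumulator,
// then a column-major store with leading dimension ldc, mirroring the
// C+ii+((jj+J)*ldc)+I addressing used by save_acc() above.
void tile_4x4(const float *a4, const float *b4, float *C, int64_t ldc) {
    acc_t acc;
    __builtin_mma_xxsetaccz(&acc);              // zero the accumulator
    vec_t va = (vec_t)vec_xl(0, a4);
    vec_t vb = (vec_t)vec_xl(0, b4);
    __builtin_mma_xvf32gerpp(&acc, va, vb);     // acc[I][J] += a4[I] * b4[J]
    vec_t rows[4];
    __builtin_mma_disassemble_acc(rows, &acc);  // tile row I -> rows[I]
    for (int I = 0; I < 4; I++)
        for (int J = 0; J < 4; J++)
            C[(int64_t)J * ldc + I] = ((float *)&rows[I])[J];
}
```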
@@ -2235,7 +2260,7 @@ class tinyBLAS_PPC {
         vec_xst(t8, 0, vecOffset + 28);
     }

-    void packTranspose(const float* a, int64_t lda, int rows, int cols, float* vec) {
+    void packTranspose(const float* a, int64_t lda, int rows, int cols, float* vec) {
         int64_t i, j;
         float * aoffsets[8];
         float *aoffset = NULL, *boffset = NULL;
@@ -2265,10 +2290,13 @@ class tinyBLAS_PPC {

             vector_permute_store_8(c1, boffset);
             vector_permute_store_8(c2, boffset+32);
-            for (int it = 0; it < 4; it++)
-                aoffsets[it] = aoffsets[it] + 8*lda;
             boffset += 64;
             i--;
+            if (i > 0) {
+                for (int it = 0; it < 8; it++) {
+                    aoffsets[it] = aoffsets[it] + 8;
+                }
+            }
         } while(i > 0);
     }
     if (cols & 4) {
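The substance of this hunk is the pointer advance between consecutive 8x8 tiles: all eight row pointers must step 8 elements along their rows (the removed code advanced only four of them, and by 8*lda, walking into the wrong data). A scalar model of the corrected walk, with my own layout assumptions and not the vectorized routine itself:

```cpp
#include <cstdint>

// Pack an 8-row x (8*n8)-column panel of a (row stride lda) into contiguous
// transposed 8x8 tiles at dst. After each tile, every one of the eight row
// pointers advances 8 elements -- the behavior this hunk restores.
void pack_transpose_8xN(const float *a, int64_t lda, int64_t n8, float *dst) {
    const float *rowp[8];
    for (int r = 0; r < 8; r++) rowp[r] = a + r * lda;  // one pointer per row
    for (int64_t blk = 0; blk < n8; blk++) {
        for (int c = 0; c < 8; c++)                     // transpose the tile
            for (int r = 0; r < 8; r++)
                *dst++ = rowp[r][c];
        for (int r = 0; r < 8; r++) rowp[r] += 8;       // step to next tile
    }
}
```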
@@ -2401,6 +2429,83 @@ class tinyBLAS_PPC {
         SAVE_ACC(&acc_3, ii+4, jj+4);
     }

+    inline void MMA_16x8(vec_t *vec_A0, vec_t *vec_A1, vec_t *vec_B, acc_t *acc) {
+        for (int x = 0; x < 16; x += 2) {
+            __builtin_mma_xvf32gerpp(&acc[0], vec_A0[x + 0], vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[1], vec_A0[x + 0], vec_B[x + 1]);
+            __builtin_mma_xvf32gerpp(&acc[2], vec_A0[x + 1], vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[3], vec_A0[x + 1], vec_B[x + 1]);
+            __builtin_mma_xvf32gerpp(&acc[4], vec_A1[x + 0], vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[5], vec_A1[x + 0], vec_B[x + 1]);
+            __builtin_mma_xvf32gerpp(&acc[6], vec_A1[x + 1], vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[7], vec_A1[x + 1], vec_B[x + 1]);
+        }
+    }
+
+    void KERNEL(int64_t ii, int64_t jj, int64_t mc, int64_t nc, int64_t kc, vec_t *vec_A, vec_t *vec_B, int64_t kk) {
+        for (int64_t i = 0; i < mc; i += 16) {
+            int A_base_addr = (mc/8) * (i/8) * 16;
+            for (int64_t j = 0; j < nc; j += 8) {
+                int B_base_addr = (nc/8) * (j/8) * 16;
+                acc_t acc[8];
+                for (int x = 0; x < 8; x++)
+                    __builtin_mma_xxsetaccz(&acc[x]);
+                for (int64_t l = 0; l < kc; l += 8) {
+                    int A0_block_idx = A_base_addr + (l/8) * 16;
+                    int A1_block_idx = A0_block_idx + (mc/8) * 16;
+                    int B_block_idx = B_base_addr + (l/8) * 16;
+                    vec_t *A0_block = &vec_A[A0_block_idx];
+                    vec_t *A1_block = &vec_A[A1_block_idx];
+                    vec_t *B_block = &vec_B[B_block_idx];
+                    MMA_16x8(A0_block, A1_block, B_block, acc);
+                }
+                if (kk == 0) {
+                    save_acc(&acc[0], ii + i, jj + j);
+                    save_acc(&acc[1], ii + i, jj + j + 4);
+                    save_acc(&acc[2], ii + i + 4, jj + j);
+                    save_acc(&acc[3], ii + i + 4, jj + j + 4);
+                    save_acc(&acc[4], ii + i + 8, jj + j);
+                    save_acc(&acc[5], ii + i + 8, jj + j + 4);
+                    save_acc(&acc[6], ii + i + 12, jj + j);
+                    save_acc(&acc[7], ii + i + 12, jj + j + 4);
+                } else {
+                    add_save_acc(&acc[0], ii + i, jj + j);
+                    add_save_acc(&acc[1], ii + i, jj + j + 4);
+                    add_save_acc(&acc[2], ii + i + 4, jj + j);
+                    add_save_acc(&acc[3], ii + i + 4, jj + j + 4);
+                    add_save_acc(&acc[4], ii + i + 8, jj + j);
+                    add_save_acc(&acc[5], ii + i + 8, jj + j + 4);
+                    add_save_acc(&acc[6], ii + i + 12, jj + j);
+                    add_save_acc(&acc[7], ii + i + 12, jj + j + 4);
+                }
+            }
+        }
+    }
+
+    void matmul_tiled(int64_t m, int64_t n, int64_t mc, int64_t nc, int64_t kc) {
+        int64_t ytiles = m / mc;
+        int64_t xtiles = n / nc;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles) {
+            end = tiles;
+        }
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = (job / xtiles) * mc;
+            int64_t jj = (job % xtiles) * nc;
+            for (int64_t kk = 0; kk < k; kk += kc) {
+                vec_t A_pack[kc*mc/4];
+                vec_t B_pack[kc*nc/4];
+                packTranspose(A+(ii*lda)+kk, lda, kc, mc, (float*)A_pack);
+                packTranspose(B+(jj*ldb)+kk, ldb, kc, nc, (float*)B_pack);
+                KERNEL(ii, jj, mc, nc, kc, A_pack, B_pack, kk);
+            }
+        }
+    }
+
     void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
         int m_rem = MIN(m - m0, 8);
         int n_rem = MIN(n - n0, 8);
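The duty/start/end arithmetic in matmul_tiled() flattens the (ytiles x xtiles) grid of mc x nc output blocks and slices it evenly across threads. A small standalone demo of that mapping, with shapes chosen purely for illustration:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    // Example shapes (assumed): m = 1024, n = 512, mc = nc = 256, nth = 3.
    const int64_t m = 1024, n = 512, mc = 256, nc = 256, nth = 3;
    const int64_t ytiles = m / mc;                   // 4
    const int64_t xtiles = n / nc;                   // 2
    const int64_t tiles  = xtiles * ytiles;          // 8
    const int64_t duty   = (tiles + nth - 1) / nth;  // ceil(8/3) = 3
    for (int64_t ith = 0; ith < nth; ith++) {
        int64_t start = duty * ith;
        int64_t end   = start + duty > tiles ? tiles : start + duty;
        for (int64_t job = start; job < end; job++) {
            // Same mapping as matmul_tiled(): flat job -> tile origin (ii, jj).
            printf("thread %lld: job %lld -> ii=%lld jj=%lld\n",
                   (long long)ith, (long long)job,
                   (long long)((job / xtiles) * mc),
                   (long long)((job % xtiles) * nc));
        }
    }
    return 0;
}
```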
