
Commit 96b1f4d

rootshalinib-ibm authored and committed
PowerPC: SGEMM Optimization
This patch improves GEMM for the FP32 data type on PowerPC:

- Implements GEMM on large blocks with configurable block sizes mc, nc, kc (default: 256, 256, 256).
- Optimizes the packing function to access blocks according to the memory layout.
- Optimizes GEMM to work on larger blocks.
- Isolates packing from the GEMM operation for better MMA utilization.

Functionality and correctness were verified using llama-cli and a standalone test case that performs a matmul and compares the final matrix C against the base implementation.

Performance testing with llama-bench on the Meta-Llama-3-8B FP32 model shows a 50-70% improvement in prompt processing speed; similar gains were observed with the Mistral-7B-Instruct-v0.3 model.

| model            | size      | params | backend | threads | test   | patch (t/s) | base (t/s) |
| ---------------- | --------- | ------ | ------- | ------- | ------ | ----------- | ---------- |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp512  | 98.58       | 60.30      |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp1024 | 95.88       | 57.36      |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp2048 | 85.46       | 53.26      |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp4096 | 68.66       | 45.78      |
| llama 8B all F32 | 29.92 GiB | 8.03 B | CPU     | 20      | pp6144 | 57.35       | 40.44      |

llama-batched-bench with Meta-Llama-3-8B shows a 25-30% improvement in prompt processing speed for large prompts (256, 512, 1024, 2048, 4096 tokens) across batch sizes 1, 2, 4, 8, and 16.

Signed-off-by: root <[email protected]>
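The blocking scheme described above is classic cache tiling: C is partitioned into mc × nc tiles and the K dimension is walked in kc-sized panels, so each packed panel stays cache-resident while it is reused. As a minimal scalar sketch only (the committed code packs each panel into a contiguous vec_t layout and drives POWER10 MMA accumulators instead of a scalar inner loop; the function name and row-major layout here are illustrative assumptions):

#include <algorithm>
#include <cstdint>

// Illustrative scalar cache-blocked SGEMM over row-major buffers: C += A * B.
// The real tinyBLAS_PPC path packs each (mc x kc) and (kc x nc) panel into a
// contiguous vec_t layout and replaces the scalar inner loop with MMA kernels.
void gemm_blocked(const float *A, const float *B, float *C,
                  int64_t m, int64_t n, int64_t k) {
    const int64_t mc = 256, nc = 256, kc = 256;  // block sizes, as in the patch
    for (int64_t ii = 0; ii < m; ii += mc)
        for (int64_t jj = 0; jj < n; jj += nc)
            for (int64_t kk = 0; kk < k; kk += kc) {
                const int64_t mb = std::min(mc, m - ii);
                const int64_t nb = std::min(nc, n - jj);
                const int64_t kb = std::min(kc, k - kk);
                // Accumulate one (mb x nb) tile of C from an (mb x kb) panel
                // of A and a (kb x nb) panel of B; C must start zeroed.
                for (int64_t i = 0; i < mb; ++i)
                    for (int64_t j = 0; j < nb; ++j) {
                        float sum = 0.0f;
                        for (int64_t l = 0; l < kb; ++l)
                            sum += A[(ii + i) * k + (kk + l)] * B[(kk + l) * n + (jj + j)];
                        C[(ii + i) * n + (jj + j)] += sum;
                    }
            }
}

The committed matmul_tiled goes one step further and distributes the (m/mc) × (n/nc) tile grid across nth threads, each thread taking a contiguous range of tiles.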
1 parent ee3a9fc commit 96b1f4d

File tree

1 file changed (+94, -5 lines)


ggml/src/ggml-cpu/llamafile/sgemm.cpp

Lines changed: 94 additions & 5 deletions
@@ -2175,14 +2175,29 @@ class tinyBLAS_PPC {
                int ith, int nth)
         : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
     }
-
     void matmul(int64_t m, int64_t n) {
-        mnpack(0, m, 0, n);
+        int64_t mc = 256; int64_t nc = 256; int64_t kc = 256;
+        if (m % mc == 0 && n % nc == 0) {
+            matmul_tiled(m, n, mc, nc, kc);
+        } else {
+            mnpack(0, m, 0, n);
+        }
     }

 private:

     void (tinyBLAS_PPC::*kernel)(int64_t, int64_t);
+
+    inline void add_save_acc(acc_t *ACC, int64_t ii, int64_t jj) {
+        vec_t vec_C[4];
+        __builtin_mma_disassemble_acc(vec_C, ACC);
+        for (int I = 0; I < 4; I++) {
+            for (int J = 0; J < 4; J++) {
+                float *c_ptr = (float *)(C + ii + ((jj + J) * ldc) + I);
+                *c_ptr += *((float *)&vec_C[I] + J);
+            }
+        }
+    }

     inline void vector_permute_store_4(vector float *src, float *vecOffset) {
         vector float t1, t2, t3, t4, t5, t6, t7, t8;
@@ -2235,7 +2250,8 @@ class tinyBLAS_PPC {
         vec_xst(t8, 0, vecOffset + 28);
     }

-    void packTranspose(const float *a, int64_t lda, int rows, int cols, float *vec) {
+    void packTranspose(const float *a, int64_t lda, int rows, int cols, float *vec) {
+        //printf("In packTranspose\n");
         int64_t i, j;
         float *aoffsets[8];
         float *aoffset = NULL, *boffset = NULL;
@@ -2249,13 +2265,18 @@ class tinyBLAS_PPC {
         if (j > 0) {

             do {
+                //printf("j=%d\n", j);
                 aoffsets[0] = aoffset;
                 for (int it = 1; it < 8; it++)
                     aoffsets[it] = aoffsets[it-1] + lda;
+                /*for (int x = 0; x < 8; x++)
+                    printf("x=%d val=%f\n", x, *(aoffsets[x]));*/
                 aoffset += 8 * lda;
+                //aoffset += 8;// * lda;
                 i = (cols >> 3);
                 if (i > 0) {
                     do {
+                        //printf("i=%d\n", i);
                         for (int it = 0; it < 8; it++) {
                             arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]);
                             __builtin_vsx_disassemble_pair(c[it], &arr[it]);
@@ -2265,10 +2286,16 @@ class tinyBLAS_PPC {

                         vector_permute_store_8(c1, boffset);
                         vector_permute_store_8(c2, boffset+32);
-                        for (int it = 0; it < 4; it++)
-                            aoffsets[it] = aoffsets[it] + 8*lda;
                         boffset += 64;
                         i--;
+                        if (i > 0) {
+                            for (int it = 0; it < 8; it++) {
+                                //aoffsets[it] = aoffsets[it] + 8*lda;
+                                aoffsets[it] = aoffsets[it] + 8;
+                            }
+                            /*for (int x = 0; x < 8; x++)
+                                printf("x=%d val=%f\n", x, *(aoffsets[x]));*/
+                        }
                     } while(i > 0);
                 }
                 if (cols & 4) {
@@ -2401,6 +2428,68 @@ class tinyBLAS_PPC {
         SAVE_ACC(&acc_3, ii+4, jj+4);
     }

+    inline void MMA_16x8(vec_t *vec_A, vec_t *vec_B, acc_t *acc) {
+        for (int x = 0; x < 16; x += 2) {
+            __builtin_mma_xvf32gerpp(&acc[0], vec_A[x + 0],  vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[1], vec_A[x + 0],  vec_B[x + 1]);
+            __builtin_mma_xvf32gerpp(&acc[2], vec_A[x + 1],  vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[3], vec_A[x + 1],  vec_B[x + 1]);
+            __builtin_mma_xvf32gerpp(&acc[4], vec_A[x + 16], vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[5], vec_A[x + 16], vec_B[x + 1]);
+            __builtin_mma_xvf32gerpp(&acc[6], vec_A[x + 17], vec_B[x]);
+            __builtin_mma_xvf32gerpp(&acc[7], vec_A[x + 17], vec_B[x + 1]);
+        }
+
+    }
+
+    void KERNEL(int64_t ii, int64_t jj, int64_t mc, int64_t nc, int64_t kc, vec_t *vec_A, vec_t *vec_B) {
+        for (int64_t i = 0; i < mc; i += 16) {
+            for (int64_t j = 0; j < nc; j += 8) {
+                acc_t acc[8];
+                for (int x = 0; x < 8; x++)
+                    __builtin_mma_xxsetaccz(&acc[x]);
+                for (int64_t l = 0; l < kc; l += 8) {
+                    int A_block_idx = (l/8)*16 + (i/16)*32;
+                    int B_block_idx = (l/8)*16 + (j/8)*16;
+                    vec_t *A_block = &vec_A[A_block_idx];
+                    vec_t *B_block = &vec_B[B_block_idx];
+                    MMA_16x8(A_block, B_block, acc);
+                }
+                add_save_acc(&acc[0], ii + i,      jj + j);
+                add_save_acc(&acc[1], ii + i,      jj + j + 4);
+                add_save_acc(&acc[2], ii + i + 4,  jj + j);
+                add_save_acc(&acc[3], ii + i + 4,  jj + j + 4);
+                add_save_acc(&acc[4], ii + i + 8,  jj + j);
+                add_save_acc(&acc[5], ii + i + 8,  jj + j + 4);
+                add_save_acc(&acc[6], ii + i + 12, jj + j);
+                add_save_acc(&acc[7], ii + i + 12, jj + j + 4);
+            }
+        }
+    }
+
+    void matmul_tiled(int64_t m, int64_t n, int64_t mc, int64_t nc, int64_t kc) {
+        int64_t ytiles = m / mc;
+        int64_t xtiles = n / nc;
+        int64_t tiles  = xtiles * ytiles;
+        int64_t duty   = (tiles + nth - 1) / nth;
+        int64_t start  = duty * ith;
+        int64_t end    = start + duty;
+        if (end > tiles) {
+            end = tiles;
+        }
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = (job / xtiles) * mc;
+            int64_t jj = (job % xtiles) * nc;
+            for (int64_t kk = 0; kk < k; kk += kc) {
+                vec_t A_pack[kc*mc/4];
+                vec_t B_pack[kc*nc/4];
+                packTranspose(A + (ii*lda) + kk, lda, kc, mc, (float*)A_pack);
+                packTranspose(B + (jj*ldb) + kk, ldb, kc, nc, (float*)B_pack);
+                KERNEL(ii, jj, mc, nc, kc, A_pack, B_pack);
+            }
+        }
+    }
+
     void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
         int m_rem = MIN(m - m0, 8);
         int n_rem = MIN(n - n0, 8);
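For reference, the standalone correctness check mentioned in the commit message (run a matmul and compare the final matrix C against the base implementation) could take roughly the following shape. This is a hypothetical harness, not the author's actual test: gemm_ref, the matrix sizes, and the tolerance are all assumptions, and in a real run the second call would be the optimized sgemm path rather than the reference.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Naive reference matmul: C = A * B, row-major.
static void gemm_ref(const float *A, const float *B, float *C,
                     int64_t m, int64_t n, int64_t k) {
    for (int64_t i = 0; i < m; ++i)
        for (int64_t j = 0; j < n; ++j) {
            float sum = 0.0f;
            for (int64_t l = 0; l < k; ++l)
                sum += A[i * k + l] * B[l * n + j];
            C[i * n + j] = sum;
        }
}

int main() {
    const int64_t m = 512, n = 512, k = 512;  // multiples of the 256 block size
    std::vector<float> A(m * k), B(k * n), C_base(m * n), C_opt(m * n);
    for (auto &x : A) x = (float)rand() / RAND_MAX - 0.5f;
    for (auto &x : B) x = (float)rand() / RAND_MAX - 0.5f;

    gemm_ref(A.data(), B.data(), C_base.data(), m, n, k);
    // In the real test this call would be the optimized sgemm path;
    // the reference is reused here only to keep the sketch self-contained.
    gemm_ref(A.data(), B.data(), C_opt.data(), m, n, k);

    float max_diff = 0.0f;
    for (int64_t i = 0; i < m * n; ++i)
        max_diff = std::max(max_diff, std::fabs(C_base[i] - C_opt[i]));
    printf("max abs diff: %g %s\n", (double)max_diff,
           max_diff < 1e-3f ? "(OK)" : "(MISMATCH)");
    return max_diff < 1e-3f ? 0 : 1;
}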
