Skip to content

Commit cc327f5

Browse files
author
bssrdf
committed
Added a specialization of the CUDA copy op for the case where the tensor is transposed.
1 parent 3099078 commit cc327f5

File tree

2 files changed

+65
-3
lines changed

2 files changed

+65
-3
lines changed

ggml/src/ggml-cuda/cpy.cu

Lines changed: 60 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,48 @@ static __global__ void cpy_flt(const char * cx, char * cdst_direct, const int ne
3737
cpy_1(cx + x_offset, cdst + dst_offset);
3838
}
3939

40+
41+
// Tiled transposed copy: treats src as nmat contiguous ne00 x ne01 matrices and
// writes each one transposed into dst. Launched with blockDim = (TILE_DIM, BLOCK_ROWS, 1)
// and gridDim = (ceil(ne00/TILE_DIM), ceil(ne01/TILE_DIM), ceil(nmat/BLOCK_NM)).
// Only used when the copy is an exact transpose (ne00==ne11, ne01==ne10, matching strides);
// the unused ne*/nb* parameters are kept so the signature mirrors cpy_flt.
template <typename T>
static __global__ void cpy_flt_transpose(const char * cx, char * cdst_direct, const int ne,
                                         const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
                                         const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
                                         const int nb12, const int nb13, char ** cdst_indirect, int graph_cpynode_index) {
    // indirection used when CUDA graphs rewrite the destination pointer per capture
    char * cdst = (cdst_indirect != nullptr) ? cdst_indirect[graph_cpynode_index] : cdst_direct;

    const T * src = reinterpret_cast<const T *>(cx);
    T       * dst = reinterpret_cast<T *>(cdst);

    const int64_t nmat = ne / (ne00 * ne01); // number of matrices in the batch
    const int64_t n    = ne00 * ne01;        // elements per matrix

    // NOTE(review): width is the grid-padded row length; idx below is built from it but
    // bounds-checked against n — assumes ne00 is effectively a multiple of TILE_DIM or
    // that over-read indices fall outside [0, n). TODO confirm for ragged shapes.
    const int width = gridDim.x * TILE_DIM;
    const int x  = blockIdx.x * TILE_DIM + threadIdx.x;
    const int y  = blockIdx.y * TILE_DIM + threadIdx.y;
    const int tx = blockIdx.y * TILE_DIM + threadIdx.x; // transposed block offset
    const int ty = blockIdx.x * TILE_DIM + threadIdx.y;

    // +1 padding on the inner dimension avoids shared-memory bank conflicts
    // when the tile is read column-wise in the write phase.
    __shared__ T tile[TILE_DIM][TILE_DIM + 1];

    for (int i = 0; i < BLOCK_NM; ++i) {
        // imat depends only on blockIdx.z and i, so this test is uniform across the
        // block and the barriers below are reached by all threads.
        const unsigned int imat = blockIdx.z * BLOCK_NM + i;
        if (imat >= nmat) {
            break; // imat grows monotonically with i, nothing more to do
        }

        // load a TILE_DIM x TILE_DIM tile of the source matrix into shared memory
        for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
            const unsigned int idx = (y + j) * width + x;
            if (idx < n) {
                tile[threadIdx.y + j][threadIdx.x] = src[imat * n + idx];
            }
        }
        __syncthreads(); // tile fully written before any thread reads it

        // store the tile transposed: swap the block offsets (tx/ty) and the
        // threadIdx.x/threadIdx.y roles when indexing the tile
        for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
            const unsigned int idx = (ty + j) * width + tx;
            if (idx < n) {
                dst[imat * n + idx] = tile[threadIdx.x][threadIdx.y + j];
            }
        }
        __syncthreads(); // tile is overwritten by the next iteration
    }
}
4082
static __device__ void cpy_blck_q8_0_f32(const char * cxi, char * cdsti) {
4183
float * cdstf = (float *)(cdsti);
4284

// Host launcher for float-to-float copies. Dispatches to the tiled transpose kernel
// when src/dst element types match (float->float or half->half) and the copy is an
// exact transpose (swapped extents and strides); otherwise falls back to the generic
// element-wise cpy_flt kernel. graph_cpynode_index is advanced once per launch so
// CUDA-graph destination indirection stays in sync.
template <typename src_t, typename dst_t>
static void ggml_cpy_flt_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) {
    // transpose specialization only applies when no type conversion is needed
    if constexpr ((std::is_same_v<src_t, half>  && std::is_same_v<dst_t, half>) ||
                  (std::is_same_v<src_t, float> && std::is_same_v<dst_t, float>)) {
        if (ne00 == ne11 && ne01 == ne10 && nb00 == nb11 && nb10 == nb01) { // transposed copy
            dim3 dimGrid((ne00 + TILE_DIM - 1) / TILE_DIM,
                         (ne01 + TILE_DIM - 1) / TILE_DIM,
                         (ne / (ne00 * ne01) + BLOCK_NM - 1) / BLOCK_NM);
            dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
            // kernel is parameterized on the element type only — no copy functor needed
            cpy_flt_transpose<dst_t><<<dimGrid, dimBlock, 0, stream>>>
                (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++);
            return;
        }
    }
    // generic element-wise fallback (single copy of the launch, shared by all paths)
    const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
    cpy_flt<cpy_1_flt<src_t, dst_t>><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>
        (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++);
}
151208

152209
static void ggml_cpy_f32_q8_0_cuda(

ggml/src/ggml-cuda/cpy.cuh

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,11 @@
22

33
#define CUDA_CPY_BLOCK_SIZE 64
44

// Tile edge length (in elements) for the transposed-copy kernel; one shared-memory
// tile is TILE_DIM x TILE_DIM.
constexpr int TILE_DIM   = 32;
// Rows written per loop step inside a tile; equals blockDim.y of the transpose kernel.
constexpr int BLOCK_ROWS = 8;
// Matrices of the batch processed per thread block along gridDim.z.
constexpr int BLOCK_NM   = 8;

// the transpose kernel iterates j += BLOCK_ROWS up to TILE_DIM and assumes exact coverage
static_assert(TILE_DIM % BLOCK_ROWS == 0, "TILE_DIM must be a multiple of BLOCK_ROWS");

510
void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection = false);
611

712
void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

0 commit comments

Comments
 (0)