@@ -199,21 +199,21 @@ void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
 /* gated ops */
 
 template <float (*op)(float), typename T>
-static __global__ void unary_gated_op_kernel(const T * x, T * dst, const int k, const int n, const int o) {
-    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+static __global__ void unary_gated_op_kernel(const T * x, T * dst, const int64_t k, const int64_t n, const int64_t o) {
+    const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x;
 
     if (i >= k) {
         return;
     }
 
     // perform base op on first half of row and multiply with gate in second half
-    const int j = (i / n) * o + (i % n);
+    const int64_t j = (i / n) * o + (i % n);
     dst[i] = (T)(op((float)x[j]) * (float)x[j + n]);
 }
 
 template <float (*op)(float), typename T>
-static void unary_gated_cuda(const T * x, T * dst, const int k, const int n, const int o, cudaStream_t stream) {
-    const int num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE;
+static void unary_gated_cuda(const T * x, T * dst, const int64_t k, const int64_t n, const int64_t o, cudaStream_t stream) {
+    const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE;
     unary_gated_op_kernel<op><<<num_blocks, CUDA_GLU_BLOCK_SIZE, 0, stream>>>(x, dst, k, n, o);
 }
 
@@ -222,10 +222,12 @@ void ggml_cuda_op_unary_gated(ggml_backend_cuda_context & ctx, ggml_tensor * dst
     const ggml_tensor * src0 = dst->src[0];
     const void * src0_d = src0->data;
     void * dst_d = dst->data;
-    const int nc = src0->ne[0] / 2;
+    const int64_t nc = src0->ne[0] / 2;
     cudaStream_t stream = ctx.stream();
 
     GGML_ASSERT(ggml_is_contiguous_1(src0));
+    GGML_ASSERT(src0->nb[0] == ggml_element_size(src0));
+    GGML_ASSERT(ggml_is_contiguous(dst));
 
     GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
     GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
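
Note (editorial, not part of the commit): widening k, n, and o to int64_t is only half the story for very large tensors. blockDim.x and blockIdx.x are 32-bit values, so their product must be promoted before the multiply, which is why the kernel computes int64_t(blockDim.x)*blockIdx.x; assigning a 32-bit product to an int64_t afterwards would keep the wrapped value. A minimal standalone sketch of the wraparound, with made-up launch values:

// Sketch: why blockDim.x*blockIdx.x must be widened before multiplying.
// The launch values below are hypothetical, chosen so the true product
// exceeds 2^32.
#include <cstdint>
#include <cstdio>

int main() {
    const unsigned block_dim = 256;        // block size (256 assumed here)
    const unsigned block_idx = 20000000;   // a block index from a huge launch

    const int64_t wrapped = block_dim*block_idx;           // 32-bit product wraps, then widens
    const int64_t correct = int64_t(block_dim)*block_idx;  // widened before the multiply

    printf("wrapped = %lld\n", (long long) wrapped);  // 825032704
    printf("correct = %lld\n", (long long) correct);  // 5120000000
    return 0;
}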
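For reference, the row/gate indexing that the kernel comment describes can be mirrored on the CPU. The sketch below assumes the launch uses n = nc and o = src0->ne[0] (so the gate lives in the second half of each source row, which the call site here does not show) and uses SiLU as a stand-in for the templated op; names and shapes are illustrative:

// CPU mirror of unary_gated_op_kernel's indexing, for illustration.
// Each source row holds [inputs | gates]; output element i maps to
// input x[j] and gate x[j + n], where j = (i / n) * o + (i % n).
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

static float silu(float v) { return v / (1.0f + std::exp(-v)); }

int main() {
    const int64_t rows = 2;
    const int64_t n    = 4;       // output columns (nc = ne[0] / 2)
    const int64_t o    = 2*n;     // source row stride in elements
    const int64_t k    = rows*n;  // total output elements

    std::vector<float> x(rows*o);
    std::vector<float> dst(k);
    for (size_t idx = 0; idx < x.size(); ++idx) {
        x[idx] = 0.1f*(float) idx;
    }

    for (int64_t i = 0; i < k; ++i) {
        const int64_t j = (i / n) * o + (i % n);  // same mapping as the kernel
        dst[i] = silu(x[j]) * x[j + n];           // base op times gate
    }

    for (int64_t i = 0; i < k; ++i) {
        printf("dst[%lld] = %.4f\n", (long long) i, dst[i]);
    }
    return 0;
}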