Skip to content

Commit 6a6c954

Browse files
committed
delete unused comment
1 parent 65180fb commit 6a6c954

File tree

1 file changed

+0
-143
lines changed

1 file changed

+0
-143
lines changed

ggml/src/ggml-cuda/ssm_scan.cu

Lines changed: 0 additions & 143 deletions
Original file line numberDiff line numberDiff line change
@@ -175,146 +175,3 @@ void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context &ctx, ggml_tensor *dst) {
175175
src3->nb[1], src4->nb[1], src4->nb[2], src5->nb[1],
176176
src5->nb[2], dst_d, nc, nr, n_t, n_s, stream);
177177
}
178-
179-
// #include "ssm_scan.cuh"
180-
181-
// template <int block_size>
182-
// static __global__ void ssm_scan_f32(
183-
// const float *__restrict__ src0, const float *__restrict__ src1,
184-
// const float *__restrict__ src2, const float *__restrict__ src3,
185-
// const float *__restrict__ src4, const float *__restrict__ src5,
186-
// const int src0_nb1, const int src0_nb2, const int src1_nb0,
187-
// const int src1_nb1, const int src1_nb2, const int src1_nb3,
188-
// const int src2_nb0, const int src2_nb1, const int src2_nb2,
189-
// const int src3_nb1, const int src4_nb1, const int src4_nb2,
190-
// const int src5_nb1, const int src5_nb2, float *__restrict__ dst,
191-
// const int nc, const int nr, const int n_t, const int n_s) {
192-
// // const int row = blockIdx.x*blockDim.y + threadIdx.y;
193-
// const int tid = threadIdx.x;
194-
// const int i3 = threadIdx.y;
195-
196-
// const int ith = tid;
197-
// const int nth = WARP_SIZE;
198-
199-
// // rows per thread
200-
// const int dr = (nr + nth - 1) / nth;
201-
202-
// // row range for this thread
203-
// const int ir0 = dr * ith;
204-
// const int ir1 = min(ir0 + dr, nr);
205-
// const int ir = ir1 - ir0;
206-
// for (int i2 = 0; i2 < n_t; ++i2) {
207-
// const float *s0 =
208-
// (const float *)((const char *)src0 + ir0 * src0_nb1 +
209-
// i3 * src0_nb2); // {d_state, d_inner, n_s}
210-
// const float *x =
211-
// (const float *)((const char *)src1 + ir0 * src1_nb0 + i2 * src1_nb1 +
212-
// i3 * src1_nb2); // {d_inner, n_t, n_s}
213-
// const float *dt =
214-
// (const float *)((const char *)src2 + ir0 * src2_nb0 + i2 * src2_nb1 +
215-
// i3 * src2_nb2); // {d_inner, n_t, n_s}
216-
// const float *A = (const float *)((const char *)src3 +
217-
// ir0 * src3_nb1); // {d_state, d_inner}
218-
// const float *B = (const float *)((const char *)src4 + i2 * src4_nb1 +
219-
// i3 * src4_nb2); // {d_state, n_t, n_s}
220-
// const float *C = (const float *)((const char *)src5 + i2 * src5_nb1 +
221-
// i3 * src5_nb2); // {d_state, n_t, n_s}
222-
// float *y = (float *)((char *)dst + ir0 * src1_nb0 + i2 * src1_nb1 +
223-
// i3 * src1_nb2); // {d_inner, n_t, n_s}
224-
// float *s = (float *)((char *)dst + ir0 * src0_nb1 + i3 * src0_nb2 +
225-
// src1_nb3); // {d_state, d_inner, n_s}
226-
227-
// // use the output as the source for the next token-wise iterations
228-
// if (i2 > 0) {
229-
// s0 = s;
230-
// }
231-
232-
// // d_inner
233-
// for (int i1 = 0; i1 < ir; ++i1) {
234-
// // ref:
235-
// //
236-
// https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78
237-
// float dt_soft_plus = dt[i1] <= 20.0f ? log1pf(expf(dt[i1])) : dt[i1];
238-
// float x_dt = x[i1] * dt_soft_plus;
239-
// float sumf = 0.0f;
240-
// // d_state
241-
// #pragma unroll
242-
// for (int i0 = 0; i0 < nc; ++i0) {
243-
// int i = i0 + i1 * nc;
244-
// // state = prev_state * dA + dB * x
245-
// float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt);
246-
// // y = rowwise_dotprod(state, C)
247-
// sumf += state * C[i0];
248-
// s[i] = state;
249-
// }
250-
// y[i1] = sumf;
251-
// }
252-
// }
253-
// }
254-
255-
// static void ssm_scan_f32_cuda(
256-
// const float *src0, const float *src1, const float *src2, const float
257-
// *src3, const float *src4, const float *src5, const int src0_nb1, const
258-
// int src0_nb2, const int src1_nb0, const int src1_nb1, const int src1_nb2,
259-
// const int src1_nb3, const int src2_nb0, const int src2_nb1, const int
260-
// src2_nb2, const int src3_nb1, const int src4_nb1, const int src4_nb2,
261-
// const int src5_nb1, const int src5_nb2, float *dst, const int nc, const
262-
// int nr, const int n_t, const int n_s, cudaStream_t stream) {
263-
// const dim3 block_dims(WARP_SIZE, n_s, 1);
264-
// const int nblocks = 1; // TODO
265-
266-
// ssm_scan_f32<WARP_SIZE><<<nblocks, block_dims, 0, stream>>>(
267-
// src0, src1, src2, src3, src4, src5, src0_nb1, src0_nb2, src1_nb0,
268-
// src1_nb1, src1_nb2, src1_nb3, src2_nb0, src2_nb1, src2_nb2, src3_nb1,
269-
// src4_nb1, src4_nb2, src5_nb1, src5_nb2, dst, nc, nr, n_t, n_s);
270-
// }
271-
272-
// void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context &ctx, ggml_tensor *dst)
273-
// {
274-
// const struct ggml_tensor *src0 = dst->src[0]; // s
275-
// const struct ggml_tensor *src1 = dst->src[1]; // x
276-
// const struct ggml_tensor *src2 = dst->src[2]; // dt
277-
// const struct ggml_tensor *src3 = dst->src[3]; // A
278-
// const struct ggml_tensor *src4 = dst->src[4]; // B
279-
// const struct ggml_tensor *src5 = dst->src[5]; // C
280-
281-
// const int64_t nc = src0->ne[0]; // d_state
282-
// const int64_t nr = src0->ne[1]; // d_inner
283-
// const int64_t n_t = src1->ne[1]; // number of tokens per sequence
284-
// const int64_t n_s = src0->ne[2]; // number of sequences in the batch
285-
286-
// GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) ==
287-
// ggml_nelements(dst));
288-
// GGML_ASSERT(src0->nb[0] == sizeof(float));
289-
// GGML_ASSERT(src1->nb[0] == sizeof(float));
290-
// GGML_ASSERT(src2->nb[0] == sizeof(float));
291-
// GGML_ASSERT(src3->nb[0] == sizeof(float));
292-
// GGML_ASSERT(src4->nb[0] == sizeof(float));
293-
// GGML_ASSERT(src5->nb[0] == sizeof(float));
294-
// // required for the dot product between s and C
295-
// GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float));
296-
// // required for per-sequence offsets for states
297-
// GGML_ASSERT(src0->nb[2] == src0->ne[0] * src0->ne[1] * sizeof(float));
298-
// // required to get correct offset for state destination (i.e. src1->nb[3])
299-
// GGML_ASSERT(src1->nb[3] ==
300-
// src1->ne[0] * src1->ne[1] * src1->ne[2] * sizeof(float));
301-
302-
// const float *src0_d = (const float *)src0->data;
303-
// const float *src1_d = (const float *)src1->data;
304-
// const float *src2_d = (const float *)src2->data;
305-
// const float *src3_d = (const float *)src3->data;
306-
// const float *src4_d = (const float *)src4->data;
307-
// const float *src5_d = (const float *)src5->data;
308-
// float *dst_d = (float *)dst->data;
309-
// cudaStream_t stream = ctx.stream();
310-
311-
// GGML_ASSERT(src0->type == GGML_TYPE_F32);
312-
// GGML_ASSERT(dst->type == GGML_TYPE_F32);
313-
314-
// ssm_scan_f32_cuda(src0_d, src1_d, src2_d, src3_d, src4_d, src5_d,
315-
// src0->nb[1],
316-
// src0->nb[2], src1->nb[0], src1->nb[1], src1->nb[2],
317-
// src1->nb[3], src2->nb[0], src2->nb[1], src2->nb[2],
318-
// src3->nb[1], src4->nb[1], src4->nb[2], src5->nb[1],
319-
// src5->nb[2], dst_d, nc, nr, n_t, n_s, stream);
320-
// }

0 commit comments

Comments
 (0)