1 parent af6465a commit f79bd92
ggml/src/ggml-cuda/mmvq.cu
@@ -190,8 +190,8 @@ static __global__ void mul_mat_vec_q(

     const uint32_t channel_bias = ids ? channel_x : channel_dst;

-    float x_biases[ncols_dst] = { { 0.0f } };
-    float gate_biases[ncols_dst] = { { 0.0f } };
+    float x_biases[ncols_dst] = { 0.0f };
+    float gate_biases[ncols_dst] = { 0.0f };

     if constexpr (has_fusion) {
         if (use_bias) {
             x_bias = x_bias + sample_dst*stride_sample_dst + channel_bias*stride_channel_dst + row0;
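The change drops the redundant inner braces from the two array initializers: for a one-dimensional float array, `= { 0.0f }` already zeroes every element (the first explicitly, the rest by value-initialization), whereas the doubly braced `= { { 0.0f } }` puts braces around a scalar element, which GCC and Clang flag with a "braces around scalar initializer" warning. A minimal standalone sketch of the same initialization pattern, using a hypothetical `kCols` constant in place of `ncols_dst`:

// Standalone illustration (not part of the commit): aggregate initialization
// of a 1-D float array, as used for x_biases/gate_biases above.
#include <cstdio>

constexpr int kCols = 4;  // hypothetical stand-in for ncols_dst

int main() {
    // One brace level: element 0 is initialized to 0.0f explicitly,
    // the remaining elements are value-initialized to 0.0f.
    float biases[kCols] = { 0.0f };

    // float bad[kCols] = { { 0.0f } };  // extra braces around a scalar element:
    //                                   // GCC/Clang warn "braces around scalar initializer"

    for (int i = 0; i < kCols; ++i) {
        std::printf("biases[%d] = %f\n", i, biases[i]);
    }
    return 0;
}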