
Commit ca70564

Merge branch 'ggml-org:master' into master
2 parents: 979f453 + 8ff2060

11 files changed: +106 additions, -38 deletions

.github/workflows/build.yml

Lines changed: 1 addition & 1 deletion
@@ -138,7 +138,7 @@ jobs:
           ctest -L main --verbose --timeout 900
 
   macOS-latest-cmake-arm64-webgpu:
-    runs-on: latest
+    runs-on: macos-latest
 
     steps:
       - name: Clone

common/arg.cpp

Lines changed: 4 additions & 4 deletions
@@ -2548,7 +2548,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         {"--cpu-moe", "-cmoe"},
         "keep all Mixture of Experts (MoE) weights in the CPU",
         [](common_params & params) {
-            params.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
+            params.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
         }
     ).set_env("LLAMA_ARG_CPU_MOE"));
     add_opt(common_arg(
@@ -2561,7 +2561,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             for (int i = 0; i < value; ++i) {
                 // keep strings alive and avoid leaking memory by storing them in a static vector
                 static std::list<std::string> buft_overrides;
-                buft_overrides.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
+                buft_overrides.push_back(llm_ffn_exps_block_regex(i));
                 params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), ggml_backend_cpu_buffer_type()});
             }
         }
@@ -2570,7 +2570,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         {"--cpu-moe-draft", "-cmoed"},
         "keep all Mixture of Experts (MoE) weights in the CPU for the draft model",
         [](common_params & params) {
-            params.speculative.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
+            params.speculative.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
         }
     ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CPU_MOE_DRAFT"));
     add_opt(common_arg(
@@ -2582,7 +2582,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             }
             for (int i = 0; i < value; ++i) {
                 static std::list<std::string> buft_overrides_draft;
-                buft_overrides_draft.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
+                buft_overrides_draft.push_back(llm_ffn_exps_block_regex(i));
                 params.speculative.tensor_buft_overrides.push_back({buft_overrides_draft.back().c_str(), ggml_backend_cpu_buffer_type()});
             }
         }

common/common.h

Lines changed: 14 additions & 0 deletions
@@ -734,6 +734,20 @@ const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
 
 }
 
+//
+// MoE utils
+//
+
+const char * const LLM_FFN_EXPS_REGEX = "\\.ffn_(up|down|gate)_exps";
+
+static std::string llm_ffn_exps_block_regex(int idx) {
+    return string_format("blk\\.%d%s", idx, LLM_FFN_EXPS_REGEX);
+}
+
+static llama_model_tensor_buft_override llm_ffn_exps_cpu_override() {
+    return { LLM_FFN_EXPS_REGEX, ggml_backend_cpu_buffer_type() };
+}
+
 //
 // training utils
 //
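As context for the arg.cpp hunks above: llm_ffn_exps_block_regex(3) expands to blk\.3\.ffn_(up|down|gate)_exps, selecting only the expert FFN tensors of block 3, while LLM_FFN_EXPS_REGEX (used by llm_ffn_exps_cpu_override()) matches those tensors in every block. Below is a standalone sketch, not part of this commit: it rebuilds the per-block pattern without string_format and uses plain std::regex purely to illustrate which tensor names the patterns select; llama.cpp applies the overrides through its own tensor buffer-type matching, not through std::regex as shown here.

// Standalone illustration only: which tensor names the new patterns select.
// Assumptions: no llama.cpp/ggml headers; std::regex stands in for the real
// override matching; the helper is rebuilt with std::to_string instead of
// string_format.
#include <cstdio>
#include <regex>
#include <string>

static const char * const LLM_FFN_EXPS_REGEX = "\\.ffn_(up|down|gate)_exps";

static std::string llm_ffn_exps_block_regex(int idx) {
    return "blk\\." + std::to_string(idx) + LLM_FFN_EXPS_REGEX;
}

int main() {
    const std::regex per_block(llm_ffn_exps_block_regex(3)); // blk\.3\.ffn_(up|down|gate)_exps
    const std::regex all_blocks(LLM_FFN_EXPS_REGEX);         // pattern used by llm_ffn_exps_cpu_override()

    const char * names[] = {
        "blk.3.ffn_up_exps.weight",   // expert FFN tensor of block 3
        "blk.7.ffn_gate_exps.weight", // expert FFN tensor of another block
        "blk.3.attn_q.weight",        // not an expert FFN tensor
    };
    for (const char * name : names) {
        std::printf("%-28s all_blocks=%d block_3=%d\n", name,
                    std::regex_search(name, all_blocks) ? 1 : 0,
                    std::regex_search(name, per_block)  ? 1 : 0);
    }
    return 0;
}

The behaviour of the command-line options is unchanged; the regex literal now lives in one place instead of being repeated in four lambdas.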

ggml/src/ggml-cpu/ops.cpp

Lines changed: 0 additions & 1 deletion
@@ -8599,7 +8599,6 @@ static void ggml_compute_forward_timestep_embedding_f32(
         }
         if (dim % 2 != 0 && ith == 0) {
             embed_data[2 * half] = 0.f;
-            embed_data[dim] = 0.f;
         }
     }
 }
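The hunk above removes the second zero-write in the CPU path. Together with the ggml.c hunk at the bottom of this diff, which stops padding odd dims to dim + 1, the output row now holds exactly dim floats, so writing embed_data[dim] would be out of bounds; for an odd dim the single leftover element sits at index 2 * half == dim - 1. The following sketch, plain C++ rather than ggml code and using an assumed (conventional) sinusoidal frequency formula, shows the resulting row layout for dim = 5:

// Illustrative sketch only (assumed formula, not copied from ggml): layout of
// one timestep-embedding row for an odd dim after this change.
#include <cmath>
#include <cstdio>

int main() {
    const int   dim        = 5;        // odd on purpose
    const float max_period = 10000.0f;
    const float timestep   = 42.0f;

    const int half = dim / 2;          // = 2 frequency pairs
    float row[5] = {0.0f};             // exactly dim elements now (was dim + 1 for odd dim)

    for (int j = 0; j < half; ++j) {
        const float freq = std::exp(-std::log(max_period) * j / half);
        const float arg  = timestep * freq;
        row[j]        = std::cos(arg); // indices [0, half)
        row[j + half] = std::sin(arg); // indices [half, 2*half)
    }
    if (dim % 2 != 0) {
        row[2 * half] = 0.0f;          // last element, index dim - 1 (in bounds);
                                       // the old code also zeroed index dim, which
                                       // no longer exists
    }

    for (int j = 0; j < dim; ++j) {
        std::printf("row[%d] = % .6f\n", j, row[j]);
    }
    return 0;
}

The CUDA, Metal, OpenCL, SYCL, and Vulkan hunks below make the same substitution in their respective kernels, zeroing index 2 * half instead of index dim (and, where needed, hoisting the half = dim / 2 computation above the odd-dim check).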

ggml/src/ggml-cuda/tsembd.cu

Lines changed: 3 additions & 3 deletions
@@ -7,11 +7,11 @@ static __global__ void timestep_embedding_f32(const float * timesteps, float * d
     int j = threadIdx.x + blockIdx.x * blockDim.x;
     float * embed_data = (float *)((char *)dst + i*nb1);
 
-    if (dim % 2 != 0 && j == ((dim + 1) / 2)) {
-        embed_data[dim] = 0.f;
+    int half = dim / 2;
+    if (dim % 2 != 0 && j == half) {
+        embed_data[2 * half] = 0.f;
     }
 
-    int half = dim / 2;
     if (j >= half) {
         return;
     }

ggml/src/ggml-metal/ggml-metal.metal

Lines changed: 1 addition & 1 deletion
@@ -4167,7 +4167,7 @@ kernel void kernel_timestep_embedding_f32(
     }
 
     if (args.dim % 2 != 0 && tpitg.x == 0) {
-        embed_data[args.dim] = 0.f;
+        embed_data[2 * half_] = 0.f;
     }
 }
 

ggml/src/ggml-opencl/kernels/tsembd.cl

Lines changed: 2 additions & 2 deletions
@@ -26,8 +26,8 @@ kernel void kernel_timestep_embedding(
     local_half_dim = logical_dim / 2;
     local_embed_data_ptr = (global float *)((global char *)local_dst_output_base_ptr + local_i * dst_nb1_bytes);
 
-    if (logical_dim % 2 != 0 && local_j == ((logical_dim + 1) / 2)) {
-        local_embed_data_ptr[logical_dim] = 0.0f;
+    if (logical_dim % 2 != 0 && local_j == local_half_dim) {
+        local_embed_data_ptr[2 * local_half_dim] = 0.0f;
     }
 
     if (local_j >= local_half_dim) {

ggml/src/ggml-sycl/tsembd.cpp

Lines changed: 4 additions & 3 deletions
@@ -21,11 +21,12 @@ static void timestep_embedding_f32(
     int j = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2);
     float * embed_data = (float *)((char *)dst + i*nb1);
 
-    if (dim % 2 != 0 && j == ((dim + 1) / 2)) {
-        embed_data[dim] = 0.f;
+    int half = dim / 2;
+
+    if (dim % 2 != 0 && j == half) {
+        embed_data[2 * half] = 0.f;
     }
 
-    int half = dim / 2;
     if (j >= half) {
         return;
     }

ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp

Lines changed: 4 additions & 3 deletions
@@ -24,11 +24,12 @@ void main() {
     const uint j = gl_GlobalInvocationID.x;
     const uint d_offset = i * p.nb1;
 
-    if (p.dim % 2 != 0 && j == ((p.dim + 1) / 2)) {
-        data_d[d_offset + p.dim] = 0.f;
+    const uint half_dim = p.dim / 2;
+
+    if (p.dim % 2 != 0 && j == half_dim) {
+        data_d[d_offset + 2 * half_dim] = 0.f;
     }
 
-    const uint half_dim = p.dim / 2;
     if (j >= half_dim) {
         return;
     }

ggml/src/ggml.c

Lines changed: 1 addition & 5 deletions
@@ -4923,12 +4923,8 @@ struct ggml_tensor * ggml_timestep_embedding(
         struct ggml_tensor * timesteps,
         int dim,
         int max_period) {
-    int actual_dim = dim;
-    if (dim % 2 != 0) {
-        actual_dim = dim + 1;
-    }
 
-    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, actual_dim, timesteps->ne[0]);
+    struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, dim, timesteps->ne[0]);
 
     ggml_set_op_params_i32(result, 0, dim);
     ggml_set_op_params_i32(result, 1, max_period);

0 commit comments
