
Commit 6d27076

Revert "Disable some fusion, RoPE cache off by default (ikawrakow#894)"
This reverts commit cd8d0b0.
1 parent: bb5e2f4

File tree (4 files changed, +11 -7 lines):

- common/common.cpp
- common/common.h
- ggml/src/ggml-cuda.cu
- src/llama.cpp

common/common.cpp

Lines changed: 3 additions & 3 deletions

@@ -1106,8 +1106,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.fused_mmad = false;
         return true;
     }
-    if (arg == "-rcache" || arg == "--rope-cache") {
-        params.rope_cache = true;
+    if (arg == "-no-rcache" || arg == "--no-rope-cache") {
+        params.rope_cache = false;
         return true;
     }
     if (arg == "-ser" || arg == "--smart-expert-reduction") {
@@ -1918,7 +1918,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ger, --grouped-expert-routing", "enable grouped expert routing (default: %s)", params.grouped_expert_routing ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disaable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disaable fused mul-multi_add (default: %s)", params.fused_mmad? "enabled" : "disabled" });
-    options.push_back({ "*", "-rcache, --rope-cache", "enable RoPE cache (default: %s)", params.rope_cache ? "enabled" : "disabled" });
+    options.push_back({ "*", "-no-rcache, --no-rope-cache", "disaable RoPE cache (default: %s)", params.rope_cache ? "enabled" : "disabled" });
     options.push_back({ "*", "-ser, --smart-expert-reduction,", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"

common/common.h

Lines changed: 1 addition & 1 deletion

@@ -249,7 +249,7 @@ struct gpt_params {
     bool fused_up_gate = true; // fused up*unary(gate) op
     bool fused_mmad = true; // fused mul+multi_add op
     bool grouped_expert_routing = false; // if to use grouped expert routing (BailingMoeV2 arch)
-    bool rope_cache = false; // if to use RoPE cache (for supported models)
+    bool rope_cache = true; // if to use RoPE cache (for supported models)
     int min_experts = -1;
     float thresh_experts = 0;

ggml/src/ggml-cuda.cu

Lines changed: 6 additions & 2 deletions

@@ -3244,15 +3244,19 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
             ggml_cuda_op_rms_norm(ctx, dst);
             break;
         case GGML_OP_FUSED_RMS_NORM:
-            if (false && ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
+            //if (i + 6 < cgraph->n_nodes) {
+            //    printf("=== Fused rms_norm(%s)\n", dst->name);
+            //    for (int j = 1; j <= 6; ++j) printf("    %s(%s)\n", ggml_op_name(cgraph->nodes[i+j]->op), cgraph->nodes[i+j]->name);
+            //}
+            if (ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
                 cgraph->nodes[i+1]->op == GGML_OP_VIEW &&
                 cgraph->nodes[i+2]->op == GGML_OP_FUSED_RMS_NORM &&
                 cgraph->nodes[i+3]->op == GGML_OP_ROPE_FAST &&
                 cgraph->nodes[i+4]->op == GGML_OP_ROPE_FAST &&
                 ggml_cuda_op_fused_rms_rope_fast(ctx, cgraph->nodes[i+3], cgraph->nodes[i+4])) {
                 i += 4;
             }
-            else if (false && ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
+            else if (ENABLE_FUSION && i + 4 < cgraph->n_nodes &&
                 cgraph->nodes[i+1]->op == GGML_OP_ROPE_FAST &&
                 cgraph->nodes[i+2]->op == GGML_OP_RESHAPE &&
                 cgraph->nodes[i+3]->op == GGML_OP_FUSED_RMS_NORM &&
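
For illustration, a standalone sketch (hypothetical node_t/op_t types, not the ggml API) of the lookahead fusion this hunk re-enables: when the current graph node starts a known op sequence, the backend checks the next few nodes' op types, tries the fused kernel, and on success advances the loop index past the nodes that kernel already covered.

    #include <cstdio>
    #include <vector>

    enum op_t { OP_FUSED_RMS_NORM, OP_VIEW, OP_ROPE_FAST, OP_OTHER };   // hypothetical op tags

    struct node_t { op_t op; };

    // hypothetical fused launcher; returns false when shapes/params are unsupported,
    // in which case the caller falls back to the unfused path
    static bool run_fused_rms_rope(const node_t &, const node_t &) { return true; }

    static void compute_graph(const std::vector<node_t> & nodes) {
        for (size_t i = 0; i < nodes.size(); ++i) {
            if (nodes[i].op == OP_FUSED_RMS_NORM && i + 4 < nodes.size() &&
                nodes[i+1].op == OP_VIEW &&
                nodes[i+2].op == OP_FUSED_RMS_NORM &&
                nodes[i+3].op == OP_ROPE_FAST &&
                nodes[i+4].op == OP_ROPE_FAST &&
                run_fused_rms_rope(nodes[i+3], nodes[i+4])) {
                i += 4;               // skip the nodes the fused kernel already handled
                continue;
            }
            std::printf("node %zu executed unfused\n", i);   // stand-in for the per-op dispatch
        }
    }

    int main() {
        compute_graph({{OP_FUSED_RMS_NORM}, {OP_VIEW}, {OP_FUSED_RMS_NORM},
                       {OP_ROPE_FAST}, {OP_ROPE_FAST}, {OP_OTHER}});
        return 0;
    }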

src/llama.cpp

Lines changed: 1 addition & 1 deletion

@@ -3837,7 +3837,7 @@ struct llama_context_params llama_context_default_params() {
         /*.grouped_expert_routing =*/ false,
         /*.fused_up_gate =*/ true,
         /*.fused_mmad =*/ true,
-        /*.rope_cache =*/ false,
+        /*.rope_cache =*/ true,
         /*.min_experts =*/ -1,
         /*.thtesh_experts =*/ 0.0f,
         /*.only_active_experts =*/ false,
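
For illustration, a minimal standalone sketch (hypothetical context_params_t, not the llama_context_params API) of what this one-line flip means for callers: the default-parameter factory now returns rope_cache = true, so code that wants the previous behavior must clear the field explicitly.

    #include <cstdio>

    struct context_params_t {          // hypothetical stand-in for the context parameter struct
        bool fused_mmad;
        bool rope_cache;
    };

    static context_params_t default_context_params() {
        return context_params_t {
            /*.fused_mmad =*/ true,
            /*.rope_cache =*/ true,    // flipped from false by this revert
        };
    }

    int main() {
        context_params_t cparams = default_context_params();
        cparams.rope_cache = false;    // explicit opt-out restores the pre-revert behavior
        std::printf("rope_cache: %s\n", cparams.rope_cache ? "on" : "off");
        return 0;
    }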
