
Commit c7dbe3f

Disable pipeline parallel for tensor override or allocation failed (#879)
* disable pipeline parallelism when tensor override present
* disable pipeline parallel if allocation failed

---------

Co-authored-by: firecoperana <firecoperana>
1 parent 14760aa commit c7dbe3f

File tree (4 files changed, +28 -7 lines):

  common/common.cpp
  src/llama-model.cpp
  src/llama-model.h
  src/llama.cpp


common/common.cpp

Lines changed: 1 addition & 1 deletion
@@ -2658,7 +2658,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
     auto mparams = llama_model_params_from_gpt_params(params);

     llama_model * model = nullptr;
-
+
     if (!params.hf_repo.empty() && !params.hf_file.empty()) {
         model = llama_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
     } else if (!params.model_url.empty()) {

src/llama-model.cpp

Lines changed: 4 additions & 0 deletions
@@ -1237,6 +1237,10 @@ std::string LLM_TN::operator()(llm_tensor tensor, const std::string & suffix, in
     return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
 }

+void llama_model::set_tensor_overrides(const llama_model_params& params) {
+    tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
+}
+
 std::string llama_model_ftype_name(llama_ftype ftype) {
     if (ftype & LLAMA_FTYPE_GUESSED) {
         return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
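The setter added here treats the override list as present only when the array pointer is non-null and its first entry has a non-null pattern, i.e. it assumes a list terminated by an entry whose pattern is null. A minimal sketch of that convention, using a simplified stand-in struct rather than the real override entry type from the library headers:

```cpp
#include <cstdio>

// Simplified stand-in for the entries behind llama_model_params::tensor_buft_overrides;
// the real entry also carries a ggml buffer type, which is omitted here.
struct buft_override_stub {
    const char * pattern; // tensor-name pattern; a null pattern terminates the list
};

// Mirrors the check in llama_model::set_tensor_overrides(): the override list counts
// as present only if the pointer is non-null and the first entry is not the terminator.
static bool overrides_present(const buft_override_stub * overrides) {
    return overrides && overrides[0].pattern != nullptr;
}

int main() {
    const buft_override_stub empty_list[] = { { nullptr } };                          // terminator only
    const buft_override_stub one_rule[]   = { { "blk\\..*\\.ffn_.*" }, { nullptr } }; // one pattern + terminator

    std::printf("no list      -> %d\n", overrides_present(nullptr));    // 0
    std::printf("empty list   -> %d\n", overrides_present(empty_list)); // 0
    std::printf("with pattern -> %d\n", overrides_present(one_rule));   // 1
    return 0;
}
```

In other words, supplying any tensor buffer type override at load time flips the flag, which the context-creation code in src/llama.cpp then consults.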

src/llama-model.h

Lines changed: 8 additions & 0 deletions
@@ -305,10 +305,18 @@ struct llama_model {
     // keep track of loaded lora adapters
     std::set<llama_lora_adapter *> lora_adapters;

+    bool tensor_overrides;
+
     ~llama_model();

     // Not actually needed, but left in place for now
     size_t max_nodes() const { return 65536; }
+
+    bool has_tensor_overrides() const {
+        return tensor_overrides;
+    };
+
+    void set_tensor_overrides(const llama_model_params& params);
 };

 struct llama_lora_weight {
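Since the new tensor_overrides member has no in-class initializer, it is only meaningful once set_tensor_overrides() has run during model load; has_tensor_overrides() is then a cheap const getter. A reduced sketch of that intended flow, using stand-in types rather than the real llama structs:

```cpp
#include <cstdio>

// Reduced stand-ins for llama_model_params / llama_model, just to show the call order.
struct params_stub {
    bool any_tensor_override = false; // stands in for a non-empty tensor_buft_overrides list
};

struct model_stub {
    bool tensor_overrides = false; // the real member is uninitialized, so the setter must run first

    void set_tensor_overrides(const params_stub & p) { tensor_overrides = p.any_tensor_override; }
    bool has_tensor_overrides() const { return tensor_overrides; }
};

int main() {
    params_stub params;
    params.any_tensor_override = true;

    model_stub model;
    model.set_tensor_overrides(params); // done once, right after the model is created from file

    // later, at context creation, pipeline parallelism is gated on the flag
    const bool pipeline_parallel = !model.has_tensor_overrides();
    std::printf("pipeline_parallel = %d\n", pipeline_parallel); // 0: overrides disable it
    return 0;
}
```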

src/llama.cpp

Lines changed: 15 additions & 6 deletions
@@ -3969,7 +3969,7 @@ struct llama_model * llama_load_model_from_file(
             return true;
         };
     }
-
+    model->set_tensor_overrides(params);
     // model->devices hold device indices that are used to offload
     // use model->devices to determine offload device
     // if no device is specified, all device are included
@@ -4479,7 +4479,7 @@ struct llama_context * llama_new_context_with_model(
         llama_get_device_count(*model) > 1 &&
         model->n_gpu_layers > (int)model->hparams.n_layer &&
         model->split_mode == LLAMA_SPLIT_MODE_LAYER &&
-        params.offload_kqv;
+        params.offload_kqv && !model->has_tensor_overrides();
 #ifndef GGML_USE_CUDA
     // pipeline parallelism requires support for async compute and events
     // currently this is only implemented in the CUDA backend
@@ -4498,10 +4498,19 @@ struct llama_context * llama_new_context_with_model(
         ggml_cgraph * gf = llm_build_context::llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);

         // initialize scheduler with the worst-case graph
-        if (!ggml_backend_sched_reserve(ctx->sched, gf)) {
-            LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
-            llama_free(ctx);
-            return nullptr;
+        bool gf_success = ggml_backend_sched_reserve(ctx->sched, gf);
+        if (!gf_success)
+        {
+            if (pipeline_parallel) {
+                LLAMA_LOG_WARN("%s: compute buffer allocation failed, retrying without pipeline parallelism\n", __func__);
+                ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), max_nodes, false);
+                gf_success = ggml_backend_sched_reserve(ctx->sched, gf);
+            }
+            if (!gf_success) {
+                LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
+                llama_free(ctx);
+                return nullptr;
+            }
         }

         for (size_t i = 0; i < ctx->backends.size(); i++) {
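The new allocation path is a try-then-degrade pattern: reserve compute buffers for the worst-case graph with the preferred scheduler, and if that fails while pipeline parallelism is enabled, rebuild the scheduler without it and try once more before giving up. Below is a minimal self-contained sketch of the same control flow; make_scheduler() and reserve_buffers() are hypothetical stand-ins for ggml_backend_sched_new() and ggml_backend_sched_reserve(), and the "fails only with pipeline parallelism" behaviour is assumed purely for demonstration:

```cpp
#include <cstdio>
#include <memory>

// Hypothetical scheduler type standing in for ggml_backend_sched_t.
struct scheduler {
    bool pipeline_parallel;
};

// Stand-in for ggml_backend_sched_new(); the last argument of the real call selects pipeline parallelism.
static std::unique_ptr<scheduler> make_scheduler(bool pipeline_parallel) {
    return std::unique_ptr<scheduler>(new scheduler{pipeline_parallel});
}

// Stand-in for ggml_backend_sched_reserve(); assume the worst-case graph only fits without pipeline parallelism.
static bool reserve_buffers(const scheduler & sched) {
    return !sched.pipeline_parallel;
}

int main() {
    const bool pipeline_parallel = true;
    std::unique_ptr<scheduler> sched = make_scheduler(pipeline_parallel);

    bool ok = reserve_buffers(*sched);
    if (!ok) {
        if (pipeline_parallel) {
            std::fprintf(stderr, "compute buffer allocation failed, retrying without pipeline parallelism\n");
            sched = make_scheduler(false); // rebuild the scheduler without the feature, as the patch does
            ok = reserve_buffers(*sched);
        }
        if (!ok) {
            std::fprintf(stderr, "failed to allocate compute buffers\n");
            return 1; // corresponds to the llama_free(ctx); return nullptr; path
        }
    }

    std::printf("buffers reserved (pipeline parallel: %s)\n", sched->pipeline_parallel ? "on" : "off");
    return 0;
}
```

In the real change the retry reuses the ctx->backends, backend_buft, and max_nodes values already computed for the first scheduler, so only the final pipeline-parallel argument differs.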
