1 file changed: +9 −1 lines changed

@@ -19086,7 +19086,9 @@ struct llama_model_params llama_model_default_params() {
19086 19086
19087 19087  #ifdef GGML_USE_METAL
19088 19088      // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
19089       -    result.n_gpu_layers = 999;
      19089 +    if (result.n_gpu_layers > 0) {
      19090 +        result.n_gpu_layers = 999;
      19091 +    }
19090 19092  #endif
19091 19093
19092 19094      return result;
@@ -19289,7 +19291,13 @@ struct llama_model * llama_load_model_from_file(
19289 19291              break;
19290 19292
19291 19293          case GGML_BACKEND_DEVICE_TYPE_GPU:
      19294 +#ifdef GGML_USE_METAL
      19295 +            if (params.n_gpu_layers > 0) {
      19296 +                model->devices.push_back(dev);
      19297 +            }
      19298 +#else
19292 19299              model->devices.push_back(dev);
      19300 +#endif
19293 19301              break;
19294 19302          }
19295 19303      }
You can’t perform that action at this time.
0 commit comments