1 file changed, +3 -6 lines changed

@@ -1149,8 +1149,7 @@ common_init_result::common_init_result(common_params & params) :
 
     llama_context * lctx = llama_init_from_model(model, cparams);
     if (lctx == NULL) {
-        LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
-            __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str());
         return;
     }
 
@@ -1182,15 +1181,13 @@ common_init_result_ptr common_init_from_params(common_params & params) {
 
     llama_model * model = res->model();
     if (model == NULL) {
-        LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
-            __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
         return res;
     }
 
     llama_context * lctx = res->context();
     if (lctx == NULL) {
-        LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
-            __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str());
         return res;
     }
 
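For context, a minimal sketch of how a caller might consume common_init_from_params after this change, using only the names visible in the diff (common_params, common_init_result_ptr, the model()/context() accessors, params.model.path). The surrounding setup (llama_backend_init/llama_backend_free, the example model path) is assumed for illustration and is not part of this patch:

// Sketch only, not the upstream example code. Assumes "common.h" provides
// common_params, common_init_from_params(), and common_init_result_ptr with
// the model()/context() accessors shown in the diff above.
#include "common.h"
#include "llama.h"

int main() {
    llama_backend_init();

    common_params params;
    params.model.path = "models/example.gguf";  // hypothetical path for illustration

    common_init_result_ptr res = common_init_from_params(params);

    // With this patch, the failure reason is already logged inside
    // common_init_from_params(), so the caller only checks for NULL and exits.
    if (res->model() == NULL || res->context() == NULL) {
        llama_backend_free();
        return 1;
    }

    // ... run inference with res->model() / res->context() ...

    llama_backend_free();
    return 0;
}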