Commit cfb22b5

rename a missed BLAS -> batch

1 parent 978d755 · commit cfb22b5

File tree

3 files changed: 5 additions & 2 deletions


Makefile

Lines changed: 1 addition & 1 deletion
@@ -469,7 +469,7 @@ ifndef LLAMA_CUBLAS
 ifndef LLAMA_HIPBLAS
 ifndef LLAMA_VULKAN
 ifndef LLAMA_METAL
-NOTIFY_MSG = @echo -e '\n***\nYou did a basic CPU build. For faster speeds, consider installing and linking a GPU BLAS library. For example, set LLAMA_CLBLAST=1 LLAMA_VULKAN=1 to compile with Vulkan and CLBlast support. Add LLAMA_PORTABLE=1 to make a sharable build that other devices can use. Read the KoboldCpp Wiki for more information. This is just a reminder, not an error.\n***\n'
+NOTIFY_MSG = @echo -e '\n***\nYou did a basic CPU build. For faster speeds, consider installing and linking a GPU library. For example, set LLAMA_CLBLAST=1 LLAMA_VULKAN=1 to compile with Vulkan and CLBlast support. Add LLAMA_PORTABLE=1 to make a sharable build that other devices can use. Read the KoboldCpp Wiki for more information. This is just a reminder, not an error.\n***\n'
 endif
 endif
 endif

gpttype_adapter.cpp

Lines changed: 1 addition & 1 deletion
@@ -3941,7 +3941,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs)
 //print progress
 if (!startedsampling && allow_regular_prints)
 {
-    printf("\rProcessing Prompt%s (%d / %zu tokens)", (blasmode ? " [BLAS]" : ""), input_consumed, embd_inp.size());
+    printf("\rProcessing Prompt%s (%d / %zu tokens)", (blasmode ? " [BATCH]" : ""), input_consumed, embd_inp.size());
 }
 fflush(stdout);
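
The renamed progress line depends on the carriage return ("\r") at the start of the format string: each printf rewinds the cursor to column 0 and overdraws the previous progress text instead of scrolling, and the fflush(stdout) below forces the unterminated line to appear immediately. A minimal standalone sketch of the same technique, written in Python (the project's frontend language) rather than C++, and illustrative only — the token counts are made up, not from this commit:

import sys
import time

total_tokens = 50  # hypothetical prompt length for the demo
for consumed in range(total_tokens + 1):
    # "\r" moves the cursor back to column 0, so this write
    # overdraws the previous progress line instead of scrolling
    sys.stdout.write(f"\rProcessing Prompt [BATCH] ({consumed} / {total_tokens} tokens)")
    sys.stdout.flush()  # the line has no trailing newline, so flush explicitly
    time.sleep(0.02)
print()  # end with a newline so later output starts on a fresh line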

koboldcpp.py

Lines changed: 3 additions & 0 deletions
@@ -6593,6 +6593,7 @@ def tunnel_reader():
 if global_memory and global_memory["load_complete"]:
     print(f"Your remote Kobold API can be found at {tunneloutput}/api")
     print(f"Your remote OpenAI Compatible API can be found at {tunneloutput}/v1")
+    print(f"Your remote llama.cpp secondary WebUI at {tunneloutput}/lcpp/")
     if has_sd:
         print(f"StableUI is available at {tunneloutput}/sdui/")
     print("======\n")
@@ -7733,6 +7734,7 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
 if not args.remotetunnel:
     print(f"Starting Kobold API on port {args.port} at {endpoint_url}/api/")
     print(f"Starting OpenAI Compatible API on port {args.port} at {endpoint_url}/v1/")
+    print(f"Starting llama.cpp secondary WebUI at {endpoint_url}/lcpp/")
     if args.sdmodel:
         print(f"StableUI is available at {endpoint_url}/sdui/")
 elif global_memory:
@@ -7742,6 +7744,7 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
 remote_url = val
 print(f"Your remote Kobold API can be found at {endpoint_url}/api")
 print(f"Your remote OpenAI Compatible API can be found at {endpoint_url}/v1")
+print(f"Starting llama.cpp secondary WebUI at {endpoint_url}/lcpp/")
 if args.sdmodel:
     print(f"StableUI is available at {endpoint_url}/sdui/")
 global_memory["load_complete"] = True
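
Note that the same /lcpp/ announcement is added in three separate places (the tunnel reader, the local startup path, and the remote startup path). A hypothetical consolidation of those duplicated blocks — the helper name print_endpoints and the base_url parameter are illustrative assumptions, not part of this commit or the codebase:

def print_endpoints(base_url: str, has_sd: bool = False) -> None:
    # Hypothetical helper collecting the three duplicated announcement
    # blocks; base_url stands in for endpoint_url / tunneloutput.
    print(f"Kobold API: {base_url}/api")
    print(f"OpenAI Compatible API: {base_url}/v1")
    print(f"llama.cpp secondary WebUI: {base_url}/lcpp/")
    if has_sd:
        print(f"StableUI: {base_url}/sdui/")

# Example usage with an assumed local endpoint:
print_endpoints("http://localhost:5001", has_sd=True)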
