
Commit a09d833

allow lowvram (nkvo) to be used with vulkan.
1 parent b5bec86 commit a09d833

File tree

1 file changed (+13 −8 lines)

koboldcpp.py

Lines changed: 13 additions & 8 deletions
@@ -1381,7 +1381,7 @@ def load_model(model_filename):
     inputs.model_filename = model_filename.encode("UTF-8")
     inputs.max_context_length = maxctx #initial value to use for ctx, can be overwritten
     inputs.threads = args.threads
-    inputs.low_vram = (True if (args.usecuda and "lowvram" in args.usecuda) else False)
+    inputs.low_vram = True if args.lowvram else False
     inputs.use_mmq = (True if (args.usecuda and "nommq" not in args.usecuda) else False)
     inputs.use_rowsplit = (True if (args.usecuda and "rowsplit" in args.usecuda) else False)
     inputs.vulkan_info = "0".encode("UTF-8")
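
This first hunk is the functional core of the commit: inputs.low_vram was previously derived from the CUDA flag list, so a Vulkan launch could never enable it. A minimal standalone sketch of the before/after logic (FakeArgs is a hypothetical stand-in for the parsed CLI args, not koboldcpp's real object):

# Illustrative only: why the old expression could never enable low_vram
# on a Vulkan run, while the new standalone flag can.
class FakeArgs:
    usecuda = None   # Vulkan launch: no --usecuda flags present
    lowvram = True   # set by the new --lowvram / -nkvo switch

args = FakeArgs()
old_low_vram = True if (args.usecuda and "lowvram" in args.usecuda) else False
new_low_vram = True if args.lowvram else False
print(old_low_vram, new_low_vram)  # False True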
@@ -5121,13 +5121,15 @@ def changerunmode(a,b,c):
         maingpu_entry.grid_remove()
         if gpu_choice_var.get()=="All":
             gpu_choice_var.set("1")
+        lowvram_box.grid_remove()
     elif index == "Use Vulkan" or index == "Use Vulkan (Old CPU)" or index == "Use CUDA" or index == "Use hipBLAS (ROCm)":
         gpu_selector_box.grid_remove()
         quick_gpu_selector_box.grid_remove()
         CUDA_gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
         CUDA_quick_gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
         maingpu_label.grid(row=10, column=0, padx = 8, pady=1, stick="nw")
         maingpu_entry.grid(row=10, column=1, padx = 8, pady=1, stick="nw")
+        lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
     else:
         quick_gpuname_label.grid_remove()
         gpuname_label.grid_remove()
@@ -5139,16 +5141,15 @@ def changerunmode(a,b,c):
         CUDA_quick_gpu_selector_box.grid_remove()
         maingpu_label.grid_remove()
         maingpu_entry.grid_remove()
+        lowvram_box.grid_remove()

     if index == "Use CUDA" or index == "Use hipBLAS (ROCm)":
-        lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
         mmq_box.grid(row=4, column=1, padx=8, pady=1, stick="nw")
         quick_mmq_box.grid(row=4, column=1, padx=8, pady=1, stick="nw")
         splitmode_box.grid(row=5, column=1, padx=8, pady=1, stick="nw")
         tensor_split_label.grid(row=8, column=0, padx = 8, pady=1, stick="nw")
         tensor_split_entry.grid(row=8, column=1, padx=8, pady=1, stick="nw")
     else:
-        lowvram_box.grid_remove()
         mmq_box.grid_remove()
         quick_mmq_box.grid_remove()
         tensor_split_label.grid_remove()
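
These two changerunmode hunks move the Low VRAM checkbox out of the CUDA-only branch: it is now shown whenever Vulkan, CUDA, or hipBLAS is selected and hidden otherwise. A minimal tkinter sketch of the grid()/grid_remove() show-hide pattern used here (widget names are illustrative; koboldcpp builds its actual UI with customtkinter):

import tkinter as tk

root = tk.Tk()
backend = tk.StringVar(value="Use Vulkan")
lowvram_var = tk.IntVar()
lowvram_box = tk.Checkbutton(root, text="Low VRAM (No KV offload)", variable=lowvram_var)

def changerunmode_sketch(*_):
    # grid_remove() hides a widget but remembers its grid options,
    # so a later grid() call restores it in the same cell.
    if backend.get() in ("Use Vulkan", "Use Vulkan (Old CPU)", "Use CUDA", "Use hipBLAS (ROCm)"):
        lowvram_box.grid(row=4, column=0, padx=8, pady=1, sticky="nw")
    else:
        lowvram_box.grid_remove()

backend.trace_add("write", changerunmode_sketch)
changerunmode_sketch()  # apply once for the initial backend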
@@ -5246,7 +5247,7 @@ def changerunmode(a,b,c):
     layercounter_label.grid(row=6, column=1, padx=75, sticky="W")
     layercounter_label.configure(text_color="#ffff00")
     tensor_split_entry,tensor_split_label = makelabelentry(hardware_tab, "Tensor Split:", tensor_split_str_vars, 8, 80, tooltip='When using multiple GPUs this option controls how large tensors should be split across all GPUs.\nUses a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order.\nFor example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1.')
-    lowvram_box = makecheckbox(hardware_tab, "Low VRAM (No KV offload)", lowvram_var, 4,0, tooltiptxt='Avoid offloading KV Cache or scratch buffers to VRAM.\nAllows more layers to fit, but may result in a speed loss.')
+    lowvram_box = makecheckbox(hardware_tab, "Low VRAM (No KV offload)", lowvram_var, 4,0, tooltiptxt='Avoid offloading KV Cache or scratch buffers to VRAM.\nAllows more layers to fit, but may result in a large speed loss.')
     mmq_box = makecheckbox(hardware_tab, "Use QuantMatMul (mmq)", mmq_var, 4,1, tooltiptxt="Enable MMQ mode to use finetuned kernels instead of default CuBLAS/HipBLAS for prompt processing.\nRead the wiki. Speed may vary.")
     splitmode_box = makecheckbox(hardware_tab, "Row-Split", rowsplit_var, 5,0, tooltiptxt="Split rows across GPUs instead of splitting layers and KV across GPUs.\nUses the main GPU for small tensors and intermediate results. Speed may vary.")

@@ -5557,6 +5558,7 @@ def export_vars():
     args.nocertify = nocertifymode.get()==1
     args.nomodel = nomodel.get()==1
     args.quantkv = quantkv_var.get()
+    args.lowvram = lowvram_var.get()==1

     gpuchoiceidx = 0
     args.usecpu = False
@@ -5575,9 +5577,9 @@ def export_vars():
         args.failsafe = True
     if runopts_var.get() == "Use CUDA" or runopts_var.get() == "Use hipBLAS (ROCm)":
         if gpu_choice_var.get()=="All":
-            args.usecuda = ["lowvram"] if lowvram_var.get() == 1 else ["normal"]
+            args.usecuda = ["normal"]
         else:
-            args.usecuda = ["lowvram",str(gpuchoiceidx)] if lowvram_var.get() == 1 else ["normal",str(gpuchoiceidx)]
+            args.usecuda = ["normal",str(gpuchoiceidx)]
         if mmq_var.get()==1:
             args.usecuda.append("mmq")
         else:
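
Together, the two export_vars hunks decouple the saved setting from the backend: the checkbox now exports a standalone args.lowvram, and the usecuda list always starts with "normal". A sketch of what a saved settings dict might look like afterwards (values are hypothetical):

# Hypothetical exported settings after this change: "lowvram" is its own
# key and no longer rides inside the "usecuda" flag list.
exported = {
    "usecuda": ["normal", "0", "mmq"],  # "lowvram" never appears here anymore
    "lowvram": True,                    # backend-agnostic, meaningful for Vulkan too
}
assert "lowvram" not in exported["usecuda"]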
@@ -5770,6 +5772,7 @@ def import_vars(dict):
     quietmode.set(1 if "quiet" in dict and dict["quiet"] else 0)
     nocertifymode.set(1 if "nocertify" in dict and dict["nocertify"] else 0)
     nomodel.set(1 if "nomodel" in dict and dict["nomodel"] else 0)
+    lowvram_var.set(1 if "lowvram" in dict and dict["lowvram"] else 0)
     if "quantkv" in dict:
         quantkv_var.set(dict["quantkv"])
     if "useclblast" in dict and dict["useclblast"]:
@@ -5787,7 +5790,6 @@ def import_vars(dict):
             runopts_var.set(cublas_option)
         elif hipblas_option:
             runopts_var.set(hipblas_option)
-        lowvram_var.set(1 if "lowvram" in dict["usecuda"] else 0)
         mmq_var.set(1 if "mmq" in dict["usecuda"] else 0)
         rowsplit_var.set(1 if "rowsplit" in dict["usecuda"] else 0)
         gpu_choice_var.set("All")
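
On the import side, the checkbox state is now restored from the top-level "lowvram" key for every backend, rather than being parsed back out of the usecuda list. A minimal sketch with a hypothetical new-style save:

# A hypothetical Vulkan config can now carry the lowvram setting directly.
saved = {"usevulkan": [0], "lowvram": True}
lowvram_checkbox = 1 if "lowvram" in saved and saved["lowvram"] else 0
print(lowvram_checkbox)  # 1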
@@ -6282,6 +6284,8 @@ def convert_invalid_args(args):
     dict = vars(args)
     if "usecuda" not in dict and "usecublas" in dict and dict["usecublas"]:
         dict["usecuda"] = dict["usecublas"]
+    if "usecuda" in dict and dict["usecuda"] and "lowvram" in dict["usecuda"]:
+        dict["lowvram"] = True
     if "sdconfig" in dict and dict["sdconfig"] and len(dict["sdconfig"])>0:
         dict["sdmodel"] = dict["sdconfig"][0]
         if dict["sdconfig"] and len(dict["sdconfig"]) > 1:
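
convert_invalid_args is what keeps old configs working: a legacy "lowvram" token found inside usecuda is promoted to the new top-level flag (which is also why 'lowvram' remains in the argparse choices list below). A self-contained sketch of that conversion rule:

# Sketch of the backward-compatibility rule added above, applied to a
# plain dict instead of an argparse namespace.
def promote_legacy_lowvram(cfg):
    if "usecuda" in cfg and cfg["usecuda"] and "lowvram" in cfg["usecuda"]:
        cfg["lowvram"] = True
    return cfg

old_style = {"usecuda": ["lowvram", "0", "mmq"]}
print(promote_legacy_lowvram(old_style)["lowvram"])  # True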
@@ -7677,7 +7681,7 @@ def range_checker(arg: str):
     parser.add_argument("--config", metavar=('[filename]'), help="Load settings from a .kcpps file. Other arguments will be ignored", type=str, nargs=1)
     parser.add_argument("--threads","-t", metavar=('[threads]'), help="Use a custom number of threads if specified. Otherwise, uses an amount based on CPU cores", type=int, default=get_default_threads())
     compatgroup = parser.add_mutually_exclusive_group()
-    compatgroup.add_argument("--usecuda", "--usecublas", "--usehipblas", help="Use CUDA for GPU Acceleration. Requires CUDA. Enter a number afterwards to select and use 1 GPU. Leaving no number will use all GPUs.", nargs='*',metavar=('[lowvram|normal] [main GPU ID] [mmq|nommq] [rowsplit]'), choices=['normal', 'lowvram', '0', '1', '2', '3', 'all', 'mmq', 'nommq', 'rowsplit'])
+    compatgroup.add_argument("--usecuda", "--usecublas", "--usehipblas", help="Use CUDA for GPU Acceleration. Requires CUDA. Enter a number afterwards to select and use 1 GPU. Leaving no number will use all GPUs.", nargs='*',metavar=('[main GPU ID] [mmq|nommq] [rowsplit]'), choices=['normal', 'lowvram', '0', '1', '2', '3', 'all', 'mmq', 'nommq', 'rowsplit'])
     compatgroup.add_argument("--usevulkan", help="Use Vulkan for GPU Acceleration. Can optionally specify one or more GPU Device ID (e.g. --usevulkan 0), leave blank to autodetect.", metavar=('[Device IDs]'), nargs='*', type=int, default=None)
     compatgroup.add_argument("--useclblast", help="Use CLBlast for GPU Acceleration. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
     compatgroup.add_argument("--usecpu", help="Do not use any GPU acceleration (CPU Only)", action='store_true')
@@ -7733,6 +7737,7 @@ def range_checker(arg: str):
     advparser.add_argument("--ignoremissing", help="Ignores all missing non-essential files, just skipping them instead.", action='store_true')
     advparser.add_argument("--chatcompletionsadapter", metavar=('[filename]'), help="Select an optional ChatCompletions Adapter JSON file to force custom instruct tags.", default="AutoGuess")
     advparser.add_argument("--flashattention","--flash-attn","-fa", help="Enables flash attention.", action='store_true')
+    advparser.add_argument("--lowvram","-nkvo","--no-kv-offload", help="If supported by the backend, do not offload KV to GPU (lowvram mode). Not recommended, will be slow.", action='store_true')
     advparser.add_argument("--quantkv", help="Sets the KV cache data type quantization, 0=f16, 1=q8, 2=q4. Requires Flash Attention for full effect, otherwise only K cache is quantized.",metavar=('[quantization level 0/1/2]'), type=int, choices=[0,1,2], default=0)
     advparser.add_argument("--forceversion", help="If the model file format detection fails (e.g. rogue modified model) you can set this to override the detected format (enter desired version, e.g. 401 for GPTNeoX-Type2).",metavar=('[version]'), type=int, default=0)
     advparser.add_argument("--smartcontext", help="Reserving a portion of context to try processing less frequently. Outdated. Not recommended.", action='store_true')
