
Commit 7d20e6b

updated layer count to be more accurate +1 instead of +3
1 parent f47a069 commit 7d20e6b

File tree: 1 file changed, +2 -2 lines changed

koboldcpp.py

Lines changed: 2 additions & 2 deletions
@@ -1181,7 +1181,7 @@ def autoset_gpu_layers(ctxsize, sdquanted, bbs, qkv_level): #shitty algo to dete
         mem1 = layers*(4 if bbs <= 512 else (bbs/128))*headkvlen*cs*fattn_discount*4*1.45
         mem2 = layers*headcount*headkvlen*cs*fattn_discount*4*1.15
         ratio = max(ratio,(mem - reservedmem - mem1) / (fsize + mem2))
-        layerlimit = min(int(ratio*layers), (layers + 3))
+        layerlimit = min(int(ratio*layers), (layers + 1))
         layerlimit = (0 if layerlimit<=2 else layerlimit)
         return layerlimit
     except Exception:
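
In effect, the estimator's cap on offloadable layers drops from the model's layer count plus three to plus one. Below is a minimal sketch of just that capping step, with made-up values for ratio and layers (the real function derives them from the GGUF metadata and available memory):

    # Sketch of the capping logic changed above; the inputs here are hypothetical.
    def cap_gpu_layers(ratio, layers):
        layerlimit = min(int(ratio * layers), layers + 1)  # previously layers + 3
        return 0 if layerlimit <= 2 else layerlimit        # too few layers: offload nothing

    print(cap_gpu_layers(1.2, 32))   # int(38.4) = 38, capped to 33 (32 + 1)
    print(cap_gpu_layers(0.05, 32))  # int(1.6) = 1, <= 2 so 0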
@@ -5133,7 +5133,7 @@ def gui_changed_modelfile(*args):
 
 def changed_gpulayers_estimate(*args):
     predicted_gpu_layers = autoset_gpu_layers(int(contextsize_text[context_var.get()]),sd_quant_option(sd_quant_var.get()),int(blasbatchsize_values[int(blas_size_var.get())]),(quantkv_var.get() if flashattention_var.get()==1 else 0))
-    max_gpu_layers = (f"/{modelfile_extracted_meta[1][0]+3}" if (modelfile_extracted_meta and modelfile_extracted_meta[1] and modelfile_extracted_meta[1][0]!=0) else "")
+    max_gpu_layers = (f"/{modelfile_extracted_meta[1][0]+1}" if (modelfile_extracted_meta and modelfile_extracted_meta[1] and modelfile_extracted_meta[1][0]!=0) else "")
     index = runopts_var.get()
     gpu_be = (index == "Use Vulkan" or index == "Use Vulkan (Old CPU)" or index == "Use CLBlast" or index == "Use CLBlast (Old CPU)" or index == "Use CLBlast (Older CPU)" or index == "Use CUDA" or index == "Use hipBLAS (ROCm)")
     layercounter_label.grid(row=6, column=1, padx=75, sticky="W")
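
The GUI's layer counter presumably concatenates the prediction with this "/N" suffix, so the displayed maximum now also reflects layers + 1. A hypothetical example of the resulting text (the tuple layout mirrors the modelfile_extracted_meta[1][0] access above; the numbers are made up):

    # Hypothetical metadata: index 1 holds a list whose first entry is the layer count.
    modelfile_extracted_meta = (None, [32])
    predicted_gpu_layers = 33
    max_gpu_layers = (f"/{modelfile_extracted_meta[1][0]+1}"
                      if (modelfile_extracted_meta and modelfile_extracted_meta[1] and modelfile_extracted_meta[1][0]!=0)
                      else "")
    print(f"{predicted_gpu_layers}{max_gpu_layers}")  # prints "33/33" rather than "33/35"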
