Commit 7966bdd

allow embeddings model to use gpu
1 parent 4356a00 commit 7966bdd

File tree

1 file changed: +19 −7 lines changed


koboldcpp.py

Lines changed: 19 additions & 7 deletions
@@ -981,14 +981,15 @@ def read_gguf_key(keyname,data,maxval):
     except Exception:
         return None

-def extract_modelfile_params(filepath,sdfilepath,whisperfilepath,mmprojfilepath,draftmodelpath,ttsmodelpath):
+def extract_modelfile_params(filepath,sdfilepath,whisperfilepath,mmprojfilepath,draftmodelpath,ttsmodelpath,embdmodelpath):
     global modelfile_extracted_meta
     modelfile_extracted_meta = None
     sdfsize = 0
     whisperfsize = 0
     mmprojsize = 0
     draftmodelsize = 0
     ttsmodelsize = 0
+    embdmodelsize = 0
     if sdfilepath and os.path.exists(sdfilepath):
         sdfsize = os.path.getsize(sdfilepath)
     if whisperfilepath and os.path.exists(whisperfilepath):
@@ -999,12 +1000,14 @@ def extract_modelfile_params(filepath,sdfilepath,whisperfilepath,mmprojfilepath,
         draftmodelsize = os.path.getsize(draftmodelpath)
     if ttsmodelpath and os.path.exists(ttsmodelpath):
         ttsmodelsize = os.path.getsize(ttsmodelpath)
+    if embdmodelpath and os.path.exists(embdmodelpath):
+        embdmodelsize = os.path.getsize(embdmodelpath)
     if filepath and os.path.exists(filepath):
         try:
             fsize = os.path.getsize(filepath)
             if fsize>10000000: #dont bother with models < 10mb as they are probably bad
                 ggufmeta = read_gguf_metadata(filepath)
-                modelfile_extracted_meta = [filepath,ggufmeta,fsize,sdfsize,whisperfsize,mmprojsize,draftmodelsize,ttsmodelsize] #extract done. note that meta may be null
+                modelfile_extracted_meta = [filepath,ggufmeta,fsize,sdfsize,whisperfsize,mmprojsize,draftmodelsize,ttsmodelsize,embdmodelsize] #extract done. note that meta may be null
         except Exception:
             modelfile_extracted_meta = None

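For orientation, the meta list built here gains one trailing slot; its index layout (summarized from this diff and the consumers in autoset_gpu_layers below, not new code) is:

# Index layout of modelfile_extracted_meta after this commit:
#   [0] filepath        [1] ggufmeta (may be None)   [2] fsize
#   [3] sdfsize         [4] whisperfsize             [5] mmprojsize
#   [6] draftmodelsize  [7] ttsmodelsize             [8] embdmodelsize  (new)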
@@ -1048,6 +1051,8 @@ def autoset_gpu_layers(ctxsize, sdquanted, bbs, qkv_level): #shitty algo to dete
                mem -= (modelfile_extracted_meta[6] * 1.5)
            if modelfile_extracted_meta[7] > 1024*1024*10: #tts model tax
                mem -= max(600*1024*1024, modelfile_extracted_meta[7] * 3)
+           if modelfile_extracted_meta[8] > 1024*1024*10: #embeddings model tax
+               mem -= max(350*1024*1024, modelfile_extracted_meta[8] * 1.5)
            mem = 0 if mem < 0 else mem

            csmul = (cs/4096) if cs >= 8192 else 1.8 if cs > 4096 else 1.2 if cs > 2048 else 1.0
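The new branch mirrors the existing TTS tax with a smaller floor and multiplier. A minimal sketch of the arithmetic, assuming embd_bytes holds the file size gathered by extract_modelfile_params:

# Rough model of the embeddings VRAM reservation added above
def embeddings_tax(embd_bytes):
    if embd_bytes > 1024*1024*10:                      # only models over 10 MiB count
        return max(350*1024*1024, embd_bytes * 1.5)    # at least 350 MiB, or 1.5x file size
    return 0

# e.g. a ~130 MB embeddings file hits the 350 MiB floor, while a ~500 MB file
# reserves ~750 MB, shrinking the layer estimate left for the main model.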
@@ -1762,7 +1767,7 @@ def embeddings_load_model(model_filename):
     global args
     inputs = embeddings_load_model_inputs()
     inputs.model_filename = model_filename.encode("UTF-8")
-    inputs.gpulayers = 0
+    inputs.gpulayers = (999 if args.embeddingsgpu else 0)
     inputs.flash_attention = False
     inputs.threads = args.threads
     inputs.use_mmap = args.usemmap
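Here 999 appears to be the usual "offload everything" sentinel for gpulayers; the assumption is that the backend clamps it to the model's real layer count, so the embeddings model ends up fully on the GPU when the flag is set. A runnable illustration with hypothetical values:

embeddingsgpu = True                   # stand-in for args.embeddingsgpu
n_layers = 24                          # hypothetical layer count read from the GGUF header
requested = 999 if embeddingsgpu else 0
effective = min(requested, n_layers)   # 999 exceeds any real count, so all 24 layers offload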
@@ -4299,6 +4304,7 @@ def hide_tooltip(event):

     embeddings_model_var = ctk.StringVar()
     embeddings_ctx_var = ctk.StringVar(value=str(""))
+    embeddings_gpu_var = ctk.IntVar(value=0)

     admin_var = ctk.IntVar(value=0)
     admin_dir_var = ctk.StringVar()
@@ -4598,7 +4604,8 @@ def gui_changed_modelfile(*args):
        mmprojfilepath = mmproj_var.get()
        draftmodelpath = draftmodel_var.get()
        ttsmodelpath = tts_model_var.get() if ttsgpu_var.get()==1 else ""
-       extract_modelfile_params(filepath,sdfilepath,whisperfilepath,mmprojfilepath,draftmodelpath,ttsmodelpath)
+       embdmodelpath = embeddings_model_var.get() if embeddings_gpu_var.get()==1 else ""
+       extract_modelfile_params(filepath,sdfilepath,whisperfilepath,mmprojfilepath,draftmodelpath,ttsmodelpath,embdmodelpath)
        changed_gpulayers_estimate()
        pass

@@ -4914,8 +4921,10 @@ def togglerope(a,b,c):
    makelabelentry(model_tab, "Draft Amount: ", draftamount_var, 13, 50,padx=100,singleline=True,tooltip="How many tokens to draft per chunk before verifying results")
    makelabelentry(model_tab, "Splits: ", draftgpusplit_str_vars, 13, 50,padx=210,singleline=True,tooltip="Distribution of draft model layers. Leave blank to follow main model's gpu split. Only works if multi-gpu (All) selected in main model.", labelpadx=160)
    makelabelentry(model_tab, "Layers: ", draftgpulayers_var, 13, 50,padx=320,singleline=True,tooltip="How many layers to GPU offload for the draft model", labelpadx=270)
-   makefileentry(model_tab, "Embeds Model:", "Select Embeddings Model File", embeddings_model_var, 15, width=160,singlerow=True, filetypes=[("*.gguf","*.gguf")], tooltiptxt="Select an embeddings GGUF model that can be used to generate embedding vectors.")
-   makelabelentry(model_tab, "EmbdCtx: ", embeddings_ctx_var, 15, 50,padx=390,singleline=True,tooltip="If set above 0, limits max context for embedding model to save memory.", labelpadx=330)
+   makefileentry(model_tab, "Embeds Model:", "Select Embeddings Model File", embeddings_model_var, 15, width=130,singlerow=True, filetypes=[("*.gguf","*.gguf")], tooltiptxt="Select an embeddings GGUF model that can be used to generate embedding vectors.")
+   makelabelentry(model_tab, "ECtx: ", embeddings_ctx_var, 15, 50,padx=335,singleline=True,tooltip="If set above 0, limits max context for embedding model to save memory.", labelpadx=302)
+   makecheckbox(model_tab, "GPU", embeddings_gpu_var, 15, 0,padx=390,tooltiptxt="Uses the GPU for embeddings.")
+   embeddings_gpu_var.trace("w", gui_changed_modelfile)
    makefileentry(model_tab, "Preload Story:", "Select Preloaded Story File", preloadstory_var, 17,width=280,singlerow=True,tooltiptxt="Select an optional KoboldAI JSON savefile \nto be served on launch to any client.")
    makefileentry(model_tab, "SaveData File:", "Select or Create New SaveData Database File", savedatafile_var, 19,width=280,filetypes=[("KoboldCpp SaveDB", "*.jsondb")],singlerow=True,dialog_type=1,tooltiptxt="Selecting a file will allow data to be loaded and saved persistently to this KoboldCpp server remotely. File is created if it does not exist.")
    makefileentry(model_tab, "ChatCompletions Adapter:", "Select ChatCompletions Adapter File", chatcompletionsadapter_var, 24, width=250, filetypes=[("JSON Adapter", "*.json")], tooltiptxt="Select an optional ChatCompletions Adapter JSON file to force custom instruct tags.")
@@ -5275,6 +5284,7 @@ def export_vars():

        if embeddings_ctx_var.get() != "":
            args.embeddingsmaxctx = (0 if embeddings_ctx_var.get()=="" else int(embeddings_ctx_var.get()))
+       args.embeddingsgpu = (embeddings_gpu_var.get()==1)

        if tts_model_var.get() != "" and wavtokenizer_var.get() != "":
            args.ttsthreads = (0 if tts_threads_var.get()=="" else int(tts_threads_var.get()))
@@ -5476,6 +5486,7 @@ def import_vars(dict):

        embeddings_model_var.set(dict["embeddingsmodel"] if ("embeddingsmodel" in dict and dict["embeddingsmodel"]) else "")
        embeddings_ctx_var.set(str(dict["embeddingsmaxctx"]) if ("embeddingsmaxctx" in dict and dict["embeddingsmaxctx"]) else "")
+       embeddings_gpu_var.set(dict["embeddingsgpu"] if ("embeddingsgpu" in dict) else 0)

        admin_var.set(dict["admin"] if ("admin" in dict) else 0)
        admin_dir_var.set(dict["admindir"] if ("admindir" in dict and dict["admindir"]) else "")
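Together with the export_vars hunk, this keeps the setting symmetric across save and load, and the `in dict` guard means older saved configs without the key simply load as unchecked. A minimal round-trip sketch, assuming settings are persisted as a plain dict:

saved = {"embeddingsgpu": True}                                           # as written by export_vars
restored = saved["embeddingsgpu"] if ("embeddingsgpu" in saved) else 0    # -> True (checked)
legacy = {}                                                               # config saved before this commit
restored_legacy = legacy["embeddingsgpu"] if ("embeddingsgpu" in legacy) else 0   # -> 0 (off)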
@@ -6649,7 +6660,7 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
            pass
    if args.gpulayers==-1:
        if MaxMemory[0] > 0 and (not args.usecpu) and ((args.usecublas is not None) or (args.usevulkan is not None) or (args.useclblast is not None) or sys.platform=="darwin"):
-           extract_modelfile_params(args.model_param,args.sdmodel,args.whispermodel,args.mmproj,args.draftmodel,args.ttsmodel if args.ttsgpu else "")
+           extract_modelfile_params(args.model_param,args.sdmodel,args.whispermodel,args.mmproj,args.draftmodel,args.ttsmodel if args.ttsgpu else "",args.embeddingsmodel if args.embeddingsgpu else "")
            layeramt = autoset_gpu_layers(args.contextsize,args.sdquant,args.blasbatchsize,(args.quantkv if args.flashattention else 0))
            print(f"Auto Recommended GPU Layers: {layeramt}")
            args.gpulayers = layeramt
@@ -7235,6 +7246,7 @@ def range_checker(arg: str):
    embeddingsparsergroup = parser.add_argument_group('Embeddings Model Commands')
    embeddingsparsergroup.add_argument("--embeddingsmodel", metavar=('[filename]'), help="Specify an embeddings model to be loaded for generating embedding vectors.", default="")
    embeddingsparsergroup.add_argument("--embeddingsmaxctx", metavar=('[amount]'), help="Overrides the default maximum supported context of an embeddings model (defaults to trained context).", type=int, default=0)
+   embeddingsparsergroup.add_argument("--embeddingsgpu", help="Attempts to offload layers of the embeddings model to GPU. Usually not needed.", action='store_true')

    admingroup = parser.add_argument_group('Administration Commands')
    admingroup.add_argument("--admin", help="Enables admin mode, allowing you to unload and reload different configurations or models.", action='store_true')
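Since the flag is registered with action='store_true', args.embeddingsgpu is False unless the flag appears on the command line, so GPU offload of the embeddings model stays opt-in. A standalone reduction of the parser lines above (file names are hypothetical):

# Launch example: python koboldcpp.py --model main.gguf --embeddingsmodel embed.gguf --embeddingsgpu
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--embeddingsmodel", metavar=('[filename]'), default="")
parser.add_argument("--embeddingsgpu", action='store_true')
args = parser.parse_args(["--embeddingsmodel", "embed.gguf", "--embeddingsgpu"])
assert args.embeddingsgpu is True      # and False whenever the flag is omitted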
