@@ -4,23 +4,17 @@
 import sys
 import time
 import inquirer
+import psutil
+import wget
 from interpreter import interpreter
 
-
 def get_ram():
-    import psutil
-
     total_ram = psutil.virtual_memory().total / (
         1024 * 1024 * 1024
     )  # Convert bytes to GB
     return total_ram
 
 def download_model(models_dir, models, interpreter):
-    # For some reason, these imports need to be inside the function
-    import inquirer
-    import psutil
-    import wget
-
     # Get RAM and disk information
     total_ram = get_ram()
     free_disk_space = psutil.disk_usage("/").free / (
@@ -77,6 +71,12 @@ def download_model(models_dir, models, interpreter):
             "size": 1.96,
             "url": "https://huggingface.co/jartine/phi-2-llamafile/resolve/main/phi-2.Q5_K_M.llamafile?download=true",
         },
+        {
+            "name": "Phi-3-mini",
+            "file_name": "Phi-3-mini-4k-instruct.Q5_K_M.llamafile",
+            "size": 2.84,
+            "url": "https://huggingface.co/jartine/Phi-3-mini-4k-instruct-llamafile/resolve/main/Phi-3-mini-4k-instruct.Q5_K_M.llamafile?download=true",
+        },
         {
             "name": "LLaVA 1.5",
             "file_name": "llava-v1.5-7b-q4.llamafile",
@@ -89,6 +89,12 @@ def download_model(models_dir, models, interpreter):
             "size": 5.15,
             "url": "https://huggingface.co/jartine/Mistral-7B-Instruct-v0.2-llamafile/resolve/main/mistral-7b-instruct-v0.2.Q5_K_M.llamafile?download=true",
         },
+        {
+            "name": "Llama-3-8B-Instruct",
+            "file_name": "Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile",
+            "size": 5.76,
+            "url": "https://huggingface.co/jartine/Meta-Llama-3-8B-Instruct-llamafile/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile?download=true",
+        },
         {
             "name": "WizardCoder-Python-13B",
             "file_name": "wizardcoder-python-13b.llamafile",
@@ -130,6 +136,9 @@ def download_model(models_dir, models, interpreter):
         )
     ]
     answers = inquirer.prompt(questions)
+
+    if answers == None:
+        exit()
 
     # Get the selected model
     selected_model = next(
@@ -195,6 +204,8 @@ def download_model(models_dir, models, interpreter):
     ]
     answers = inquirer.prompt(questions)
 
+    if answers == None:
+        exit()
 
     selected_model = answers["model"]
 
@@ -260,9 +271,13 @@ def list_ollama_models():
         ),
     ]
     name_answer = inquirer.prompt(name_question)
-    selected_name = name_answer["name"] if name_answer else None
 
-    if selected_name is "llama3":
+    if name_answer == None:
+        exit()
+
+    selected_name = name_answer["name"]
+
+    if selected_name == "llama3":
         # If the user selects llama3, we need to check if it's installed, and if not, install it
         all_models = list_ollama_models()
         if "llama3" not in all_models:
@@ -310,7 +325,11 @@ def list_ollama_models():
         ),
     ]
     model_name_answer = inquirer.prompt(model_name_question)
-    jan_model_name = model_name_answer["jan_model_name"] if model_name_answer else None
+
+    if model_name_answer == None:
+        exit()
+
+    jan_model_name = model_name_answer["jan_model_name"]
     interpreter.llm.model = f"jan/{jan_model_name}"
     interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}`\n")
     time.sleep(1)
@@ -354,6 +373,9 @@ def list_ollama_models():
         )
     ]
     answers = inquirer.prompt(questions)
+
+    if answers == None:
+        exit()
 
     if answers["model"] == " ↓ Download new model":
         model_path = download_model(models_dir, models, interpreter)
@@ -383,7 +405,7 @@ def list_ollama_models():
             print("Model process terminated.")
 
     # Set flags for Llamafile to work with interpreter
-    interpreter.llm.model = "local"
+    interpreter.llm.model = "openai/local"
     interpreter.llm.temperature = 0
     interpreter.llm.api_base = "http://localhost:8080/v1"
     interpreter.llm.supports_functions = False
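The repeated `if answers == None: exit()` guards added throughout reflect python-inquirer's behavior: `inquirer.prompt()` returns `None` when the user cancels the prompt (for example with Ctrl+C), so indexing into the result would otherwise raise a `TypeError`. A minimal standalone sketch of that pattern, using a hypothetical question list:

    import inquirer

    questions = [
        inquirer.List(
            "model",
            message="Select a model",
            choices=["Phi-3-mini", "Llama-3-8B-Instruct"],
        )
    ]
    answers = inquirer.prompt(questions)

    # prompt() returns None when the prompt is cancelled, so bail out
    # cleanly instead of crashing on answers["model"] below.
    if answers == None:
        exit()

    print(answers["model"])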