
Commit ece8d03

Add Ollama with llama3 as Default
1 parent e1da1cd commit ece8d03

File tree

1 file changed: +51 −34 lines changed

  • interpreter/terminal_interface/profiles/defaults/local.py


interpreter/terminal_interface/profiles/defaults/local.py

Lines changed: 51 additions & 34 deletions
```diff
@@ -7,14 +7,19 @@
 
 from interpreter import interpreter
 
+def get_ram():
+    import psutil
+    total_ram = psutil.virtual_memory().total / (1024 * 1024 * 1024)  # Convert bytes to GB
+    return total_ram
+
 def download_model(models_dir, models, interpreter):
     # For some reason, these imports need to be inside the function
     import inquirer
     import wget
     import psutil
 
     # Get RAM and disk information
-    total_ram = psutil.virtual_memory().total / (1024 * 1024 * 1024)  # Convert bytes to GB
+    total_ram = get_ram()
     free_disk_space = psutil.disk_usage('/').free / (1024 * 1024 * 1024)  # Convert bytes to GB
 
     time.sleep(1)
```
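The refactor above just lifts the existing RAM check into a reusable helper. As a standalone sketch (assuming only that `psutil` is installed), the conversion works like this:

```python
import psutil

def get_ram():
    # psutil reports total physical memory in bytes; divide by 1024^3 for GB
    return psutil.virtual_memory().total / (1024 * 1024 * 1024)

print(f"Total RAM: {get_ram():.1f} GB")  # e.g. "Total RAM: 16.0 GB"
```

Extracting the helper lets the RAM-based context-window logic added at the end of this diff reuse the same measurement.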
```diff
@@ -148,8 +153,8 @@ def download_model(models_dir, models, interpreter):
 
 # Define the choices for local models
 choices = [
-    "Llamafile",
     "Ollama",
+    "Llamafile",
     "LM Studio",
     "Jan",
 ]
```
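The reorder matters because `inquirer.List` highlights the first entry when no default is given, so moving "Ollama" to the top is what makes it the default provider. A minimal sketch of the prompt (the `message` wording here is illustrative, not taken from this file):

```python
import inquirer

# The first item in `choices` is preselected; pressing Enter accepts it.
questions = [
    inquirer.List(
        "provider",
        message="Select a local model provider",  # illustrative wording
        choices=["Ollama", "Llamafile", "LM Studio", "Jan"],
    )
]
answer = inquirer.prompt(questions)
print(answer["provider"])  # "Ollama" if the user just presses Enter
```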
```diff
@@ -185,44 +190,51 @@ def download_model(models_dir, models, interpreter):
         )
 
         interpreter.llm.api_base = "http://localhost:1234/v1"
-        interpreter.llm.max_tokens = 1000
-        interpreter.llm.context_window = 3000
         interpreter.llm.api_key = "x"
 
     elif selected_model == "Ollama":
         try:
-
             # List out all downloaded ollama models. Will fail if ollama isn't installed
-            result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
-            lines = result.stdout.split('\n')
-            names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header
+            def list_ollama_models():
+                result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
+                lines = result.stdout.split('\n')
+                names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header
+                return names
 
-            # If there are no downloaded models, prompt them to download a model and try again
-            if not names:
-                time.sleep(1)
-
-                interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new interpreter session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
-
-                print("Please download a model then try again\n")
-                time.sleep(2)
-                sys.exit(1)
+            llama3_installed = True
+            names = list_ollama_models()
+            if "llama3" not in names:
+                # If a user has other models installed but not llama3, let's display the correct message
+                if not names:
+                    llama3_installed = False
+                names.insert(0, "llama3")
 
             # If there are models, prompt them to select one
-            else:
-                time.sleep(1)
+            time.sleep(1)
+
+            if llama3_installed:
                 interpreter.display_message(f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run <model-name>`, then start a new interpreter session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
 
-                # Create a new inquirer selection from the names
-                name_question = [
-                    inquirer.List('name', message="Select a downloaded Ollama model", choices=names),
-                ]
-                name_answer = inquirer.prompt(name_question)
-                selected_name = name_answer['name'] if name_answer else None
-
-                # Set the model to the selected model
-                interpreter.llm.model = f"ollama/{selected_name}"
-                interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
-                time.sleep(1)
+
+
+            # Create a new inquirer selection from the names
+            name_question = [
+                inquirer.List('name', message="Select a downloaded Ollama model:" if llama3_installed else "No models found. Select a model to install:", choices=names),
+            ]
+            name_answer = inquirer.prompt(name_question)
+            selected_name = name_answer['name'] if name_answer else None
+
+            if selected_name == "llama3":
+                # If the user selects llama3, we need to check if it's installed, and if not, install it
+                all_models = list_ollama_models()
+                if "llama3" not in all_models:
+                    interpreter.display_message(f"\nDownloading Llama3...\n")
+                    subprocess.run(["ollama", "pull", "llama3"], check=True)
+
+            # Set the model to the selected model
+            interpreter.llm.model = f"ollama/{selected_name}"
+            interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
+            time.sleep(1)
 
         # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
         except (subprocess.CalledProcessError, FileNotFoundError) as e:
```
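The parsing inside `list_ollama_models()` relies on the tabular format of `ollama list`: a header row, then one model per line with the name in the first column. A self-contained sketch with made-up sample output (not captured from a real run):

```python
# Illustrative `ollama list` output; the profile runs the real command via subprocess.
sample_stdout = """NAME             ID              SIZE    MODIFIED
llama3:latest    365c0bd3c000    4.7 GB  2 days ago
codellama:7b     8fdf8f752f6e    3.8 GB  5 weeks ago
"""

lines = sample_stdout.split("\n")
# Skip the header row, keep the first column, and trim the ":latest" tag
names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]
print(names)  # ['llama3', 'codellama:7b']
```

If the user picks `llama3` and it is not yet installed, the branch shells out to `ollama pull llama3` with `check=True`, so a failed download raises `subprocess.CalledProcessError` and lands in the same handler as a missing `ollama` binary.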
```diff
@@ -248,8 +260,6 @@ def download_model(models_dir, models, interpreter):
         """
     )
     interpreter.llm.api_base = "http://localhost:1337/v1"
-    interpreter.llm.max_tokens = 1000
-    interpreter.llm.context_window = 3000
     time.sleep(1)
 
     # Prompt the user to enter the name of the model running on Jan
```
```diff
@@ -327,10 +337,17 @@ def download_model(models_dir, models, interpreter):
     interpreter.llm.model = "local"
     interpreter.llm.temperature = 0
     interpreter.llm.api_base = "http://localhost:8080/v1"
-    interpreter.llm.max_tokens = 1000
-    interpreter.llm.context_window = 3000
     interpreter.llm.supports_functions = False
 
+
+user_ram = get_ram()
+# Set context window and max tokens for all local models based on the user's available RAM
+if user_ram and user_ram > 9:
+    interpreter.llm.max_tokens = 1200
+    interpreter.llm.context_window = 8000
+else:
+    interpreter.llm.max_tokens = 1000
+    interpreter.llm.context_window = 3000
 # Set the system message to a minimal version for all local models.
 interpreter.system_message = "You are Open Interpreter, a world-class programmer that can execute code on the user's machine."
 # Set offline for all local models
```
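This 9 GB threshold consolidates the per-provider `max_tokens`/`context_window` settings deleted in the earlier hunks. A standalone sketch of the sizing rule (the `pick_llm_limits` helper name is hypothetical, for illustration only):

```python
def pick_llm_limits(ram_gb):
    # Hypothetical helper mirroring the profile's RAM-based sizing:
    # machines reporting more than 9 GB of RAM get larger limits.
    if ram_gb and ram_gb > 9:
        return {"max_tokens": 1200, "context_window": 8000}
    return {"max_tokens": 1000, "context_window": 3000}

print(pick_llm_limits(16.0))  # {'max_tokens': 1200, 'context_window': 8000}
print(pick_llm_limits(8.0))   # {'max_tokens': 1000, 'context_window': 3000}
```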
