
Commit 69dc8e6

Better messaging
1 parent ccd034a commit 69dc8e6

2 files changed: +44 / -58 lines


interpreter/core/llm/llm.py

Lines changed: 1 addition & 1 deletion
@@ -234,7 +234,7 @@ def fixed_litellm_completions(**params):
 
     if "local" in params.get("model"):
         # Kinda hacky, but this helps
-        params["stop"] = ["<|assistant|>", "<|end|>"]
+        params["stop"] = ["<|assistant|>", "<|end|>", "<|eot_id|>"]
 
     # Run completion
     first_error = None
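For context, `<|eot_id|>` is the end-of-turn token in Llama 3's prompt format, so adding it to the stop list keeps local Llama 3 models from emitting the raw token instead of stopping. A minimal sketch of the guard above, with an assumed helper name and sample params rather than the project's actual call site:

```python
# Minimal sketch (helper name and params are assumptions, not the project's code).
def add_local_stop_tokens(params: dict) -> dict:
    if "local" in params.get("model", ""):
        # "<|eot_id|>" is Llama 3's end-of-turn marker; without it some local
        # Llama 3 builds print the token rather than stopping cleanly.
        params["stop"] = ["<|assistant|>", "<|end|>", "<|eot_id|>"]
    return params


params = add_local_stop_tokens({"model": "local/llama3", "messages": []})
print(params["stop"])  # ['<|assistant|>', '<|end|>', '<|eot_id|>']
```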

interpreter/terminal_interface/profiles/defaults/local.py

Lines changed: 43 additions & 57 deletions
@@ -10,6 +10,8 @@
 
 from interpreter import interpreter
 
+model = None
+
 
 def download_model(models_dir, models, interpreter):
     # Get RAM and disk information
@@ -20,15 +22,11 @@ def download_model(models_dir, models, interpreter):
         1024 * 1024 * 1024
     )  # Convert bytes to GB
 
-    time.sleep(1)
-
     # Display the users hardware specs
     interpreter.display_message(
         f"Your machine has `{total_ram:.2f}GB` of RAM, and `{free_disk_space:.2f}GB` of free storage space."
     )
 
-    time.sleep(2)
-
     if total_ram < 10:
         interpreter.display_message(
             f"\nYour computer realistically can only run smaller models less than 4GB, Phi-2 might be the best model for your computer.\n"
@@ -42,14 +40,10 @@ def download_model(models_dir, models, interpreter):
             f"\nYour computer should have enough RAM to run any model below.\n"
         )
 
-    time.sleep(1)
-
    interpreter.display_message(
        f"In general, the larger the model, the better the performance, but choose a model that best fits your computer's hardware. \nOnly models you have the storage space to download are shown:\n"
    )
 
-    time.sleep(1)
-
     try:
         model_list = [
             {
@@ -182,7 +176,7 @@ def download_model(models_dir, models, interpreter):
 
 # START OF LOCAL MODEL PROVIDER LOGIC
 interpreter.display_message(
-    "> Open Interpreter is compatible with several local model providers.\n"
+    "\n**Open Interpreter** supports multiple local model providers.\n"
 )
 
 # Define the choices for local models
@@ -197,7 +191,7 @@ def download_model(models_dir, models, interpreter):
 questions = [
     inquirer.List(
         "model",
-        message="What one would you like to use?",
+        message="Select a provider",
         choices=choices,
     ),
 ]
@@ -231,41 +225,23 @@ def download_model(models_dir, models, interpreter):
 elif selected_model == "Ollama":
     try:
         # List out all downloaded ollama models. Will fail if ollama isn't installed
-        def list_ollama_models():
-            result = subprocess.run(
-                ["ollama", "list"], capture_output=True, text=True, check=True
-            )
-            lines = result.stdout.split("\n")
-            names = [
-                line.split()[0].replace(":latest", "")
-                for line in lines[1:]
-                if line.strip()
-            ]  # Extract names, trim out ":latest", skip header
-            return names
-
-        llama3_installed = True
-        names = list_ollama_models()
-        if "llama3" not in names:
-            # If a user has other models installed but not llama3, let's display the correct message
-            if not names:
-                llama3_installed = False
-            names.insert(0, "llama3")
-
-        # If there are models, prompt them to select one
-        time.sleep(1)
+        result = subprocess.run(
+            ["ollama", "list"], capture_output=True, text=True, check=True
+        )
+        lines = result.stdout.split("\n")
+        names = [
+            line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()
+        ]  # Extract names, trim out ":latest", skip header
 
-        if llama3_installed:
-            interpreter.display_message(
-                f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run <model-name>`, then start a new interpreter session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n"
-            )
+        for model in ["llama3", "phi3", "wizardlm2"]:
+            if model not in names:
+                names.append("→ Download " + model)
 
         # Create a new inquirer selection from the names
         name_question = [
             inquirer.List(
                 "name",
-                message="Select a downloaded Ollama model:"
-                if llama3_installed
-                else "No models found. Select a model to install:",
+                message="Select a model",
                 choices=names,
             ),
         ]
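Pulled out of the diff, the new listing logic can be run on its own. A hedged sketch, assuming `ollama` is installed and on PATH (the function name is ours, not the project's):

```python
import subprocess


def ollama_model_choices(recommended=("llama3", "phi3", "wizardlm2")):
    """List installed Ollama models and append download placeholders for
    recommended models that are missing, mirroring the hunk above."""
    result = subprocess.run(
        ["ollama", "list"], capture_output=True, text=True, check=True
    )
    lines = result.stdout.split("\n")
    # Skip the header row, keep the first column, trim the ":latest" tag
    names = [
        line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()
    ]
    for model in recommended:
        if model not in names:
            names.append("→ Download " + model)
    return names


if __name__ == "__main__":
    print(ollama_model_choices())
```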
@@ -276,17 +252,16 @@ def list_ollama_models():
 
         selected_name = name_answer["name"]
 
-        if selected_name == "llama3":
-            # If the user selects llama3, we need to check if it's installed, and if not, install it
-            all_models = list_ollama_models()
-            if "llama3" not in all_models:
-                interpreter.display_message(f"\nDownloading Llama3...\n")
-                subprocess.run(["ollama", "pull", "llama3"], check=True)
+        if "download" in selected_name.lower():
+            model = selected_name.split(" ")[-1]
+            interpreter.display_message(f"\nDownloading {model}...\n")
+            subprocess.run(["ollama", "pull", model], check=True)
+        else:
+            model = selected_name.strip()
 
         # Set the model to the selected model
-        interpreter.llm.model = f"ollama/{selected_name}"
-        interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
-        time.sleep(1)
+        interpreter.llm.model = f"ollama/{model}"
+        interpreter.display_message(f"> Model set to `{model}`")
 
     # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
     except (subprocess.CalledProcessError, FileNotFoundError) as e:
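The follow-up hunk handles whichever entry the user picks: a "→ Download …" placeholder triggers `ollama pull`, anything else is treated as an already-installed model name. A standalone sketch of that branch (the helper name is an assumption):

```python
import subprocess


def resolve_ollama_model(selected_name: str) -> str:
    """Sketch of the selection handling above, outside the interpreter profile."""
    if "download" in selected_name.lower():
        model = selected_name.split(" ")[-1]  # "→ Download llama3" -> "llama3"
        print(f"Downloading {model}...")
        subprocess.run(["ollama", "pull", model], check=True)
    else:
        model = selected_name.strip()
    return model


# resolve_ollama_model("→ Download llama3") pulls llama3 and returns "llama3";
# resolve_ollama_model("phi3") simply returns "phi3".
```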
@@ -359,15 +334,17 @@ def list_ollama_models():
     models = [f for f in os.listdir(models_dir) if f.endswith(".llamafile")]
 
     if not models:
-        print("\nThere are no models currently downloaded. Let's download a new one.\n")
+        print(
+            "\nNo models currently downloaded. Please select a new model to download.\n"
+        )
         model_path = download_model(models_dir, models, interpreter)
     else:
         # Prompt the user to select a downloaded model or download a new one
-        model_choices = models + [" ↓ Download new model"]
+        model_choices = models + ["↓ Download new model"]
         questions = [
             inquirer.List(
                 "model",
-                message="Select a Llamafile model to run or download a new one:",
+                message="Select a model",
                 choices=model_choices,
             )
         ]
@@ -376,7 +353,7 @@ def list_ollama_models():
         if answers == None:
             exit()
 
-        if answers["model"] == " ↓ Download new model":
+        if answers["model"] == "↓ Download new model":
             model_path = download_model(models_dir, models, interpreter)
         else:
             model_path = os.path.join(models_dir, answers["model"])
@@ -392,11 +369,8 @@ def list_ollama_models():
             text=True,
         )
 
-        print("Waiting for the model to load...")
         for line in process.stdout:
             if "llama server listening at http://127.0.0.1:8080" in line:
-                print("\nModel loaded \n")
-                time.sleep(1)
                 break  # Exit the loop once the server is ready
     except Exception as e:
         process.kill()  # Force kill if not terminated after timeout
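With the progress prints gone, readiness is signalled only by the server's own log line. A hedged, standalone version of that wait loop (the llamafile path is a placeholder):

```python
import subprocess

model_path = "./phi-2.Q4_K_M.llamafile"  # hypothetical downloaded llamafile

# Start the llamafile server and block until its stdout reports readiness;
# the diff points interpreter.llm.api_base at this endpoint once it is up.
process = subprocess.Popen(
    [model_path],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    text=True,
)

for line in process.stdout:
    if "llama server listening at http://127.0.0.1:8080" in line:
        break  # OpenAI-compatible endpoint at http://localhost:8080/v1 is ready
```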
@@ -409,6 +383,9 @@ def list_ollama_models():
     interpreter.llm.api_base = "http://localhost:8080/v1"
     interpreter.llm.supports_functions = False
 
+    model_name = model_path.split("/")[-1]
+    interpreter.display_message(f"> Model set to `{model_name}`")
+
     user_ram = total_ram = psutil.virtual_memory().total / (
         1024 * 1024 * 1024
     )  # Convert bytes to GB
@@ -419,6 +396,15 @@ def list_ollama_models():
     else:
         interpreter.llm.max_tokens = 1000
         interpreter.llm.context_window = 3000
+
+# Display intro message
+if interpreter.auto_run == False:
+    interpreter.display_message(
+        "**Open Interpreter** will require approval before running code."
+        + "\n\nUse `interpreter -y` to bypass this."
+        + "\n\nPress `CTRL-C` to exit.\n"
+    )
+
 
 # Set the system message to a minimal version for all local models.
 interpreter.system_message = """
@@ -450,14 +436,14 @@ def list_ollama_models():
 Now, your turn:
 """
 
-interpreter.user_message_template = "{content} Please send me some code that would be able to answer my question, in the form of ```python\n... the code ...\n``` or ```shell\n... the code ...\n```"
+# interpreter.user_message_template = "{content} Please send me some code that would be able to answer my question, in the form of ```python\n... the code ...\n``` or ```shell\n... the code ...\n```"
 interpreter.code_output_template = "I executed that code. This was the ouput: {content}\n\nWhat does this output mean / what's next (if anything, or are we done)?"
 interpreter.empty_code_output_template = "The code above was executed on my machine. It produced no text output. what's next (if anything, or are we done?)"
 interpreter.code_output_sender = "user"
 interpreter.max_output = 500
 interpreter.llm.context_window = 8000
 interpreter.force_task_completion = False
-# interpreter.user_message_template = "{content}. If my question must be solved by running code on my computer, send me code to run enclosed in ```python or ```shell. Otherwise, don't send code and answer like a chatbot. Be concise, don't include anything unnecessary. Don't use placeholders, I can't edit code."
+interpreter.user_message_template = "{content}. If my question must be solved by running code on my computer, send me code to run enclosed in ```python or ```shell. Otherwise, don't send code. Be concise, don't include anything unnecessary. Don't use placeholders, I can't edit code."
 
 # Set offline for all local models
 interpreter.offline = True
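The swapped-in `user_message_template` is a plain string with a `{content}` placeholder. A minimal sketch of how such a template expands for one user turn (the template text is abridged; the exact substitution point inside Open Interpreter is not shown in this diff):

```python
# Abridged template, same shape as the one set above; str.format stands in for
# whatever substitution the library actually performs.
user_message_template = (
    "{content}. If my question must be solved by running code on my computer, "
    "send me code to run. Otherwise, don't send code. Be concise."
)

print(user_message_template.format(content="List the files on my desktop"))
```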
