
Commit 1b7883a

Local II
1 parent 827e0d4 commit 1b7883a

4 files changed: +93 -24 lines changed

interpreter/terminal_interface/profiles/defaults/codestral-os.py

Lines changed: 36 additions & 0 deletions
@@ -3,6 +3,42 @@
 
 from interpreter import interpreter
 
+interpreter.llm.model = "ollama/codestral"
+interpreter.llm.max_tokens = 1000
+interpreter.llm.context_window = 7000
+
+model_name = interpreter.llm.model.replace("ollama/", "")
+try:
+    # List out all downloaded ollama models. Will fail if ollama isn't installed
+    result = subprocess.run(
+        ["ollama", "list"], capture_output=True, text=True, check=True
+    )
+except Exception as e:
+    print(str(e))
+    interpreter.display_message(
+        f"> Ollama not found\n\nPlease download Ollama from [ollama.com](https://ollama.com/) to use `codestral`.\n"
+    )
+    exit()
+
+lines = result.stdout.split("\n")
+names = [
+    line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()
+]  # Extract names, trim out ":latest", skip header
+
+if model_name not in names:
+    interpreter.display_message(f"\nDownloading {model_name}...\n")
+    subprocess.run(["ollama", "pull", model_name], check=True)
+
+# Send a ping, which will actually load the model
+interpreter.display_message("\n*Loading model...*\n")
+
+old_max_tokens = interpreter.llm.max_tokens
+interpreter.llm.max_tokens = 1
+interpreter.computer.ai.chat("ping")
+interpreter.llm.max_tokens = old_max_tokens
+
+interpreter.display_message("> Model set to `codestral`")
+
 # Check if required packages are installed
 
 # THERE IS AN INCONSISTENCY HERE.
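Note: the hunk starts at line 3 of codestral-os.py, so it does not show whether `import subprocess` already exists higher up in the file; the added code assumes it does. As a sanity check, here is a minimal, self-contained sketch of the same detect-and-pull step. The helper name `ensure_ollama_model` is hypothetical, not part of this commit.

# Standalone sketch of the Ollama detection step used in this profile.
import subprocess
import sys


def ensure_ollama_model(model_name: str) -> None:
    try:
        # `ollama list` fails (or isn't on PATH) when Ollama isn't installed
        result = subprocess.run(
            ["ollama", "list"], capture_output=True, text=True, check=True
        )
    except Exception as e:
        print(str(e))
        print("Ollama not found. Download it from https://ollama.com/")
        sys.exit(1)

    lines = result.stdout.split("\n")
    names = [
        line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()
    ]  # first column of each non-empty row, header skipped, ":latest" trimmed

    if model_name not in names:
        print(f"Downloading {model_name}...")
        subprocess.run(["ollama", "pull", model_name], check=True)


if __name__ == "__main__":
    ensure_ollama_model("codestral")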

interpreter/terminal_interface/profiles/defaults/codestral.py

Lines changed: 25 additions & 6 deletions
@@ -3,19 +3,38 @@
 from interpreter import interpreter
 
 interpreter.llm.model = "ollama/codestral"
+interpreter.llm.max_tokens = 1000
+interpreter.llm.context_window = 7000
+
+model_name = interpreter.llm.model.replace("ollama/", "")
+try:
+    # List out all downloaded ollama models. Will fail if ollama isn't installed
+    result = subprocess.run(
+        ["ollama", "list"], capture_output=True, text=True, check=True
+    )
+except Exception as e:
+    print(str(e))
+    interpreter.display_message(
+        f"> Ollama not found\n\nPlease download Ollama from [ollama.com](https://ollama.com/) to use `codestral`.\n"
+    )
+    exit()
+
+lines = result.stdout.split("\n")
+names = [
+    line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()
+]  # Extract names, trim out ":latest", skip header
+
+if model_name not in names:
+    interpreter.display_message(f"\nDownloading {model_name}...\n")
+    subprocess.run(["ollama", "pull", model_name], check=True)
 
 # Send a ping, which will actually load the model
-interpreter.display_message("\nLoading model...")
+interpreter.display_message("\n*Loading model...*\n")
 
 old_max_tokens = interpreter.llm.max_tokens
-old_context_window = interpreter.llm.context_window
 interpreter.llm.max_tokens = 1
-interpreter.llm.context_window = 100
-
 interpreter.computer.ai.chat("ping")
-
 interpreter.llm.max_tokens = old_max_tokens
-interpreter.llm.context_window = old_context_window
 
 interpreter.display_message("> Model set to `codestral`")
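The warm-up block above saves `max_tokens`, forces a 1-token "ping" to load the model, then restores the old value (the previous `context_window` save/restore is dropped). If the ping raises, the temporary value would stick; a hedged sketch of wrapping the same save/restore in a context manager follows. `temporary_max_tokens` is a hypothetical helper name, not something this commit adds.

# Sketch only: the profile's max_tokens save/restore as a context manager,
# so the original value is restored even if the warm-up "ping" raises.
from contextlib import contextmanager


@contextmanager
def temporary_max_tokens(llm, value):
    old_max_tokens = llm.max_tokens
    llm.max_tokens = value
    try:
        yield
    finally:
        llm.max_tokens = old_max_tokens


# Usage, mirroring the profile's warm-up step:
# with temporary_max_tokens(interpreter.llm, 1):
#     interpreter.computer.ai.chat("ping")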

interpreter/terminal_interface/profiles/defaults/llama3.py

Lines changed: 29 additions & 14 deletions
@@ -151,25 +151,40 @@
 Now, your turn:"""
 
 
+interpreter.llm.model = "ollama/llama3"
+interpreter.llm.max_tokens = 1000
+interpreter.llm.context_window = 7000
+
+# The below should be pushed into interpreter.llm.load()?
+
+model_name = interpreter.llm.model.replace("ollama/", "")
 try:
     # List out all downloaded ollama models. Will fail if ollama isn't installed
     result = subprocess.run(
         ["ollama", "list"], capture_output=True, text=True, check=True
     )
-    lines = result.stdout.split("\n")
-    names = [
-        line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()
-    ]  # Extract names, trim out ":latest", skip header
-
-    if "llama3" not in names:
-        interpreter.display_message(f"\nDownloading llama3...\n")
-        subprocess.run(["ollama", "pull", "llama3"], check=True)
-
-    # Set the model to codestral
-    interpreter.llm.model = f"ollama/llama3"
-    interpreter.display_message(f"> Model set to `llama3`")
-except:
+except Exception as e:
+    print(str(e))
     interpreter.display_message(
-        f"> Ollama not found\n\nPlease download Ollama from [ollama.com](https://ollama.com/) to use `codestral`.\n"
+        f"> Ollama not found\n\nPlease download Ollama from [ollama.com](https://ollama.com/) to use `llama3`.\n"
     )
     exit()
+
+lines = result.stdout.split("\n")
+names = [
+    line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()
+]  # Extract names, trim out ":latest", skip header
+
+if model_name not in names:
+    interpreter.display_message(f"\nDownloading {model_name}...\n")
+    subprocess.run(["ollama", "pull", model_name], check=True)
+
+# Send a ping, which will actually load the model
+interpreter.display_message("\n*Loading model...*\n")
+
+old_max_tokens = interpreter.llm.max_tokens
+interpreter.llm.max_tokens = 1
+interpreter.computer.ai.chat("ping")
+interpreter.llm.max_tokens = old_max_tokens
+
+interpreter.display_message("> Model set to `llama3`")

interpreter/terminal_interface/validate_llm_settings.py

Lines changed: 3 additions & 4 deletions
@@ -1,5 +1,6 @@
 """
 I do not like this and I want to get rid of it lol. Like, what is it doing..?
+I guess it's setting up the model. So maybe this should be like, interpreter.llm.load() soon
 """
 
 import os
@@ -92,7 +93,7 @@ def validate_llm_settings(interpreter):
     except Exception as e:
         print(str(e))
         interpreter.display_message(
-            f"> Ollama not found\n\nPlease download Ollama from [ollama.com](https://ollama.com/) to use `codestral`.\n"
+            f"> Ollama not found\n\nPlease download Ollama from [ollama.com](https://ollama.com/) to use `{model_name}`.\n"
         )
         exit()
 
@@ -108,15 +109,13 @@ def validate_llm_settings(interpreter):
         subprocess.run(["ollama", "pull", model_name], check=True)
 
     # Send a ping, which will actually load the model
-    print("Loading model...")
+    interpreter.display_message("\n*Loading model...*\n")
 
     old_max_tokens = interpreter.llm.max_tokens
    interpreter.llm.max_tokens = 1
    interpreter.computer.ai.chat("ping")
    interpreter.llm.max_tokens = old_max_tokens
 
-    print("Model loaded.")
-
     # interpreter.display_message(f"> Model set to `{model_name}`")
 
     # This is a model we don't have checks for yet.
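Both the `# The below should be pushed into interpreter.llm.load()?` comment in llama3.py and the updated docstring in validate_llm_settings.py point toward consolidating this repeated setup. A hypothetical sketch of what such an `interpreter.llm.load()` could look like follows; nothing below exists in this commit, it only collects the steps the three profiles and validate_llm_settings.py currently repeat.

# Hypothetical consolidation sketch. Class shape and attribute names are
# assumptions; the body just gathers the detect / pull / warm-up steps
# shown in the diffs above.
import subprocess


class Llm:
    def __init__(self, interpreter, model="ollama/codestral"):
        self.interpreter = interpreter
        self.model = model
        self.max_tokens = 1000
        self.context_window = 7000

    def load(self):
        """Ensure the ollama model is downloaded, then warm it up with a ping."""
        if not self.model.startswith("ollama/"):
            return  # only ollama models need this treatment

        model_name = self.model.replace("ollama/", "")
        try:
            # List out all downloaded ollama models. Fails if ollama isn't installed
            result = subprocess.run(
                ["ollama", "list"], capture_output=True, text=True, check=True
            )
        except Exception as e:
            print(str(e))
            self.interpreter.display_message(
                "> Ollama not found\n\nPlease download Ollama from "
                f"[ollama.com](https://ollama.com/) to use `{model_name}`.\n"
            )
            exit()

        names = [
            line.split()[0].replace(":latest", "")
            for line in result.stdout.split("\n")[1:]
            if line.strip()
        ]
        if model_name not in names:
            self.interpreter.display_message(f"\nDownloading {model_name}...\n")
            subprocess.run(["ollama", "pull", model_name], check=True)

        # Send a ping, which will actually load the model
        self.interpreter.display_message("\n*Loading model...*\n")
        old_max_tokens = self.max_tokens
        self.max_tokens = 1
        self.interpreter.computer.ai.chat("ping")
        self.max_tokens = old_max_tokens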

Comments (0)