Commit cb486a7

Fixed ollama not working with llama3.1 and probably other models too
1 parent 907ee8f commit cb486a7

File tree

1 file changed: +7 −1 lines changed

interpreter/core/llm/llm.py

Lines changed: 7 additions & 1 deletion
@@ -309,6 +309,9 @@ def load(self):
         if self._is_loaded:
             return

+        if self.model.startswith("ollama/") and not ":" in self.model:
+            self.model = self.model + ":latest"
+
         self._is_loaded = True

         if self.model.startswith("ollama/"):
@@ -321,7 +324,7 @@ def load(self):
                 if response.ok:
                     data = response.json()
                     names = [
-                        model["name"].replace(":latest", "")
+                        model["name"]
                         for model in data["models"]
                         if "name" in model and model["name"]
                     ]
@@ -356,6 +359,7 @@ def load(self):
                 self.max_tokens = int(self.context_window * 0.2)

             # Send a ping, which will actually load the model
+            model_name = model_name.replace(":latest", "")
             print(f"Loading {model_name}...\n")

             old_max_tokens = self.max_tokens
@@ -396,6 +400,8 @@ def fixed_litellm_completions(**params):
     else:
         litellm.drop_params = True

+    params["model"] = params["model"].replace(":latest", "")
+
     # Run completion
     attempts = 4
     first_error = None
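
Taken together, the change treats an untagged Ollama model such as ollama/llama3.1 as ollama/llama3.1:latest when matching it against the names returned by Ollama's /api/tags endpoint (which are now kept with their tags), then strips :latest again before printing the name and before the id reaches litellm. The sketch below illustrates that normalization as a standalone helper, assuming a local Ollama server at http://localhost:11434; the helper name and structure are illustrative only and not part of the repository's API.

import requests

OLLAMA_API_BASE = "http://localhost:11434"  # assumed default Ollama address


def resolve_ollama_model(model: str) -> str:
    """Illustrative helper: normalize an 'ollama/<name>' id the way this commit does."""
    if not model.startswith("ollama/"):
        return model

    # No explicit tag -> assume ':latest', so the name matches what /api/tags reports.
    if ":" not in model:
        model = model + ":latest"
    model_name = model.replace("ollama/", "")

    # List locally downloaded models; /api/tags returns names including their tags.
    response = requests.get(f"{OLLAMA_API_BASE}/api/tags")
    response.raise_for_status()
    names = [m["name"] for m in response.json().get("models", []) if m.get("name")]

    if model_name not in names:
        raise ValueError(f"{model_name} is not downloaded. Try `ollama pull {model_name}`.")

    # Strip ':latest' again before the id is handed to litellm, mirroring
    # fixed_litellm_completions() in the diff above.
    return model.replace(":latest", "")


# e.g. resolve_ollama_model("ollama/llama3.1")    -> "ollama/llama3.1"
#      resolve_ollama_model("ollama/llama3.1:8b") -> "ollama/llama3.1:8b"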
