Commit ff1b937

Merge pull request #1416 from OpenInterpreter/development
Merge Development Branch
2 parents 6e003f2 + 30df78f commit ff1b937

8 files changed (+275 -248 lines)


docs/getting-started/setup.mdx

Lines changed: 1 addition & 1 deletion

@@ -5,7 +5,7 @@ title: Setup
 <iframe
   width="560"
   height="315"
-  src="https://www.youtube.com/watch?v=5sk3t8ilDR8"
+  src="https://www.youtube.com/embed/5sk3t8ilDR8"
   frameBorder="0"
   allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
   allowFullScreen

docs/guides/profiles.mdx

Lines changed: 9 additions & 0 deletions

@@ -2,6 +2,15 @@
 title: Profiles
 ---
 
+<iframe
+  width="560"
+  height="315"
+  src="https://www.youtube.com/embed/NxfdrGQrkHQ"
+  frameBorder="0"
+  allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
+  allowFullScreen
+></iframe>
+
 Profiles are a powerful way to customize your instance of Open Interpreter.
 
 Profiles are Python files that configure Open Interpreter. A wide range of fields from the [model](/settings/all-settings#model-selection) to the [context window](/settings/all-settings#context-window) to the [message templates](/settings/all-settings#user-message-template) can be configured in a Profile. This allows you to save multiple variations of Open Interpreter to optimize for your specific use-cases.
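
For a sense of what such a file looks like, here is a minimal illustrative sketch; the attribute names follow the settings pages linked above, and the specific values are assumptions rather than anything from this commit:

```python
# my_profile.py: illustrative profile sketch (values are assumptions)
from interpreter import interpreter

interpreter.llm.model = "gpt-4o"         # model selection
interpreter.llm.context_window = 110000  # context window, in tokens
interpreter.llm.max_tokens = 4096        # max tokens per completion
interpreter.auto_run = False             # confirm before running generated code
```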

docs/language-models/custom-models.mdx

Lines changed: 7 additions & 8 deletions

@@ -9,21 +9,20 @@ As long as your system can accept an input and stream an output (and can be inte
 Simply replace the OpenAI-compatible `completions` function in your language model with one of your own:
 
 ```python
-def custom_language_model(openai_message):
+def custom_language_model(messages, model, stream, max_tokens):
     """
     OpenAI-compatible completions function (this one just echoes what the user said back).
+    To make it OpenAI-compatible and parsable, `choices` has to be the root property.
+    The property `delta` is used to signify streaming.
     """
-    users_content = openai_message[-1].get("content") # Get last message's content
-
-    # To make it OpenAI-compatible, we yield this first:
-    yield {"delta": {"role": "assistant"}}
+    users_content = messages[-1].get("content") # Get last message's content
 
     for character in users_content:
-        yield {"delta": {"content": character}}
+        yield {"choices": [{"delta": {"content": character}}]}
 
 # Tell Open Interpreter to power the language model with this function
 
-interpreter.llm.completion = custom_language_model
+interpreter.llm.completions = custom_language_model
 ```
 
 Then, set the following settings:

@@ -39,4 +38,4 @@ And start using it:
 
 ```
 interpreter.chat("Hi!") # Returns/displays "Hi!" character by character
-```
+```
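
To sanity-check the new streaming shape in isolation, here is a standalone sketch (not part of the commit) that drives the echo generator directly and reassembles its `choices`-rooted deltas:

```python
# Standalone sketch: exercise the echo completions function without Open Interpreter.
def custom_language_model(messages, model=None, stream=True, max_tokens=None):
    users_content = messages[-1].get("content")
    for character in users_content:
        yield {"choices": [{"delta": {"content": character}}]}

chunks = custom_language_model([{"role": "user", "content": "Hi!"}])
text = "".join(chunk["choices"][0]["delta"]["content"] for chunk in chunks)
assert text == "Hi!"  # the input is streamed back character by character
```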

interpreter/core/async_core.py

Lines changed: 36 additions & 38 deletions

@@ -701,47 +701,45 @@ async def openai_compatible_generator():
 async def chat_completion(request: ChatCompletionRequest):
     # Convert to LMC
 
-    user_messages = []
-    for message in reversed(request.messages):
-        if message.role == "user":
-            user_messages.append(message)
-        else:
-            break
-    user_messages.reverse()
-
-    for message in user_messages:
-        if type(message.content) == str:
-            async_interpreter.messages.append(
-                {"role": "user", "type": "message", "content": message.content}
-            )
-        if type(message.content) == list:
-            for content in message.content:
-                if content["type"] == "text":
-                    async_interpreter.messages.append(
-                        {"role": "user", "type": "message", "content": content}
+    last_message = request.messages[-1]
+
+    if last_message.role != "user":
+        raise ValueError("Last message must be from the user.")
+
+    if last_message.content == "{STOP}":
+        # Handle special STOP token
+        return
+
+    if type(last_message.content) == str:
+        async_interpreter.messages.append(last_message)
+    if type(last_message.content) == list:
+        for content in last_message.content:
+            if content["type"] == "text":
+                async_interpreter.messages.append(
+                    {"role": "user", "type": "message", "content": content}
+                )
+            elif content["type"] == "image_url":
+                if "url" not in content["image_url"]:
+                    raise Exception("`url` must be in `image_url`.")
+                url = content["image_url"]["url"]
+                print(url[:100])
+                if "base64," not in url:
+                    raise Exception(
+                        '''Image must be in the format: "data:image/jpeg;base64,{base64_image}"'''
                     )
-                elif content["type"] == "image_url":
-                    if "url" not in content["image_url"]:
-                        raise Exception("`url` must be in `image_url`.")
-                    url = content["image_url"]["url"]
-                    print(url[:100])
-                    if "base64," not in url:
-                        raise Exception(
-                            '''Image must be in the format: "data:image/jpeg;base64,{base64_image}"'''
-                        )
 
-                    # data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA6oA...
+                # data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA6oA...
 
-                    data = url.split("base64,")[1]
-                    format = "base64." + url.split(";")[0].split("/")[1]
-                    async_interpreter.messages.append(
-                        {
-                            "role": "user",
-                            "type": "image",
-                            "format": format,
-                            "content": data,
-                        }
-                    )
+                data = url.split("base64,")[1]
+                format = "base64." + url.split(";")[0].split("/")[1]
+                async_interpreter.messages.append(
+                    {
+                        "role": "user",
+                        "type": "image",
+                        "format": format,
+                        "content": data,
+                    }
+                )
 
     if request.stream:
         return StreamingResponse(
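
The image branch relies on the data-URL convention; as a standalone illustration (not part of the commit), the same string splits recover the format tag and the base64 payload:

```python
# Standalone sketch of the data-URL parsing used in chat_completion above.
url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg=="  # illustrative payload

data = url.split("base64,")[1]                        # "iVBORw0KGgoAAAANSUhEUg=="
format = "base64." + url.split(";")[0].split("/")[1]  # "base64.png"

print(format, data[:12])
```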

interpreter/core/llm/llm.py

Lines changed: 21 additions & 1 deletion

@@ -25,13 +25,27 @@
 from .run_tool_calling_llm import run_tool_calling_llm
 from .utils.convert_to_openai_messages import convert_to_openai_messages
 
+import logging
+
+# Create or get the logger
+logger = logging.getLogger('LiteLLM')
+
+class SuppressDebugFilter(logging.Filter):
+    def filter(self, record):
+        # Suppress only the specific message containing the keywords
+        if "cost map" in record.getMessage():
+            return False  # Suppress this log message
+        return True  # Allow all other messages
 
 class Llm:
     """
     A stateless LMC-style LLM with some helpful properties.
     """
 
     def __init__(self, interpreter):
+        # Add the filter to the logger
+        logger.addFilter(SuppressDebugFilter())
+
         # Store a reference to parent interpreter
         self.interpreter = interpreter
 
@@ -311,6 +325,9 @@ def load(self):
        if self._is_loaded:
            return
 
+       if self.model.startswith("ollama/") and not ":" in self.model:
+           self.model = self.model + ":latest"
+
        self._is_loaded = True
 
        if self.model.startswith("ollama/"):

@@ -323,7 +340,7 @@ def load(self):
            if response.ok:
                data = response.json()
                names = [
-                   model["name"].replace(":latest", "")
+                   model["name"]
                    for model in data["models"]
                    if "name" in model and model["name"]
                ]

@@ -358,6 +375,7 @@ def load(self):
            self.max_tokens = int(self.context_window * 0.2)
 
        # Send a ping, which will actually load the model
+       model_name = model_name.replace(":latest", "")
        print(f"Loading {model_name}...\n")
 
        old_max_tokens = self.max_tokens

@@ -398,6 +416,8 @@ def fixed_litellm_completions(**params):
    else:
        litellm.drop_params = True
 
+   params["model"] = params["model"].replace(":latest", "")
+
    # Run completion
    attempts = 4
    first_error = None
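
The suppression mechanism here is the standard library's logging filter; a self-contained sketch of the same pattern (the log messages are illustrative):

```python
import logging

class SuppressDebugFilter(logging.Filter):
    def filter(self, record):
        # Drop any record whose message mentions "cost map"; allow everything else.
        return "cost map" not in record.getMessage()

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("LiteLLM")
logger.addFilter(SuppressDebugFilter())

logger.debug("fetching cost map from remote...")  # suppressed by the filter
logger.debug("other debug output")                # still emitted
```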

interpreter/terminal_interface/local_setup.py

Lines changed: 1 addition & 1 deletion

@@ -257,7 +257,7 @@ def download_model(models_dir, models, interpreter):
    names=[name for name in names if not any(word.lower() in name.lower() for word in priority_models)]
    names=priority_models_found+names
 
-   for model in ["llama3", "phi3", "wizardlm2", "codestral"]:
+   for model in ["llama3.1", "phi3", "mistral-nemo", "gemma2", "codestral"]:
        if model not in names:
            names.append("↓ Download " + model)
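
The loop's effect is just to offer a download entry for any default model that is not already installed; a standalone sketch (the installed list is illustrative):

```python
# Standalone sketch of the download-option logic above.
names = ["codestral"]  # pretend only codestral is installed locally

for model in ["llama3.1", "phi3", "mistral-nemo", "gemma2", "codestral"]:
    if model not in names:
        names.append("↓ Download " + model)

print(names)
# ['codestral', '↓ Download llama3.1', '↓ Download phi3',
#  '↓ Download mistral-nemo', '↓ Download gemma2']
```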
