Commit a28073b

Merge pull request #1418 from CyanideByte/gpt-4o-mini-fast
Updated fast mode to gpt-4o-mini & Profile version updated
2 parents 7b0e5e1 + 59f07e1 commit a28073b

10 files changed (+16, -15 lines)


interpreter/core/llm/llm.py

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ def __init__(self, interpreter):
         self.completions = fixed_litellm_completions
 
         # Settings
-        self.model = "gpt-4-turbo"
+        self.model = "gpt-4o"
         self.temperature = 0
 
         self.supports_vision = None  # Will try to auto-detect
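
The default set here can still be overridden per session. A minimal sketch, assuming the attribute-style configuration shown in the diff above (the model name chosen below is only an example):

from interpreter import interpreter

# Sketch: override the new gpt-4o default for a single session.
interpreter.llm.model = "gpt-4o-mini"   # any litellm-compatible model name
interpreter.llm.temperature = 0
interpreter.chat("Print the current working directory.")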

interpreter/terminal_interface/profiles/defaults/default.yaml

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@
 
 # LLM Settings
 llm:
-  model: "gpt-4-turbo"
+  model: "gpt-4o"
   temperature: 0
   # api_key: ...  # Your API key, if the API requires it
   # api_base: ...  # The URL where an OpenAI-compatible server is running to handle LLM API requests
@@ -26,7 +26,7 @@ computer:
 
 # To use a separate model for the `wtf` command:
 # wtf:
-#   model: "gpt-3.5-turbo"
+#   model: "gpt-4o-mini"
 
 # Documentation
 # All options: https://docs.openinterpreter.com/settings

interpreter/terminal_interface/profiles/defaults/fast.yaml

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@
 # Remove the "#" before the settings below to use them.
 
 llm:
-  model: "gpt-3.5-turbo"
+  model: "gpt-4o-mini"
   temperature: 0
   # api_key: ...  # Your API key, if the API requires it
   # api_base: ...  # The URL where an OpenAI-compatible server is running to handle LLM API requests
@@ -23,4 +23,4 @@ custom_instructions: "The user has set you to FAST mode. **No talk, just code.**
 
 # All options: https://docs.openinterpreter.com/settings
 
-version: 0.2.1  # Configuration file version (do not modify)
+version: 0.2.5  # Configuration file version (do not modify)
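
Per the --fast help text further down, this is the profile loaded by `interpreter --fast` (a shortcut for `interpreter --profile fast`). A rough Python equivalent of the settings above, as a sketch only, assuming the same attribute API as before:

from interpreter import interpreter

# Sketch: approximate the fast profile programmatically.
interpreter.llm.model = "gpt-4o-mini"
interpreter.llm.temperature = 0
interpreter.custom_instructions = (
    "The user has set you to FAST mode. **No talk, just code.**"
)
interpreter.chat("List the five largest files in this directory.")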

interpreter/terminal_interface/profiles/defaults/snowpark.yml

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 
 # LLM Settings
 llm:
-  model: "gpt-4-turbo"
+  model: "gpt-4o"
   temperature: 0
   # api_key: ...  # Your API key, if the API requires it
   # api_base: ...  # The URL where an OpenAI-compatible server is running to handle LLM API requests

interpreter/terminal_interface/profiles/defaults/vision.yaml

Lines changed: 1 addition & 1 deletion
@@ -17,5 +17,5 @@ llm:
 
 # All options: https://docs.openinterpreter.com/usage/terminal/settings
 
-version: 0.2.1  # Configuration file version (do not modify)
+version: 0.2.5  # Configuration file version (do not modify)

interpreter/terminal_interface/profiles/profiles.py

Lines changed: 4 additions & 4 deletions
@@ -170,11 +170,11 @@ def apply_profile(interpreter, profile, profile_path):
 
     try:
         if profile["llm"]["model"] == "gpt-4":
-            text = text.replace("gpt-4", "gpt-4-turbo")
-            profile["llm"]["model"] = "gpt-4-turbo"
+            text = text.replace("gpt-4", "gpt-4o")
+            profile["llm"]["model"] = "gpt-4o"
         elif profile["llm"]["model"] == "gpt-4-turbo-preview":
-            text = text.replace("gpt-4-turbo-preview", "gpt-4-turbo")
-            profile["llm"]["model"] = "gpt-4-turbo"
+            text = text.replace("gpt-4-turbo-preview", "gpt-4o")
+            profile["llm"]["model"] = "gpt-4o"
     except:
         raise
         pass  # fine
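
The branch-per-alias logic in apply_profile could also be written table-driven. The following is a hypothetical sketch of that idea, not the project's actual code (the helper name and dict are illustrative):

# Hypothetical table-driven version of the alias upgrade performed in apply_profile.
LEGACY_MODEL_ALIASES = {
    "gpt-4": "gpt-4o",
    "gpt-4-turbo-preview": "gpt-4o",
}

def upgrade_legacy_model(profile, text):
    """Rewrite an outdated model name in both the parsed profile and its raw text."""
    model = profile.get("llm", {}).get("model")
    replacement = LEGACY_MODEL_ALIASES.get(model)
    if replacement:
        text = text.replace(model, replacement)
        profile["llm"]["model"] = replacement
    return profile, text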

interpreter/terminal_interface/start_terminal_interface.py

Lines changed: 1 addition & 1 deletion
@@ -197,7 +197,7 @@ def start_terminal_interface(interpreter):
         {
             "name": "fast",
             "nickname": "f",
-            "help_text": "runs `interpreter --model gpt-3.5-turbo` and asks OI to be extremely concise (shortcut for `interpreter --profile fast`)",
+            "help_text": "runs `interpreter --model gpt-4o-mini` and asks OI to be extremely concise (shortcut for `interpreter --profile fast`)",
             "type": bool,
         },
         {
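
For reference, a boolean flag with a short nickname like this is typically exposed along these lines; a hypothetical argparse illustration, not the project's actual flag-registration code:

import argparse

# Hypothetical illustration of a --fast / -f boolean flag.
parser = argparse.ArgumentParser(prog="interpreter")
parser.add_argument(
    "-f",
    "--fast",
    action="store_true",
    help="runs `interpreter --model gpt-4o-mini` and asks OI to be extremely concise "
    "(shortcut for `interpreter --profile fast`)",
)
args = parser.parse_args(["--fast"])
print(args.fast)  # True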

interpreter/terminal_interface/validate_llm_settings.py

Lines changed: 2 additions & 1 deletion
@@ -37,6 +37,7 @@ def validate_llm_settings(interpreter):
             "gpt-4",
             "gpt-3.5-turbo",
             "gpt-4o",
+            "gpt-4o-mini",
             "gpt-4-turbo",
         ]:
             if (
@@ -50,7 +51,7 @@ def validate_llm_settings(interpreter):
                 """---
                 > OpenAI API key not found
 
-                To use `gpt-4-turbo` (recommended) please provide an OpenAI API key.
+                To use `gpt-4o` (recommended) please provide an OpenAI API key.
 
                 To use another language model, run `interpreter --local` or consult the documentation at [docs.openinterpreter.com](https://docs.openinterpreter.com/language-model-setup/).
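
The model allow-list in the first hunk decides whether the OpenAI-key prompt in the second hunk is shown. A simplified sketch of the shape of that check, with a hypothetical helper name (the real validation flow does more than this):

import os

# Hypothetical simplification of the check gated by the model list above.
OPENAI_HOSTED_DEFAULTS = {"gpt-4", "gpt-3.5-turbo", "gpt-4o", "gpt-4o-mini", "gpt-4-turbo"}

def needs_openai_api_key(model, api_key=None):
    """Prompt for an OpenAI key only when a hosted default model is selected and no key is set."""
    return model in OPENAI_HOSTED_DEFAULTS and not (api_key or os.environ.get("OPENAI_API_KEY"))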

scripts/wtf.py

Lines changed: 1 addition & 1 deletion
@@ -374,7 +374,7 @@ def get_lines_from_file(filename, line_number):
         if wtf_model:
             model = wtf_model
         else:
-            model = profile.get("llm", {}).get("model", "gpt-3.5-turbo")
+            model = profile.get("llm", {}).get("model", "gpt-4o-mini")
     except:
         model = "gpt-4o-mini"
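
This keeps the fallback chain consistent: an explicit wtf override wins, then the profile's llm.model, then gpt-4o-mini. Condensed as a sketch with a hypothetical helper name:

def resolve_wtf_model(profile, wtf_model=None):
    """Pick the model for wtf: explicit override > profile llm.model > gpt-4o-mini."""
    if wtf_model:
        return wtf_model
    try:
        return profile.get("llm", {}).get("model", "gpt-4o-mini")
    except Exception:
        return "gpt-4o-mini"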

tests/config.test.yaml

Lines changed: 1 addition & 1 deletion
@@ -13,6 +13,6 @@ system_message: |
   In general, try to **make plans** with as few steps as possible. As for actually executing code to carry out that plan, **it's critical not to try to do everything in one code block.** You should try something, print information about it, then continue from there in tiny, informed steps. You will never get it on the first try, and attempting it in one go will often lead to errors you cant see.
   You are capable of **any** task.
 offline: false
-llm.model: "gpt-3.5-turbo"
+llm.model: "gpt-4o-mini"
 llm.temperature: 0.25
 verbose: true
