Skip to content

Commit 65247ad

Browse files
Authored — Merge pull request #1358 from OpenInterpreter/development: Claude support
2 parents 197d4bc + ca5f45d commit 65247ad

File tree

5 files changed

+115
-28
lines changed

5 files changed

+115
-28
lines changed

interpreter/core/async_core.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -533,6 +533,10 @@ async def set_settings(payload: Dict[str, Any]):
533533
for key, value in payload.items():
534534
print(f"Updating settings: {key} = {value}")
535535
if key in ["llm", "computer"] and isinstance(value, dict):
536+
if key == "auto_run":
537+
return {
538+
"error": f"The setting {key} is not modifiable through the server due to security constraints."
539+
}, 403
536540
if hasattr(async_interpreter, key):
537541
for sub_key, sub_value in value.items():
538542
if hasattr(getattr(async_interpreter, key), sub_key):

interpreter/core/llm/llm.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
from ...terminal_interface.utils.display_markdown_message import (
1616
display_markdown_message,
1717
)
18-
from .run_function_calling_llm import run_function_calling_llm
19-
20-
# from .run_tool_calling_llm import run_tool_calling_llm
2118
from .run_text_llm import run_text_llm
19+
20+
# from .run_function_calling_llm import run_function_calling_llm
21+
from .run_tool_calling_llm import run_tool_calling_llm
2222
from .utils.convert_to_openai_messages import convert_to_openai_messages
2323

2424

@@ -287,8 +287,8 @@ def run(self, messages):
287287
time.sleep(5)
288288

289289
if self.supports_functions:
290-
yield from run_function_calling_llm(self, params)
291-
# yield from run_tool_calling_llm(self, params)
290+
# yield from run_function_calling_llm(self, params)
291+
yield from run_tool_calling_llm(self, params)
292292
else:
293293
yield from run_text_llm(self, params)
294294

interpreter/core/llm/run_function_calling_llm.py

Lines changed: 32 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,9 @@ def run_function_calling_llm(llm, request_params):
4242
code = ""
4343
function_call_detected = False
4444

45+
accumulated_review = ""
46+
review_category = None
47+
4548
for chunk in llm.completions(**request_params):
4649
if "choices" not in chunk or len(chunk["choices"]) == 0:
4750
# This happens sometimes
@@ -54,22 +57,35 @@ def run_function_calling_llm(llm, request_params):
5457
if "content" in delta and delta["content"]:
5558
if function_call_detected:
5659
# More content after a code block? This is a code review by a judge layer.
57-
if delta["content"].strip() == "<SAFE>":
58-
yield {"type": "review", "format": "safe", "content": ""}
59-
elif "<UNSAFE>" in delta["content"]:
60-
content = (
61-
delta["content"]
62-
.replace("<UNSAFE>", "")
63-
.replace("</UNSAFE>", "")
64-
)
65-
yield {"type": "review", "format": "unsafe", "content": content}
66-
else:
67-
content = (
68-
delta["content"]
69-
.replace("<WARNING>", "")
70-
.replace("</WARNING>", "")
71-
)
72-
yield {"type": "review", "format": "warning", "content": content}
60+
61+
# print("Code safety review:", delta["content"])
62+
63+
if review_category == None:
64+
accumulated_review += delta["content"]
65+
66+
if "<unsafe>" in accumulated_review:
67+
review_category = "unsafe"
68+
if "<warning>" in accumulated_review:
69+
review_category = "warning"
70+
if "<safe>" in accumulated_review:
71+
review_category = "safe"
72+
73+
if review_category != None:
74+
for tag in [
75+
"<safe>",
76+
"</safe>",
77+
"<warning>",
78+
"</warning>",
79+
"<unsafe>",
80+
"</unsafe>",
81+
]:
82+
delta["content"] = delta["content"].replace(tag, "")
83+
84+
yield {
85+
"type": "review",
86+
"format": review_category,
87+
"content": delta["content"],
88+
}
7389

7490
else:
7591
yield {"type": "message", "content": delta["content"]}

interpreter/core/llm/run_tool_calling_llm.py

Lines changed: 71 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,28 @@ def run_tool_calling_llm(llm, request_params):
3636
]
3737
request_params["tools"] = [tool_schema]
3838

39+
last_tool_id = 0
40+
for i, message in enumerate(request_params["messages"]):
41+
if "function_call" in message:
42+
last_tool_id += 1
43+
function = message.pop("function_call")
44+
message["tool_calls"] = [
45+
{
46+
"id": "toolu_" + str(last_tool_id),
47+
"type": "function",
48+
"function": function,
49+
}
50+
]
51+
if message["role"] == "function":
52+
if i != 0 and request_params["messages"][i - 1]["role"] == "tool":
53+
request_params["messages"][i]["content"] += message["content"]
54+
message = None
55+
else:
56+
message["role"] = "tool"
57+
message["tool_call_id"] = "toolu_" + str(last_tool_id)
58+
59+
request_params["messages"] = [m for m in request_params["messages"] if m != None]
60+
3961
# Add OpenAI's recommended function message
4062
# request_params["messages"][0][
4163
# "content"
@@ -46,6 +68,9 @@ def run_tool_calling_llm(llm, request_params):
4668
accumulated_deltas = {}
4769
language = None
4870
code = ""
71+
function_call_detected = False
72+
accumulated_review = ""
73+
review_category = None
4974

5075
for chunk in llm.completions(**request_params):
5176
if "choices" not in chunk or len(chunk["choices"]) == 0:
@@ -55,18 +80,57 @@ def run_tool_calling_llm(llm, request_params):
5580
delta = chunk["choices"][0]["delta"]
5681

5782
# Convert tool call into function call, which we have great parsing logic for below
58-
if "tool_calls" in delta:
59-
if (
60-
len(delta["tool_calls"]) > 0
61-
and "function_call" in delta["tool_calls"][0]
62-
):
63-
delta["function_call"] = delta["tool_calls"][0]["function_call"]
83+
if "tool_calls" in delta and delta["tool_calls"]:
84+
function_call_detected = True
85+
86+
# import pdb; pdb.set_trace()
87+
if len(delta["tool_calls"]) > 0 and delta["tool_calls"][0].function:
88+
delta = {
89+
# "id": delta["tool_calls"][0],
90+
"function_call": {
91+
"name": delta["tool_calls"][0].function.name,
92+
"arguments": delta["tool_calls"][0].function.arguments,
93+
}
94+
}
6495

6596
# Accumulate deltas
6697
accumulated_deltas = merge_deltas(accumulated_deltas, delta)
6798

6899
if "content" in delta and delta["content"]:
69-
yield {"type": "message", "content": delta["content"]}
100+
if function_call_detected:
101+
# More content after a code block? This is a code review by a judge layer.
102+
103+
# print("Code safety review:", delta["content"])
104+
105+
if review_category == None:
106+
accumulated_review += delta["content"]
107+
108+
if "<unsafe>" in accumulated_review:
109+
review_category = "unsafe"
110+
if "<warning>" in accumulated_review:
111+
review_category = "warning"
112+
if "<safe>" in accumulated_review:
113+
review_category = "safe"
114+
115+
if review_category != None:
116+
for tag in [
117+
"<safe>",
118+
"</safe>",
119+
"<warning>",
120+
"</warning>",
121+
"<unsafe>",
122+
"</unsafe>",
123+
]:
124+
delta["content"] = delta["content"].replace(tag, "")
125+
126+
yield {
127+
"type": "review",
128+
"format": review_category,
129+
"content": delta["content"],
130+
}
131+
132+
else:
133+
yield {"type": "message", "content": delta["content"]}
70134

71135
if (
72136
accumulated_deltas.get("function_call")

numbers.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
1
2+
2
3+
3

0 commit comments

Comments (0)