Commit 59d19c5

Merge pull request #1426 from OpenInterpreter/development
Server fixes, `stdin`, better skills
2 parents aebdd0f + 247e2cd commit 59d19c5

22 files changed: +557 -279 lines

interpreter/core/async_core.py

Lines changed: 153 additions & 76 deletions
@@ -103,65 +103,103 @@ async def output(self):
         return await self.output_queue.async_q.get()
 
     def respond(self, run_code=None):
-        try:
-            if run_code == None:
-                run_code = self.auto_run
-
-            for chunk_og in self._respond_and_store():
-                chunk = (
-                    chunk_og.copy()
-                )  # This fixes weird double token chunks. Probably a deeper problem?
-
-                if chunk["type"] == "confirmation":
-                    if run_code:
-                        run_code = False
-                        continue
-                    else:
-                        break
-
-                if self.stop_event.is_set():
-                    return
+        for attempt in range(5):  # 5 attempts
+            try:
+                if run_code == None:
+                    run_code = self.auto_run
 
-                if self.print:
-                    if "start" in chunk:
-                        print("\n")
-                    if chunk["type"] in ["code", "console"] and "format" in chunk:
-                        if "start" in chunk:
-                            print("\n------------\n\n```" + chunk["format"], flush=True)
-                        if "end" in chunk:
-                            print("\n```\n\n------------\n\n", flush=True)
-                    if chunk.get("format") != "active_line":
-                        if "format" in chunk and "base64" in chunk["format"]:
-                            print("\n[An image was produced]")
-                        else:
-                            content = chunk.get("content", "")
-                            content = (
-                                str(content).encode("ascii", "ignore").decode("ascii")
-                            )
-                            print(content, end="", flush=True)
+                sent_chunks = False
 
-                if self.debug:
-                    print("Interpreter produced this chunk:", chunk)
+                for chunk_og in self._respond_and_store():
+                    chunk = (
+                        chunk_og.copy()
+                    )  # This fixes weird double token chunks. Probably a deeper problem?
 
-                self.output_queue.sync_q.put(chunk)
+                    if chunk["type"] == "confirmation":
+                        if run_code:
+                            run_code = False
+                            continue
+                        else:
+                            break
 
-            self.output_queue.sync_q.put(complete_message)
+                    if self.stop_event.is_set():
+                        return
 
-            if self.print or self.debug:
-                print("\nServer response complete.\n")
+                    if self.print:
+                        if "start" in chunk:
+                            print("\n")
+                        if chunk["type"] in ["code", "console"] and "format" in chunk:
+                            if "start" in chunk:
+                                print(
+                                    "\n------------\n\n```" + chunk["format"],
+                                    flush=True,
+                                )
+                            if "end" in chunk:
+                                print("\n```\n\n------------\n\n", flush=True)
+                        if chunk.get("format") != "active_line":
+                            if "format" in chunk and "base64" in chunk["format"]:
+                                print("\n[An image was produced]")
+                            else:
+                                content = chunk.get("content", "")
+                                content = (
+                                    str(content)
+                                    .encode("ascii", "ignore")
+                                    .decode("ascii")
+                                )
+                                print(content, end="", flush=True)
+
+                    if self.debug:
+                        print("Interpreter produced this chunk:", chunk)
+
+                    self.output_queue.sync_q.put(chunk)
+                    sent_chunks = True
+
+                if not sent_chunks:
+                    print("ERROR. NO CHUNKS SENT. TRYING AGAIN.")
+                    print("Messages:", self.messages)
+                    messages = [
+                        "Hello? Answer please.",
+                        "Just say something, anything.",
+                        "Are you there?",
+                        "Can you respond?",
+                        "Please reply.",
+                    ]
+                    self.messages.append(
+                        {
+                            "role": "user",
+                            "type": "message",
+                            "content": messages[attempt % len(messages)],
+                        }
+                    )
+                    time.sleep(1)
+                else:
+                    self.output_queue.sync_q.put(complete_message)
+                    if self.debug:
+                        print("\nServer response complete.\n")
+                    return
 
-        except Exception as e:
-            error = traceback.format_exc() + "\n" + str(e)
-            error_message = {
-                "role": "server",
-                "type": "error",
-                "content": traceback.format_exc() + "\n" + str(e),
-            }
-            self.output_queue.sync_q.put(error_message)
-            self.output_queue.sync_q.put(complete_message)
-            print("\n\n--- SENT ERROR: ---\n\n")
-            print(error)
-            print("\n\n--- (ERROR ABOVE WAS SENT) ---\n\n")
+            except Exception as e:
+                error = traceback.format_exc() + "\n" + str(e)
+                error_message = {
+                    "role": "server",
+                    "type": "error",
+                    "content": traceback.format_exc() + "\n" + str(e),
+                }
+                self.output_queue.sync_q.put(error_message)
+                self.output_queue.sync_q.put(complete_message)
+                print("\n\n--- SENT ERROR: ---\n\n")
+                print(error)
+                print("\n\n--- (ERROR ABOVE WAS SENT) ---\n\n")
+                return
+
+        error_message = {
+            "role": "server",
+            "type": "error",
+            "content": "No chunks sent or unknown error.",
+        }
+        self.output_queue.sync_q.put(error_message)
+        self.output_queue.sync_q.put(complete_message)
+        raise Exception("No chunks sent or unknown error.")
 
     def accumulate(self, chunk):
         """
@@ -678,29 +716,51 @@ class ChatCompletionRequest(BaseModel):
     stream: Optional[bool] = False
 
 async def openai_compatible_generator():
-    for i, chunk in enumerate(async_interpreter._respond_and_store()):
-        output_content = None
-
-        if chunk["type"] == "message" and "content" in chunk:
-            output_content = chunk["content"]
-        if chunk["type"] == "code" and "start" in chunk:
-            output_content = " "
-
-        if output_content:
-            await asyncio.sleep(0)
-            output_chunk = {
-                "id": i,
-                "object": "chat.completion.chunk",
-                "created": time.time(),
-                "model": "open-interpreter",
-                "choices": [{"delta": {"content": output_content}}],
-            }
-            yield f"data: {json.dumps(output_chunk)}\n\n"
+    made_chunk = False
+
+    for message in [
+        ".",
+        "Just say something, anything.",
+        "Hello? Answer please.",
+        "Are you there?",
+        "Can you respond?",
+        "Please reply.",
+    ]:
+        for i, chunk in enumerate(
+            async_interpreter.chat(message=message, stream=True, display=True)
+        ):
+            made_chunk = True
+
+            if async_interpreter.stop_event.is_set():
+                break
+
+            output_content = None
+
+            if chunk["type"] == "message" and "content" in chunk:
+                output_content = chunk["content"]
+            if chunk["type"] == "code" and "start" in chunk:
+                output_content = " "
+
+            if output_content:
+                await asyncio.sleep(0)
+                output_chunk = {
+                    "id": i,
+                    "object": "chat.completion.chunk",
+                    "created": time.time(),
+                    "model": "open-interpreter",
+                    "choices": [{"delta": {"content": output_content}}],
+                }
+                yield f"data: {json.dumps(output_chunk)}\n\n"
+
+        if made_chunk:
+            break
 
 @router.post("/openai/chat/completions")
 async def chat_completion(request: ChatCompletionRequest):
     # Convert to LMC
 
+    async_interpreter.stop_event.set()
+
     last_message = request.messages[-1]
 
     if last_message.role != "user":
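
Note on openai_compatible_generator() above: it now drives async_interpreter.chat() directly and emits OpenAI-style chat.completion.chunk objects as `data: {...}` lines. A client-side sketch for consuming that stream; the host and port are assumptions, only the /openai/chat/completions path comes from this file:

import json

import requests

# Host and port are assumptions; adjust to your deployment.
URL = "http://localhost:8000/openai/chat/completions"

payload = {
    "model": "open-interpreter",
    "stream": True,
    "messages": [{"role": "user", "content": "Hello!"}],
}

with requests.post(URL, json=payload, stream=True) as response:
    for line in response.iter_lines():
        if not line.startswith(b"data: "):
            continue  # Skip blank separator lines between events.
        chunk = json.loads(line[len(b"data: "):])
        delta = chunk["choices"][0]["delta"]
        print(delta.get("content", ""), end="", flush=True)
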
@@ -711,18 +771,26 @@ async def chat_completion(request: ChatCompletionRequest):
         return
 
     if type(last_message.content) == str:
-        async_interpreter.messages.append(last_message)
-    if type(last_message.content) == list:
+        async_interpreter.messages.append(
+            {
+                "role": "user",
+                "type": "message",
+                "content": last_message.content,
+            }
+        )
+        print(">", last_message.content)
+    elif type(last_message.content) == list:
         for content in last_message.content:
             if content["type"] == "text":
                 async_interpreter.messages.append(
-                    {"role": "user", "type": "message", "content": content}
+                    {"role": "user", "type": "message", "content": str(content)}
                 )
+                print(">", content)
             elif content["type"] == "image_url":
                 if "url" not in content["image_url"]:
                     raise Exception("`url` must be in `image_url`.")
                 url = content["image_url"]["url"]
-                print(url[:100])
+                print("> [user sent an image]", url[:100])
                 if "base64," not in url:
                     raise Exception(
                         '''Image must be in the format: "data:image/jpeg;base64,{base64_image}"'''
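
For reference, the list branch above expects OpenAI-style content parts, and an image must arrive as a base64 data URL. An illustrative request body that exercises both the text and image_url paths (the base64 payload is placeholder data):

# Illustrative only: a messages list with a text part and an image part.
body = {
    "model": "open-interpreter",
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {
                    "type": "image_url",
                    "image_url": {"url": "data:image/jpeg;base64,<base64_image>"},
                },
            ],
        }
    ],
}
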
@@ -741,12 +809,21 @@ async def chat_completion(request: ChatCompletionRequest):
                     }
                 )
 
+    if os.getenv("INTERPRETER_SERVER_REQUIRE_START", False):
+        if last_message.content != "{START}":
+            return
+        if async_interpreter.messages[-1]["content"] == "{START}":
+            # Remove that {START} message that would have just been added
+            async_interpreter.messages = async_interpreter.messages[:-1]
+
+    async_interpreter.stop_event.clear()
+
     if request.stream:
         return StreamingResponse(
             openai_compatible_generator(), media_type="application/x-ndjson"
         )
     else:
-        messages = async_interpreter.chat(message="", stream=False, display=True)
+        messages = async_interpreter.chat(message=".", stream=False, display=True)
         content = messages[-1]["content"]
         return {
             "id": "200",
