
Commit 2e23a67

Improved server, cleaned debug
1 parent 18280bf commit 2e23a67

4 files changed (+80 additions, -55 deletions)

interpreter/core/async_core.py

Lines changed: 77 additions & 53 deletions
@@ -103,65 +103,89 @@ async def output(self):
         return await self.output_queue.async_q.get()
 
     def respond(self, run_code=None):
-        try:
-            if run_code == None:
-                run_code = self.auto_run
-
-            for chunk_og in self._respond_and_store():
-                chunk = (
-                    chunk_og.copy()
-                )  # This fixes weird double token chunks. Probably a deeper problem?
-
-                if chunk["type"] == "confirmation":
-                    if run_code:
-                        run_code = False
-                        continue
-                    else:
-                        break
-
-                if self.stop_event.is_set():
-                    return
+        for i in range(5):  # 5 attempts
+            try:
+                if run_code == None:
+                    run_code = self.auto_run
 
-                if self.print:
-                    if "start" in chunk:
-                        print("\n")
-                    if chunk["type"] in ["code", "console"] and "format" in chunk:
-                        if "start" in chunk:
-                            print("\n------------\n\n```" + chunk["format"], flush=True)
-                        if "end" in chunk:
-                            print("\n```\n\n------------\n\n", flush=True)
-                    if chunk.get("format") != "active_line":
-                        if "format" in chunk and "base64" in chunk["format"]:
-                            print("\n[An image was produced]")
-                        else:
-                            content = chunk.get("content", "")
-                            content = (
-                                str(content).encode("ascii", "ignore").decode("ascii")
-                            )
-                            print(content, end="", flush=True)
+                sent_chunks = False
 
-                if self.debug:
-                    print("Interpreter produced this chunk:", chunk)
+                for chunk_og in self._respond_and_store():
+                    chunk = (
+                        chunk_og.copy()
+                    )  # This fixes weird double token chunks. Probably a deeper problem?
 
-                self.output_queue.sync_q.put(chunk)
+                    if chunk["type"] == "confirmation":
+                        if run_code:
+                            run_code = False
+                            continue
+                        else:
+                            break
+
+                    if self.stop_event.is_set():
+                        return
 
-            self.output_queue.sync_q.put(complete_message)
+                    if self.print:
+                        if "start" in chunk:
+                            print("\n")
+                        if chunk["type"] in ["code", "console"] and "format" in chunk:
+                            if "start" in chunk:
+                                print(
+                                    "\n------------\n\n```" + chunk["format"],
+                                    flush=True,
+                                )
+                            if "end" in chunk:
+                                print("\n```\n\n------------\n\n", flush=True)
+                        if chunk.get("format") != "active_line":
+                            if "format" in chunk and "base64" in chunk["format"]:
+                                print("\n[An image was produced]")
+                            else:
+                                content = chunk.get("content", "")
+                                content = (
+                                    str(content)
+                                    .encode("ascii", "ignore")
+                                    .decode("ascii")
+                                )
+                                print(content, end="", flush=True)
+
+                    if self.debug:
+                        print("Interpreter produced this chunk:", chunk)
+
+                    self.output_queue.sync_q.put(chunk)
+                    sent_chunks = True
+
+                if not sent_chunks:
+                    print("ERROR. NO CHUNKS SENT. TRYING AGAIN.")
+                    print("Messages:", self.messages)
+                    time.sleep(1)
+                else:
+                    self.output_queue.sync_q.put(complete_message)
+                    if self.print or self.debug:
+                        print("\nServer response complete.\n")
+                    return
 
-            if self.print or self.debug:
-                print("\nServer response complete.\n")
+            except Exception as e:
+                error = traceback.format_exc() + "\n" + str(e)
+                error_message = {
+                    "role": "server",
+                    "type": "error",
+                    "content": traceback.format_exc() + "\n" + str(e),
+                }
+                self.output_queue.sync_q.put(error_message)
+                self.output_queue.sync_q.put(complete_message)
+                print("\n\n--- SENT ERROR: ---\n\n")
+                print(error)
+                print("\n\n--- (ERROR ABOVE WAS SENT) ---\n\n")
+                return
 
-        except Exception as e:
-            error = traceback.format_exc() + "\n" + str(e)
-            error_message = {
-                "role": "server",
-                "type": "error",
-                "content": traceback.format_exc() + "\n" + str(e),
-            }
-            self.output_queue.sync_q.put(error_message)
-            self.output_queue.sync_q.put(complete_message)
-            print("\n\n--- SENT ERROR: ---\n\n")
-            print(error)
-            print("\n\n--- (ERROR ABOVE WAS SENT) ---\n\n")
+        error_message = {
+            "role": "server",
+            "type": "error",
+            "content": "No chunks sent or unknown error.",
+        }
+        self.output_queue.sync_q.put(error_message)
+        self.output_queue.sync_q.put(complete_message)
+        raise Exception("No chunks sent or unknown error.")
 
     def accumulate(self, chunk):
         """

interpreter/core/core.py

Lines changed: 1 addition & 0 deletions
@@ -318,6 +318,7 @@ def is_ephemeral(chunk):
         for chunk in respond(self):
             # For async usage
             if hasattr(self, "stop_event") and self.stop_event.is_set():
+                print("Open Interpreter stopping.")
                 break
 
             if chunk["content"] == "":

interpreter/core/llm/llm.py

Lines changed: 1 addition & 1 deletion
@@ -301,7 +301,7 @@ def run(self, messages):
             litellm.set_verbose = True
 
         if (
-            self.interpreter.debug == True
+            self.interpreter.debug == True and False  # DISABLED
         ):  # debug will equal "server" if we're debugging the server specifically
             print("\n\n\nOPENAI COMPATIBLE MESSAGES:\n\n\n")
             for message in messages:

interpreter/core/render_message.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@ def render_message(interpreter, message):
     rendered_message = "".join(parts).strip()
 
     if (
-        interpreter.debug == True
+        interpreter.debug == True and False  # DISABLED
     ):  # debug will equal "server" if we're debugging the server specifically
         print("\n\n\nSYSTEM MESSAGE\n\n\n")
         print(rendered_message)
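
Both this file and llm.py above are silenced the same way: appending `and False` to the condition makes the debug branch unreachable without deleting it, which matches the "cleaned debug" half of the commit message. A throwaway illustration of the idiom (not code from the repo):

debug = True

# Before: the dump runs whenever debug is truthy.
if debug == True:
    print("verbose debug output")

# After: `and False` short-circuits the test to False every time,
# so the block stays in the source but never executes.
if debug == True and False:  # DISABLED
    raise AssertionError("never reached")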
