Skip to content

Commit d51ecd4

Browse files
committed
Prep for server, windows fix
1 parent 17bdf39 commit d51ecd4

File tree

1 file changed

+101
-2
lines changed

1 file changed

+101
-2
lines changed

interpreter/computer_use/loop.py

Lines changed: 101 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,10 +33,19 @@
3333

3434
BETA_FLAG = "computer-use-2024-10-22"
3535

36+
from typing import List, Optional
37+
38+
import uvicorn
39+
from fastapi import FastAPI
40+
from fastapi.responses import StreamingResponse
41+
from pydantic import BaseModel
3642
from rich import print as rich_print
3743
from rich.markdown import Markdown
3844
from rich.rule import Rule
3945

46+
# Add this near the top of the file, with other imports and global variables
47+
messages: List[BetaMessageParam] = []
48+
4049

4150
def print_markdown(message):
4251
"""
@@ -87,7 +96,7 @@ class APIProvider(StrEnum):
8796
* When using your bash tool with commands that are expected to output very large quantities of text, redirect into a tmp file and use str_replace_editor or `grep -n -B <lines before> -A <lines after> <query> <filename>` to confirm output.
8897
* When viewing a page it can be helpful to zoom out so that you can see everything on the page. Either that, or make sure you scroll down to see everything before deciding something isn't available.
8998
* When using your computer function calls, they take a while to run and send back to you. Where possible/feasible, try to chain multiple of these calls all into one function calls request.
90-
* The current date is {datetime.today().strftime('%A, %B %-d, %Y')}.
99+
* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
91100
</SYSTEM_CAPABILITY>
92101
93102
<IMPORTANT>
@@ -335,6 +344,81 @@ async def main():
335344
provider = APIProvider.ANTHROPIC
336345
system_prompt_suffix = ""
337346

347+
# Check if running in server mode
348+
if "--server" in sys.argv:
349+
app = FastAPI()
350+
351+
# Start the mouse position checking thread when in server mode
352+
mouse_thread = threading.Thread(target=check_mouse_position)
353+
mouse_thread.daemon = True
354+
mouse_thread.start()
355+
356+
# Get API key from environment variable
357+
api_key = os.environ.get("ANTHROPIC_API_KEY")
358+
if not api_key:
359+
raise ValueError(
360+
"ANTHROPIC_API_KEY environment variable must be set when running in server mode"
361+
)
362+
363+
@app.post("/openai/chat/completions")
async def chat_completion(request: ChatCompletionRequest):
    """OpenAI-compatible chat-completions endpoint.

    Appends the last incoming message to the module-level ``messages`` history,
    runs ``sampling_loop`` over it, and streams SSE chunks back to the client.

    NOTE(review): ``sampling_loop``, ``exit_flag``, ``model``, ``provider``,
    ``system_prompt_suffix`` and ``api_key`` are closed over from the enclosing
    scope / module — not visible here; behavior depends on their contracts.
    """
    # Check exit flag before processing request
    # (exit_flag is set by the mouse-corner watchdog thread elsewhere in the file)
    if exit_flag:
        return {"error": "Server shutting down due to mouse in corner"}

    async def stream_response():
        # Debug trace left in by the author.
        print("is this even happening")

        # Instead of creating converted_messages, append the last message to global messages
        # NOTE(review): only the final request message is appended — earlier
        # messages in the request are ignored; history lives server-side.
        global messages
        messages.append(
            {
                "role": request.messages[-1].role,
                "content": [
                    {"type": "text", "text": request.messages[-1].content}
                ],
            }
        )

        # Accumulates every SSE chunk produced by the callbacks below.
        # NOTE(review): currently only appended to, never re-yielded (the
        # replay loop further down is commented out) — dead state, verify intent.
        response_chunks = []

        async def output_callback(content_block: BetaContentBlock):
            # Wrap model text in an OpenAI-style SSE delta chunk.
            chunk = f"data: {json.dumps({'choices': [{'delta': {'content': content_block.text}}]})}\n\n"
            response_chunks.append(chunk)
            # NOTE(review): this `yield` turns the callback into an async
            # generator — it only emits if sampling_loop actually iterates the
            # returned generator rather than awaiting it. TODO confirm chunks
            # reach the client at all.
            yield chunk

        async def tool_output_callback(result: ToolResult, tool_id: str):
            # Forward tool stdout (or its error text) as a content delta.
            if result.output or result.error:
                content = result.output if result.output else result.error
                chunk = f"data: {json.dumps({'choices': [{'delta': {'content': content}}]})}\n\n"
                response_chunks.append(chunk)
                # NOTE(review): same async-generator concern as output_callback.
                yield chunk

        try:
            # Drive the agent loop over the shared conversation history.
            result = await sampling_loop(
                model=model,
                provider=provider,
                system_prompt_suffix=system_prompt_suffix,
                messages=messages,  # Now using global messages
                output_callback=output_callback,
                tool_output_callback=tool_output_callback,
                api_key=api_key,
            )

            # # Yield all stored chunks
            # for chunk in response_chunks:
            #     yield chunk

        except Exception as e:
            # Surface the failure to the client as a terminal SSE error event.
            print(f"Error: {e}")
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    return StreamingResponse(stream_response(), media_type="text/event-stream")
417+
418+
# Instead of running uvicorn here, we'll return the app
419+
return app
420+
421+
# Original CLI code continues here...
338422
print()
339423
print_markdown("Welcome to **Open Interpreter**.\n")
340424
print_markdown("---")
@@ -428,7 +512,12 @@ def tool_output_callback(result: ToolResult, tool_id: str):
428512

429513

430514
def run_async_main():
    """Synchronous entry point.

    Without ``--server`` this simply drives the async CLI loop. With
    ``--server``, ``main()`` instead builds and returns a FastAPI app,
    which is then handed to uvicorn (uvicorn owns its own event loop,
    so it must run outside ``asyncio.run``).
    """
    server_mode = "--server" in sys.argv
    if not server_mode:
        asyncio.run(main())
        return

    # Start uvicorn server directly without asyncio.run()
    fastapi_app = asyncio.run(main())
    uvicorn.run(fastapi_app, host="0.0.0.0", port=8000)
432521

433522

434523
if __name__ == "__main__":
@@ -464,3 +553,13 @@ def check_mouse_position():
464553
print("\nMouse moved to corner. Exiting...")
465554
os._exit(0)
466555
threading.Event().wait(0.1) # Check every 100ms
556+
557+
558+
class ChatMessage(BaseModel):
    """One OpenAI-style chat message in an incoming request body."""

    role: str  # presumably "user" / "assistant" / "system" — not validated here; TODO confirm
    content: str  # plain-text message body (converted to a text content block by the server)
561+
562+
563+
class ChatCompletionRequest(BaseModel):
    """Request body for the OpenAI-compatible /openai/chat/completions route."""

    messages: List[ChatMessage]  # full conversation from the client; server only appends the last one
    stream: Optional[bool] = False  # accepted for API compatibility; response always streams SSE

0 commit comments

Comments
 (0)