Skip to content

Commit 827e0d4

Browse files
authored
Merge pull request #1290 from tyfiero/Local-II-changes
Server fixes and jan ai fix
2 parents 5598b03 + 843f917 commit 827e0d4

File tree

5 files changed

+620
-69
lines changed

5 files changed

+620
-69
lines changed

interpreter/core/core.py

Lines changed: 4 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -142,7 +142,10 @@ def __init__(
142142
self.code_output_sender = code_output_sender
143143

144144
def server(self, *args, **kwargs):
145-
server(self, *args, **kwargs)
145+
try:
146+
server(self, *args, **kwargs)
147+
except:
148+
display_markdown_message("Missing dependencies for the server, please run `pip install open-interpreter[server]` and try again.")
146149

147150
def local_setup(self):
148151
"""

interpreter/core/server.py

Lines changed: 10 additions & 63 deletions
Original file line number · Diff line number · Diff line change
@@ -11,11 +11,9 @@
1111

1212
import asyncio
1313
import json
14-
import os
15-
import threading
1614

1715
###
18-
# from pynput import keyboard
16+
from pynput import keyboard
1917
# from RealtimeTTS import TextToAudioStream, OpenAIEngine, CoquiEngine
2018
# from RealtimeSTT import AudioToTextRecorder
2119
# from beeper import Beeper
@@ -25,23 +23,9 @@
2523

2624
from fastapi import FastAPI, Header, WebSocket
2725
from fastapi.middleware.cors import CORSMiddleware
28-
from openai import OpenAI
2926
from pydantic import BaseModel
3027
from uvicorn import Config, Server
3128

32-
# import argparse
33-
# from profiles.default import interpreter
34-
# from interpreter import interpreter
35-
36-
# Parse command line arguments for port number
37-
# parser = argparse.ArgumentParser(description="FastAPI server.")
38-
# parser.add_argument("--port", type=int, default=63863, help="Port to run on.")
39-
# args = parser.parse_args()
40-
41-
42-
# interpreter.tts = "openai"
43-
44-
4529
class Settings(BaseModel):
4630
auto_run: bool
4731
custom_instructions: str
@@ -80,12 +64,9 @@ def __init__(self, interpreter):
8064
False # Tracks whether interpreter is trying to use the keyboard
8165
)
8266

83-
# print("oksskk")
8467
# self.loop = asyncio.get_event_loop()
85-
# print("okkk")
8668

8769
async def _add_to_queue(self, queue, item):
88-
print(f"Adding item to output", item)
8970
await queue.put(item)
9071

9172
async def clear_queue(self, queue):
@@ -117,7 +98,6 @@ async def input(self, chunk):
11798
self._last_lmc_start_flag = time.time()
11899
# self.interpreter.computer.terminal.stop() # Stop any code execution... maybe we should make interpreter.stop()?
119100
elif "end" in chunk:
120-
print("yep")
121101
asyncio.create_task(self.run())
122102
else:
123103
await self._add_to_queue(self._input_queue, chunk)
@@ -126,15 +106,12 @@ def add_to_output_queue_sync(self, chunk):
126106
"""
127107
Synchronous function to add a chunk to the output queue.
128108
"""
129-
print("ADDING TO QUEUE:", chunk)
130109
asyncio.create_task(self._add_to_queue(self._output_queue, chunk))
131110

132111
async def run(self):
133112
"""
134113
Runs OI on the audio bytes submitted to the input. Will add streaming LMC chunks to the _output_queue.
135114
"""
136-
print("heyyyy")
137-
# interpreter.messages = self.active_chat_messages
138115
# self.beeper.start()
139116

140117
# self.stt.stop()
@@ -147,10 +124,9 @@ async def run(self):
147124
def generate(message):
148125
last_lmc_start_flag = self._last_lmc_start_flag
149126
# interpreter.messages = self.active_chat_messages
150-
print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
127+
# print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
151128
print("passing this in:", message)
152129
for chunk in self.interpreter.chat(message, display=False, stream=True):
153-
print("FROM INTERPRETER. CHUNK:", chunk)
154130

155131
if self._last_lmc_start_flag != last_lmc_start_flag:
156132
# self.beeper.stop()
@@ -215,7 +191,7 @@ async def output(self):
215191
return await self._output_queue.get()
216192

217193

218-
def server(interpreter):
194+
def server(interpreter, port=8000): # Default port is 8000 if not specified
219195
async_interpreter = AsyncInterpreter(interpreter)
220196

221197
app = FastAPI()
@@ -226,16 +202,19 @@ def server(interpreter):
226202
allow_methods=["*"], # Allow all methods (GET, POST, etc.)
227203
allow_headers=["*"], # Allow all headers
228204
)
229-
230205
@app.post("/settings")
231206
async def settings(payload: Dict[str, Any]):
232207
for key, value in payload.items():
233208
print("Updating interpreter settings with the following:")
234209
print(key, value)
235-
setattr(async_interpreter.interpreter, key, value)
210+
if key == "llm" and isinstance(value, dict):
211+
for sub_key, sub_value in value.items():
212+
setattr(async_interpreter.interpreter, sub_key, sub_value)
213+
else:
214+
setattr(async_interpreter.interpreter, key, value)
236215

237216
return {"status": "success"}
238-
217+
239218
@app.websocket("/")
240219
async def websocket_endpoint(websocket: WebSocket):
241220
await websocket.accept()
@@ -261,7 +240,6 @@ async def send_output():
261240
# we dont send out bytes rn, no TTS
262241
pass
263242
elif isinstance(output, dict):
264-
print("sending:", output)
265243
await websocket.send_text(json.dumps(output))
266244

267245
await asyncio.gather(receive_input(), send_output())
@@ -271,37 +249,6 @@ async def send_output():
271249
finally:
272250
await websocket.close()
273251

274-
class Rename(BaseModel):
275-
input: str
276-
277-
@app.post("/rename-chat")
278-
async def rename_chat(body_content: Rename, x_api_key: str = Header(None)):
279-
print("RENAME CHAT REQUEST in PY 🌙🌙🌙🌙")
280-
input_value = body_content.input
281-
client = OpenAI(
282-
# defaults to os.environ.get("OPENAI_API_KEY")
283-
api_key=x_api_key,
284-
)
285-
try:
286-
response = client.chat.completions.create(
287-
model="gpt-3.5-turbo",
288-
messages=[
289-
{
290-
"role": "user",
291-
"content": f"Given the following chat snippet, create a unique and descriptive title in less than 8 words. Your answer must not be related to customer service.\n\n{input_value}",
292-
}
293-
],
294-
temperature=0.3,
295-
stream=False,
296-
)
297-
print(response)
298-
completion = response["choices"][0]["message"]["content"]
299-
return {"data": {"content": completion}}
300-
except Exception as e:
301-
print(f"Error: {e}")
302-
traceback.print_exc()
303-
return {"error": str(e)}
304-
305-
config = Config(app, host="0.0.0.0", port=8000)
252+
config = Config(app, host="0.0.0.0", port=port)
306253
interpreter.uvicorn_server = Server(config)
307254
interpreter.uvicorn_server.run()

interpreter/terminal_interface/local_setup.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -337,7 +337,7 @@ def download_model(models_dir, models, interpreter):
337337
exit()
338338

339339
jan_model_name = model_name_answer["jan_model_name"]
340-
interpreter.llm.model = f"jan/{jan_model_name}"
340+
interpreter.llm.model = jan_model_name
341341
interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
342342
time.sleep(1)
343343

0 commit comments

Comments (0)