Commit 731b03e (parent: 0f8bf8a)

Server fixes and jan ai fix

2 files changed: +11 -61 lines

interpreter/core/server.py

Lines changed: 10 additions & 60 deletions
@@ -15,7 +15,7 @@
 import threading

 ###
-# from pynput import keyboard
+from pynput import keyboard
 # from RealtimeTTS import TextToAudioStream, OpenAIEngine, CoquiEngine
 # from RealtimeSTT import AudioToTextRecorder
 # from beeper import Beeper
@@ -29,19 +29,6 @@
 from pydantic import BaseModel
 from uvicorn import Config, Server

-# import argparse
-# from profiles.default import interpreter
-# from interpreter import interpreter
-
-# Parse command line arguments for port number
-# parser = argparse.ArgumentParser(description="FastAPI server.")
-# parser.add_argument("--port", type=int, default=63863, help="Port to run on.")
-# args = parser.parse_args()
-
-
-# interpreter.tts = "openai"
-
-
 class Settings(BaseModel):
     auto_run: bool
     custom_instructions: str
@@ -80,12 +67,9 @@ def __init__(self, interpreter):
             False # Tracks whether interpreter is trying to use the keyboard
         )

-        # print("oksskk")
         # self.loop = asyncio.get_event_loop()
-        # print("okkk")

     async def _add_to_queue(self, queue, item):
-        print(f"Adding item to output", item)
         await queue.put(item)

     async def clear_queue(self, queue):
@@ -117,7 +101,6 @@ async def input(self, chunk):
             self._last_lmc_start_flag = time.time()
             # self.interpreter.computer.terminal.stop() # Stop any code execution... maybe we should make interpreter.stop()?
         elif "end" in chunk:
-            print("yep")
             asyncio.create_task(self.run())
         else:
             await self._add_to_queue(self._input_queue, chunk)
@@ -126,15 +109,12 @@ def add_to_output_queue_sync(self, chunk):
         """
         Synchronous function to add a chunk to the output queue.
         """
-        print("ADDING TO QUEUE:", chunk)
         asyncio.create_task(self._add_to_queue(self._output_queue, chunk))

     async def run(self):
         """
         Runs OI on the audio bytes submitted to the input. Will add streaming LMC chunks to the _output_queue.
         """
-        print("heyyyy")
-        # interpreter.messages = self.active_chat_messages
         # self.beeper.start()

         # self.stt.stop()
@@ -147,10 +127,9 @@ async def run(self):
         def generate(message):
             last_lmc_start_flag = self._last_lmc_start_flag
             # interpreter.messages = self.active_chat_messages
-            print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
+            # print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
             print("passing this in:", message)
             for chunk in self.interpreter.chat(message, display=False, stream=True):
-                print("FROM INTERPRETER. CHUNK:", chunk)

                 if self._last_lmc_start_flag != last_lmc_start_flag:
                     # self.beeper.stop()
@@ -215,7 +194,7 @@ async def output(self):
         return await self._output_queue.get()


-def server(interpreter):
+def server(interpreter, port=8000): # Default port is 8000 if not specified
     async_interpreter = AsyncInterpreter(interpreter)

     app = FastAPI()
@@ -226,16 +205,19 @@ def server(interpreter):
         allow_methods=["*"], # Allow all methods (GET, POST, etc.)
         allow_headers=["*"], # Allow all headers
     )
-
     @app.post("/settings")
     async def settings(payload: Dict[str, Any]):
         for key, value in payload.items():
             print("Updating interpreter settings with the following:")
             print(key, value)
-            setattr(async_interpreter.interpreter, key, value)
+            if key == "llm" and isinstance(value, dict):
+                for sub_key, sub_value in value.items():
+                    setattr(async_interpreter.interpreter, sub_key, sub_value)
+            else:
+                setattr(async_interpreter.interpreter, key, value)

         return {"status": "success"}
-
+
     @app.websocket("/")
     async def websocket_endpoint(websocket: WebSocket):
         await websocket.accept()
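
The /settings change above stops blindly setattr-ing a nested "llm" object and instead flattens its sub-keys onto the interpreter object itself. A minimal client sketch, assuming a local server on the default port; the payload keys and values are illustrative, not mandated by the commit:

# Hypothetical client call for the updated POST /settings endpoint.
# Note: sub-keys of "llm" land on the interpreter object directly, per
# setattr(async_interpreter.interpreter, sub_key, sub_value) above.
import requests  # assumed client-side dependency

payload = {
    "auto_run": True,  # illustrative top-level setting
    "llm": {"model": "gpt-4", "temperature": 0.3},  # illustrative nested settings
}
requests.post("http://localhost:8000/settings", json=payload)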
@@ -261,7 +243,6 @@ async def send_output():
                         # we dont send out bytes rn, no TTS
                         pass
                     elif isinstance(output, dict):
-                        print("sending:", output)
                         await websocket.send_text(json.dumps(output))

             await asyncio.gather(receive_input(), send_output())
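
For reference, a minimal client sketch for the "/" websocket endpoint. It assumes chunks are sent as JSON text frames using the LMC-style "start"/"end" flags that input() checks for above; the websockets library and the exact field names are assumptions, not part of this commit:

# Sketch only: connects, streams one user message, reads one output chunk.
import asyncio
import json

import websockets  # assumed third-party client library


async def main():
    uri = "ws://localhost:8000/"  # default port per server() above
    async with websockets.connect(uri) as ws:
        # "start" marks a new turn; "end" triggers run() on the server side
        await ws.send(json.dumps({"role": "user", "type": "message", "start": True}))
        await ws.send(json.dumps({"role": "user", "type": "message", "content": "Hi"}))
        await ws.send(json.dumps({"role": "user", "type": "message", "end": True}))
        print(json.loads(await ws.recv()))  # first streamed output chunk


asyncio.run(main())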
@@ -271,37 +252,6 @@ async def send_output():
         finally:
             await websocket.close()

-    class Rename(BaseModel):
-        input: str
-
-    @app.post("/rename-chat")
-    async def rename_chat(body_content: Rename, x_api_key: str = Header(None)):
-        print("RENAME CHAT REQUEST in PY 🌙🌙🌙🌙")
-        input_value = body_content.input
-        client = OpenAI(
-            # defaults to os.environ.get("OPENAI_API_KEY")
-            api_key=x_api_key,
-        )
-        try:
-            response = client.chat.completions.create(
-                model="gpt-3.5-turbo",
-                messages=[
-                    {
-                        "role": "user",
-                        "content": f"Given the following chat snippet, create a unique and descriptive title in less than 8 words. Your answer must not be related to customer service.\n\n{input_value}",
-                    }
-                ],
-                temperature=0.3,
-                stream=False,
-            )
-            print(response)
-            completion = response["choices"][0]["message"]["content"]
-            return {"data": {"content": completion}}
-        except Exception as e:
-            print(f"Error: {e}")
-            traceback.print_exc()
-            return {"error": str(e)}
-
-    config = Config(app, host="0.0.0.0", port=8000)
+    config = Config(app, host="0.0.0.0", port=port)
     interpreter.uvicorn_server = Server(config)
     interpreter.uvicorn_server.run()
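
Taken together with the removed argparse block, the port is now passed as a function argument instead of being parsed from the command line or hard-coded into Config. A usage sketch; the import paths are assumed from this repository's layout:

# Hypothetical launcher: starts the FastAPI/uvicorn server on a custom port.
from interpreter import interpreter
from interpreter.core.server import server

server(interpreter, port=8080)  # omit port to get the new default of 8000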

interpreter/terminal_interface/local_setup.py

Lines changed: 1 addition & 1 deletion

@@ -337,7 +337,7 @@ def download_model(models_dir, models, interpreter):
         exit()

     jan_model_name = model_name_answer["jan_model_name"]
-    interpreter.llm.model = f"jan/{jan_model_name}"
+    interpreter.llm.model = jan_model_name
     interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
     time.sleep(1)
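
The Jan fix stores the model name exactly as the user entered it instead of prepending a "jan/" prefix. A before/after illustration with a hypothetical model name:

jan_model_name = "llama3-8b-instruct"  # hypothetical value from the Jan prompt

# Before this commit:
# interpreter.llm.model = f"jan/{jan_model_name}"  # -> "jan/llama3-8b-instruct"

# After this commit:
interpreter.llm.model = jan_model_name  # -> "llama3-8b-instruct"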