@@ -11,11 +11,9 @@

import asyncio
import json
-import os
-import threading

###
-# from pynput import keyboard
+from pynput import keyboard
# from RealtimeTTS import TextToAudioStream, OpenAIEngine, CoquiEngine
# from RealtimeSTT import AudioToTextRecorder
# from beeper import Beeper
@@ -25,23 +23,9 @@

from fastapi import FastAPI, Header, WebSocket
from fastapi.middleware.cors import CORSMiddleware
-from openai import OpenAI
from pydantic import BaseModel
from uvicorn import Config, Server

-# import argparse
-# from profiles.default import interpreter
-# from interpreter import interpreter
-
-# Parse command line arguments for port number
-# parser = argparse.ArgumentParser(description="FastAPI server.")
-# parser.add_argument("--port", type=int, default=63863, help="Port to run on.")
-# args = parser.parse_args()
-
-
-# interpreter.tts = "openai"
-
-
class Settings(BaseModel):
    auto_run: bool
    custom_instructions: str
@@ -80,12 +64,9 @@ def __init__(self, interpreter):
            False  # Tracks whether interpreter is trying to use the keyboard
        )

-        # print("oksskk")
        # self.loop = asyncio.get_event_loop()
-        # print("okkk")

    async def _add_to_queue(self, queue, item):
-        print(f"Adding item to output", item)
        await queue.put(item)

    async def clear_queue(self, queue):
@@ -117,7 +98,6 @@ async def input(self, chunk):
            self._last_lmc_start_flag = time.time()
            # self.interpreter.computer.terminal.stop() # Stop any code execution... maybe we should make interpreter.stop()?
        elif "end" in chunk:
-            print("yep")
            asyncio.create_task(self.run())
        else:
            await self._add_to_queue(self._input_queue, chunk)
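Note for context: the `input` path above keys off `"start"` and `"end"` markers in incoming LMC chunks, with `"end"` scheduling `run()`. A minimal sketch of the chunk sequence a caller would feed it; field names beyond `start`/`content`/`end` are assumptions, not confirmed by this diff:

```python
# Hypothetical driver for AsyncInterpreter.input(); chunk fields other
# than "start"/"content"/"end" are assumed, not taken from this diff.
await async_interpreter.input({"role": "user", "type": "message", "start": True})
await async_interpreter.input({"role": "user", "type": "message", "content": "What's 2 + 2?"})
await async_interpreter.input({"role": "user", "type": "message", "end": True})  # schedules run()
```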
@@ -126,15 +106,12 @@ def add_to_output_queue_sync(self, chunk):
        """
        Synchronous function to add a chunk to the output queue.
        """
-        print("ADDING TO QUEUE:", chunk)
        asyncio.create_task(self._add_to_queue(self._output_queue, chunk))

    async def run(self):
        """
        Runs OI on the audio bytes submitted to the input. Will add streaming LMC chunks to the _output_queue.
        """
-        print("heyyyy")
-        # interpreter.messages = self.active_chat_messages
        # self.beeper.start()

        # self.stt.stop()
@@ -147,10 +124,9 @@ async def run(self):
        def generate(message):
            last_lmc_start_flag = self._last_lmc_start_flag
            # interpreter.messages = self.active_chat_messages
-            print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
+            # print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
            print("passing this in:", message)
            for chunk in self.interpreter.chat(message, display=False, stream=True):
-                print("FROM INTERPRETER. CHUNK:", chunk)

                if self._last_lmc_start_flag != last_lmc_start_flag:
                    # self.beeper.stop()
@@ -215,7 +191,7 @@ async def output(self):
        return await self._output_queue.get()


-def server(interpreter):
+def server(interpreter, port=8000):  # Default port is 8000 if not specified
    async_interpreter = AsyncInterpreter(interpreter)

    app = FastAPI()
@@ -226,16 +202,19 @@ def server(interpreter):
        allow_methods=["*"],  # Allow all methods (GET, POST, etc.)
        allow_headers=["*"],  # Allow all headers
    )
-
    @app.post("/settings")
    async def settings(payload: Dict[str, Any]):
        for key, value in payload.items():
            print("Updating interpreter settings with the following:")
            print(key, value)
-            setattr(async_interpreter.interpreter, key, value)
+            if key == "llm" and isinstance(value, dict):
+                for sub_key, sub_value in value.items():
+                    setattr(async_interpreter.interpreter, sub_key, sub_value)
+            else:
+                setattr(async_interpreter.interpreter, key, value)

        return {"status": "success"}
-
+
    @app.websocket("/")
    async def websocket_endpoint(websocket: WebSocket):
        await websocket.accept()
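With the new `/settings` handler, a nested `llm` dict is flattened onto the interpreter object rather than set as a single attribute. A sketch of a client call; the keys inside `llm` are illustrative and not verified against the interpreter's actual attribute names:

```python
import requests  # any HTTP client works; requests is just for illustration

requests.post(
    "http://localhost:8000/settings",  # port assumes server()'s new default
    json={
        "auto_run": True,
        "custom_instructions": "Be brief.",
        # Flattened by the handler: each sub-key is set directly on the interpreter.
        "llm": {"model": "gpt-4", "temperature": 0.0},
    },
)
```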
@@ -261,7 +240,6 @@ async def send_output():
                # we dont send out bytes rn, no TTS
                pass
            elif isinstance(output, dict):
-                print("sending:", output)
                await websocket.send_text(json.dumps(output))

        await asyncio.gather(receive_input(), send_output())
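For the websocket path, a minimal client sketch; it assumes the endpoint accepts JSON text frames in the LMC shape that `input()` expects, which this diff implies but does not show:

```python
import asyncio
import json
import websockets  # third-party client library, assumed available

async def main():
    async with websockets.connect("ws://localhost:8000/") as ws:
        # Bracket the message with start/end flags, mirroring input().
        await ws.send(json.dumps({"role": "user", "type": "message", "start": True}))
        await ws.send(json.dumps({"role": "user", "type": "message", "content": "Hi"}))
        await ws.send(json.dumps({"role": "user", "type": "message", "end": True}))
        while True:  # Ctrl-C to stop; prints streamed LMC output chunks
            print(json.loads(await ws.recv()))

asyncio.run(main())
```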
@@ -271,37 +249,6 @@ async def send_output():
        finally:
            await websocket.close()

-    class Rename(BaseModel):
-        input: str
-
-    @app.post("/rename-chat")
-    async def rename_chat(body_content: Rename, x_api_key: str = Header(None)):
-        print("RENAME CHAT REQUEST in PY 🌙🌙🌙🌙")
-        input_value = body_content.input
-        client = OpenAI(
-            # defaults to os.environ.get("OPENAI_API_KEY")
-            api_key=x_api_key,
-        )
-        try:
-            response = client.chat.completions.create(
-                model="gpt-3.5-turbo",
-                messages=[
-                    {
-                        "role": "user",
-                        "content": f"Given the following chat snippet, create a unique and descriptive title in less than 8 words. Your answer must not be related to customer service.\n\n{input_value}",
-                    }
-                ],
-                temperature=0.3,
-                stream=False,
-            )
-            print(response)
-            completion = response["choices"][0]["message"]["content"]
-            return {"data": {"content": completion}}
-        except Exception as e:
-            print(f"Error: {e}")
-            traceback.print_exc()
-            return {"error": str(e)}
-
-    config = Config(app, host="0.0.0.0", port=8000)
+    config = Config(app, host="0.0.0.0", port=port)
    interpreter.uvicorn_server = Server(config)
    interpreter.uvicorn_server.run()
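End to end, the new signature lets callers pick a port without the argparse block this diff removes. A hypothetical launch sketch; the import path is an assumption:

```python
# Hypothetical usage; "from interpreter import interpreter" mirrors the
# commented-out import removed above and may differ in the real package.
from interpreter import interpreter

server(interpreter, port=63863)  # or just server(interpreter) for the 8000 default
```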