import threading

###
- # from pynput import keyboard
+ from pynput import keyboard
# from RealtimeTTS import TextToAudioStream, OpenAIEngine, CoquiEngine
# from RealtimeSTT import AudioToTextRecorder
# from beeper import Beeper
from pydantic import BaseModel
from uvicorn import Config, Server

- # import argparse
- # from profiles.default import interpreter
- # from interpreter import interpreter
-
- # Parse command line arguments for port number
- # parser = argparse.ArgumentParser(description="FastAPI server.")
- # parser.add_argument("--port", type=int, default=63863, help="Port to run on.")
- # args = parser.parse_args()
-
-
- # interpreter.tts = "openai"
-
-
class Settings(BaseModel):
    auto_run: bool
    custom_instructions: str
@@ -80,12 +67,9 @@ def __init__(self, interpreter):
            False  # Tracks whether interpreter is trying to use the keyboard
        )

-         # print("oksskk")
        # self.loop = asyncio.get_event_loop()
-         # print("okkk")

    async def _add_to_queue(self, queue, item):
-         print(f"Adding item to output", item)
        await queue.put(item)

    async def clear_queue(self, queue):
@@ -117,7 +101,6 @@ async def input(self, chunk):
            self._last_lmc_start_flag = time.time()
            # self.interpreter.computer.terminal.stop()  # Stop any code execution... maybe we should make interpreter.stop()?
        elif "end" in chunk:
-             print("yep")
            asyncio.create_task(self.run())
        else:
            await self._add_to_queue(self._input_queue, chunk)
@@ -126,15 +109,12 @@ def add_to_output_queue_sync(self, chunk):
        """
        Synchronous function to add a chunk to the output queue.
        """
-         print("ADDING TO QUEUE:", chunk)
        asyncio.create_task(self._add_to_queue(self._output_queue, chunk))

    async def run(self):
        """
        Runs OI on the audio bytes submitted to the input. Will add streaming LMC chunks to the _output_queue.
        """
-         print("heyyyy")
-         # interpreter.messages = self.active_chat_messages
        # self.beeper.start()

        # self.stt.stop()
@@ -147,10 +127,9 @@ async def run(self):
        def generate(message):
            last_lmc_start_flag = self._last_lmc_start_flag
            # interpreter.messages = self.active_chat_messages
-             print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
+             # print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
            print("passing this in:", message)
            for chunk in self.interpreter.chat(message, display=False, stream=True):
-                 print("FROM INTERPRETER. CHUNK:", chunk)

                if self._last_lmc_start_flag != last_lmc_start_flag:
                    # self.beeper.stop()
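Side note on the stream consumed here: `interpreter.chat(message, display=False, stream=True)` yields LMC-style dict chunks. A minimal standalone sketch of what `generate` iterates over (the exact chunk fields in the comments are typical LMC shapes, not guaranteed by this diff):

import json
from interpreter import interpreter

for chunk in interpreter.chat("What is 2 + 2?", display=False, stream=True):
    # Each chunk is an LMC-style dict such as
    # {"role": "assistant", "type": "message", "content": "4"},
    # with {"start": True} / {"end": True} markers framing each block.
    print(json.dumps(chunk))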
@@ -215,7 +194,7 @@ async def output(self):
        return await self._output_queue.get()


- def server(interpreter):
+ def server(interpreter, port=8000):  # Default port is 8000 if not specified
    async_interpreter = AsyncInterpreter(interpreter)

    app = FastAPI()
@@ -226,16 +205,19 @@ def server(interpreter):
        allow_methods=["*"],  # Allow all methods (GET, POST, etc.)
        allow_headers=["*"],  # Allow all headers
    )
-
    @app.post("/settings")
    async def settings(payload: Dict[str, Any]):
        for key, value in payload.items():
            print("Updating interpreter settings with the following:")
            print(key, value)
-             setattr(async_interpreter.interpreter, key, value)
+             if key == "llm" and isinstance(value, dict):
+                 for sub_key, sub_value in value.items():
+                     setattr(async_interpreter.interpreter, sub_key, sub_value)
+             else:
+                 setattr(async_interpreter.interpreter, key, value)

        return {"status": "success"}
-
+
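Worth spelling out what the new branching accepts: a flat key is set directly on the interpreter, while an "llm" dict is unpacked and each sub-key applied individually. A minimal client sketch (the URL, port, and the specific setting names are illustrative assumptions, not part of this diff):

import requests

payload = {
    "auto_run": True,                                # flat key, set directly
    "llm": {"model": "gpt-4o", "temperature": 0.0},  # dict, unpacked per sub-key
}
response = requests.post("http://localhost:8000/settings", json=payload)
print(response.json())  # expected: {"status": "success"}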
    @app.websocket("/")
    async def websocket_endpoint(websocket: WebSocket):
        await websocket.accept()
@@ -261,7 +243,6 @@ async def send_output():
                    # we dont send out bytes rn, no TTS
                    pass
                elif isinstance(output, dict):
-                     print("sending:", output)
                    await websocket.send_text(json.dumps(output))

            await asyncio.gather(receive_input(), send_output())
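For anyone testing this endpoint by hand, a hypothetical client sketch (assumes the third-party `websockets` package and the `start`/`content`/`end` chunk shape handled by `input()` above; everything here is illustrative, not part of this diff):

import asyncio
import json

import websockets  # third-party client library, an assumption


async def main():
    async with websockets.connect("ws://localhost:8000/") as ws:
        # Frame a user message with start/end flags, as input() expects.
        await ws.send(json.dumps({"role": "user", "type": "message", "start": True}))
        await ws.send(json.dumps({"role": "user", "type": "message", "content": "Hi!"}))
        await ws.send(json.dumps({"role": "user", "type": "message", "end": True}))
        while True:
            print(json.loads(await ws.recv()))  # streamed LMC chunks from send_output()


asyncio.run(main())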
@@ -271,37 +252,6 @@ async def send_output():
        finally:
            await websocket.close()

-     class Rename(BaseModel):
-         input: str
-
-     @app.post("/rename-chat")
-     async def rename_chat(body_content: Rename, x_api_key: str = Header(None)):
-         print("RENAME CHAT REQUEST in PY 🌙🌙🌙🌙")
-         input_value = body_content.input
-         client = OpenAI(
-             # defaults to os.environ.get("OPENAI_API_KEY")
-             api_key=x_api_key,
-         )
-         try:
-             response = client.chat.completions.create(
-                 model="gpt-3.5-turbo",
-                 messages=[
-                     {
-                         "role": "user",
-                         "content": f"Given the following chat snippet, create a unique and descriptive title in less than 8 words. Your answer must not be related to customer service.\n\n{input_value}",
-                     }
-                 ],
-                 temperature=0.3,
-                 stream=False,
-             )
-             print(response)
-             completion = response["choices"][0]["message"]["content"]
-             return {"data": {"content": completion}}
-         except Exception as e:
-             print(f"Error: {e}")
-             traceback.print_exc()
-             return {"error": str(e)}
-
-     config = Config(app, host="0.0.0.0", port=8000)
+     config = Config(app, host="0.0.0.0", port=port)

    interpreter.uvicorn_server = Server(config)
    interpreter.uvicorn_server.run()
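Net effect of the signature change: the port is now injectable rather than hardcoded at 8000. A minimal usage sketch (the import path for `server` and the port value are assumptions):

from interpreter import interpreter
from async_core import server  # module path is an assumption

# Start the FastAPI/uvicorn server on a custom port instead of the old hardcoded 8000.
server(interpreter, port=7777)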