@@ -737,11 +737,11 @@ async def openai_compatible_generator(run_code):
             if chunk["type"] == "message" and "content" in chunk:
                 output_content = chunk["content"]
             if chunk["type"] == "code" and "start" in chunk:
-                output_content = " "
+                output_content = "```" + chunk["format"] + "\n"
             if chunk["type"] == "code" and "content" in chunk:
-                output_content = (
-                    f"""<unvoiced code="{chunk["content"]}"></unvoiced>"""
-                )
+                output_content = chunk["content"]
+            if chunk["type"] == "code" and "end" in chunk:
+                output_content = "\n```\n"
 
             if output_content:
                 await asyncio.sleep(0)
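Streamed back-to-back, these deltas now reassemble into a standard fenced code block instead of the old `<unvoiced>` markup. A minimal sketch of the mapping — the chunk dicts below are illustrative, and only the keys used above (`type`, `start`, `format`, `content`, `end`) come from the diff:

```python
# Sketch: how a sequence of interpreter chunks maps to streamed markdown.
chunks = [
    {"type": "code", "start": True, "format": "python"},
    {"type": "code", "content": "print('hi')"},
    {"type": "code", "end": True},
]

parts = []
for chunk in chunks:
    if chunk["type"] == "code" and "start" in chunk:
        parts.append("```" + chunk["format"] + "\n")
    if chunk["type"] == "code" and "content" in chunk:
        parts.append(chunk["content"])
    if chunk["type"] == "code" and "end" in chunk:
        parts.append("\n```\n")

print("".join(parts))
# ```python
# print('hi')
# ```
```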
@@ -776,6 +776,16 @@ async def openai_compatible_generator(run_code):
                 chunk["type"] == "confirmation"
                 and async_interpreter.auto_run == False
             ):
+                await asyncio.sleep(0)
+                output_content = "Do you want to run this code?"
+                output_chunk = {
+                    "id": i,
+                    "object": "chat.completion.chunk",
+                    "created": time.time(),
+                    "model": "open-interpreter",
+                    "choices": [{"delta": {"content": output_content}}],
+                }
+                yield f"data: {json.dumps(output_chunk)}\n\n"
                 break
 
             if async_interpreter.stop_event.is_set():
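With `auto_run` off, the generator now surfaces the confirmation as an ordinary assistant delta rather than swallowing it. Roughly what a client sees on the wire — the event below mirrors the chunk built above, with placeholder `id`/`created` values:

```python
import json

# One SSE line as emitted by the yield above (values are placeholders).
sse_line = (
    'data: {"id": 0, "object": "chat.completion.chunk", '
    '"created": 1700000000.0, "model": "open-interpreter", '
    '"choices": [{"delta": {"content": "Do you want to run this code?"}}]}'
)

if sse_line.startswith("data: "):
    event = json.loads(sse_line[len("data: "):])
    delta = event["choices"][0]["delta"].get("content", "")
    if delta == "Do you want to run this code?":
        # The client should reply "yes" to trigger the run_code path
        # added later in this commit.
        print("Server is asking for confirmation:", delta)
```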
@@ -786,11 +796,11 @@ async def openai_compatible_generator(run_code):
             if chunk["type"] == "message" and "content" in chunk:
                 output_content = chunk["content"]
             if chunk["type"] == "code" and "start" in chunk:
-                output_content = " "
+                output_content = "```" + chunk["format"] + "\n"
             if chunk["type"] == "code" and "content" in chunk:
-                output_content = (
-                    f"""<unvoiced code="{chunk["content"]}"></unvoiced>"""
-                )
+                output_content = chunk["content"]
+            if chunk["type"] == "code" and "end" in chunk:
+                output_content = "\n```\n"
 
             if output_content:
                 await asyncio.sleep(0)
@@ -806,18 +816,6 @@ async def openai_compatible_generator(run_code):
         if made_chunk:
             break
 
-    if async_interpreter.messages[-1]["type"] == "code":
-        await asyncio.sleep(0)
-        output_content = "{CODE_FINISHED}"
-        output_chunk = {
-            "id": i,
-            "object": "chat.completion.chunk",
-            "created": time.time(),
-            "model": "open-interpreter",
-            "choices": [{"delta": {"content": output_content}}],
-        }
-        yield f"data: {json.dumps(output_chunk)}\n\n"
-
 @router.post("/openai/chat/completions")
 async def chat_completion(request: ChatCompletionRequest):
     global last_start_time
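For reference, a hedged sketch of calling this route with streaming enabled — the host/port and the `model`/`messages`/`stream` fields are assumptions based on the standard OpenAI request shape that `ChatCompletionRequest` mirrors:

```python
import requests  # assumed client library; the server route comes from the diff

# Stream a conversation against the OpenAI-compatible endpoint.
resp = requests.post(
    "http://localhost:8000/openai/chat/completions",  # assumed host/port
    json={
        "model": "open-interpreter",
        "messages": [{"role": "user", "content": "Plot y = x^2"}],
        "stream": True,
    },
    stream=True,
)
for line in resp.iter_lines():
    if line.startswith(b"data: "):
        print(line.decode())  # chat.completion.chunk deltas, SSE-framed
```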
@@ -851,7 +849,14 @@ async def chat_completion(request: ChatCompletionRequest):
         async_interpreter.auto_run = False
         return
 
-    if type(last_message.content) == str:
+    run_code = False
+    if (
+        async_interpreter.messages
+        and async_interpreter.messages[-1]["type"] == "code"
+        and last_message.content.lower().strip(".!?").strip() == "yes"
+    ):
+        run_code = True
+    elif type(last_message.content) == str:
         async_interpreter.messages.append(
             {
                 "role": "user",
@@ -890,11 +895,6 @@ async def chat_completion(request: ChatCompletionRequest):
                 }
             )
 
-        run_code = False
-        if last_message.content == "{RUN}":
-            run_code = True
-            # Remove that {RUN} message that would have just been added
-            async_interpreter.messages = async_interpreter.messages[:-1]
     else:
         if async_interpreter.context_mode:
             # In context mode, we only respond if we received a {START} message