 import re
 import logging
 import sys
+import openai
 import pandas as pd

 from collections import deque
@@ -18,12 +19,13 @@

 load_dotenv('.env')

-OPENAI_API_TYPE = os.environ.get("OPENAI_API_TYPE", "openai")
-OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
-OPENAI_BASE_URL = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com")
-OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION", "2023-03-15-preview")
+openai.api_base = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com")
+openai.api_type = os.environ.get("OPENAI_API_TYPE", "openai")
+openai.api_version = os.environ.get("OPENAI_API_VERSION", "2023-03-15-preview")
+openai.api_key = os.environ.get("OPENAI_API_KEY", "")
+openai.log = os.getenv("OPENAI_API_LOGLEVEL", "")
 AZURE_OPENAI_DEPLOYMENT = os.environ.get("AZURE_OPENAI_DEPLOYMENT", "")
-
+OPENAI_EXTRA_HEADERS = json.loads(os.environ.get("OPENAI_EXTRA_HEADERS", "{}"))

 UPLOAD_FOLDER = 'workspace/'
 os.makedirs(UPLOAD_FOLDER, exist_ok=True)
@@ -114,52 +116,43 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
 If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files)

 Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: <a href='/download?file=INSERT_FILENAME_HERE'>Download file</a>. Replace INSERT_FILENAME_HERE with the actual filename."""
-    temperature = 0.7
-    message_array = [
-        {
-            "role": "user",
-            "content": prompt,
-        },
-    ]
-
-    final_openai_key = OPENAI_API_KEY
+
     if user_openai_key:
-        final_openai_key = user_openai_key
-
-    if OPENAI_API_TYPE == "openai":
-        data = {
-            "model": model,
-            "messages": message_array,
-            "temperature": temperature,
-        }
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {final_openai_key}",
-        }
-
-        response = requests.post(
-            f"{OPENAI_BASE_URL}/v1/chat/completions",
-            data=json.dumps(data),
-            headers=headers,
-        )
-    elif OPENAI_API_TYPE == "azure":
-        data = {
-            "messages": message_array,
-            "temperature": temperature,
-        }
-        headers = {
-            "Content-Type": "application/json",
-            "api-key": f"{final_openai_key}",
-        }
-
-        response = requests.post(
-            f"{OPENAI_BASE_URL}/openai/deployments/{AZURE_OPENAI_DEPLOYMENT}/chat/completions?api-version={OPENAI_API_VERSION}",
-            data=json.dumps(data),
-            headers=headers,
-        )
+        openai.api_key = user_openai_key
+
+    arguments = dict(
+        temperature=0.7,
+        headers=OPENAI_EXTRA_HEADERS,
+        messages=[
+            # {"role": "system", "content": system},
+            {"role": "user", "content": prompt},
+        ]
+    )
+
+    if openai.api_type == 'openai':
+        arguments["model"] = model
+    elif openai.api_type == 'azure':
+        arguments["deployment_id"] = AZURE_OPENAI_DEPLOYMENT
     else:
-        return None, "Error: Invalid OPENAI_PROVIDER", 500
+        return None, f"Error: Invalid OPENAI_PROVIDER: {openai.api_type}", 500
+
+    try:
+        result_GPT = openai.ChatCompletion.create(**arguments)
+
+        if 'error' in result_GPT:
+            raise openai.APIError(code=result_GPT.error.code, message=result_GPT.error.message)
+
+        if result_GPT.choices[0].finish_reason == 'content_filter':
+            raise openai.APIError('Content Filter')

+    except openai.OpenAIError as e:
+        return None, f"Error from API: {e}", 500
+
+    try:
+        content = result_GPT.choices[0].message.content
+
+    except AttributeError:
+        return None, f"Malformed answer from API: {result_GPT}", 500

     def extract_code(text):
         # Match triple backtick blocks first
@@ -179,11 +172,6 @@ def extract_non_code(text):
         text = re.sub(r'`(.+?)`', '', text, flags=re.DOTALL)
         return text.strip()

-
-    if response.status_code != 200:
-        return None, "Error: " + response.text, 500
-
-    content = response.json()["choices"][0]["message"]["content"]
     return extract_code(content), extract_non_code(content), 200

 # We know this Flask app is for local use. So we can disable the verbose Werkzeug logger