# Load configuration from a local .env file into the process environment.
load_dotenv('.env')

# Provider selection: "openai" (default, api.openai.com-style) or "azure"
# (Azure OpenAI deployment-based endpoints). Checked in get_code().
OPENAI_API_TYPE = os.environ.get("OPENAI_API_TYPE", "openai")
# API key; may be overridden per-request by a user-supplied key.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
# Base URL of the API host (no trailing path); for Azure this is the
# resource endpoint, e.g. https://<resource>.openai.azure.com
OPENAI_BASE_URL = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com")
# Azure-only settings: REST api-version query parameter and deployment name.
OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION", "2023-03-15-preview")
AZURE_OPENAI_DEPLOYMENT = os.environ.get("AZURE_OPENAI_DEPLOYMENT", "")


# Working directory for user files; created eagerly so uploads never fail
# on a missing directory.
UPLOAD_FOLDER = 'workspace/'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
@@ -55,32 +59,51 @@ def allowed_file(filename):
|
async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
    """Request generated Python code from the configured chat-completion API.

    Builds a single-user-message chat request (prefixed with the running
    conversation history from ``message_buffer``) and POSTs it to either the
    OpenAI endpoint or an Azure OpenAI deployment, depending on
    ``OPENAI_API_TYPE``.

    Args:
        user_prompt: Natural-language description of the code to generate.
        user_openai_key: Optional per-request API key; overrides the
            module-level ``OPENAI_API_KEY`` when provided.
        model: Chat model name (ignored by the Azure path, where the model is
            fixed by ``AZURE_OPENAI_DEPLOYMENT``).

    Returns:
        ``(error_message, 500)`` when ``OPENAI_API_TYPE`` is unrecognized;
        otherwise ``None`` (see NOTE below).

    NOTE(review): the HTTP ``response`` is built but never inspected or
    returned in this function as written — confirm downstream handling
    before relying on a return value.
    NOTE(review): ``requests.post`` is a blocking call inside an ``async``
    function, so it will stall the event loop for the duration of the
    request — consider an async HTTP client if concurrency matters.
    """
    prompt = f"First, here is a history of what I asked you to do earlier. The actual prompt follows after ENDOFHISTORY. History:\n\n{message_buffer.get_string()}ENDOFHISTORY.\n\nWrite Python code that does the following: \n\n{user_prompt}\n\nNote, the code is going to be executed in a Jupyter Python kernel.\n\nLast instruction, and this is the most important, just return code. No other outputs, as your full response will directly be executed in the kernel. \n\nTeacher mode: if you want to give a download link, just print it as <a href='/download?file=INSERT_FILENAME_HERE'>Download file</a>. Replace INSERT_FILENAME_HERE with the actual filename. So just print that HTML to stdout. No actual downloading of files!"

    temperature = 0.7
    message_array = [
        {
            "role": "user",
            "content": prompt,
        },
    ]

    # A key supplied with the request takes precedence over the server's key.
    final_openai_key = OPENAI_API_KEY
    if user_openai_key:
        final_openai_key = user_openai_key

    if OPENAI_API_TYPE == "openai":
        # Standard OpenAI API: model goes in the body, key in a Bearer header.
        data = {
            "model": model,
            "messages": message_array,
            "temperature": temperature,
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {final_openai_key}",
        }

        response = requests.post(
            f"{OPENAI_BASE_URL}/v1/chat/completions",
            data=json.dumps(data),
            headers=headers,
        )
    elif OPENAI_API_TYPE == "azure":
        # Azure OpenAI: model is selected by the deployment in the URL path,
        # the API version is a query parameter, and the key uses an
        # "api-key" header instead of a Bearer token.
        data = {
            "messages": message_array,
            "temperature": temperature,
        }
        headers = {
            "Content-Type": "application/json",
            "api-key": f"{final_openai_key}",
        }

        response = requests.post(
            f"{OPENAI_BASE_URL}/openai/deployments/{AZURE_OPENAI_DEPLOYMENT}/chat/completions?api-version={OPENAI_API_VERSION}",
            data=json.dumps(data),
            headers=headers,
        )
    else:
        # Fixed: the message previously referenced "OPENAI_PROVIDER", but the
        # setting actually consulted above is OPENAI_API_TYPE.
        return "Error: Invalid OPENAI_API_TYPE", 500
|
85 | 108 | def extract_code(text):
|
86 | 109 | # Match triple backtick blocks first
|
|
0 commit comments