|
17 | 17 | "metadata": {}, |
18 | 18 | "outputs": [], |
19 | 19 | "source": [ |
| 20 | + "import json\n", |
| 21 | + "import os\n", |
20 | 22 | "import requests\n", |
21 | | - "import json \n", |
22 | | - "import openai \n", |
| 23 | + "from openai import AzureOpenAI\n", |
23 | 24 | "\n", |
24 | 25 | "# Load config values\n", |
25 | 26 | "with open(r'config.json') as config_file:\n", |
26 | 27 | " config_details = json.load(config_file)\n", |
27 | 28 | " \n", |
28 | 29 | "\n", |
29 | 30 | "\n", |
30 | | - "# Configure OpenAI environment variables\n", |
31 | | - "openai.api_key = config_details['OPENAI_API_KEY']\n", |
32 | | - "openai.api_base = config_details['OPENAI_API_BASE']\n", |
33 | | - "openai.api_type = \"azure\" \n", |
34 | | - "openai.api_version = config_details['OPENAI_API_VERSION']\n", |
| 31 | + "client = AzureOpenAI(\n", |
| 32 | + " azure_endpoint=config_details[\"AZURE_OPENAI_ENDPOINT\"], # The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\n", |
| 33 | + " api_key=os.getenv(\"AZURE_OPENAI_KEY\"), # The API key for your Azure OpenAI resource.\n", |
| 34 | + " api_version=config_details[\"OPENAI_API_VERSION\"], # This version supports function calling\n", |
| 35 | + ")\n", |
35 | 36 | "\n", |
36 | | - "deployment_name = config_details['DEPLOYMENT_NAME'] # You need to use the 0613 version or newer of gpt-35-turbo or gpt-4 to work with functions\n", |
| 37 | + "model_name = config_details['MODEL_NAME'] # You need to ensure the version of the model you are using supports the function calling feature\n", |
37 | 38 | "\n", |
38 | 39 | "bing_search_subscription_key = config_details['BING_SEARCH_SUBSCRIPTION_KEY']\n", |
39 | 40 | "bing_search_url = \"https://api.bing.microsoft.com/v7.0/search\"" |
|
129 | 130 | " {\"role\": \"user\", \"content\": \"How tall is mount rainier?\"}]\n", |
130 | 131 | "\n", |
131 | 132 | " \n", |
132 | | - "functions = [ \n", |
| 133 | + "tools = [ \n", |
133 | 134 | " {\n", |
134 | | - " \"name\": \"search_bing\",\n", |
135 | | - " \"description\": \"Searches bing to get up to date information from the web\",\n", |
136 | | - " \"parameters\": {\n", |
137 | | - " \"type\": \"object\",\n", |
138 | | - " \"properties\": {\n", |
139 | | - " \"query\": {\n", |
140 | | - " \"type\": \"string\",\n", |
141 | | - " \"description\": \"The search query\",\n", |
142 | | - " }\n", |
| 135 | + " \"type\": \"function\",\n", |
| 136 | + " \"function\": {\n", |
| 137 | + " \"name\": \"search_bing\",\n", |
| 138 | + " \"description\": \"Searches bing to get up to date information from the web\",\n", |
| 139 | + " \"parameters\": {\n", |
| 140 | + " \"type\": \"object\",\n", |
| 141 | + " \"properties\": {\n", |
| 142 | + " \"query\": {\n", |
| 143 | + " \"type\": \"string\",\n", |
| 144 | + " \"description\": \"The search query\",\n", |
| 145 | + " }\n", |
| 146 | + " },\n", |
| 147 | + " \"required\": [\"query\"],\n", |
143 | 148 | " },\n", |
144 | | - " \"required\": [\"query\"],\n", |
145 | | - " },\n", |
| 149 | + " }\n", |
146 | 150 | " }\n", |
| 151 | + " \n", |
147 | 152 | "]\n", |
148 | 153 | "\n", |
149 | | - "response = openai.ChatCompletion.create(\n", |
150 | | - " deployment_id=deployment_name, # You need to use the 0613 version or newer of gpt-35-turbo or gpt-4 to work with functions\n", |
151 | | - " messages=messages,\n", |
152 | | - " functions=functions,\n", |
153 | | - " function_call=\"auto\", \n", |
154 | | - ")\n", |
| 154 | + "response = client.chat.completions.create(\n", |
| 155 | + " model=model_name,\n", |
| 156 | + " messages=messages,\n", |
| 157 | + " tools=tools,\n", |
| 158 | + " tool_choice=\"auto\",\n", |
| 159 | + " )\n", |
155 | 160 | "\n", |
156 | | - "print(response['choices'][0]['message'])" |
| 161 | + "print(response.choices[0].message)" |
157 | 162 | ] |
158 | 163 | }, |
159 | 164 | { |
|
172 | | - "def run_multiturn_conversation(messages, functions, available_functions, deployment_name):\n", |
| 177 | + "def run_multiturn_conversation(messages, tools, available_functions):\n", |
173 | 178 | " # Step 1: send the conversation and available functions to GPT\n", |
174 | 179 | "\n", |
175 | | - " response = openai.ChatCompletion.create(\n", |
176 | | - " deployment_id=deployment_name,\n", |
| 180 | + " response = client.chat.completions.create(\n", |
177 | 181 | " messages=messages,\n", |
178 | | - " functions=functions,\n", |
179 | | - " function_call=\"auto\", \n", |
180 | | - " temperature=0\n", |
| 182 | + " tools=tools,\n", |
| 183 | + " tool_choice=\"auto\",\n", |
| 184 | + " model=model_name,\n", |
| 185 | + " temperature=0,\n", |
181 | 186 | " )\n", |
182 | 187 | "\n", |
183 | 188 | " # Step 2: check if GPT wanted to call a function\n", |
184 | | - " while response[\"choices\"][0][\"finish_reason\"] == 'function_call':\n", |
185 | | - " response_message = response[\"choices\"][0][\"message\"]\n", |
| 189 | + " while response.choices[0].finish_reason == \"tool_calls\":\n", |
| 190 | + " response_message = response.choices[0].message\n", |
186 | 191 | " print(\"Recommended Function call:\")\n", |
187 | | - " print(response_message.get(\"function_call\"))\n", |
| 192 | + " print(response_message.tool_calls[0])\n", |
188 | 193 | " print()\n", |
189 | 194 | " \n", |
190 | 195 | " # Step 3: call the function\n", |
191 | 196 | " # Note: the JSON response may not always be valid; be sure to handle errors\n", |
192 | 197 | " \n", |
193 | | - " function_name = response_message[\"function_call\"][\"name\"]\n", |
| 198 | + " function_name = response_message.tool_calls[0].function.name\n", |
194 | 199 | " \n", |
195 | 200 | " # verify function exists\n", |
196 | 201 | " if function_name not in available_functions:\n", |
197 | 202 | " return \"Function \" + function_name + \" does not exist\"\n", |
198 | 203 | " function_to_call = available_functions[function_name] \n", |
199 | 204 | " \n", |
200 | | - " function_args = json.loads(response_message[\"function_call\"][\"arguments\"])\n", |
| 205 | + " function_args = json.loads(response_message.tool_calls[0].function.arguments)\n", |
201 | 206 | " function_response = function_to_call(**function_args)\n", |
202 | 207 | " \n", |
203 | 208 | " print(\"Output of function call:\")\n", |
|
209 | 214 | " # adding assistant response to messages\n", |
210 | 215 | " messages.append(\n", |
211 | 216 | " {\n", |
212 | | - " \"role\": response_message[\"role\"],\n", |
| 217 | + " \"role\": response_message.role,\n", |
213 | | - "        \"function_call\": {\n", |
214 | | - "            \"name\": response_message[\"function_call\"][\"name\"],\n", |
215 | | - "            \"arguments\": response_message[\"function_call\"][\"arguments\"],\n", |
216 | | - "        },\n", |
| 218 | + "        \"tool_calls\": [\n", |
| 219 | + "            {\n", |
| 220 | + "                \"id\": response_message.tool_calls[0].id,\n", |
| 221 | + "                \"type\": \"function\",\n", |
| 222 | + "                \"function\": {\n", |
| 223 | + "                    \"name\": response_message.tool_calls[0].function.name,\n", |
| 224 | + "                    \"arguments\": response_message.tool_calls[0].function.arguments,\n", |
| 225 | + "                },\n", |
| 226 | + "            }\n", |
| 227 | + "        ],\n", |
217 | 222 | " \"content\": None\n", |
218 | 223 | " }\n", |
|
232 | 237 | " print(message)\n", |
233 | 238 | " print()\n", |
234 | 239 | "\n", |
235 | | - " response = openai.ChatCompletion.create(\n", |
| 240 | + " response = client.chat.completions.create(\n", |
236 | 241 | " messages=messages,\n", |
237 | | - " deployment_id=deployment_name,\n", |
238 | | - " function_call=\"auto\",\n", |
239 | | - " functions=functions,\n", |
240 | | - " temperature=0\n", |
| 242 | + " tools=tools,\n", |
| 243 | + " tool_choice=\"auto\",\n", |
| 244 | + " model=model_name,\n", |
| 245 | + " temperature=0,\n", |
241 | 246 | " ) # get a new response from GPT where it can see the function response\n", |
242 | 247 | "\n", |
243 | 248 | " return response" |
|
284 | 289 | "\n", |
285 | 290 | "available_functions = {'search_bing': search}\n", |
286 | 291 | "\n", |
287 | | - "result = run_multiturn_conversation(messages, functions, available_functions, deployment_name)\n", |
| 292 | + "result = run_multiturn_conversation(messages, tools, available_functions)\n", |
288 | 293 | "\n", |
289 | 294 | "print(\"Final response:\")\n", |
290 | | - "print(result['choices'][0]['message']['content'])" |
| 295 | + "print(result.choices[0].message)" |
291 | 296 | ] |
292 | 297 | }, |
293 | 298 | { |
|
0 commit comments