@@ -11,7 +11,7 @@ ms.author: mbullwin
11
11
ms.date: 05/21/2024
12
12
---
13
13
14
- [ Source code] ( https://github.com/openai/openai-node ) | [ Package (npm)] ( https://www.npmjs.com/package/openai )
14
+ [Reference documentation](https://platform.openai.com/docs/api-reference/chat) | [Source code](https://github.com/openai/openai-node) | [Package (npm)](https://www.npmjs.com/package/openai) | [Samples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai/samples)
15
15
16
16
> [!NOTE]
17
17
> This article has been updated to use the [latest OpenAI npm package](https://www.npmjs.com/package/openai), which now fully supports Azure OpenAI. If you are looking for code examples for the legacy Azure OpenAI JavaScript SDK, they are currently still [available in this repo](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai/samples/v2-beta/javascript).
@@ -69,47 +69,61 @@ Open a command prompt where you want the new project, and create a new file name
69
69
## [ ** TypeScript** ] ( #tab/typescript )
70
70
71
71
``` typescript
72
- import { AzureOpenAI } from " openai" ;
73
- import { type ChatCompletion , type ChatCompletionCreateParamsNonStreaming } from " openai/resources/index" ;
74
72
import " dotenv/config" ;
73
+ import { AzureOpenAI } from " openai" ;
74
+ import type {
75
+ ChatCompletion ,
76
+ ChatCompletionCreateParamsNonStreaming ,
77
+ } from " openai/resources/index" ;
75
78
76
79
// You will need to set these environment variables or edit the following values
77
80
const endpoint = process .env [" AZURE_OPENAI_ENDPOINT" ] || " <endpoint>" ;
78
81
const apiKey = process .env [" AZURE_OPENAI_API_KEY" ] || " <api key>" ;
79
82
80
- const apiVersion = " 2024-05-01-preview" ;
81
- const deployment = " gpt-4o" ; // This must match your deployment name.
83
+ // Required Azure OpenAI deployment name and API version
84
+ const apiVersion = " 2024-07-01-preview" ;
85
+ const deploymentName = " gpt-4" ; // This must match your deployment name.
82
86
83
87
function getClient(): AzureOpenAI {
84
- return new AzureOpenAI ({ endpoint , apiKey , apiVersion , deployment });
88
+ return new AzureOpenAI ({
89
+ endpoint ,
90
+ apiKey ,
91
+ apiVersion ,
92
+ deployment: deploymentName ,
93
+ });
85
94
}
86
95
87
96
function createMessages(): ChatCompletionCreateParamsNonStreaming {
88
- return {
89
- messages: [
90
- { role: " system" , content: " You are a helpful assistant." },
91
- { role: " user" , content: " Does Azure OpenAI support customer managed keys?" },
92
- { role: " assistant" , content: " Yes, customer managed keys are supported by Azure OpenAI?" },
93
- { role: " user" , content: " Do other Azure AI services support this too?" },
94
- ],
95
- model: " "
96
- }
97
+ return {
98
+ messages: [
99
+ { role: " system" , content: " You are a helpful assistant." },
100
+ {
101
+ role: " user" ,
102
+ content: " Does Azure OpenAI support customer managed keys?" ,
103
+ },
104
+ {
105
+ role: " assistant" ,
106
+ content: " Yes, customer managed keys are supported by Azure OpenAI?" ,
107
+ },
108
+ { role: " user" , content: " Do other Azure AI services support this too?" },
109
+ ],
110
+ model: " " ,
111
+ };
97
112
}
98
- async function getChoices (completion : ChatCompletion ): Promise <void > {
99
- for (const choice of completion .choices ) {
100
- console .log (choice .message );
101
- }
113
+ async function printChoices (completion : ChatCompletion ): Promise <void > {
114
+ for (const choice of completion .choices ) {
115
+ console .log (choice .message );
116
+ }
102
117
}
103
118
export async function main() {
104
-
105
- const client = getClient ();
106
- const messages = createMessages ();
107
- const result = await client .chat .completions .create (messages );
108
- await getChoices (result );
119
+ const client = getClient ();
120
+ const messages = createMessages ();
121
+ const result = await client .chat .completions .create (messages );
122
+ await printChoices (result );
109
123
}
110
124
111
125
// Entry point — surface any failure from the async sample on stderr.
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
114
128
```
115
129
@@ -192,47 +206,69 @@ node.exe ChatCompletion.js
192
206
## [ ** TypeScript** ] ( #tab/typescript )
193
207
194
208
``` typescript
195
- import { AzureOpenAI } from " openai" ;
196
- import { type ChatCompletionCreateParamsNonStreaming , type ChatCompletion } from " openai/resources/index" ;
197
- import { DefaultAzureCredential , getBearerTokenProvider } from " @azure/identity" ;
209
+ import {
210
+ DefaultAzureCredential ,
211
+ getBearerTokenProvider ,
212
+ } from " @azure/identity" ;
198
213
import " dotenv/config" ;
214
+ import { AzureOpenAI } from " openai" ;
215
+ import type {
216
+ ChatCompletion ,
217
+ ChatCompletionCreateParamsNonStreaming ,
218
+ } from " openai/resources/index" ;
199
219
200
- const apiVersion = " 2024-05-01-preview" ;
201
- const deployment = " gpt-4o" ; // This must match your deployment name.
220
+ // You will need to set these environment variables or edit the following values
221
+ const endpoint = process .env [" AZURE_OPENAI_ENDPOINT" ] || " <endpoint>" ;
222
+
223
+ // Required Azure OpenAI deployment name and API version
224
+ const apiVersion = " 2024-07-01-preview" ;
225
+ const deploymentName = " gpt-4o" ; // This must match your deployment name.
202
226
203
227
function getClient(): AzureOpenAI {
204
- const scope = " https://cognitiveservices.azure.com/.default" ;
205
- const azureADTokenProvider = getBearerTokenProvider (new DefaultAzureCredential (), scope );
206
- return new AzureOpenAI ({ azureADTokenProvider , deployment , apiVersion });
207
-
208
- }
228
+ const scope = " https://cognitiveservices.azure.com/.default" ;
229
+ const azureADTokenProvider = getBearerTokenProvider (
230
+ new DefaultAzureCredential (),
231
+ scope
232
+ );
233
+ return new AzureOpenAI ({
234
+ endpoint ,
235
+ azureADTokenProvider ,
236
+ deployment: deploymentName ,
237
+ apiVersion ,
238
+ });
239
+ }
209
240
210
241
function createMessages(): ChatCompletionCreateParamsNonStreaming {
211
- return {
212
- messages: [
213
- { role: " system" , content: " You are a helpful assistant." },
214
- { role: " user" , content: " Does Azure OpenAI support customer managed keys?" },
215
- { role: " assistant" , content: " Yes, customer managed keys are supported by Azure OpenAI?" },
216
- { role: " user" , content: " Do other Azure AI services support this too?" },
217
- ],
218
- model: " "
219
- }
242
+ return {
243
+ messages: [
244
+ { role: " system" , content: " You are a helpful assistant." },
245
+ {
246
+ role: " user" ,
247
+ content: " Does Azure OpenAI support customer managed keys?" ,
248
+ },
249
+ {
250
+ role: " assistant" ,
251
+ content: " Yes, customer managed keys are supported by Azure OpenAI?" ,
252
+ },
253
+ { role: " user" , content: " Do other Azure AI services support this too?" },
254
+ ],
255
+ model: " " ,
256
+ };
220
257
}
221
- async function getChoices (completion : ChatCompletion ): Promise <void > {
222
- for (const choice of completion .choices ) {
223
- console .log (choice .message );
224
- }
258
+ async function printChoices (completion : ChatCompletion ): Promise <void > {
259
+ for (const choice of completion .choices ) {
260
+ console .log (choice .message );
261
+ }
225
262
}
226
263
export async function main() {
227
-
228
- const client = getClient ();
229
- const messages = createMessages ();
230
- const result = await client .chat .completions .create (messages );
231
- await getChoices (result );
264
+ const client = getClient ();
265
+ const messages = createMessages ();
266
+ const result = await client .chat .completions .create (messages );
267
+ await printChoices (result );
232
268
}
233
269
234
270
// Entry point — surface any failure from the async sample on stderr.
main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
237
273
```
238
274
0 commit comments