@@ -6,7 +6,7 @@ manager: nitinme
ms.service: azure-ai-foundry
ms.subservice: azure-ai-foundry-openai
ms.topic: conceptual
-ms.date: 10/01/2025
+ms.date: 10/06/2025
author: mrbullwinkle
ms.author: mbullwin
recommendations: false
@@ -31,6 +31,7 @@ Starting in August 2025, you can now opt in to our next generation v1 Azure Open
- Faster API release cycle with new features launching more frequently.
- OpenAI client support with minimal code changes to swap between OpenAI and Azure OpenAI when using key-based authentication.
- OpenAI client support for token-based authentication and automatic token refresh without the need to take a dependency on a separate Azure OpenAI client.
+- Make chat completions calls with models from other providers, like DeepSeek and Grok, that support the v1 chat completions syntax.

Access to new API calls that are still in preview will be controlled by passing feature-specific preview headers, allowing you to opt in to the features you want without having to swap API versions. Alternatively, some features will indicate preview status through their API path and don't require an additional header.
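
For illustration, here's a minimal sketch of this opt-in pattern with the OpenAI Python client. The `my-preview-feature` header name and its `preview` value are hypothetical placeholders for whatever header a given preview feature documents; `default_headers` simply attaches that header to every request the client makes.

```python
from openai import OpenAI

# Sketch only: "my-preview-feature" is a hypothetical placeholder, not a real
# feature header. Substitute the header documented for the preview feature you
# want to opt in to.
client = OpenAI(
    base_url="https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/",
    api_key="YOUR-API-KEY",
    default_headers={"my-preview-feature": "preview"},
)
```

The same header can also be sent on a single request by passing `extra_headers` to an individual call instead of setting `default_headers` on the client.
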
@@ -271,70 +272,199 @@ curl -X POST https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/responses \
}'
```
-# [Output](#tab/output)
-
-```json
-{
-  "id": "resp_682f7eb5dc408190b491cbbe57be2fbf0f98d661c3dc276d",
-  "created_at": 1747943093.0,
-  "error": null,
-  "incomplete_details": null,
-  "instructions": null,
-  "metadata": {},
-  "model": "gpt-4.1-nano",
-  "object": "response",
-  "output": [
-    {
-      "id": "msg_682f7eb61d908190926a004c15c5ddd00f98d661c3dc276d",
-      "content": [
-        {
-          "annotations": [],
-          "text": "Hello! It looks like you've sent a test message. How can I assist you today?",
-          "type": "output_text"
+---
+
+## Model support
+
+For Azure OpenAI models we recommend using the [Responses API](./supported-languages.md). However, the v1 API also lets you make chat completions calls with models from other providers, like DeepSeek and Grok, that support the OpenAI v1 chat completions syntax.
+
+`base_url` accepts both the `https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/` and `https://YOUR-RESOURCE-NAME.services.ai.azure.com/openai/v1/` formats.
+
+# [Python](#tab/python)
+
+```python
+from openai import OpenAI
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+
+token_provider = get_bearer_token_provider(
+    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+)
+
+client = OpenAI(
+    base_url="https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/",
+    api_key=token_provider,
+)
+
+completion = client.chat.completions.create(
+    model="grok-3-mini",  # Replace with your model deployment name.
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "Tell me about the attention is all you need paper"}
+    ]
+)
+
+# print(completion.choices[0].message)
+print(completion.model_dump_json(indent=2))
+```
+
+# [C#](#tab/dotnet)
+
+```csharp
+using Azure.Identity;
+using OpenAI;
+using OpenAI.Chat;
+using System.ClientModel.Primitives;
+
+#pragma warning disable OPENAI001
+
+BearerTokenPolicy tokenPolicy = new(
+    new DefaultAzureCredential(),
+    "https://cognitiveservices.azure.com/.default");
+
+ChatClient client = new(
+    model: "grok-3-mini", // Replace with your model deployment name.
+    authenticationPolicy: tokenPolicy,
+    options: new OpenAIClientOptions() {
+        Endpoint = new Uri("https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1")
+    }
+);
+
+ChatCompletion completion = client.CompleteChat("Tell me about the attention is all you need paper");
+
+Console.WriteLine($"[ASSISTANT]: {completion.Content[0].Text}");
+```
+
+# [JavaScript](#tab/javascript)
+
+```javascript
+import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
+import { OpenAI } from "openai";
+
+const tokenProvider = getBearerTokenProvider(
+    new DefaultAzureCredential(),
+    'https://cognitiveservices.azure.com/.default');
+
+const client = new OpenAI({
+    baseURL: "https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/",
+    apiKey: tokenProvider
+});
+
+const messages = [
+    { role: 'system', content: 'You are a helpful assistant.' },
+    { role: 'user', content: 'Tell me about the attention is all you need paper' }
+];
+
+// Make the API request with top-level await
+const result = await client.chat.completions.create({
+    messages,
+    model: 'grok-3-mini', // model deployment name
+    max_tokens: 100
+});
+
+// Print the full response
+console.log('Full response:', result);
+
+// Print just the message content from the response
+console.log('Response content:', result.choices[0].message.content);
+```
+
+# [Go](#tab/go)
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/openai/openai-go/v2"
+    "github.com/openai/openai-go/v2/azure"
+    "github.com/openai/openai-go/v2/option"
+)
+
+func main() {
+    // Create an Azure credential
+    tokenCredential, err := azidentity.NewDefaultAzureCredential(nil)
+    if err != nil {
+        panic(fmt.Sprintf("Failed to create credential: %v", err))
+    }
+
+    // Create a client with the Azure OpenAI endpoint and token credential
+    client := openai.NewClient(
+        option.WithBaseURL("https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/"),
+        azure.WithTokenCredential(tokenCredential),
+    )
+
+    // Make a chat completion request
+    chatCompletion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
+        Messages: []openai.ChatCompletionMessageParamUnion{
+            openai.UserMessage("Explain what the bitter lesson is?"),
+        },
+        Model: "grok-3-mini", // Use your deployed model name on Azure
+    })
+    if err != nil {
+        panic(err.Error())
+    }
+
+    fmt.Println(chatCompletion.Choices[0].Message.Content)
+}
+```
+
+# [Java](#tab/Java)
+
+```java
+package com.example;
+
+import com.openai.client.OpenAIClient;
+import com.openai.client.okhttp.OpenAIOkHttpClient;
+import com.openai.models.ChatModel;
+import com.openai.models.chat.completions.ChatCompletion;
+import com.openai.models.chat.completions.ChatCompletionCreateParams;
+
+public class OpenAITest {
+    public static void main(String[] args) {
+        // Get the API key from an environment variable for security
+        String apiKey = System.getenv("OPENAI_API_KEY");
+        String resourceName = "https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1";
+        String modelDeploymentName = "grok-3-mini"; // Replace with your model deployment name.
+
+        try {
+            OpenAIClient client = OpenAIOkHttpClient.builder()
+                .baseUrl(resourceName)
+                .apiKey(apiKey)
+                .build();
+
+            ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
+                .addUserMessage("Explain what the bitter lesson is?")
+                .model(modelDeploymentName)
+                .build();
+
+            ChatCompletion chatCompletion = client.chat().completions().create(params);
+
+            // Print the model's reply
+            chatCompletion.choices().get(0).message().content().ifPresent(System.out::println);
+
+        } catch (Exception e) {
+            e.printStackTrace();
        }
-      ],
-      "role": "assistant",
-      "status": "completed",
-      "type": "message"
    }
-  ],
-  "parallel_tool_calls": true,
-  "temperature": 1.0,
-  "tool_choice": "auto",
-  "tools": [],
-  "top_p": 1.0,
-  "background": null,
-  "max_output_tokens": null,
-  "previous_response_id": null,
-  "reasoning": {
-    "effort": null,
-    "generate_summary": null,
-    "summary": null
-  },
-  "service_tier": "default",
-  "status": "completed",
-  "text": {
-    "format": {
-      "type": "text"
-    }
-  },
-  "truncation": "disabled",
-  "usage": {
-    "input_tokens": 12,
-    "input_tokens_details": {
-      "cached_tokens": 0
-    },
-    "output_tokens": 19,
-    "output_tokens_details": {
-      "reasoning_tokens": 0
-    },
-    "total_tokens": 31
-  },
-  "user": null,
-  "store": true
}
```
+# [REST](#tab/rest)
+
+```bash
+curl -X POST https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer $AZURE_OPENAI_AUTH_TOKEN" \
+  -d '{
+    "model": "grok-3-mini",
+    "messages": [
+      {
+        "role": "developer",
+        "content": "You are a helpful assistant."
+      },
+      {
+        "role": "user",
+        "content": "Explain what the bitter lesson is?"
+      }
+    ]
+  }'
+```
+
---
## v1 API support