@@ -27,7 +27,7 @@ To complete this tutorial, you need:
<dependency>
    <groupId>com.azure</groupId>
    <artifactId>azure-ai-inference</artifactId>
-   <version>1.0.0-beta.1</version>
+   <version>1.0.0-beta.2</version>
</dependency>
```
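The Java snippets later in this change assume imports along the following lines. This is a minimal sketch: the exact `com.azure.ai.inference` package layout in the beta artifact may differ, and `DefaultAzureCredentialBuilder` ships in the separate `com.azure:azure-identity` dependency.

```java
// Imports assumed by the snippets below; the com.azure.ai.inference package
// names are an assumption for the beta artifact and may differ.
import com.azure.ai.inference.ChatCompletionsClient;
import com.azure.ai.inference.models.ChatCompletionsOptions;
import com.azure.ai.inference.models.ChatRequestUserMessage;
// AzureKeyCredential comes from azure-core; DefaultAzureCredentialBuilder
// requires the separate com.azure:azure-identity dependency.
import com.azure.core.credential.AzureKeyCredential;
import com.azure.identity.DefaultAzureCredentialBuilder;

import java.net.URI;
import java.util.Arrays;
```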
@@ -65,7 +65,6 @@ First, create the client to consume the model. The following code uses an endpoi
ChatCompletionsClient client = new ChatCompletionsClient(
    new URI("https://<resource>.services.ai.azure.com/models"),
    new AzureKeyCredential(System.getProperty("AZURE_INFERENCE_CREDENTIAL")),
-   "${variants-sample}"
```
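The client above reads the key from the `AZURE_INFERENCE_CREDENTIAL` JVM system property (for example, set with `-DAZURE_INFERENCE_CREDENTIAL=<key>`). A minimal sketch of populating that property from an environment variable of the same name before the client is built:

```java
// Hypothetical setup: copy the key from an environment variable into the
// system property that the snippet above reads. Assumes the key is exported
// as AZURE_INFERENCE_CREDENTIAL in the shell.
String key = System.getenv("AZURE_INFERENCE_CREDENTIAL");
if (key != null) {
    System.setProperty("AZURE_INFERENCE_CREDENTIAL", key);
}
```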

> [!TIP]
@@ -76,8 +75,7 @@ If you have configured the resource with **Microsoft Entra ID** support, you
```java
client = new ChatCompletionsClient(
    new URI("https://<resource>.services.ai.azure.com/models"),
-   new DefaultAzureCredentialBuilder().build(),
-   "${variants-sample}"
+   new DefaultAzureCredentialBuilder().build()
);
```
@@ -87,6 +85,7 @@ The following example shows how you can create a basic chat request to the model

```java
ChatCompletionsOptions requestOptions = new ChatCompletionsOptions()
+   .setModel("DeepSeek-R1")
    .setMessages(Arrays.asList(
        new ChatRequestUserMessage("How many languages are in the world?")
    ));
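// A hypothetical continuation, not part of this change: send the request and
// print the reply. The complete() method and the ChatCompletions/getChoices
// accessors are assumptions based on other Azure SDK clients; check the beta
// package reference for the exact names.
ChatCompletions response = client.complete(requestOptions);
System.out.println(response.getChoices().get(0).getMessage().getContent());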
@@ -167,6 +166,7 @@ You can _stream_ the content to get it as it's being generated. Streaming conten

```java
ChatCompletionsOptions requestOptions = new ChatCompletionsOptions()
+   .setModel("DeepSeek-R1")
    .setMessages(Arrays.asList(
        new ChatRequestUserMessage("How many languages are in the world? Write an essay about it.")
    ))