Skip to content

Commit e960a85

Browse files
authored
[OpenAI] Align names with .NET (Azure#35625)
1 parent 40e0e3f commit e960a85

16 files changed

+215
-176
lines changed

sdk/openai/azure-ai-openai/README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ chatMessages.add(new ChatMessage(ChatRole.USER).setContent("What's the best way
192192
ChatCompletions chatCompletions = client.getChatCompletions("{deploymentOrModelId}",
193193
new ChatCompletionsOptions(chatMessages));
194194

195-
System.out.printf("Model ID=%s is created at %d.%n", chatCompletions.getId(), chatCompletions.getCreated());
195+
System.out.printf("Model ID=%s is created at %d.%n", chatCompletions.getId(), chatCompletions.getCreatedAt());
196196
for (ChatChoice choice : chatCompletions.getChoices()) {
197197
ChatMessage message = choice.getMessage();
198198
System.out.printf("Index: %d, Chat Role: %s.%n", choice.getIndex(), message.getRole());
@@ -215,7 +215,7 @@ IterableStream<ChatCompletions> chatCompletionsStream = client.getChatCompletion
215215
new ChatCompletionsOptions(chatMessages));
216216

217217
chatCompletionsStream.forEach(chatCompletions -> {
218-
System.out.printf("Model ID=%s is created at %d.%n", chatCompletions.getId(), chatCompletions.getCreated());
218+
System.out.printf("Model ID=%s is created at %d.%n", chatCompletions.getId(), chatCompletions.getCreatedAt());
219219
for (ChatChoice choice : chatCompletions.getChoices()) {
220220
ChatMessage message = choice.getDelta();
221221
if (message != null) {
@@ -243,7 +243,7 @@ EmbeddingsOptions embeddingsOptions = new EmbeddingsOptions(
243243
Embeddings embeddings = client.getEmbeddings("{deploymentOrModelId}", embeddingsOptions);
244244

245245
for (EmbeddingItem item : embeddings.getData()) {
246-
System.out.printf("Index: %d.%n", item.getIndex());
246+
System.out.printf("Index: %d.%n", item.getPromptIndex());
247247
for (Double embedding : item.getEmbedding()) {
248248
System.out.printf("%f;", embedding);
249249
}

sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java

Lines changed: 45 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,8 @@ public final class OpenAIAsyncClient {
7171
* }
7272
* }</pre>
7373
*
74-
* @param deploymentId deployment id of the deployed model.
74+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
75+
* (when using non-Azure OpenAI) to use for this request.
7576
* @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the
7677
* relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar
7778
* scenarios.
@@ -86,10 +87,12 @@ public final class OpenAIAsyncClient {
8687
*/
8788
@ServiceMethod(returns = ReturnType.SINGLE)
8889
public Mono<Response<BinaryData>> getEmbeddingsWithResponse(
89-
String deploymentId, BinaryData embeddingsOptions, RequestOptions requestOptions) {
90+
String deploymentOrModelName, BinaryData embeddingsOptions, RequestOptions requestOptions) {
9091
return openAIServiceClient != null
91-
? openAIServiceClient.getEmbeddingsWithResponseAsync(deploymentId, embeddingsOptions, requestOptions)
92-
: serviceClient.getEmbeddingsWithResponseAsync(deploymentId, embeddingsOptions, requestOptions);
92+
? openAIServiceClient.getEmbeddingsWithResponseAsync(
93+
deploymentOrModelName, embeddingsOptions, requestOptions)
94+
: serviceClient.getEmbeddingsWithResponseAsync(
95+
deploymentOrModelName, embeddingsOptions, requestOptions);
9396
}
9497

9598
/**
@@ -161,7 +164,8 @@ public Mono<Response<BinaryData>> getEmbeddingsWithResponse(
161164
* }
162165
* }</pre>
163166
*
164-
* @param deploymentId deployment id of the deployed model.
167+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
168+
* (when using non-Azure OpenAI) to use for this request.
165169
* @param completionsOptions The configuration information for a completions request. Completions support a wide
166170
* variety of tasks and generate text that continues from or "completes" provided prompt data.
167171
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
@@ -175,10 +179,12 @@ public Mono<Response<BinaryData>> getEmbeddingsWithResponse(
175179
*/
176180
@ServiceMethod(returns = ReturnType.SINGLE)
177181
public Mono<Response<BinaryData>> getCompletionsWithResponse(
178-
String deploymentId, BinaryData completionsOptions, RequestOptions requestOptions) {
182+
String deploymentOrModelName, BinaryData completionsOptions, RequestOptions requestOptions) {
179183
return openAIServiceClient != null
180-
? openAIServiceClient.getCompletionsWithResponseAsync(deploymentId, completionsOptions, requestOptions)
181-
: serviceClient.getCompletionsWithResponseAsync(deploymentId, completionsOptions, requestOptions);
184+
? openAIServiceClient.getCompletionsWithResponseAsync(
185+
deploymentOrModelName, completionsOptions, requestOptions)
186+
: serviceClient.getCompletionsWithResponseAsync(
187+
deploymentOrModelName, completionsOptions, requestOptions);
182188
}
183189

184190
/**
@@ -241,7 +247,8 @@ public Mono<Response<BinaryData>> getCompletionsWithResponse(
241247
* }
242248
* }</pre>
243249
*
244-
* @param deploymentId deployment id of the deployed model.
250+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
251+
* (when using non-Azure OpenAI) to use for this request.
245252
* @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a
246253
* wide variety of tasks and generate text that continues from or "completes" provided prompt data.
247254
* @param requestOptions The options to configure the HTTP request before HTTP client sends it.
@@ -255,18 +262,19 @@ public Mono<Response<BinaryData>> getCompletionsWithResponse(
255262
*/
256263
@ServiceMethod(returns = ReturnType.SINGLE)
257264
public Mono<Response<BinaryData>> getChatCompletionsWithResponse(
258-
String deploymentId, BinaryData chatCompletionsOptions, RequestOptions requestOptions) {
265+
String deploymentOrModelName, BinaryData chatCompletionsOptions, RequestOptions requestOptions) {
259266
return openAIServiceClient != null
260267
? openAIServiceClient.getChatCompletionsWithResponseAsync(
261-
deploymentId, chatCompletionsOptions, requestOptions)
268+
deploymentOrModelName, chatCompletionsOptions, requestOptions)
262269
: serviceClient.getChatCompletionsWithResponseAsync(
263-
deploymentId, chatCompletionsOptions, requestOptions);
270+
deploymentOrModelName, chatCompletionsOptions, requestOptions);
264271
}
265272

266273
/**
267274
* Return the embeddings for a given prompt.
268275
*
269-
* @param deploymentId deployment id of the deployed model.
276+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
277+
* (when using non-Azure OpenAI) to use for this request.
270278
* @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the
271279
* relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar
272280
* scenarios.
@@ -282,10 +290,11 @@ public Mono<Response<BinaryData>> getChatCompletionsWithResponse(
282290
*/
283291
@Generated
284292
@ServiceMethod(returns = ReturnType.SINGLE)
285-
public Mono<Embeddings> getEmbeddings(String deploymentId, EmbeddingsOptions embeddingsOptions) {
293+
public Mono<Embeddings> getEmbeddings(String deploymentOrModelName, EmbeddingsOptions embeddingsOptions) {
286294
// Generated convenience method for getEmbeddingsWithResponse
287295
RequestOptions requestOptions = new RequestOptions();
288-
return getEmbeddingsWithResponse(deploymentId, BinaryData.fromObject(embeddingsOptions), requestOptions)
296+
return getEmbeddingsWithResponse(
297+
deploymentOrModelName, BinaryData.fromObject(embeddingsOptions), requestOptions)
289298
.flatMap(FluxUtil::toMono)
290299
.map(protocolMethodData -> protocolMethodData.toObject(Embeddings.class));
291300
}
@@ -294,7 +303,8 @@ public Mono<Embeddings> getEmbeddings(String deploymentId, EmbeddingsOptions emb
294303
* Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text
295304
* that continues from or "completes" provided prompt data.
296305
*
297-
* @param deploymentId deployment id of the deployed model.
306+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
307+
* (when using non-Azure OpenAI) to use for this request.
298308
* @param completionsOptions The configuration information for a completions request. Completions support a wide
299309
* variety of tasks and generate text that continues from or "completes" provided prompt data.
300310
* @throws IllegalArgumentException thrown if parameters fail the validation.
@@ -308,10 +318,11 @@ public Mono<Embeddings> getEmbeddings(String deploymentId, EmbeddingsOptions emb
308318
*/
309319
@Generated
310320
@ServiceMethod(returns = ReturnType.SINGLE)
311-
public Mono<Completions> getCompletions(String deploymentId, CompletionsOptions completionsOptions) {
321+
public Mono<Completions> getCompletions(String deploymentOrModelName, CompletionsOptions completionsOptions) {
312322
// Generated convenience method for getCompletionsWithResponse
313323
RequestOptions requestOptions = new RequestOptions();
314-
return getCompletionsWithResponse(deploymentId, BinaryData.fromObject(completionsOptions), requestOptions)
324+
return getCompletionsWithResponse(
325+
deploymentOrModelName, BinaryData.fromObject(completionsOptions), requestOptions)
315326
.flatMap(FluxUtil::toMono)
316327
.map(protocolMethodData -> protocolMethodData.toObject(Completions.class));
317328
}
@@ -320,7 +331,8 @@ public Mono<Completions> getCompletions(String deploymentId, CompletionsOptions
320331
* Gets completions for the provided input prompt. Completions support a wide variety of tasks and generate text
321332
* that continues from or "completes" provided prompt data.
322333
*
323-
* @param deploymentId deployment id of the deployed model.
334+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
335+
* (when using non-Azure OpenAI) to use for this request.
324336
* @param prompt The prompt to generate completion text from.
325337
* @throws IllegalArgumentException thrown if parameters fail the validation.
326338
* @throws HttpResponseException thrown if the request is rejected by server.
@@ -332,15 +344,16 @@ public Mono<Completions> getCompletions(String deploymentId, CompletionsOptions
332344
* that continues from or "completes" provided prompt data on successful completion of {@link Mono}.
333345
*/
334346
@ServiceMethod(returns = ReturnType.SINGLE)
335-
public Mono<Completions> getCompletions(String deploymentId, String prompt) {
336-
return getCompletions(deploymentId, CompletionsUtils.defaultCompletionsOptions(prompt));
347+
public Mono<Completions> getCompletions(String deploymentOrModelName, String prompt) {
348+
return getCompletions(deploymentOrModelName, CompletionsUtils.defaultCompletionsOptions(prompt));
337349
}
338350

339351
/**
340352
* Gets completions as a stream for the provided input prompts. Completions support a wide variety of tasks and
341353
* generate text that continues from or "completes" provided prompt data.
342354
*
343-
* @param deploymentId deployment id of the deployed model.
355+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
356+
* (when using non-Azure OpenAI) to use for this request.
344357
* @param completionsOptions The configuration information for a completions request. Completions support a wide
345358
* variety of tasks and generate text that continues from or "completes" provided prompt data.
346359
* @throws IllegalArgumentException thrown if parameters fail the validation.
@@ -353,12 +366,12 @@ public Mono<Completions> getCompletions(String deploymentId, String prompt) {
353366
* and generate text that continues from or "completes" provided prompt data.
354367
*/
355368
@ServiceMethod(returns = ReturnType.COLLECTION)
356-
public Flux<Completions> getCompletionsStream(String deploymentId, CompletionsOptions completionsOptions) {
369+
public Flux<Completions> getCompletionsStream(String deploymentOrModelName, CompletionsOptions completionsOptions) {
357370
completionsOptions.setStream(true);
358371
RequestOptions requestOptions = new RequestOptions();
359372
BinaryData requestBody = BinaryData.fromObject(completionsOptions);
360373
Flux<ByteBuffer> responseStream =
361-
getCompletionsWithResponse(deploymentId, requestBody, requestOptions)
374+
getCompletionsWithResponse(deploymentOrModelName, requestBody, requestOptions)
362375
.flatMapMany(response -> response.getValue().toFluxByteBuffer());
363376
OpenAIServerSentEvents<Completions> completionsStream =
364377
new OpenAIServerSentEvents<>(responseStream, Completions.class);
@@ -369,7 +382,8 @@ public Flux<Completions> getCompletionsStream(String deploymentId, CompletionsOp
369382
* Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate
370383
* text that continues from or "completes" provided prompt data.
371384
*
372-
* @param deploymentId deployment id of the deployed model.
385+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
386+
* (when using non-Azure OpenAI) to use for this request.
373387
* @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a
374388
* wide variety of tasks and generate text that continues from or "completes" provided prompt data.
375389
* @throws IllegalArgumentException thrown if parameters fail the validation.
@@ -384,11 +398,11 @@ public Flux<Completions> getCompletionsStream(String deploymentId, CompletionsOp
384398
@Generated
385399
@ServiceMethod(returns = ReturnType.SINGLE)
386400
public Mono<ChatCompletions> getChatCompletions(
387-
String deploymentId, ChatCompletionsOptions chatCompletionsOptions) {
401+
String deploymentOrModelName, ChatCompletionsOptions chatCompletionsOptions) {
388402
// Generated convenience method for getChatCompletionsWithResponse
389403
RequestOptions requestOptions = new RequestOptions();
390404
return getChatCompletionsWithResponse(
391-
deploymentId, BinaryData.fromObject(chatCompletionsOptions), requestOptions)
405+
deploymentOrModelName, BinaryData.fromObject(chatCompletionsOptions), requestOptions)
392406
.flatMap(FluxUtil::toMono)
393407
.map(protocolMethodData -> protocolMethodData.toObject(ChatCompletions.class));
394408
}
@@ -397,7 +411,8 @@ public Mono<ChatCompletions> getChatCompletions(
397411
* Gets chat completions for the provided chat messages. Chat completions support a wide variety of tasks and
398412
* generate text that continues from or "completes" provided prompt data.
399413
*
400-
* @param deploymentId deployment id of the deployed model.
414+
* @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name
415+
* (when using non-Azure OpenAI) to use for this request.
401416
* @param chatCompletionsOptions The configuration information for a chat completions request. Completions support a
402417
* wide variety of tasks and generate text that continues from or "completes" provided prompt data.
403418
* @throws IllegalArgumentException thrown if parameters fail the validation.
@@ -411,12 +426,12 @@ public Mono<ChatCompletions> getChatCompletions(
411426
*/
412427
@ServiceMethod(returns = ReturnType.COLLECTION)
413428
public Flux<ChatCompletions> getChatCompletionsStream(
414-
String deploymentId, ChatCompletionsOptions chatCompletionsOptions) {
429+
String deploymentOrModelName, ChatCompletionsOptions chatCompletionsOptions) {
415430
chatCompletionsOptions.setStream(true);
416431
RequestOptions requestOptions = new RequestOptions();
417432
Flux<ByteBuffer> responseStream =
418433
getChatCompletionsWithResponse(
419-
deploymentId, BinaryData.fromObject(chatCompletionsOptions), requestOptions)
434+
deploymentOrModelName, BinaryData.fromObject(chatCompletionsOptions), requestOptions)
420435
.flatMapMany(response -> response.getValue().toFluxByteBuffer());
421436
OpenAIServerSentEvents<ChatCompletions> chatCompletionsStream =
422437
new OpenAIServerSentEvents<>(responseStream, ChatCompletions.class);

0 commit comments

Comments (0)