Commit 7b30880

max completion tokens
1 parent b9ad200 commit 7b30880

11 files changed: 12 additions & 12 deletions

orchestration/src/main/java/com/sap/ai/sdk/orchestration/OrchestrationAiModel.java

Lines changed: 2 additions & 2 deletions
@@ -29,7 +29,7 @@ public class OrchestrationAiModel {
  *
  * <pre>{@code
  * Map.of(
- *     "max_tokens", 50,
+ *     "max_completion_tokens", 50,
  *     "temperature", 0.1,
  *     "frequency_penalty", 0,
  *     "presence_penalty", 0)
@@ -375,7 +375,7 @@ public <ValueT> OrchestrationAiModel withParam(
   @FunctionalInterface
   public interface Parameter<ValueT> {
     /** The maximum number of tokens to generate. */
-    Parameter<Integer> MAX_TOKENS = () -> "max_tokens";
+    Parameter<Integer> MAX_TOKENS = () -> "max_completion_tokens";

     /** The sampling temperature. */
     Parameter<Number> TEMPERATURE = () -> "temperature";
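
Only the wire key returned by the MAX_TOKENS constant changes; call sites that use the typed Parameter constants keep compiling and now serialize "max_completion_tokens" instead of "max_tokens". A minimal usage sketch, assuming the withParam(Parameter, value) overload shown above and a predefined GPT_4O model constant (both hypothetical in their exact shape here):

    import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.MAX_TOKENS;
    import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.TEMPERATURE;

    import com.sap.ai.sdk.orchestration.OrchestrationAiModel;

    class MaxCompletionTokensExample {
      static OrchestrationAiModel configuredModel() {
        // The typed constant is still named MAX_TOKENS; only the key it
        // resolves to changed from "max_tokens" to "max_completion_tokens".
        return OrchestrationAiModel.GPT_4O   // assumed predefined model constant
            .withParam(MAX_TOKENS, 50)       // serialized as "max_completion_tokens": 50
            .withParam(TEMPERATURE, 0.1);
      }
    }

The test fixtures below reflect the same rename in the serialized request payloads.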

orchestration/src/test/resources/filteringLooseRequest.json

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
       "name": "gpt-4o",
       "params": {
         "temperature": 0.1,
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "frequency_penalty": 0,
         "presence_penalty": 0,
         "top_p": 1,

orchestration/src/test/resources/filteringLooseRequestStream.json

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
       "name": "gpt-4o",
       "params": {
         "temperature": 0.1,
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "frequency_penalty": 0,
         "presence_penalty": 0,
         "top_p": 1,

orchestration/src/test/resources/groundingHelpSapComRequest.json

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
     "model": {
       "name": "gpt-4o",
       "params": {
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "temperature": 0.1,
         "frequency_penalty": 0,
         "presence_penalty": 0,

orchestration/src/test/resources/groundingRequest.json

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
     "model": {
       "name": "gpt-4o",
       "params": {
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "temperature": 0.1,
         "frequency_penalty": 0,
         "presence_penalty": 0,

orchestration/src/test/resources/localTemplateRequest.json

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
     "model": {
       "name": "gpt-4o",
      "params": {
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "temperature": 0.1,
         "frequency_penalty": 0,
         "presence_penalty": 0,

orchestration/src/test/resources/maskingRequest.json

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
       "params": {
         "presence_penalty": 0,
         "frequency_penalty": 0,
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "temperature": 0.1,
         "top_p": 1,
         "n": 1

orchestration/src/test/resources/messagesHistoryRequest.json

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
       "params": {
         "presence_penalty": 0,
         "frequency_penalty": 0,
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "temperature": 0.1,
         "top_p": 1,
         "n": 1

orchestration/src/test/resources/templateReferenceByIdRequest.json

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
     "model": {
       "name": "gpt-4o",
       "params": {
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "temperature": 0.1,
         "frequency_penalty": 0,
         "presence_penalty": 0,

orchestration/src/test/resources/templateReferenceByScenarioRequest.json

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
     "model": {
       "name": "gpt-4o",
       "params": {
-        "max_tokens": 50,
+        "max_completion_tokens": 50,
         "temperature": 0.1,
         "frequency_penalty": 0,
         "presence_penalty": 0,
