
Commit e1904b0

Author I750911 committed: update of deprecated gpt-3.5-turbo-16k to gpt-4o
1 parent: 1fb491a, commit: e1904b0

File tree

11 files changed: 17 additions & 16 deletions

orchestration/src/test/java/com/sap/ai/sdk/orchestration/ConfigToRequestTransformerTest.java

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 package com.sap.ai.sdk.orchestration;
 
-import static com.sap.ai.sdk.orchestration.OrchestrationUnitTest.CUSTOM_GPT_35;
+import static com.sap.ai.sdk.orchestration.OrchestrationUnitTest.CUSTOM_GPT_4O;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
@@ -80,7 +80,7 @@ void testMessagesHistory() {
     var prompt = new OrchestrationPrompt("bar").messageHistory(List.of(systemMessage));
     var actual =
         ConfigToRequestTransformer.toCompletionPostRequest(
-            prompt, new OrchestrationModuleConfig().withLlmConfig(CUSTOM_GPT_35));
+            prompt, new OrchestrationModuleConfig().withLlmConfig(CUSTOM_GPT_4O));
 
     assertThat(actual.getMessagesHistory()).containsExactly(systemMessage.createChatMessage());
   }
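
For readers making the same swap in their own code, a minimal sketch of what this test now exercises: an unchanged prompt combined with an LLM config that carries GPT_4O instead of the deprecated model. It mirrors the test one-to-one; note that ConfigToRequestTransformer sits in the same package as the test and may not be public API, and the message history is omitted here for brevity.

package com.sap.ai.sdk.orchestration;

import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O;

class TransformerSketch {
  void toRequest() {
    // Same call chain as the updated test, just using the library constant GPT_4O directly.
    var prompt = new OrchestrationPrompt("bar");
    var config = new OrchestrationModuleConfig().withLlmConfig(GPT_4O);
    var request = ConfigToRequestTransformer.toCompletionPostRequest(prompt, config);
  }
}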

orchestration/src/test/java/com/sap/ai/sdk/orchestration/OrchestrationUnitTest.java

Lines changed: 4 additions & 4 deletions
@@ -16,7 +16,7 @@
 import static com.github.tomakehurst.wiremock.client.WireMock.verify;
 import static com.sap.ai.sdk.orchestration.AzureFilterThreshold.ALLOW_SAFE;
 import static com.sap.ai.sdk.orchestration.AzureFilterThreshold.ALLOW_SAFE_LOW_MEDIUM;
-import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_35_TURBO_16K;
+import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O;
 import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O_MINI;
 import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.*;
 import static org.apache.hc.core5.http.HttpStatus.SC_BAD_REQUEST;
@@ -82,8 +82,8 @@
  */
 @WireMockTest
 class OrchestrationUnitTest {
-  static final OrchestrationAiModel CUSTOM_GPT_35 =
-      GPT_35_TURBO_16K
+  static final OrchestrationAiModel CUSTOM_GPT_4O =
+      GPT_4O
           .withParam(MAX_TOKENS, 50)
           .withParam(TEMPERATURE, 0.1)
           .withParam(FREQUENCY_PENALTY, 0)
@@ -103,7 +103,7 @@ void setup(WireMockRuntimeInfo server) {
     final DefaultHttpDestination destination =
         DefaultHttpDestination.builder(server.getHttpBaseUrl()).build();
     client = new OrchestrationClient(destination);
-    config = new OrchestrationModuleConfig().withLlmConfig(CUSTOM_GPT_35);
+    config = new OrchestrationModuleConfig().withLlmConfig(CUSTOM_GPT_4O);
     prompt = new OrchestrationPrompt("Hello World! Why is this phrase so famous?");
     ApacheHttpClient5Accessor.setHttpClientCache(ApacheHttpClient5Cache.DISABLED);
   }
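
The core of the commit is this constant: the tuning parameters stay as they were and only the base model changes from GPT_35_TURBO_16K to GPT_4O. A standalone sketch of the pattern, using only the values visible in the diff (the builder chain in the test continues beyond the lines shown above, and the assumption that withLlmConfig returns OrchestrationModuleConfig follows from the fluent usage in setup()):

import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.*;

import com.sap.ai.sdk.orchestration.OrchestrationAiModel;
import com.sap.ai.sdk.orchestration.OrchestrationModuleConfig;

class ModelSwapSketch {
  // GPT_4O is used as a drop-in replacement: the withParam calls are untouched by the commit.
  static final OrchestrationAiModel CUSTOM_GPT_4O =
      GPT_4O
          .withParam(MAX_TOKENS, 50)
          .withParam(TEMPERATURE, 0.1)
          .withParam(FREQUENCY_PENALTY, 0);

  // The module config then points at the new constant, exactly as setup() does
  // (return type of withLlmConfig assumed from the fluent usage in the test).
  static final OrchestrationModuleConfig CONFIG =
      new OrchestrationModuleConfig().withLlmConfig(CUSTOM_GPT_4O);
}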

orchestration/src/test/java/com/sap/ai/sdk/orchestration/spring/OrchestrationChatModelTest.java

Lines changed: 2 additions & 2 deletions
@@ -9,7 +9,7 @@
 import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo;
 import static com.github.tomakehurst.wiremock.client.WireMock.verify;
 import static com.github.tomakehurst.wiremock.stubbing.Scenario.STARTED;
-import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_35_TURBO_16K;
+import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.ArgumentMatchers.any;
@@ -62,7 +62,7 @@ void setup(WireMockRuntimeInfo server) {
     client = new OrchestrationChatModel(new OrchestrationClient(destination));
     defaultOptions =
         new OrchestrationChatOptions(
-            new OrchestrationModuleConfig().withLlmConfig(GPT_35_TURBO_16K));
+            new OrchestrationModuleConfig().withLlmConfig(GPT_4O));
     prompt = new Prompt("Hello World! Why is this phrase so famous?", defaultOptions);
     ApacheHttpClient5Accessor.setHttpClientCache(ApacheHttpClient5Cache.DISABLED);
   }
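
For the Spring AI integration the change is the same one-liner. Below is a minimal setup sketch under the same assumptions, with the WireMock destination replaced by a placeholder URL; import paths for classes not shown in this diff (Prompt, DefaultHttpDestination, the spring package classes) are assumed from the test's package, Spring AI, and the SAP Cloud SDK.

import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O;

import com.sap.ai.sdk.orchestration.OrchestrationClient;
import com.sap.ai.sdk.orchestration.OrchestrationModuleConfig;
import com.sap.ai.sdk.orchestration.spring.OrchestrationChatModel;
import com.sap.ai.sdk.orchestration.spring.OrchestrationChatOptions;
import com.sap.cloud.sdk.cloudplatform.connectivity.DefaultHttpDestination;
import org.springframework.ai.chat.prompt.Prompt;

class SpringChatSketch {
  void setupLikeTheTest() {
    // Placeholder URL; the test points the destination at a WireMock server instead.
    var destination = DefaultHttpDestination.builder("https://ai-api.example.com").build();
    var client = new OrchestrationChatModel(new OrchestrationClient(destination));
    // Only this line changed in the commit: GPT_35_TURBO_16K became GPT_4O.
    var options =
        new OrchestrationChatOptions(new OrchestrationModuleConfig().withLlmConfig(GPT_4O));
    var prompt = new Prompt("Hello World! Why is this phrase so famous?", options);
  }
}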

orchestration/src/test/resources/filteringLooseRequest.json

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
   "orchestration_config": {
     "module_configurations": {
       "llm_module_config": {
-        "model_name": "gpt-35-turbo-16k",
+        "model_name": "gpt-4o",
         "model_params": {
           "temperature": 0.1,
           "max_tokens": 50,

orchestration/src/test/resources/groundingRequest.json

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
   "orchestration_config" : {
     "module_configurations" : {
       "llm_module_config" : {
-        "model_name" : "gpt-35-turbo-16k",
+        "model_name" : "gpt-4o",
         "model_params" : {
           "max_tokens" : 50,
           "temperature" : 0.1,

orchestration/src/test/resources/maskingRequest.json

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
   "orchestration_config": {
     "module_configurations": {
       "llm_module_config": {
-        "model_name": "gpt-35-turbo-16k",
+        "model_name": "gpt-4o",
         "model_params": {
           "presence_penalty": 0,
           "frequency_penalty": 0,

orchestration/src/test/resources/messagesHistoryRequest.json

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
   "orchestration_config": {
     "module_configurations": {
       "llm_module_config": {
-        "model_name": "gpt-35-turbo-16k",
+        "model_name": "gpt-4o",
         "model_params": {
           "presence_penalty": 0,
           "frequency_penalty": 0,

orchestration/src/test/resources/templatingRequest.json

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
         "tools" : [ ]
       },
       "llm_module_config": {
-        "model_name": "gpt-35-turbo-16k",
+        "model_name": "gpt-4o",
         "model_params": {
           "max_tokens": 50,
           "temperature": 0.1,

orchestration/src/test/resources/toolCallsRequest.json

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
   "orchestration_config": {
     "module_configurations": {
       "llm_module_config": {
-        "model_name": "gpt-35-turbo-16k",
+        "model_name": "gpt-4o",
         "model_params": {},
         "model_version": "latest"
       },

orchestration/src/test/resources/toolCallsRequest2.json

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
   "orchestration_config": {
     "module_configurations": {
       "llm_module_config": {
-        "model_name": "gpt-35-turbo-16k",
+        "model_name": "gpt-4o",
         "model_params": {},
         "model_version": "latest"
       },
