3 files changed (+22 -10 lines changed):

  main/java/com/sap/ai/sdk/orchestration/OrchestrationAiModel.java
  test/java/com/sap/ai/sdk/orchestration/OrchestrationUnitTest.java
  sample-code/spring-app/src/main/java/com/sap/ai/sdk/app/controllers/OrchestrationController.java
main/java/com/sap/ai/sdk/orchestration/OrchestrationAiModel.java

 import com.sap.ai.sdk.orchestration.client.model.LLMModuleConfig;
 import java.util.Map;
 import javax.annotation.Nonnull;
+import lombok.Getter;

 /** Large language models available in Orchestration. */
 // https://help.sap.com/docs/sap-ai-core/sap-ai-core-service-guide/models-and-scenarios-in-generative-ai-hub
-public class OrchestrationAiModel extends LLMModuleConfig {
+public class OrchestrationAiModel {
+  @Getter private final LLMModuleConfig config;

   /** IBM Granite 13B chat completions model */
   public static final OrchestrationAiModel IBM_GRANITE_13B_CHAT =
@@ -114,7 +116,15 @@ public class OrchestrationAiModel extends LLMModuleConfig {
       new OrchestrationAiModel("gemini-1.5-flash");

   OrchestrationAiModel(@Nonnull final String modelName) {
-    setModelName(modelName);
-    setModelParams(Map.of());
+    config = LLMModuleConfig.create().modelName(modelName).modelParams(Map.of());
+  }
+
+  private OrchestrationAiModel(
+      @Nonnull final String modelName, Map<String, ? extends Number> modelParams) {
+    config = LLMModuleConfig.create().modelName(modelName).modelParams(modelParams);
+  }
+
+  public OrchestrationAiModel modelParams(Map<String, ? extends Number> modelParams) {
+    return new OrchestrationAiModel(config.getModelName(), modelParams);
   }
 }
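The change above turns OrchestrationAiModel from a subclass of LLMModuleConfig into a thin wrapper around it: callers obtain the underlying config via getConfig(), optionally after overriding the (by default empty) model parameters with modelParams(...). A minimal usage sketch, assuming only the constants and values that appear in the diffs below (the wrapping class name ModelConfigExample is illustrative, not part of this change):

import com.sap.ai.sdk.orchestration.OrchestrationAiModel;
import com.sap.ai.sdk.orchestration.client.model.LLMModuleConfig;
import java.util.Map;

class ModelConfigExample {
  // modelParams(...) returns a fresh OrchestrationAiModel, so the shared constant
  // GPT_35_TURBO_16K itself is never mutated; getConfig() unwraps the LLMModuleConfig
  // that OrchestrationModuleConfig.withLlmConfig(...) expects.
  static final LLMModuleConfig LLM_CONFIG =
      OrchestrationAiModel.GPT_35_TURBO_16K
          .modelParams(Map.of("max_tokens", 50, "temperature", 0.1))
          .getConfig();
}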
test/java/com/sap/ai/sdk/orchestration/OrchestrationUnitTest.java

 @WireMockTest
 class OrchestrationUnitTest {
   static final LLMModuleConfig LLM_CONFIG =
-      OrchestrationAiModel.GPT_35_TURBO_16K.modelParams(
-          Map.of(
-              "max_tokens", 50,
-              "temperature", 0.1,
-              "frequency_penalty", 0,
-              "presence_penalty", 0));
+      OrchestrationAiModel.GPT_35_TURBO_16K
+          .modelParams(
+              Map.of(
+                  "max_tokens", 50,
+                  "temperature", 0.1,
+                  "frequency_penalty", 0,
+                  "presence_penalty", 0))
+          .getConfig();
   private final Function<String, InputStream> fileLoader =
       filename -> Objects.requireNonNull(getClass().getClassLoader().getResourceAsStream(filename));
sample-code/spring-app/src/main/java/com/sap/ai/sdk/app/controllers/OrchestrationController.java

 class OrchestrationController {

   private final OrchestrationClient client = new OrchestrationClient();
-  OrchestrationModuleConfig config = new OrchestrationModuleConfig().withLlmConfig(GPT_35_TURBO);
+  OrchestrationModuleConfig config = new OrchestrationModuleConfig().withLlmConfig(GPT_35_TURBO.getConfig());

   /**
    * Chat request to OpenAI through the Orchestration service with a simple prompt.