
Commit b133b1d

Merge remote-tracking branch 'origin/main' into feat/orch-convenient-filtering
# Conflicts:
#   orchestration/src/main/java/com/sap/ai/sdk/orchestration/OrchestrationModuleConfig.java
#   orchestration/src/test/java/com/sap/ai/sdk/orchestration/OrchestrationUnitTest.java
#   sample-code/spring-app/src/main/java/com/sap/ai/sdk/app/controllers/OrchestrationController.java
2 parents: f7c28d6 + a9be1ed

File tree

14 files changed, +301 -38 lines

Lines changed: 124 additions & 0 deletions

@@ -0,0 +1,124 @@

```yaml
name: Test SAP Cloud SDK Versions

on:
  workflow_dispatch:

env:
  MVN_MULTI_THREADED_ARGS: --batch-mode --no-transfer-progress --fail-at-end --show-version --threads 1C
  JAVA_VERSION: 17

jobs:
  fetch-dependency-versions:
    runs-on: ubuntu-latest
    outputs:
      versions: ${{ steps.fetch-versions.outputs.VERSIONS }}

    steps:
      - name: Fetch versions from Maven Central
        id: fetch-versions
        run: |
          # Specify the dependency coordinates
          GROUP_ID="com.sap.cloud.sdk"
          ARTIFACT_ID="sdk-bom"

          # Fetch available versions from Maven Central API
          response=$(curl -s "https://search.maven.org/solrsearch/select?q=g:%22${GROUP_ID}%22+AND+a:%22${ARTIFACT_ID}%22&rows=15&core=gav&wt=json")

          # Extract and filter versions (e.g., to exclude snapshots or specific versions)
          versions=$(echo "$response" | jq -r '.response.docs[].v' | grep -v -E 'SNAPSHOT|alpha|beta' | sort -V)

          # Convert the versions to a JSON array
          json_versions=$(echo "$versions" | jq -R . | jq -s . | tr -d '\n')

          echo "JSON Versions: $json_versions"

          # Output the versions as a string that can be used in the matrix
          echo "VERSIONS=${json_versions}" >> $GITHUB_OUTPUT

  setup-environment:
    runs-on: ubuntu-latest
    outputs:
      cache-key: ${{ steps.cache-build.outputs.cache-key }}
    steps:
      - name: "Checkout repository"
        uses: actions/checkout@v4

      - name: "Setup java"
        uses: actions/setup-java@v4
        with:
          distribution: "temurin"
          java-version: ${{ env.JAVA_VERSION }}
          cache: 'maven'

      - name: "Cache build"
        id: cache-build
        uses: actions/cache@v3
        with:
          path: |
            ~/.m2/repository
            target
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-

      - name: "Build SDK"
        run: |
          MVN_ARGS="${{ env.MVN_MULTI_THREADED_ARGS }} clean install -DskipTests -DskipFormatting"
          mvn $MVN_ARGS

  test-dependency-versions:
    needs: [ fetch-dependency-versions, setup-environment ]
    runs-on: ubuntu-latest
    strategy:
      max-parallel: 1
      fail-fast: false
      matrix:
        version: ${{ fromJson(needs.fetch-dependency-versions.outputs.versions) }}
    continue-on-error: true
    steps:
      - name: "Checkout repository"
        uses: actions/checkout@v4

      - name: "Setup java"
        uses: actions/setup-java@v4
        with:
          distribution: "temurin"
          java-version: ${{ env.JAVA_VERSION }}
          cache: 'maven'

      - name: "Restore build cache"
        uses: actions/cache@v3
        with:
          path: |
            ~/.m2/repository
            target
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}

      - name: "Run tests with explicit version"
        run: |
          MVN_ARGS="${{ env.MVN_MULTI_THREADED_ARGS }} clean package -pl :spring-app -DskipTests=false -DskipFormatting -Dcloud-sdk.version=${{ matrix.version }} -Denforcer.skip=true -Dspotless.skip=true"
          mvn $MVN_ARGS
        env:
          # See "End-to-end test application instructions" on the README.md to update the secret
          AICORE_SERVICE_KEY: ${{ secrets.AICORE_SERVICE_KEY }}

      - name: "Start Application Locally"
        run: |
          cd sample-code/spring-app
          mvn spring-boot:run &
          timeout=15
          while ! nc -z localhost 8080; do
            sleep 1
            timeout=$((timeout - 1))
            if [ $timeout -le 0 ]; then
              echo "Server did not start within 15 seconds."
              exit 1
            fi
          done
        env:
          # See "End-to-end test application instructions" on the README.md to update the secret
          AICORE_SERVICE_KEY: ${{ secrets.AICORE_SERVICE_KEY }}

      - name: "Health Check"
        # print response body with headers to stdout. q:body only O:print -:stdout S:headers
        run: wget -qO- -S localhost:8080
```

core/pom.xml

Lines changed: 1 addition & 1 deletion

```diff
@@ -4,7 +4,7 @@
   <parent>
     <groupId>com.sap.ai.sdk</groupId>
     <artifactId>sdk-parent</artifactId>
-    <version>0.2.0-SNAPSHOT</version>
+    <version>0.3.0-SNAPSHOT</version>
   </parent>
   <artifactId>core</artifactId>
   <name>AI Core client</name>
```

docs/guides/ORCHESTRATION_CHAT_COMPLETION.md

Lines changed: 7 additions & 7 deletions

````diff
@@ -77,7 +77,7 @@ To use the Orchestration service, create a client and a configuration object:
 var client = new OrchestrationClient();
 
 var config = new OrchestrationModuleConfig()
-    .withLlmConfig(LLMModuleConfig.create().modelName("gpt-35-turbo").modelParams(Map.of()));
+    .withLlmConfig(OrchestrationAiModel.GPT_4O);
 ```
 
 Please also refer to [our sample code](../../sample-code/spring-app/src/main/java/com/sap/ai/sdk/app/controllers/OrchestrationController.java) for this and all following code examples.
@@ -201,16 +201,16 @@ In this example, the input will be masked before the call to the LLM. Note that
 
 ### Set model parameters
 
-Change your LLM module configuration to add model parameters:
+Change your LLM configuration to add model parameters:
 
 ```java
-var llmConfig =
-    LLMModuleConfig.create()
-        .modelName("gpt-35-turbo")
-        .modelParams(
+OrchestrationAiModel customGPT4O =
+    OrchestrationAiModel.GPT_4O
+        .withModelParams(
             Map.of(
                 "max_tokens", 50,
                 "temperature", 0.1,
                 "frequency_penalty", 0,
-                "presence_penalty", 0));
+                "presence_penalty", 0))
+        .withModelVersion("2024-05-13");
 ```
````
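
Read together, the two hunks above amount to the following configuration flow in the updated guide (a restatement of the diff content; no additional API is assumed):

```java
var client = new OrchestrationClient();

// Customize a predefined model; each with* call returns a new immutable copy.
var customGPT4O =
    OrchestrationAiModel.GPT_4O
        .withModelParams(
            Map.of(
                "max_tokens", 50,
                "temperature", 0.1,
                "frequency_penalty", 0,
                "presence_penalty", 0))
        .withModelVersion("2024-05-13");

// The new overload accepts the model constant directly.
var config = new OrchestrationModuleConfig().withLlmConfig(customGPT4O);
```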

foundation-models/openai/pom.xml

Lines changed: 1 addition & 1 deletion

```diff
@@ -4,7 +4,7 @@
   <parent>
     <groupId>com.sap.ai.sdk</groupId>
     <artifactId>sdk-parent</artifactId>
-    <version>0.2.0-SNAPSHOT</version>
+    <version>0.3.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
   <groupId>com.sap.ai.sdk.foundationmodels</groupId>
```

orchestration/pom.xml

Lines changed: 1 addition & 1 deletion

```diff
@@ -4,7 +4,7 @@
   <parent>
     <groupId>com.sap.ai.sdk</groupId>
     <artifactId>sdk-parent</artifactId>
-    <version>0.2.0-SNAPSHOT</version>
+    <version>0.3.0-SNAPSHOT</version>
   </parent>
   <artifactId>orchestration</artifactId>
   <name>Orchestration client</name>
```
orchestration/src/main/java/com/sap/ai/sdk/orchestration/OrchestrationAiModel.java

Lines changed: 120 additions & 0 deletions

@@ -0,0 +1,120 @@

```java
package com.sap.ai.sdk.orchestration;

import com.sap.ai.sdk.orchestration.client.model.LLMModuleConfig;
import java.util.Map;
import javax.annotation.Nonnull;
import lombok.AllArgsConstructor;
import lombok.Value;
import lombok.With;

/** Large language models available in Orchestration. */
@Value
@With
@AllArgsConstructor
public class OrchestrationAiModel {
  /** The name of the model */
  String modelName;

  /**
   * Optional parameters on this model.
   *
   * <pre>{@code
   * Map.of(
   *     "max_tokens", 50,
   *     "temperature", 0.1,
   *     "frequency_penalty", 0,
   *     "presence_penalty", 0)
   * }</pre>
   */
  Map<String, Object> modelParams;

  /** The version of the model, defaults to "latest". */
  String modelVersion;

  /** IBM Granite 13B chat completions model */
  public static final OrchestrationAiModel IBM_GRANITE_13B_CHAT =
      new OrchestrationAiModel("ibm--granite-13b-chat");

  /** MistralAI Mistral Large Instruct model */
  public static final OrchestrationAiModel MISTRAL_LARGE_INSTRUCT =
      new OrchestrationAiModel("mistralai--mistral-large-instruct");

  /** MistralAI Mixtral 8x7B Instruct v01 model */
  public static final OrchestrationAiModel MIXTRAL_8X7B_INSTRUCT_V01 =
      new OrchestrationAiModel("mistralai--mixtral-8x7b-instruct-v01");

  /** Meta Llama3 70B Instruct model */
  public static final OrchestrationAiModel LLAMA3_70B_INSTRUCT =
      new OrchestrationAiModel("meta--llama3-70b-instruct");

  /** Meta Llama3.1 70B Instruct model */
  public static final OrchestrationAiModel LLAMA3_1_70B_INSTRUCT =
      new OrchestrationAiModel("meta--llama3.1-70b-instruct");

  /** Anthropic Claude 3 Sonnet model */
  public static final OrchestrationAiModel CLAUDE_3_SONNET =
      new OrchestrationAiModel("anthropic--claude-3-sonnet");

  /** Anthropic Claude 3 Haiku model */
  public static final OrchestrationAiModel CLAUDE_3_HAIKU =
      new OrchestrationAiModel("anthropic--claude-3-haiku");

  /** Anthropic Claude 3 Opus model */
  public static final OrchestrationAiModel CLAUDE_3_OPUS =
      new OrchestrationAiModel("anthropic--claude-3-opus");

  /** Anthropic Claude 3.5 Sonnet model */
  public static final OrchestrationAiModel CLAUDE_3_5_SONNET =
      new OrchestrationAiModel("anthropic--claude-3.5-sonnet");

  /** Amazon Titan Text Lite model */
  public static final OrchestrationAiModel TITAN_TEXT_LITE =
      new OrchestrationAiModel("amazon--titan-text-lite");

  /** Amazon Titan Text Express model */
  public static final OrchestrationAiModel TITAN_TEXT_EXPRESS =
      new OrchestrationAiModel("amazon--titan-text-express");

  /** Azure OpenAI GPT-3.5 Turbo chat completions model */
  public static final OrchestrationAiModel GPT_35_TURBO = new OrchestrationAiModel("gpt-35-turbo");

  /** Azure OpenAI GPT-3.5 Turbo 16k chat completions model */
  public static final OrchestrationAiModel GPT_35_TURBO_16K =
      new OrchestrationAiModel("gpt-35-turbo-16k");

  /** Azure OpenAI GPT-4 chat completions model */
  public static final OrchestrationAiModel GPT_4 = new OrchestrationAiModel("gpt-4");

  /** Azure OpenAI GPT-4-32k chat completions model */
  public static final OrchestrationAiModel GPT_4_32K = new OrchestrationAiModel("gpt-4-32k");

  /** Azure OpenAI GPT-4o chat completions model */
  public static final OrchestrationAiModel GPT_4O = new OrchestrationAiModel("gpt-4o");

  /** Azure OpenAI GPT-4o-mini chat completions model */
  public static final OrchestrationAiModel GPT_4O_MINI = new OrchestrationAiModel("gpt-4o-mini");

  /** Google Cloud Platform Gemini 1.0 Pro model */
  public static final OrchestrationAiModel GEMINI_1_0_PRO =
      new OrchestrationAiModel("gemini-1.0-pro");

  /** Google Cloud Platform Gemini 1.5 Pro model */
  public static final OrchestrationAiModel GEMINI_1_5_PRO =
      new OrchestrationAiModel("gemini-1.5-pro");

  /** Google Cloud Platform Gemini 1.5 Flash model */
  public static final OrchestrationAiModel GEMINI_1_5_FLASH =
      new OrchestrationAiModel("gemini-1.5-flash");

  OrchestrationAiModel(@Nonnull final String modelName) {
    this(modelName, Map.of(), "latest");
  }

  @Nonnull
  LLMModuleConfig createConfig() {
    return new LLMModuleConfig()
        .modelName(modelName)
        .modelParams(modelParams)
        .modelVersion(modelVersion);
  }
}
```
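
For orientation, `createConfig()` is the bridge to the generated client model: a constant such as `GPT_4O` expands to the same `LLMModuleConfig` that previously had to be assembled by hand. A minimal sketch of that equivalence, using only the defaults visible above:

```java
// Sketch: what OrchestrationAiModel.GPT_4O.createConfig() produces, spelled out explicitly.
// The package-private constructor defaults modelParams to Map.of() and modelVersion to "latest".
LLMModuleConfig expanded =
    new LLMModuleConfig()
        .modelName("gpt-4o")
        .modelParams(Map.of())
        .modelVersion("latest");
```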

orchestration/src/main/java/com/sap/ai/sdk/orchestration/OrchestrationModuleConfig.java

Lines changed: 13 additions & 0 deletions

```diff
@@ -14,6 +14,7 @@
 import lombok.NoArgsConstructor;
 import lombok.Value;
 import lombok.With;
+import lombok.experimental.Tolerate;
 
 /**
  * Represents the configuration for the orchestration service. Allows for configuring the different
@@ -53,6 +54,18 @@ public class OrchestrationModuleConfig {
   /** A content filter to filter the prompt. */
   @Nullable FilteringModuleConfig filteringConfig;
 
+  /**
+   * Creates a new configuration with the given LLM configuration.
+   *
+   * @param aiModel The LLM configuration to use.
+   * @return A new configuration with the given LLM configuration.
+   */
+  @Tolerate
+  @Nonnull
+  public OrchestrationModuleConfig withLlmConfig(@Nonnull final OrchestrationAiModel aiModel) {
+    return withLlmConfig(aiModel.createConfig());
+  }
+
   /**
    * Adds input content filters to the orchestration configuration.
    *
```
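
`@Tolerate` tells Lombok to ignore this hand-written method when generating the `withLlmConfig(LLMModuleConfig)` wither for the `llmConfig` field, so both overloads coexist. A short sketch based on the diffs in this commit:

```java
// New convenience overload: pass a model constant, converted internally via createConfig().
var viaModel = new OrchestrationModuleConfig().withLlmConfig(OrchestrationAiModel.GPT_4O);

// Lombok-generated wither, still available thanks to @Tolerate on the overload above.
var viaGeneratedConfig =
    new OrchestrationModuleConfig()
        .withLlmConfig(
            new LLMModuleConfig().modelName("gpt-4o").modelParams(Map.of()).modelVersion("latest"));
```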

orchestration/src/test/java/com/sap/ai/sdk/orchestration/ConfigToRequestTransformerTest.java

Lines changed: 2 additions & 2 deletions

```diff
@@ -1,6 +1,6 @@
 package com.sap.ai.sdk.orchestration;
 
-import static com.sap.ai.sdk.orchestration.OrchestrationUnitTest.LLM_CONFIG;
+import static com.sap.ai.sdk.orchestration.OrchestrationUnitTest.CUSTOM_GPT_35;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
@@ -71,7 +71,7 @@ void testMessagesHistory() {
     var prompt = new OrchestrationPrompt("bar").messageHistory(List.of(systemMessage));
     var actual =
         ConfigToRequestTransformer.toCompletionPostRequest(
-            prompt, new OrchestrationModuleConfig().withLlmConfig(LLM_CONFIG));
+            prompt, new OrchestrationModuleConfig().withLlmConfig(CUSTOM_GPT_35));
 
     assertThat(actual.getMessagesHistory()).containsExactly(systemMessage);
   }
```
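
`CUSTOM_GPT_35` is defined in `OrchestrationUnitTest`, which this commit page does not show; purely as an illustrative assumption, such a test constant could look like this:

```java
// Hypothetical sketch only: the real definition lives in OrchestrationUnitTest and is not
// part of this diff. It is presumably a customized GPT-3.5 Turbo constant along these lines.
static final OrchestrationAiModel CUSTOM_GPT_35 =
    OrchestrationAiModel.GPT_35_TURBO.withModelParams(Map.of("max_tokens", 50));
```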
