diff --git a/foundation-models/openai/src/main/java/com/sap/ai/sdk/foundationmodels/openai/ChatCompletionString.java b/foundation-models/openai/src/main/java/com/sap/ai/sdk/foundationmodels/openai/ChatCompletionString.java new file mode 100644 index 000000000..30a44eeac --- /dev/null +++ b/foundation-models/openai/src/main/java/com/sap/ai/sdk/foundationmodels/openai/ChatCompletionString.java @@ -0,0 +1,53 @@ +package com.sap.ai.sdk.foundationmodels.openai; + +import com.sap.ai.sdk.foundationmodels.openai.model.OpenAiChatCompletionOutput; +import com.sap.ai.sdk.foundationmodels.openai.model.OpenAiChatCompletionParameters; +import java.util.stream.Stream; +import javax.annotation.Nonnull; + +/** + * Client for interacting with OpenAI models. Allows for convenient string prompts only. + * + * @since 1.2.0 + */ +public interface ChatCompletionString { + + /** + * Generate a completion for the given user prompt. + * + * @param prompt a text message. + * @return the completion output + * @throws OpenAiClientException if the request fails + */ + @Nonnull + OpenAiChatCompletionOutput chatCompletion(@Nonnull final String prompt) + throws OpenAiClientException; + + /** + * Stream a completion for the given prompt. Returns a lazily populated stream of text + * chunks. To access more details about the individual chunks, use {@link + * OpenAiClient#streamChatCompletionDeltas(OpenAiChatCompletionParameters)}. + * + *

<p>The stream should be consumed using a try-with-resources block to ensure that the underlying + * HTTP connection is closed. + * + *

<p>Example: + * + *

<pre>{@code
+   * try (var stream = client.streamChatCompletion("...")) {
+   *       stream.forEach(System.out::println);
+   * }
+   * }
+ * + *

<p>Please keep in mind that using a terminal stream operation like {@link Stream#forEach} will + * block until all chunks are consumed. Also, for obvious reasons, invoking {@link + * Stream#parallel()} on this stream is not supported. + * + * @param prompt a text message. + * @return A stream of message deltas + * @throws OpenAiClientException if the request fails or if the finish reason is content_filter + * @see OpenAiClient#streamChatCompletionDeltas(OpenAiChatCompletionParameters) + */ + @Nonnull + Stream<String> streamChatCompletion(@Nonnull final String prompt) throws OpenAiClientException; +} diff --git a/foundation-models/openai/src/main/java/com/sap/ai/sdk/foundationmodels/openai/OpenAiClient.java b/foundation-models/openai/src/main/java/com/sap/ai/sdk/foundationmodels/openai/OpenAiClient.java index 8699201a7..8b0840ddb 100644 --- a/foundation-models/openai/src/main/java/com/sap/ai/sdk/foundationmodels/openai/OpenAiClient.java +++ b/foundation-models/openai/src/main/java/com/sap/ai/sdk/foundationmodels/openai/OpenAiClient.java @@ -37,7 +37,7 @@ /** Client for interacting with OpenAI models. */ @Slf4j @RequiredArgsConstructor(access = AccessLevel.PRIVATE) -public final class OpenAiClient { +public final class OpenAiClient implements ChatCompletionString { private static final String DEFAULT_API_VERSION = "2024-02-01"; static final ObjectMapper JACKSON = getDefaultObjectMapper(); @Nullable private String systemPrompt = null; @@ -113,18 +113,12 @@ public static OpenAiClient withCustomDestination(@Nonnull final Destination dest * @return the client */ @Nonnull - public OpenAiClient withSystemPrompt(@Nonnull final String systemPrompt) { + public ChatCompletionString withSystemPrompt(@Nonnull final String systemPrompt) { this.systemPrompt = systemPrompt; return this; } - /** - * Generate a completion for the given user prompt. - * - * @param prompt a text message. 
- * @return the completion output - * @throws OpenAiClientException if the request fails - */ + @Override @Nonnull public OpenAiChatCompletionOutput chatCompletion(@Nonnull final String prompt) throws OpenAiClientException { @@ -146,35 +140,10 @@ public OpenAiChatCompletionOutput chatCompletion(@Nonnull final String prompt) @Nonnull public OpenAiChatCompletionOutput chatCompletion( @Nonnull final OpenAiChatCompletionParameters parameters) throws OpenAiClientException { - warnIfUnsupportedUsage(); return execute("/chat/completions", parameters, OpenAiChatCompletionOutput.class); } - /** - * Stream a completion for the given prompt. Returns a lazily populated stream of text - * chunks. To access more details about the individual chunks, use {@link - * #streamChatCompletionDeltas(OpenAiChatCompletionParameters)}. - * - *

<p>The stream should be consumed using a try-with-resources block to ensure that the underlying - * HTTP connection is closed. - * - *

<p>Example: - * - *

<pre>{@code
-   * try (var stream = client.streamChatCompletion("...")) {
-   *       stream.forEach(System.out::println);
-   * }
-   * }
- * - *

<p>Please keep in mind that using a terminal stream operation like {@link Stream#forEach} will - * block until all chunks are consumed. Also, for obvious reasons, invoking {@link - * Stream#parallel()} on this stream is not supported. - * - * @param prompt a text message. - * @return A stream of message deltas - * @throws OpenAiClientException if the request fails or if the finish reason is content_filter - * @see #streamChatCompletionDeltas(OpenAiChatCompletionParameters) - */ + @Override @Nonnull public Stream<String> streamChatCompletion(@Nonnull final String prompt) throws OpenAiClientException { @@ -225,18 +194,10 @@ private static void throwOnContentFilter(@Nonnull final OpenAiChatCompletionDelt @Nonnull public Stream<OpenAiChatCompletionDelta> streamChatCompletionDeltas( @Nonnull final OpenAiChatCompletionParameters parameters) throws OpenAiClientException { - warnIfUnsupportedUsage(); parameters.enableStreaming(); return executeStream("/chat/completions", parameters, OpenAiChatCompletionDelta.class); } - private void warnIfUnsupportedUsage() { - if (systemPrompt != null) { - log.warn( - "Previously set messages will be ignored, set it as an argument of this method instead."); - } - } - /** * Get a vector representation of a given input that can be easily consumed by machine learning * models and algorithms.