ChatCompletionString.java (new file)
@@ -0,0 +1,53 @@
+package com.sap.ai.sdk.foundationmodels.openai;
+
+import com.sap.ai.sdk.foundationmodels.openai.model.OpenAiChatCompletionOutput;
+import com.sap.ai.sdk.foundationmodels.openai.model.OpenAiChatCompletionParameters;
+import java.util.stream.Stream;
+import javax.annotation.Nonnull;
+
+/**
+ * Client for interacting with OpenAI models, accepting plain string prompts only.
+ *
+ * @since 1.2.0
+ */
+public interface ChatCompletionString {
+
+  /**
+   * Generate a completion for the given user prompt.
+   *
+   * @param prompt a text message.
+   * @return the completion output
+   * @throws OpenAiClientException if the request fails
+   */
+  @Nonnull
+  OpenAiChatCompletionOutput chatCompletion(@Nonnull final String prompt)
+      throws OpenAiClientException;
+
+  /**
+   * Stream a completion for the given prompt. Returns a <b>lazily</b> populated stream of text
+   * chunks. To access more details about the individual chunks, use {@link
+   * OpenAiClient#streamChatCompletionDeltas(OpenAiChatCompletionParameters)}.
+   *
+   * <p>The stream should be consumed in a try-with-resources block to ensure that the underlying
+   * HTTP connection is closed.
+   *
+   * <p>Example:
+   *
+   * <pre>{@code
+   * try (var stream = client.streamChatCompletion("...")) {
+   *   stream.forEach(System.out::println);
+   * }
+   * }</pre>
+   *
+   * <p>Keep in mind that a terminal stream operation like {@link Stream#forEach} blocks until
+   * all chunks are consumed. Since chunks arrive sequentially over a single connection, invoking
+   * {@link Stream#parallel()} on this stream is not supported.
+   *
+   * @param prompt a text message.
+   * @return a stream of message deltas
+   * @throws OpenAiClientException if the request fails or if the finish reason is content_filter
+   * @see OpenAiClient#streamChatCompletionDeltas(OpenAiChatCompletionParameters)
+   */
+  @Nonnull
+  Stream<String> streamChatCompletion(@Nonnull final String prompt) throws OpenAiClientException;
+}
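
For orientation, a minimal usage sketch of the new interface, assuming the client is obtained via a factory such as OpenAiClient.forModel(OpenAiModel.GPT_35_TURBO) — those names are illustrative, not part of this change. Any OpenAiClient instance qualifies, since the class now implements ChatCompletionString (see the second file below).

import com.sap.ai.sdk.foundationmodels.openai.model.OpenAiChatCompletionOutput;

class ChatCompletionStringDemo {
  public static void main(final String[] args) {
    // Assumed factory method and model constant, for illustration only.
    final ChatCompletionString client = OpenAiClient.forModel(OpenAiModel.GPT_35_TURBO);

    // Blocking call: the full completion is returned at once.
    final OpenAiChatCompletionOutput output = client.chatCompletion("Hello, world!");

    // Streaming call: try-with-resources closes the underlying HTTP connection,
    // even if the stream is not consumed to the end.
    try (var stream = client.streamChatCompletion("Tell me a short story.")) {
      stream.forEach(System.out::print);
    }
  }
}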
OpenAiClient.java
@@ -37,7 +37,7 @@
 /** Client for interacting with OpenAI models. */
 @Slf4j
 @RequiredArgsConstructor(access = AccessLevel.PRIVATE)
-public final class OpenAiClient {
+public final class OpenAiClient implements ChatCompletionString {
   private static final String DEFAULT_API_VERSION = "2024-02-01";
   static final ObjectMapper JACKSON = getDefaultObjectMapper();
   @Nullable private String systemPrompt = null;
@@ -113,18 +113,12 @@ public static OpenAiClient withCustomDestination(@Nonnull final Destination dest
    * @return the client
    */
   @Nonnull
-  public OpenAiClient withSystemPrompt(@Nonnull final String systemPrompt) {
+  public ChatCompletionString withSystemPrompt(@Nonnull final String systemPrompt) {
     this.systemPrompt = systemPrompt;
     return this;
   }
 
-  /**
-   * Generate a completion for the given user prompt.
-   *
-   * @param prompt a text message.
-   * @return the completion output
-   * @throws OpenAiClientException if the request fails
-   */
+  @Override
   @Nonnull
   public OpenAiChatCompletionOutput chatCompletion(@Nonnull final String prompt)
       throws OpenAiClientException {
@@ -146,35 +140,10 @@ public OpenAiChatCompletionOutput chatCompletion(@Nonnull final String prompt)
   @Nonnull
   public OpenAiChatCompletionOutput chatCompletion(
       @Nonnull final OpenAiChatCompletionParameters parameters) throws OpenAiClientException {
-    warnIfUnsupportedUsage();
     return execute("/chat/completions", parameters, OpenAiChatCompletionOutput.class);
   }
 
-  /**
-   * Stream a completion for the given prompt. Returns a <b>lazily</b> populated stream of text
-   * chunks. To access more details about the individual chunks, use {@link
-   * #streamChatCompletionDeltas(OpenAiChatCompletionParameters)}.
-   *
-   * <p>The stream should be consumed using a try-with-resources block to ensure that the underlying
-   * HTTP connection is closed.
-   *
-   * <p>Example:
-   *
-   * <pre>{@code
-   * try (var stream = client.streamChatCompletion("...")) {
-   *   stream.forEach(System.out::println);
-   * }
-   * }</pre>
-   *
-   * <p>Please keep in mind that using a terminal stream operation like {@link Stream#forEach} will
-   * block until all chunks are consumed. Also, for obvious reasons, invoking {@link
-   * Stream#parallel()} on this stream is not supported.
-   *
-   * @param prompt a text message.
-   * @return A stream of message deltas
-   * @throws OpenAiClientException if the request fails or if the finish reason is content_filter
-   * @see #streamChatCompletionDeltas(OpenAiChatCompletionParameters)
-   */
+  @Override
   @Nonnull
   public Stream<String> streamChatCompletion(@Nonnull final String prompt)
       throws OpenAiClientException {
@@ -225,18 +194,10 @@ private static void throwOnContentFilter(@Nonnull final OpenAiChatCompletionDelt
   @Nonnull
   public Stream<OpenAiChatCompletionDelta> streamChatCompletionDeltas(
       @Nonnull final OpenAiChatCompletionParameters parameters) throws OpenAiClientException {
-    warnIfUnsupportedUsage();
     parameters.enableStreaming();
     return executeStream("/chat/completions", parameters, OpenAiChatCompletionDelta.class);
   }
 
-  private void warnIfUnsupportedUsage() {
-    if (systemPrompt != null) {
-      log.warn(
-          "Previously set messages will be ignored, set it as an argument of this method instead.");
-    }
-  }
-
   /**
    * Get a vector representation of a given input that can be easily consumed by machine learning
    * models and algorithms.
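
Design note on the second file: narrowing the return type of withSystemPrompt from OpenAiClient to ChatCompletionString moves the old runtime guard (the removed warnIfUnsupportedUsage()) to compile time. Once a system prompt is set, only the string-prompt methods remain reachable, so the parameters-based overloads that would silently ignore the prompt can no longer be called. A sketch, under the same assumed factory names as above:

// Hypothetical names: forModel(...) and GPT_35_TURBO are illustrative.
final ChatCompletionString client =
    OpenAiClient.forModel(OpenAiModel.GPT_35_TURBO)
        .withSystemPrompt("You are a terse assistant.");

// Compiles: the narrowed type exposes only the string-prompt methods.
final OpenAiChatCompletionOutput output = client.chatCompletion("Summarize this change.");

// Does not compile: the OpenAiChatCompletionParameters overload, which would
// silently drop the system prompt, is not part of ChatCompletionString.
// client.chatCompletion(new OpenAiChatCompletionParameters());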