From 4675f833ec48e940512e59a02e797119b6dfa0bb Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 9 Oct 2024 17:13:33 -0400 Subject: [PATCH 01/17] Improved kdoc for GenerativeModel --- .../firebase/vertexai/GenerativeModel.kt | 68 +++++++++++-------- 1 file changed, 40 insertions(+), 28 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt index 7c50759ac73..c219f87892d 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt @@ -48,7 +48,8 @@ import kotlinx.coroutines.flow.map import kotlinx.coroutines.tasks.await /** - * A controller for communicating with the API of a given multimodal model (for example, Gemini). + * Represents a multimodal model (like Gemini), capable of generating content based on various input + * types. */ public class GenerativeModel internal constructor( @@ -122,11 +123,13 @@ internal constructor( ) /** - * Generates a [GenerateContentResponse] from the backend with the provided [Content]. + * Generates new content from the input [Content] given to the model as a prompt. * - * @param prompt [Content] to send to the model. - * @return A [GenerateContentResponse]. Function should be called within a suspend context to - * properly manage concurrency. + * This function should be called within a suspend context to properly manage concurrency. + * + * @param prompt The input(s) given to the model as a prompt. + * @return The content generated by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse = try { @@ -136,10 +139,11 @@ internal constructor( } /** - * Generates a streaming response from the backend with the provided [Content]. + * Generates new content as a stream from the input [Content] given to the model as a prompt. * - * @param prompt [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. + * @param prompt The input(s) given to the model as a prompt. + * @return A [Flow] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public fun generateContentStream(vararg prompt: Content): Flow = controller @@ -148,52 +152,58 @@ internal constructor( .map { it.toPublic().validate() } /** - * Generates a [GenerateContentResponse] from the backend with the provided text prompt. + * Generates new content from the text input given to the model as a prompt. * - * @param prompt The text to be converted into a single piece of [Content] to send to the model. - * @return A [GenerateContentResponse] after some delay. Function should be called within a - * suspend context to properly manage concurrency. + * This function should be called within a suspend context to properly manage concurrency. + * + * @param prompt The text to be send to the model as a prompt. + * @return The content generated by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public suspend fun generateContent(prompt: String): GenerateContentResponse = generateContent(content { text(prompt) }) /** - * Generates a streaming response from the backend with the provided text prompt. + * Generates new content as a stream from the text input given to the model as a prompt. 
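+   *
+   * For example, an illustrative sketch only (the `generativeModel` instance and the prompt text
+   * are assumptions, and collection must happen inside a coroutine):
+   * ```
+   * generativeModel.generateContentStream("Tell me a short story")
+   *   .collect { chunk -> print(chunk.text) }
+   * ```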
* - * @param prompt The text to be converted into a single piece of [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. + * @param prompt The text to be send to the model as a prompt. + * @return A [Flow] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public fun generateContentStream(prompt: String): Flow = generateContentStream(content { text(prompt) }) /** - * Generates a [GenerateContentResponse] from the backend with the provided image prompt. + * Generates new content from the image input given to the model as a prompt. * * @param prompt The image to be converted into a single piece of [Content] to send to the model. * @return A [GenerateContentResponse] after some delay. Function should be called within a * suspend context to properly manage concurrency. + * @throws [FirebaseVertexAIException] if the request failed. */ public suspend fun generateContent(prompt: Bitmap): GenerateContentResponse = generateContent(content { image(prompt) }) /** - * Generates a streaming response from the backend with the provided image prompt. + * Generates new content as a stream from the image input given to the model as a prompt. * * @param prompt The image to be converted into a single piece of [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. + * @return A [Flow] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public fun generateContentStream(prompt: Bitmap): Flow = generateContentStream(content { image(prompt) }) - /** Creates a [Chat] instance which internally tracks the ongoing conversation with the model */ + /** Creates a [Chat] instance using this model with the optionally provided history. */ public fun startChat(history: List = emptyList()): Chat = Chat(this, history.toMutableList()) /** - * Counts the amount of tokens in a prompt. + * Counts the number of tokens in a prompt using the model's tokenizer. * - * @param prompt A group of [Content] to count tokens of. - * @return A [CountTokensResponse] containing the amount of tokens in the prompt. + * @param prompt The input(s) given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. */ public suspend fun countTokens(vararg prompt: Content): CountTokensResponse { try { @@ -204,20 +214,22 @@ internal constructor( } /** - * Counts the amount of tokens in the text prompt. + * Counts the number of tokens in a text prompt using the model's tokenizer. * - * @param prompt The text to be converted to a single piece of [Content] to count the tokens of. - * @return A [CountTokensResponse] containing the amount of tokens in the prompt. + * @param prompt The text given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. */ public suspend fun countTokens(prompt: String): CountTokensResponse { return countTokens(content { text(prompt) }) } /** - * Counts the amount of tokens in the image prompt. + * Counts the number of tokens in an image prompt using the model's tokenizer. * - * @param prompt The image to be converted to a single piece of [Content] to count the tokens of. 
- * @return A [CountTokensResponse] containing the amount of tokens in the prompt. + * @param prompt The image given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. */ public suspend fun countTokens(prompt: Bitmap): CountTokensResponse { return countTokens(content { image(prompt) }) From 30768aa8dc3dc833e6a0f18c76e9c9700b7c15ab Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 9 Oct 2024 17:27:55 -0400 Subject: [PATCH 02/17] Improve FirebaseVertexAIMultiResourceComponent.kt kdoc --- .../vertexai/FirebaseVertexAIMultiResourceComponent.kt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt index 996ff601483..213351fdc92 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt @@ -22,7 +22,11 @@ import com.google.firebase.appcheck.interop.InteropAppCheckTokenProvider import com.google.firebase.auth.internal.InternalAuthProvider import com.google.firebase.inject.Provider -/** Multi-resource container for Firebase Vertex AI */ +/** + * Multi-resource container for Firebase Vertex AI. + * + * @hide + */ internal class FirebaseVertexAIMultiResourceComponent( private val app: FirebaseApp, private val appCheckProvider: Provider, From 4fbff600c2cb1251473474b8b2724160efc4fe56 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Wed, 9 Oct 2024 17:28:07 -0400 Subject: [PATCH 03/17] Improved FirebaseVertexAI kdoc --- .../firebase/vertexai/FirebaseVertexAI.kt | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt index 145dd90b121..03f7b952bce 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt @@ -42,13 +42,15 @@ internal constructor( /** * Instantiates a new [GenerativeModel] given the provided parameters. 
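+   *
+   * For example, an illustrative sketch only (assumes the `Firebase.vertexAI` entry point and an
+   * illustrative model name):
+   * ```
+   * val generativeModel = Firebase.vertexAI.generativeModel("gemini-1.5-flash")
+   * ```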
* - * @param modelName name of the model in the backend - * @param generationConfig configuration parameters to use for content generation - * @param safetySettings safety bounds to use during alongside prompts during content generation - * @param requestOptions configuration options to utilize during backend communication - * @param tools list of tools to make available to the model - * @param toolConfig configuration that defines how the model handles the tools provided - * @param systemInstruction contains a [Content] that directs the model to behave a certain way + * @param modelName The name of the model to use, for example "gemini-1.5-pro" + * @param generationConfig The configuration parameters to use for content generation + * @param safetySettings The safety bounds the model will abide to when content generation + * @param tools A list of [Tool]s the model may use to generate content + * @param toolConfig The [ToolConfig] that defines how the model handles the tools provided + * @param systemInstruction [Content] instructions that direct the model to behave a certain way + * currently only text content is supported. + * @param requestOptions Configuration options for sending requests to the backend + * @return The initialized [GenerativeModel] instance. */ @JvmOverloads public fun generativeModel( @@ -86,7 +88,7 @@ internal constructor( @JvmStatic public fun getInstance(app: FirebaseApp): FirebaseVertexAI = getInstance(app) /** - * Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location] + * Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location]. * * @param location location identifier, defaults to `us-central1`; see available * [Vertex AI regions](https://firebase.google.com/docs/vertex-ai/locations?platform=android#available-locations) From a6985d46e42c7508a02eded90c1a2cefb645761b Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 10:44:12 -0400 Subject: [PATCH 04/17] Improved Chat kdoc --- .../com/google/firebase/vertexai/Chat.kt | 104 +++++++++++++----- 1 file changed, 77 insertions(+), 27 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt index 9e0fc1bc2aa..d328179947d 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt @@ -33,14 +33,16 @@ import kotlinx.coroutines.flow.onEach /** * Representation of a multi-turn interaction with a model. * - * Handles the capturing and storage of the communication with the model, providing methods for - * further interaction. + * Captures, and stores the history of communication in memory, and provides it as context with each + * new message. * * **Note:** This object is not thread-safe, and calling [sendMessage] multiple times without * waiting for a response will throw an [InvalidStateException]. * * @param model The model to use for the interaction - * @property history The previous interactions with the model + * @property history The previous content from the chat that has been successfully sent and received + * from the model. This will be provided to the model for each message sent as context for the + * discussion. 
*/ public class Chat( private val model: GenerativeModel, @@ -49,12 +51,18 @@ public class Chat( private var lock = Semaphore(1) /** - * Generates a response from the backend with the provided [Content], and any previous ones - * sent/returned from this chat. + * Sends a message using the existing history of this chat as context and the provided [Content] + * prompt. * - * @param prompt A [Content] to send to the model. - * @throws InvalidStateException if the prompt is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request. + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * + * This function should be called within a suspend context to properly manage concurrency. + * + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public suspend fun sendMessage(prompt: Content): GenerateContentResponse { prompt.assertComesFromUser() @@ -70,10 +78,18 @@ public class Chat( } /** - * Generates a response from the backend with the provided text prompt. + * Sends a message using the existing history of this chat as context and the provided text + * prompt. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * + * This function should be called within a suspend context to properly manage concurrency. * - * @param prompt The text to be converted into a single piece of [Content] to send to the model. - * @throws InvalidStateException if the [Chat] instance has an active request. + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public suspend fun sendMessage(prompt: String): GenerateContentResponse { val content = content { text(prompt) } @@ -81,10 +97,18 @@ public class Chat( } /** - * Generates a response from the backend with the provided image prompt. + * Sends a message using the existing history of this chat as context and the provided image + * prompt. * - * @param prompt The image to be converted into a single piece of [Content] to send to the model. - * @throws InvalidStateException if the [Chat] instance has an active request. + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * + * This function should be called within a suspend context to properly manage concurrency. + * + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public suspend fun sendMessage(prompt: Bitmap): GenerateContentResponse { val content = content { image(prompt) } @@ -92,12 +116,20 @@ public class Chat( } /** - * Generates a streaming response from the backend with the provided [Content]. + * Sends a message using the existing history of this chat as context and the provided [Content] + * prompt. + * + * The response from the model is returned as a stream. 
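+   *
+   * For example, an illustrative sketch only (the `chat` instance and the prompt text are
+   * assumptions, and collection must happen inside a coroutine):
+   * ```
+   * chat.sendMessageStream(content { text("Tell me more about that") })
+   *   .collect { chunk -> print(chunk.text) }
+   * ```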
+ * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. * - * @param prompt A [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. - * @throws InvalidStateException if the prompt is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request. + * This function should be called within a suspend context to properly manage concurrency. + * + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public fun sendMessageStream(prompt: Content): Flow { prompt.assertComesFromUser() @@ -146,11 +178,20 @@ public class Chat( } /** - * Generates a streaming response from the backend with the provided text prompt. + * Sends a message using the existing history of this chat as context and the provided text + * prompt. + * + * The response from the model is returned as a stream. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * + * This function should be called within a suspend context to properly manage concurrency. * - * @param prompt a text to be converted into a single piece of [Content] to send to the model - * @return A [Flow] which will emit responses as they are returned from the model. - * @throws InvalidStateException if the [Chat] instance has an active request. + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public fun sendMessageStream(prompt: String): Flow { val content = content { text(prompt) } @@ -158,11 +199,20 @@ public class Chat( } /** - * Generates a streaming response from the backend with the provided image prompt. + * Sends a message using the existing history of this chat as context and the provided image + * prompt. + * + * The response from the model is returned as a stream. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * + * This function should be called within a suspend context to properly manage concurrency. * - * @param prompt A [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. - * @throws InvalidStateException if the [Chat] instance has an active request. + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. 
+ * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public fun sendMessageStream(prompt: Bitmap): Flow { val content = content { image(prompt) } From 6dcae5c73b53127ae10ef2a4f961cd2ec345c29b Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 10:50:04 -0400 Subject: [PATCH 05/17] Improved ChatFutures kdoc --- .../com/google/firebase/vertexai/Chat.kt | 5 --- .../firebase/vertexai/java/ChatFutures.kt | 32 ++++++++++++++----- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt index d328179947d..e4bafb2e742 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt @@ -124,7 +124,6 @@ public class Chat( * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. * - * This function should be called within a suspend context to properly manage concurrency. * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. @@ -186,8 +185,6 @@ public class Chat( * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. * - * This function should be called within a suspend context to properly manage concurrency. - * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role @@ -207,8 +204,6 @@ public class Chat( * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. * - * This function should be called within a suspend context to properly manage concurrency. - * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt index d6b1a4e5e22..c22bf979271 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt @@ -25,28 +25,44 @@ import kotlinx.coroutines.reactive.asPublisher import org.reactivestreams.Publisher /** - * Helper method for interacting with a [Chat] from Java. + * Helper wrapper for interacting with a [Chat] instance from Java. * - * @see from + * @see [Chat] */ public abstract class ChatFutures internal constructor() { /** - * Generates a response from the backend with the provided [Content], and any previous ones - * sent/returned from this chat. + * Sends a message using the existing history of this chat as context and the provided [Content] + * prompt. * - * @param prompt A [Content] to send to the model. + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. 
+ * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public abstract fun sendMessage(prompt: Content): ListenableFuture /** - * Generates a streaming response from the backend with the provided [Content]. + * Sends a message using the existing history of this chat as context and the provided [Content] + * prompt. + * + * The response from the model is returned as a stream. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * * - * @param prompt A [Content] to send to the model. + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public abstract fun sendMessageStream(prompt: Content): Publisher - /** Returns the [Chat] instance that was used to create this instance */ + /** Returns the [Chat] object wrapped by this object. */ public abstract fun getChat(): Chat private class FuturesImpl(private val chat: Chat) : ChatFutures() { From 06e3b5345265d0ac1c888c2283c3539eef4be319 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 10:59:41 -0400 Subject: [PATCH 06/17] Improve GenerativeModelFutures kdoc Also include some fixes for ChatFutures --- .../firebase/vertexai/java/ChatFutures.kt | 3 +- .../vertexai/java/GenerativeModelFutures.kt | 30 +++++++++++-------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt index c22bf979271..a6a4212fbd6 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt @@ -25,7 +25,7 @@ import kotlinx.coroutines.reactive.asPublisher import org.reactivestreams.Publisher /** - * Helper wrapper for interacting with a [Chat] instance from Java. + * Wrapper class providing Java compatible methods for [Chat]. * * @see [Chat] */ @@ -54,7 +54,6 @@ public abstract class ChatFutures internal constructor() { * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. * - * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt index fe43e0b69a2..db81d0f62e8 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt @@ -27,48 +27,54 @@ import kotlinx.coroutines.reactive.asPublisher import org.reactivestreams.Publisher /** - * Helper method for interacting with a [GenerativeModel] from Java. + * Wrapper class providing Java compatible methods for [GenerativeModel]. 
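+ *
+ * For example, an illustrative sketch only (`model` is assumed to be an existing
+ * [GenerativeModel]):
+ * ```
+ * val modelFutures = GenerativeModelFutures.from(model)
+ * ```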
* - * @see from + * @see [GenerativeModel] */ public abstract class GenerativeModelFutures internal constructor() { /** - * Generates a response from the backend with the provided [Content]. + * Generates new content from the input [Content] given to the model as a prompt. * - * @param prompt A group of [Content] to send to the model. + * @param prompt The input(s) given to the model as a prompt. + * @return The content generated by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public abstract fun generateContent( vararg prompt: Content ): ListenableFuture /** - * Generates a streaming response from the backend with the provided [Content]. + * Generates new content as a stream from the input [Content] given to the model as a prompt. * - * @param prompt A group of [Content] to send to the model. + * @param prompt The input(s) given to the model as a prompt. + * @return A [Publisher] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public abstract fun generateContentStream( vararg prompt: Content ): Publisher /** - * Counts the number of tokens used in a prompt. + * Counts the number of tokens in a prompt using the model's tokenizer. * - * @param prompt A group of [Content] to count tokens of. + * @param prompt The input(s) given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. */ public abstract fun countTokens(vararg prompt: Content): ListenableFuture - /** Creates a chat instance which internally tracks the ongoing conversation with the model */ + /** Creates a [ChatFuture] instance which internally tracks the ongoing conversation with the model. */ public abstract fun startChat(): ChatFutures /** - * Creates a chat instance which internally tracks the ongoing conversation with the model + * Creates a [ChatFuture] instance, initialized using the optionally provided [history]. * - * @param history an existing history of context to use as a starting point + * @param history A list of previous interactions with the model to use as a starting point */ public abstract fun startChat(history: List): ChatFutures - /** Returns the [GenerativeModel] instance that was used to create this object */ + /** Returns the [GenerativeModel] object wrapped by this object. */ public abstract fun getGenerativeModel(): GenerativeModel private class FuturesImpl(private val model: GenerativeModel) : GenerativeModelFutures() { From a4212c6b2aa7ebd8a81d82c1b37abfe542120ae4 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 11:41:25 -0400 Subject: [PATCH 07/17] Improve Candidate kdoc --- .../com/google/firebase/vertexai/Chat.kt | 1 - .../vertexai/java/GenerativeModelFutures.kt | 5 +- .../firebase/vertexai/type/Candidate.kt | 52 ++++++++++++++----- 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt index e4bafb2e742..9339ac19ab0 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt @@ -124,7 +124,6 @@ public class Chat( * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. 
* - * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt index db81d0f62e8..9b7d2b1c1a9 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt @@ -64,7 +64,10 @@ public abstract class GenerativeModelFutures internal constructor() { */ public abstract fun countTokens(vararg prompt: Content): ListenableFuture - /** Creates a [ChatFuture] instance which internally tracks the ongoing conversation with the model. */ + /** + * Creates a [ChatFuture] instance which internally tracks the ongoing conversation with the + * model. + */ public abstract fun startChat(): ChatFutures /** diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt index 60937e8c6b8..08cdc7f74bf 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt @@ -18,7 +18,14 @@ package com.google.firebase.vertexai.type import java.util.Calendar -/** A response generated by the model. */ +/** + * A `Candidate` represents a single response generated by the model for a given request. + * + * @property content The actual content generated by the model. + * @property safetyRatings A list of [SafetyRating]s describing the generated content. + * @property citationMetadata Metadata about the sources used to generate this content. + * @property finishReason The reason the model stopped generating content, if it exist. + */ public class Candidate internal constructor( public val content: Content, @@ -27,7 +34,18 @@ internal constructor( public val finishReason: FinishReason? ) -/** Safety rating corresponding to a generated content. */ +/** + * An assessment of the potential harm of some generated content. + * + * The rating will be restricted to a particular [category]. + * + * @property category The category of harm being assessed (e.g., Hate speech). + * @property probability The likelihood of the content causing harm. + * @property probabilityScore A numerical score representing the probability of harm. + * @property blocked Indicates whether the content was blocked due to safety concerns. + * @property severity The severity of the potential harm. + * @property severityScore A numerical score representing the severity of harm. + */ public class SafetyRating internal constructor( public val category: HarmCategory, @@ -47,17 +65,18 @@ internal constructor( public class CitationMetadata internal constructor(public val citations: List) /** - * Provides citation information for sourcing of content provided by the model between a given - * [startIndex] and [endIndex]. + * Represents a citation of content from an external source within the model's output. + * + * When the language model generates text that includes content from another source, + * it should provide a citation to properly attribute the original source. This class + * encapsulates the metadata associated with that citation. 
* - * @property title Title of the attribution. - * @property startIndex The inclusive beginning of a sequence in a model response that derives from - * a cited source. - * @property endIndex The exclusive end of a sequence in a model response that derives from a cited - * source. - * @property uri A link to the cited source, if available. - * @property license The license the cited source work is distributed under, if specified. - * @property publicationDate Publication date of the attribution, if available. + * @property title The title of the cited source, if available. + * @property startIndex The (inclusive) starting index within the model output where the cited content begins. + * @property endIndex The (exclusive) ending index within the model output where the cited content ends. + * @property uri The URI of the cited source, if available. + * @property license The license under which the cited content is distributed under, if available. + * @property publicationDate The date of publication of the cited source, if available. */ public class Citation internal constructor( @@ -69,7 +88,12 @@ internal constructor( public val publicationDate: Calendar? = null ) -/** The reason for content finishing. */ +/** + * Represents the reason why the model stopped generating content. + * + * @property name The name of the finish reason. + * @property ordinal The ordinal value of the finish reason. + */ public class FinishReason private constructor(public val name: String, public val ordinal: Int) { public companion object { /** A new and not yet supported value. */ @@ -84,7 +108,7 @@ public class FinishReason private constructor(public val name: String, public va /** [SafetySetting] prevented the model from outputting content. */ @JvmField public val SAFETY: FinishReason = FinishReason("SAFETY", 3) - /** Model began looping. */ + /** The token generation was stopped because the response was flagged for unauthorized citations. */ @JvmField public val RECITATION: FinishReason = FinishReason("RECITATION", 4) /** Model stopped for another reason. */ From d94b1fb68e95a96850841a6e89a55ac2c618e621 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 11:43:43 -0400 Subject: [PATCH 08/17] Address missing dots --- .../com/google/firebase/vertexai/Chat.kt | 26 +++++++++---------- .../firebase/vertexai/FirebaseVertexAI.kt | 14 +++++----- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt index 9339ac19ab0..9d18de94209 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt @@ -39,7 +39,7 @@ import kotlinx.coroutines.flow.onEach * **Note:** This object is not thread-safe, and calling [sendMessage] multiple times without * waiting for a response will throw an [InvalidStateException]. * - * @param model The model to use for the interaction + * @param model The model to use for the interaction. * @property history The previous content from the chat that has been successfully sent and received * from the model. This will be provided to the model for each message sent as context for the * discussion. @@ -61,8 +61,8 @@ public class Chat( * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. 
- * @throws InvalidStateException if [prompt] is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. + * @throws InvalidStateException if the [Chat] instance has an active request. */ public suspend fun sendMessage(prompt: Content): GenerateContentResponse { prompt.assertComesFromUser() @@ -88,8 +88,8 @@ public class Chat( * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. - * @throws InvalidStateException if [prompt] is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. + * @throws InvalidStateException if the [Chat] instance has an active request. */ public suspend fun sendMessage(prompt: String): GenerateContentResponse { val content = content { text(prompt) } @@ -107,8 +107,8 @@ public class Chat( * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. - * @throws InvalidStateException if [prompt] is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. + * @throws InvalidStateException if the [Chat] instance has an active request. */ public suspend fun sendMessage(prompt: Bitmap): GenerateContentResponse { val content = content { image(prompt) } @@ -126,8 +126,8 @@ public class Chat( * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. - * @throws InvalidStateException if [prompt] is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. + * @throws InvalidStateException if the [Chat] instance has an active request. */ public fun sendMessageStream(prompt: Content): Flow { prompt.assertComesFromUser() @@ -186,8 +186,8 @@ public class Chat( * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. - * @throws InvalidStateException if [prompt] is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. + * @throws InvalidStateException if the [Chat] instance has an active request. */ public fun sendMessageStream(prompt: String): Flow { val content = content { text(prompt) } @@ -205,8 +205,8 @@ public class Chat( * * @param prompt The input(s) that, together with the history, will be given to the model as the * prompt. - * @throws InvalidStateException if [prompt] is not coming from the 'user' role - * @throws InvalidStateException if the [Chat] instance has an active request + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. + * @throws InvalidStateException if the [Chat] instance has an active request. 
*/ public fun sendMessageStream(prompt: Bitmap): Flow { val content = content { image(prompt) } diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt index 03f7b952bce..c3302dede27 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt @@ -42,14 +42,14 @@ internal constructor( /** * Instantiates a new [GenerativeModel] given the provided parameters. * - * @param modelName The name of the model to use, for example "gemini-1.5-pro" - * @param generationConfig The configuration parameters to use for content generation - * @param safetySettings The safety bounds the model will abide to when content generation - * @param tools A list of [Tool]s the model may use to generate content - * @param toolConfig The [ToolConfig] that defines how the model handles the tools provided + * @param modelName The name of the model to use, for example "gemini-1.5-pro". + * @param generationConfig The configuration parameters to use for content generation. + * @param safetySettings The safety bounds the model will abide to when content generation. + * @param tools A list of [Tool]s the model may use to generate content. + * @param toolConfig The [ToolConfig] that defines how the model handles the tools provided. * @param systemInstruction [Content] instructions that direct the model to behave a certain way * currently only text content is supported. - * @param requestOptions Configuration options for sending requests to the backend + * @param requestOptions Configuration options for sending requests to the backend. * @return The initialized [GenerativeModel] instance. */ @JvmOverloads @@ -91,7 +91,7 @@ internal constructor( * Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location]. * * @param location location identifier, defaults to `us-central1`; see available - * [Vertex AI regions](https://firebase.google.com/docs/vertex-ai/locations?platform=android#available-locations) + * [Vertex AI regions](https://firebase.google.com/docs/vertex-ai/locations?platform=android#available-locations). */ @JvmStatic @JvmOverloads From a222c67821df08ee06e9574ededd95a8777b1e6d Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 12:23:52 -0400 Subject: [PATCH 09/17] Improved Content kdoc --- .../firebase/vertexai/FirebaseVertexAI.kt | 3 ++- .../firebase/vertexai/type/Candidate.kt | 16 ++++++----- .../google/firebase/vertexai/type/Content.kt | 27 +++++++++---------- 3 files changed, 25 insertions(+), 21 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt index c3302dede27..41e10dad42e 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt @@ -91,7 +91,8 @@ internal constructor( * Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location]. * * @param location location identifier, defaults to `us-central1`; see available - * [Vertex AI regions](https://firebase.google.com/docs/vertex-ai/locations?platform=android#available-locations). 
+ * [Vertex AI regions](https://firebase.google.com/docs/vertex-ai/locations?platform=android#available-locations) + * . */ @JvmStatic @JvmOverloads diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt index 08cdc7f74bf..b9821210cc3 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt @@ -67,13 +67,15 @@ public class CitationMetadata internal constructor(public val citations: List) { + /** Returns a copy of this object, with the provided parameters overwriting the originals. */ public fun copy(role: String? = this.role, parts: List = this.parts): Content { return Content(role, parts) } @@ -41,9 +43,9 @@ constructor(public val role: String? = "user", public val parts: List) { public var role: String? = "user" /** - * Mutable list of [Part] comprising a single [Content]. + * The mutable list of [Part]s comprising the [Content]. * - * Prefer using the provided helper methods over adding elements to the list directly. + * Prefer using the provided helper methods over modifying this list directly. */ public var parts: MutableList = arrayListOf() @@ -51,24 +53,21 @@ constructor(public val role: String? = "user", public val parts: List) { @JvmName("addPart") public fun part(data: T): Content.Builder = apply { parts.add(data) } - /** Wraps the provided text inside a [TextPart] and adds it to [parts] list. */ + /** Adds a new [TextPart] with the provided [text] to [parts]. */ @JvmName("addText") public fun text(text: String): Content.Builder = part(TextPart(text)) /** - * Wraps the provided [bytes] and [mimeType] inside a [InlineDataPart] and adds it to the - * [parts] list. + * Adds a new [InlineDataPart] with the provided [bytes], which should be interpreted by the + * model based on the [mimeType], to [parts]. */ @JvmName("addInlineData") public fun inlineData(bytes: ByteArray, mimeType: String): Content.Builder = part(InlineDataPart(bytes, mimeType)) - /** Wraps the provided [image] inside an [ImagePart] and adds it to the [parts] list. */ + /** Adds a new [ImagePart] with the provided [image] to [parts]. */ @JvmName("addImage") public fun image(image: Bitmap): Content.Builder = part(ImagePart(image)) - /** - * Wraps the provided Google Cloud Storage for Firebase [uri] and [mimeType] inside a - * [FileDataPart] and adds it to the [parts] list. - */ + /** Adds a new [FileDataPart] with the provided [uri] and [mimeType] to [parts]. */ @JvmName("addFileData") public fun fileData(uri: String, mimeType: String): Content.Builder = part(FileDataPart(uri, mimeType)) @@ -79,7 +78,7 @@ constructor(public val role: String? = "user", public val parts: List) { } /** - * Function to construct content sent to and received in a DSL-like manner. + * Function to build a new [Content] instances in a DSL-like manner. * * Contains a collection of text, image, and binary parts. 
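+ *
+ * For example, an illustrative sketch only (the `bitmap` and the prompt text are assumptions):
+ * ```
+ * val prompt = content {
+ *   image(bitmap)
+ *   text("What is in this picture?")
+ * }
+ * ```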
* From 5587c008cd9801ec0ee55d58fbfd130854cd693d Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 14:00:49 -0400 Subject: [PATCH 10/17] Improve CountTokensResponse kdoc --- .../firebase/vertexai/type/CountTokensResponse.kt | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt index cb8b17009b1..2835deba6f7 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt @@ -17,11 +17,17 @@ package com.google.firebase.vertexai.type /** - * Represents a response measuring model input. + * The model's response to a count tokens request. * - * @property totalTokens A count of the tokens in the input - * @property totalBillableCharacters A count of the characters that are billable in the input, if - * available. + * **Important:** The counters in this class do not include billable image, video or other non-text + * input. See [Vertex AI pricing](https://cloud.google.com/vertex-ai/generative-ai/pricing) for + * details. + * + * @property totalTokens The total number of tokens in the input given to the model as a prompt. + * @property totalBillableCharacters The total number of billable characters in the text input given + * to the model as a prompt. **Important:** this property does not include billable image, video or + * other non-text input. See + * [Vertex AI pricing](https://cloud.google.com/vertex-ai/generative-ai/pricing) for details. */ public class CountTokensResponse( public val totalTokens: Int, From 1349955ac3a98715ed2516bb1ed31e64634dde34 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 15:49:18 -0400 Subject: [PATCH 11/17] Improve functionCalling kdoc --- .../firebase/vertexai/type/FunctionCallingConfig.kt | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt index 30021c0fac9..620a1276cb2 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt @@ -17,13 +17,7 @@ package com.google.firebase.vertexai.type /** - * Contains configuration for function calling from the model. This can be used to force function - * calling predictions or disable them. - * - * @param mode The function calling mode of the model - * @param allowedFunctionNames Function names to call. Only set when the [Mode.ANY]. Function names - * should match [FunctionDeclaration.name]. With [Mode.ANY], model will predict a function call from - * the set of function names provided. + * The configuration that specifies the function calling behavior. */ public class FunctionCallingConfig internal constructor( @@ -52,7 +46,7 @@ internal constructor( public companion object { /** * The default behavior for function calling. The model calls functions to answer queries at its - * discretion + * discretion. 
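+     *
+     * For example, an illustrative sketch only (assumes the single-argument [ToolConfig]
+     * constructor):
+     * ```
+     * val toolConfig = ToolConfig(FunctionCallingConfig.auto())
+     * ```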
*/ @JvmStatic public fun auto(): FunctionCallingConfig = FunctionCallingConfig(Mode.AUTO) From c041cf76c2d33a8493bb393dd566e0e9759e61d0 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 16:30:12 -0400 Subject: [PATCH 12/17] Improve function calling kdoc --- .../vertexai/type/FunctionCallingConfig.kt | 4 +--- .../vertexai/type/FunctionDeclaration.kt | 24 +++++++++++++++---- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt index 620a1276cb2..d6004290ecd 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt @@ -16,9 +16,7 @@ package com.google.firebase.vertexai.type -/** - * The configuration that specifies the function calling behavior. - */ +/** The configuration that specifies the function calling behavior. */ public class FunctionCallingConfig internal constructor( internal val mode: Mode, diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt index 119a36d3eab..672293bb559 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt @@ -17,7 +17,14 @@ package com.google.firebase.vertexai.type /** - * A declared function that a model can be given access to in order to gain info or complete tasks. + * Defines a function that the model can use as a tool. + * + * When generating responses, the model might need external information or require the application + * to perform an action. `FunctionDeclaration` provides the necessary information for the model to + * create a [FunctionCallPart], which instructs the client to execute the corresponding function. + * The client then sends the result back to the model as a [FunctionResponsePart]. + * + * For example * * ``` * val getExchangeRate = FunctionDeclaration( @@ -30,10 +37,17 @@ package com.google.firebase.vertexai.type * ) * ``` * - * @param name The name of the function call, this should be clear and descriptive for the model. - * @param description A description of what the function does and its output. - * @param parameters A list of parameters that the function accepts. - * @param optionalParameters A list of parameters that can be omitted. + * See the + * [Use the Gemini API for function calling](https://firebase.google.com/docs/vertex-ai/function-calling?platform=android) + * guide for more information on function calling. + * + * @param name The name of the function. + * @param description The description of what the function does and its output. To improve the + * effectiveness of the model, the description should be clear and detailed. + * @param parameters The map of parameters names to their [Schema] the function accepts as + * arguments. + * @param optionalParameters The list of parameter names that the model can omit when invoking this + * function. 
* @see Schema */ public class FunctionDeclaration( From 1afb09af01436f9b37ed913477108069d2903202 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 16:50:28 -0400 Subject: [PATCH 13/17] Improve GenerateContentResponse kdoc --- .../firebase/vertexai/type/GenerateContentResponse.kt | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt index 61eb9218f33..1aea561b494 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt @@ -19,23 +19,24 @@ package com.google.firebase.vertexai.type import android.util.Log /** - * Represents a response from the model. + * A response from the model. * - * @property candidates a list of possible responses generated from the model - * @property promptFeedback optional feedback for the given prompt. When streaming, it's only + * @property candidates The list of [Candidate] responses generated by the model. + * @property promptFeedback Feedback about the prompt send to the model to generate this response. When streaming, it's only * populated in the first response. + * @property usageMetadata Information about the number of tokens in the prompt and in the response. */ public class GenerateContentResponse( public val candidates: List, public val promptFeedback: PromptFeedback?, public val usageMetadata: UsageMetadata?, ) { - /** Convenience field representing all the text parts in the response, if they exists. */ + /** Convenience field representing all the text parts in the response as a single string, if they exists. */ public val text: String? by lazy { candidates.first().content.parts.filterIsInstance().joinToString(" ") { it.text } } - /** Convenience field to get all the function call parts in the request, if they exist */ + /** Convenience field to list all the [FunctionCallPart]s in the response, if they exist. */ public val functionCalls: List by lazy { candidates.first().content.parts.filterIsInstance() } From 8c5bc4c6d97269cb214d0ce0d9e89336bc70d2de Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Date: Thu, 10 Oct 2024 17:42:51 -0400 Subject: [PATCH 14/17] Apply suggestions from code review Co-authored-by: Daymon <17409137+daymxn@users.noreply.github.com> --- .../com/google/firebase/vertexai/Chat.kt | 18 ++++++++---------- .../firebase/vertexai/FirebaseVertexAI.kt | 2 +- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt index 9d18de94209..3360a101707 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt @@ -41,8 +41,8 @@ import kotlinx.coroutines.flow.onEach * * @param model The model to use for the interaction. * @property history The previous content from the chat that has been successfully sent and received - * from the model. This will be provided to the model for each message sent as context for the - * discussion. + * from the model. This will be provided to the model for each message sent (as context for the + * discussion). 
*/ public class Chat( private val model: GenerativeModel, @@ -51,15 +51,14 @@ public class Chat( private var lock = Semaphore(1) /** - * Sends a message using the existing history of this chat as context and the provided [Content] - * prompt. + * Sends a message using the provided [prompt]; automatically providing the existing [history] as context. * - * If successful, the message and response will be added to the history. If unsuccessful, history + * If successful, the message and response will be added to the [history]. If unsuccessful, [history] * will remain unchanged. * * This function should be called within a suspend context to properly manage concurrency. * - * @param prompt The input(s) that, together with the history, will be given to the model as the + * @param prompt The input(s) that, together with the [history], will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. @@ -78,15 +77,14 @@ public class Chat( } /** - * Sends a message using the existing history of this chat as context and the provided text - * prompt. + * Sends a message using the provided [text prompt][prompt]; automatically providing the existing [history] as context. * - * If successful, the message and response will be added to the history. If unsuccessful, history + * If successful, the message and response will be added to the [history]. If unsuccessful, [history] * will remain unchanged. * * This function should be called within a suspend context to properly manage concurrency. * - * @param prompt The input(s) that, together with the history, will be given to the model as the + * @param prompt The input(s) that, together with the [history], will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt index 41e10dad42e..2a7864cef61 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt @@ -44,7 +44,7 @@ internal constructor( * * @param modelName The name of the model to use, for example "gemini-1.5-pro". * @param generationConfig The configuration parameters to use for content generation. - * @param safetySettings The safety bounds the model will abide to when content generation. + * @param safetySettings The safety bounds the model will abide to during content generation. * @param tools A list of [Tool]s the model may use to generate content. * @param toolConfig The [ToolConfig] that defines how the model handles the tools provided. 
* @param systemInstruction [Content] instructions that direct the model to behave a certain way From 7696b7a519f6ef7581611381654c4f3c5c459f51 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 17:44:21 -0400 Subject: [PATCH 15/17] Address comments --- .../com/google/firebase/vertexai/Chat.kt | 18 ++++++------------ .../firebase/vertexai/FirebaseVertexAI.kt | 4 ++-- .../firebase/vertexai/GenerativeModel.kt | 16 ++++++++++------ .../google/firebase/vertexai/type/Candidate.kt | 3 ++- .../vertexai/type/GenerateContentResponse.kt | 11 +++++++---- 5 files changed, 27 insertions(+), 25 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt index 3360a101707..66b8614feaa 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt @@ -33,7 +33,7 @@ import kotlinx.coroutines.flow.onEach /** * Representation of a multi-turn interaction with a model. * - * Captures, and stores the history of communication in memory, and provides it as context with each + * Captures and stores the history of communication in memory, and provides it as context with each * new message. * * **Note:** This object is not thread-safe, and calling [sendMessage] multiple times without @@ -56,9 +56,7 @@ public class Chat( * If successful, the message and response will be added to the [history]. If unsuccessful, [history] * will remain unchanged. * - * This function should be called within a suspend context to properly manage concurrency. - * - * @param prompt The input(s) that, together with the [history], will be given to the model as the + * @param prompt The input that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. @@ -82,9 +80,7 @@ public class Chat( * If successful, the message and response will be added to the [history]. If unsuccessful, [history] * will remain unchanged. * - * This function should be called within a suspend context to properly manage concurrency. - * - * @param prompt The input(s) that, together with the [history], will be given to the model as the + * @param prompt The input that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. @@ -101,9 +97,7 @@ public class Chat( * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. * - * This function should be called within a suspend context to properly manage concurrency. - * - * @param prompt The input(s) that, together with the history, will be given to the model as the + * @param prompt The input that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. @@ -122,7 +116,7 @@ public class Chat( * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. 
* - * @param prompt The input(s) that, together with the history, will be given to the model as the + * @param prompt The input that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. @@ -201,7 +195,7 @@ public class Chat( * If successful, the message and response will be added to the history. If unsuccessful, history * will remain unchanged. * - * @param prompt The input(s) that, together with the history, will be given to the model as the + * @param prompt The input that, together with the history, will be given to the model as the * prompt. * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt index 2a7864cef61..5b81177b8d5 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt @@ -47,8 +47,8 @@ internal constructor( * @param safetySettings The safety bounds the model will abide to during content generation. * @param tools A list of [Tool]s the model may use to generate content. * @param toolConfig The [ToolConfig] that defines how the model handles the tools provided. - * @param systemInstruction [Content] instructions that direct the model to behave a certain way - * currently only text content is supported. + * @param systemInstruction [Content] instructions that direct the model to behave a certain way. + * Currently only text content is supported. * @param requestOptions Configuration options for sending requests to the backend. * @return The initialized [GenerativeModel] instance. */ diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt index c219f87892d..872fe61823f 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt @@ -125,11 +125,10 @@ internal constructor( /** * Generates new content from the input [Content] given to the model as a prompt. * - * This function should be called within a suspend context to properly manage concurrency. - * * @param prompt The input(s) given to the model as a prompt. * @return The content generated by the model. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse = try { @@ -144,6 +143,7 @@ internal constructor( * @param prompt The input(s) given to the model as a prompt. * @return A [Flow] which will emit responses as they are returned by the model. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public fun generateContentStream(vararg prompt: Content): Flow = controller @@ -154,11 +154,10 @@ internal constructor( /** * Generates new content from the text input given to the model as a prompt. * - * This function should be called within a suspend context to properly manage concurrency. 
- * * @param prompt The text to be send to the model as a prompt. * @return The content generated by the model. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun generateContent(prompt: String): GenerateContentResponse = generateContent(content { text(prompt) }) @@ -169,6 +168,7 @@ internal constructor( * @param prompt The text to be send to the model as a prompt. * @return A [Flow] which will emit responses as they are returned by the model. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public fun generateContentStream(prompt: String): Flow = generateContentStream(content { text(prompt) }) @@ -177,9 +177,9 @@ internal constructor( * Generates new content from the image input given to the model as a prompt. * * @param prompt The image to be converted into a single piece of [Content] to send to the model. - * @return A [GenerateContentResponse] after some delay. Function should be called within a - * suspend context to properly manage concurrency. + * @return A [GenerateContentResponse] after some delay. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun generateContent(prompt: Bitmap): GenerateContentResponse = generateContent(content { image(prompt) }) @@ -190,6 +190,7 @@ internal constructor( * @param prompt The image to be converted into a single piece of [Content] to send to the model. * @return A [Flow] which will emit responses as they are returned by the model. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public fun generateContentStream(prompt: Bitmap): Flow = generateContentStream(content { image(prompt) }) @@ -204,6 +205,7 @@ internal constructor( * @param prompt The input(s) given to the model as a prompt. * @return The [CountTokensResponse] of running the model's tokenizer on the input. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun countTokens(vararg prompt: Content): CountTokensResponse { try { @@ -219,6 +221,7 @@ internal constructor( * @param prompt The text given to the model as a prompt. * @return The [CountTokensResponse] of running the model's tokenizer on the input. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun countTokens(prompt: String): CountTokensResponse { return countTokens(content { text(prompt) }) @@ -230,6 +233,7 @@ internal constructor( * @param prompt The image given to the model as a prompt. * @return The [CountTokensResponse] of running the model's tokenizer on the input. * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. 
 */
  public suspend fun countTokens(prompt: Bitmap): CountTokensResponse {
    return countTokens(content { image(prompt) })
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt
index b9821210cc3..6d8d96eb047 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt
@@ -41,7 +41,8 @@ internal constructor(
 *
 * @property category The category of harm being assessed (e.g., Hate speech).
 * @property probability The likelihood of the content causing harm.
- * @property probabilityScore A numerical score representing the probability of harm.
+ * @property probabilityScore A numerical score representing the probability of harm, between 0 and
+ * 1.
 * @property blocked Indicates whether the content was blocked due to safety concerns.
 * @property severity The severity of the potential harm.
 * @property severityScore A numerical score representing the severity of harm.
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt
index 1aea561b494..236a2180f23 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt
@@ -19,11 +19,11 @@ package com.google.firebase.vertexai.type
 import android.util.Log

 /**
- * A response from the model.
+ * A response from the model.
 *
 * @property candidates The list of [Candidate] responses generated by the model.
- * @property promptFeedback Feedback about the prompt sent to the model to generate this response. When streaming, it's only
- * populated in the first response.
+ * @property promptFeedback Feedback about the prompt sent to the model to generate this response.
+ * When streaming, it's only populated in the first response.
 * @property usageMetadata Information about the number of tokens in the prompt and in the response.
 */
 public class GenerateContentResponse(
   public val candidates: List,
   public val promptFeedback: PromptFeedback?,
   public val usageMetadata: UsageMetadata?,
 ) {
-  /** Convenience field representing all the text parts in the response as a single string, if they exist. */
+  /**
+   * Convenience field representing all the text parts in the response as a single string, if they
+   * exist.
+   */
  public val text: String?
    by lazy {
      candidates.first().content.parts.filterIsInstance().joinToString(" ") { it.text }
    }

From ec3e02bfdf542b5361b74daaa8eb89af5b9cffcf Mon Sep 17 00:00:00 2001
From: Rodrigo Lazo Paz
Date: Thu, 10 Oct 2024 18:14:49 -0400
Subject: [PATCH 16/17] Function calling improvements

---
 .../kotlin/com/google/firebase/vertexai/Chat.kt | 14 ++++++++------
 .../vertexai/type/FunctionCallingConfig.kt      |  6 +++++-
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt
index 66b8614feaa..594be41310a 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt
@@ -51,10 +51,11 @@ public class Chat(
   private var lock = Semaphore(1)

   /**
-   * Sends a message using the provided [prompt]; automatically providing the existing [history] as context.
+   * Sends a message using the provided [prompt]; automatically providing the existing [history] as
+   * context.
    *
-   * If successful, the message and response will be added to the [history]. If unsuccessful, [history]
-   * will remain unchanged.
+   * If successful, the message and response will be added to the [history]. If unsuccessful,
+   * [history] will remain unchanged.
    *
    * @param prompt The input that, together with the history, will be given to the model as the
    * prompt.
    * @throws InvalidStateException if [prompt] is not coming from the 'user' role.
    * @throws InvalidStateException if the [Chat] instance has an active request.
@@ -75,10 +76,11 @@ public class Chat(
   }

   /**
-   * Sends a message using the provided [text prompt][prompt]; automatically providing the existing [history] as context.
+   * Sends a message using the provided [text prompt][prompt]; automatically providing the existing
+   * [history] as context.
    *
-   * If successful, the message and response will be added to the [history]. If unsuccessful, [history]
-   * will remain unchanged.
+   * If successful, the message and response will be added to the [history]. If unsuccessful,
+   * [history] will remain unchanged.
    *
    * @param prompt The input that, together with the history, will be given to the model as the
    * prompt.
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt
index d6004290ecd..a2ea9b1d01e 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt
@@ -16,7 +16,11 @@ package com.google.firebase.vertexai.type

-/** The configuration that specifies the function calling behavior. */
+/**
+ * The configuration that specifies the function calling behavior.
+ *
+ * See the static methods in the `companion object` for the list of available behaviors.
+ */ public class FunctionCallingConfig internal constructor( internal val mode: Mode, From 0fe01beeb970d3bceb1e1f3be4e251f31abf4361 Mon Sep 17 00:00:00 2001 From: Rodrigo Lazo Paz Date: Thu, 10 Oct 2024 18:21:24 -0400 Subject: [PATCH 17/17] Address comments --- .../main/kotlin/com/google/firebase/vertexai/type/Content.kt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt index 05e9005e1af..0168f524de8 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt @@ -24,7 +24,8 @@ import android.graphics.Bitmap * `Content` is composed of a one or more heterogeneous parts that can be represent data in * different formats, like text or images. * - * @param role The producer of the content. By default, it's "user". + * @param role The producer of the content. Must be either 'user' or 'model'. By default, it's + * "user". * @param parts An ordered list of [Part] that constitute this content. */ public class Content @@ -39,7 +40,7 @@ constructor(public val role: String? = "user", public val parts: List) { /** Builder class to facilitate constructing complex [Content] objects. */ public class Builder { - /** The producer of the content. By default, it's "user". */ + /** The producer of the content. Must be either 'user' or 'model'. By default, it's "user". */ public var role: String? = "user" /**