diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt index 9e0fc1bc2aa..594be41310a 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/Chat.kt @@ -33,14 +33,16 @@ import kotlinx.coroutines.flow.onEach /** * Representation of a multi-turn interaction with a model. * - * Handles the capturing and storage of the communication with the model, providing methods for - * further interaction. + * Captures and stores the history of communication in memory, and provides it as context with each + * new message. * * **Note:** This object is not thread-safe, and calling [sendMessage] multiple times without * waiting for a response will throw an [InvalidStateException]. * - * @param model The model to use for the interaction - * @property history The previous interactions with the model + * @param model The model to use for the interaction. + * @property history The previous content from the chat that has been successfully sent and received + * from the model. This will be provided to the model for each message sent (as context for the + * discussion). */ public class Chat( private val model: GenerativeModel, @@ -49,11 +51,15 @@ public class Chat( private var lock = Semaphore(1) /** - * Generates a response from the backend with the provided [Content], and any previous ones - * sent/returned from this chat. + * Sends a message using the provided [prompt]; automatically providing the existing [history] as + * context. * - * @param prompt A [Content] to send to the model. - * @throws InvalidStateException if the prompt is not coming from the 'user' role + * If successful, the message and response will be added to the [history]. If unsuccessful, + * [history] will remain unchanged. 
+ * + * @param prompt The input that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. */ public suspend fun sendMessage(prompt: Content): GenerateContentResponse { @@ -70,9 +76,15 @@ public class Chat( } /** - * Generates a response from the backend with the provided text prompt. + * Sends a message using the provided [text prompt][prompt]; automatically providing the existing + * [history] as context. + * + * If successful, the message and response will be added to the [history]. If unsuccessful, + * [history] will remain unchanged. * - * @param prompt The text to be converted into a single piece of [Content] to send to the model. + * @param prompt The input that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. */ public suspend fun sendMessage(prompt: String): GenerateContentResponse { @@ -81,9 +93,15 @@ public class Chat( } /** - * Generates a response from the backend with the provided image prompt. + * Sends a message using the existing history of this chat as context and the provided image + * prompt. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. * - * @param prompt The image to be converted into a single piece of [Content] to send to the model. + * @param prompt The input that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. 
*/ public suspend fun sendMessage(prompt: Bitmap): GenerateContentResponse { @@ -92,11 +110,17 @@ public class Chat( } /** - * Generates a streaming response from the backend with the provided [Content]. + * Sends a message using the existing history of this chat as context and the provided [Content] + * prompt. + * + * The response from the model is returned as a stream. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. * - * @param prompt A [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. - * @throws InvalidStateException if the prompt is not coming from the 'user' role + * @param prompt The input that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. */ public fun sendMessageStream(prompt: Content): Flow { @@ -146,10 +170,17 @@ public class Chat( } /** - * Generates a streaming response from the backend with the provided text prompt. + * Sends a message using the existing history of this chat as context and the provided text + * prompt. * - * @param prompt a text to be converted into a single piece of [Content] to send to the model - * @return A [Flow] which will emit responses as they are returned from the model. + * The response from the model is returned as a stream. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. + * + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. 
*/ public fun sendMessageStream(prompt: String): Flow { @@ -158,10 +189,17 @@ public class Chat( } /** - * Generates a streaming response from the backend with the provided image prompt. + * Sends a message using the existing history of this chat as context and the provided image + * prompt. + * + * The response from the model is returned as a stream. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. * - * @param prompt A [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. + * @param prompt The input that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role. * @throws InvalidStateException if the [Chat] instance has an active request. */ public fun sendMessageStream(prompt: Bitmap): Flow { diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt index 145dd90b121..5b81177b8d5 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAI.kt @@ -42,13 +42,15 @@ internal constructor( /** * Instantiates a new [GenerativeModel] given the provided parameters. 
* - * @param modelName name of the model in the backend - * @param generationConfig configuration parameters to use for content generation - * @param safetySettings safety bounds to use during alongside prompts during content generation - * @param requestOptions configuration options to utilize during backend communication - * @param tools list of tools to make available to the model - * @param toolConfig configuration that defines how the model handles the tools provided - * @param systemInstruction contains a [Content] that directs the model to behave a certain way + * @param modelName The name of the model to use, for example "gemini-1.5-pro". + * @param generationConfig The configuration parameters to use for content generation. + * @param safetySettings The safety bounds the model will abide to during content generation. + * @param tools A list of [Tool]s the model may use to generate content. + * @param toolConfig The [ToolConfig] that defines how the model handles the tools provided. + * @param systemInstruction [Content] instructions that direct the model to behave a certain way. + * Currently only text content is supported. + * @param requestOptions Configuration options for sending requests to the backend. + * @return The initialized [GenerativeModel] instance. */ @JvmOverloads public fun generativeModel( @@ -86,10 +88,11 @@ internal constructor( @JvmStatic public fun getInstance(app: FirebaseApp): FirebaseVertexAI = getInstance(app) /** - * Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location] + * Returns the [FirebaseVertexAI] instance for the provided [FirebaseApp] and [location]. * * @param location location identifier, defaults to `us-central1`; see available * [Vertex AI regions](https://firebase.google.com/docs/vertex-ai/locations?platform=android#available-locations) + * . 
*/ @JvmStatic @JvmOverloads diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt index 996ff601483..213351fdc92 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/FirebaseVertexAIMultiResourceComponent.kt @@ -22,7 +22,11 @@ import com.google.firebase.appcheck.interop.InteropAppCheckTokenProvider import com.google.firebase.auth.internal.InternalAuthProvider import com.google.firebase.inject.Provider -/** Multi-resource container for Firebase Vertex AI */ +/** + * Multi-resource container for Firebase Vertex AI. + * + * @hide + */ internal class FirebaseVertexAIMultiResourceComponent( private val app: FirebaseApp, private val appCheckProvider: Provider, diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt index 7c50759ac73..872fe61823f 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/GenerativeModel.kt @@ -48,7 +48,8 @@ import kotlinx.coroutines.flow.map import kotlinx.coroutines.tasks.await /** - * A controller for communicating with the API of a given multimodal model (for example, Gemini). + * Represents a multimodal model (like Gemini), capable of generating content based on various input + * types. */ public class GenerativeModel internal constructor( @@ -122,11 +123,12 @@ internal constructor( ) /** - * Generates a [GenerateContentResponse] from the backend with the provided [Content]. + * Generates new content from the input [Content] given to the model as a prompt. * - * @param prompt [Content] to send to the model. 
- * @return A [GenerateContentResponse]. Function should be called within a suspend context to - * properly manage concurrency. + * @param prompt The input(s) given to the model as a prompt. + * @return The content generated by the model. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse = try { @@ -136,10 +138,12 @@ internal constructor( } /** - * Generates a streaming response from the backend with the provided [Content]. + * Generates new content as a stream from the input [Content] given to the model as a prompt. * - * @param prompt [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. + * @param prompt The input(s) given to the model as a prompt. + * @return A [Flow] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public fun generateContentStream(vararg prompt: Content): Flow = controller .generateContentStream(constructRequest(*prompt)) .map { it.toPublic().validate() } /** - * Generates a [GenerateContentResponse] from the backend with the provided text prompt. + * Generates new content from the text input given to the model as a prompt. * - * @param prompt The text to be converted into a single piece of [Content] to send to the model. - * @return A [GenerateContentResponse] after some delay. Function should be called within a - * suspend context to properly manage concurrency. + * @param prompt The text to be sent to the model as a prompt. + * @return The content generated by the model. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. 
*/ public suspend fun generateContent(prompt: String): GenerateContentResponse = generateContent(content { text(prompt) }) /** - * Generates a streaming response from the backend with the provided text prompt. + * Generates new content as a stream from the text input given to the model as a prompt. * - * @param prompt The text to be converted into a single piece of [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. + * @param prompt The text to be sent to the model as a prompt. + * @return A [Flow] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public fun generateContentStream(prompt: String): Flow = generateContentStream(content { text(prompt) }) /** - * Generates a [GenerateContentResponse] from the backend with the provided image prompt. + * Generates new content from the image input given to the model as a prompt. * * @param prompt The image to be converted into a single piece of [Content] to send to the model. - * @return A [GenerateContentResponse] after some delay. Function should be called within a - * suspend context to properly manage concurrency. + * @return A [GenerateContentResponse] after some delay. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun generateContent(prompt: Bitmap): GenerateContentResponse = generateContent(content { image(prompt) }) /** - * Generates a streaming response from the backend with the provided image prompt. + * Generates new content as a stream from the image input given to the model as a prompt. * * @param prompt The image to be converted into a single piece of [Content] to send to the model. - * @return A [Flow] which will emit responses as they are returned from the model. 
+ * @return A [Flow] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public fun generateContentStream(prompt: Bitmap): Flow = generateContentStream(content { image(prompt) }) - /** Creates a [Chat] instance which internally tracks the ongoing conversation with the model */ + /** Creates a [Chat] instance using this model with the optionally provided history. */ public fun startChat(history: List = emptyList()): Chat = Chat(this, history.toMutableList()) /** - * Counts the amount of tokens in a prompt. + * Counts the number of tokens in a prompt using the model's tokenizer. * - * @param prompt A group of [Content] to count tokens of. - * @return A [CountTokensResponse] containing the amount of tokens in the prompt. + * @param prompt The input(s) given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun countTokens(vararg prompt: Content): CountTokensResponse { try { @@ -204,20 +216,24 @@ internal constructor( } /** - * Counts the amount of tokens in the text prompt. + * Counts the number of tokens in a text prompt using the model's tokenizer. * - * @param prompt The text to be converted to a single piece of [Content] to count the tokens of. - * @return A [CountTokensResponse] containing the amount of tokens in the prompt. + * @param prompt The text given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. 
*/ public suspend fun countTokens(prompt: String): CountTokensResponse { return countTokens(content { text(prompt) }) } /** - * Counts the amount of tokens in the image prompt. + * Counts the number of tokens in an image prompt using the model's tokenizer. * - * @param prompt The image to be converted to a single piece of [Content] to count the tokens of. - * @return A [CountTokensResponse] containing the amount of tokens in the prompt. + * @param prompt The image given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. + * @see [FirebaseVertexAIException] for types of errors. */ public suspend fun countTokens(prompt: Bitmap): CountTokensResponse { return countTokens(content { image(prompt) }) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt index d6b1a4e5e22..a6a4212fbd6 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/ChatFutures.kt @@ -25,28 +25,43 @@ import kotlinx.coroutines.reactive.asPublisher import org.reactivestreams.Publisher /** - * Helper method for interacting with a [Chat] from Java. + * Wrapper class providing Java compatible methods for [Chat]. * - * @see from + * @see [Chat] */ public abstract class ChatFutures internal constructor() { /** - * Generates a response from the backend with the provided [Content], and any previous ones - * sent/returned from this chat. + * Sends a message using the existing history of this chat as context and the provided [Content] + * prompt. * - * @param prompt A [Content] to send to the model. + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. 
+ * + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public abstract fun sendMessage(prompt: Content): ListenableFuture /** - * Generates a streaming response from the backend with the provided [Content]. + * Sends a message using the existing history of this chat as context and the provided [Content] + * prompt. + * + * The response from the model is returned as a stream. + * + * If successful, the message and response will be added to the history. If unsuccessful, history + * will remain unchanged. * - * @param prompt A [Content] to send to the model. + * @param prompt The input(s) that, together with the history, will be given to the model as the + * prompt. + * @throws InvalidStateException if [prompt] is not coming from the 'user' role + * @throws InvalidStateException if the [Chat] instance has an active request */ public abstract fun sendMessageStream(prompt: Content): Publisher - /** Returns the [Chat] instance that was used to create this instance */ + /** Returns the [Chat] object wrapped by this object. */ public abstract fun getChat(): Chat private class FuturesImpl(private val chat: Chat) : ChatFutures() { diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt index fe43e0b69a2..9b7d2b1c1a9 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/java/GenerativeModelFutures.kt @@ -27,48 +27,57 @@ import kotlinx.coroutines.reactive.asPublisher import org.reactivestreams.Publisher /** - * Helper method for interacting with a [GenerativeModel] from Java. 
+ * Wrapper class providing Java compatible methods for [GenerativeModel]. * - * @see from + * @see [GenerativeModel] */ public abstract class GenerativeModelFutures internal constructor() { /** - * Generates a response from the backend with the provided [Content]. + * Generates new content from the input [Content] given to the model as a prompt. * - * @param prompt A group of [Content] to send to the model. + * @param prompt The input(s) given to the model as a prompt. + * @return The content generated by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public abstract fun generateContent( vararg prompt: Content ): ListenableFuture /** - * Generates a streaming response from the backend with the provided [Content]. + * Generates new content as a stream from the input [Content] given to the model as a prompt. * - * @param prompt A group of [Content] to send to the model. + * @param prompt The input(s) given to the model as a prompt. + * @return A [Publisher] which will emit responses as they are returned by the model. + * @throws [FirebaseVertexAIException] if the request failed. */ public abstract fun generateContentStream( vararg prompt: Content ): Publisher /** - * Counts the number of tokens used in a prompt. + * Counts the number of tokens in a prompt using the model's tokenizer. * - * @param prompt A group of [Content] to count tokens of. + * @param prompt The input(s) given to the model as a prompt. + * @return The [CountTokensResponse] of running the model's tokenizer on the input. + * @throws [FirebaseVertexAIException] if the request failed. */ public abstract fun countTokens(vararg prompt: Content): ListenableFuture - /** Creates a chat instance which internally tracks the ongoing conversation with the model */ + /** + * Creates a [ChatFutures] instance which internally tracks the ongoing conversation with the + * model. 
+ */ public abstract fun startChat(): ChatFutures /** - * Creates a chat instance which internally tracks the ongoing conversation with the model + * Creates a [ChatFutures] instance, initialized using the optionally provided [history]. * - * @param history an existing history of context to use as a starting point + * @param history A list of previous interactions with the model to use as a starting point */ public abstract fun startChat(history: List): ChatFutures - /** Returns the [GenerativeModel] instance that was used to create this object */ + /** Returns the [GenerativeModel] object wrapped by this object. */ public abstract fun getGenerativeModel(): GenerativeModel private class FuturesImpl(private val model: GenerativeModel) : GenerativeModelFutures() { diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt index 60937e8c6b8..6d8d96eb047 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt @@ -18,7 +18,14 @@ package com.google.firebase.vertexai.type import java.util.Calendar -/** A response generated by the model. */ +/** + * A `Candidate` represents a single response generated by the model for a given request. + * + * @property content The actual content generated by the model. + * @property safetyRatings A list of [SafetyRating]s describing the generated content. + * @property citationMetadata Metadata about the sources used to generate this content. + * @property finishReason The reason the model stopped generating content, if it exists. + */ public class Candidate internal constructor( public val content: Content, @@ -27,7 +34,19 @@ internal constructor( public val finishReason: FinishReason? ) -/** Safety rating corresponding to a generated content. 
*/ +/** + * An assessment of the potential harm of some generated content. + * + * The rating will be restricted to a particular [category]. + * + * @property category The category of harm being assessed (e.g., Hate speech). + * @property probability The likelihood of the content causing harm. + * @property probabilityScore A numerical score representing the probability of harm, between 0 and + * 1. + * @property blocked Indicates whether the content was blocked due to safety concerns. + * @property severity The severity of the potential harm. + * @property severityScore A numerical score representing the severity of harm. + */ public class SafetyRating internal constructor( public val category: HarmCategory, @@ -47,17 +66,20 @@ internal constructor( public class CitationMetadata internal constructor(public val citations: List) /** - * Provides citation information for sourcing of content provided by the model between a given - * [startIndex] and [endIndex]. + * Represents a citation of content from an external source within the model's output. + * + * When the language model generates text that includes content from another source, it should + * provide a citation to properly attribute the original source. This class encapsulates the + * metadata associated with that citation. * - * @property title Title of the attribution. - * @property startIndex The inclusive beginning of a sequence in a model response that derives from - * a cited source. - * @property endIndex The exclusive end of a sequence in a model response that derives from a cited - * source. - * @property uri A link to the cited source, if available. - * @property license The license the cited source work is distributed under, if specified. - * @property publicationDate Publication date of the attribution, if available. + * @property title The title of the cited source, if available. + * @property startIndex The (inclusive) starting index within the model output where the cited + * content begins. 
+ * @property endIndex The (exclusive) ending index within the model output where the cited content + * ends. + * @property uri The URI of the cited source, if available. + * @property license The license under which the cited content is distributed, if available. + * @property publicationDate The date of publication of the cited source, if available. */ public class Citation internal constructor( @@ -69,7 +91,12 @@ internal constructor( public val publicationDate: Calendar? = null ) -/** The reason for content finishing. */ +/** + * Represents the reason why the model stopped generating content. + * + * @property name The name of the finish reason. + * @property ordinal The ordinal value of the finish reason. + */ public class FinishReason private constructor(public val name: String, public val ordinal: Int) { public companion object { /** A new and not yet supported value. */ @@ -84,7 +111,9 @@ public class FinishReason private constructor(public val name: String, public va /** [SafetySetting] prevented the model from outputting content. */ @JvmField public val SAFETY: FinishReason = FinishReason("SAFETY", 3) - /** Model began looping. */ + /** + * The token generation was stopped because the response was flagged for unauthorized citations. + */ @JvmField public val RECITATION: FinishReason = FinishReason("RECITATION", 4) /** Model stopped for another reason. */ diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt index d34502d4cbd..0168f524de8 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Content.kt @@ -21,15 +21,18 @@ import android.graphics.Bitmap /** * Represents content sent to and received from the model. * - * @param role the producer of the content. By default, it's "user". 
- * @param parts ordered list of [Part] that constitute a single message. + * `Content` is composed of one or more heterogeneous parts that can represent data in + * different formats, like text or images. * - * @see content + * @param role The producer of the content. Must be either 'user' or 'model'. By default, it's + * "user". + * @param parts An ordered list of [Part] that constitute this content. */ public class Content @JvmOverloads constructor(public val role: String? = "user", public val parts: List) { + /** Returns a copy of this object, with the provided parameters overwriting the originals. */ public fun copy(role: String? = this.role, parts: List = this.parts): Content { return Content(role, parts) } @@ -37,13 +40,13 @@ constructor(public val role: String? = "user", public val parts: List) { /** Builder class to facilitate constructing complex [Content] objects. */ public class Builder { - /** The producer of the content. By default, it's "user". */ + /** The producer of the content. Must be either 'user' or 'model'. By default, it's "user". */ public var role: String? = "user" /** - * Mutable list of [Part] comprising a single [Content]. + * The mutable list of [Part]s comprising the [Content]. * - * Prefer using the provided helper methods over adding elements to the list directly. + * Prefer using the provided helper methods over modifying this list directly. */ public var parts: MutableList = arrayListOf() @@ -51,24 +54,21 @@ constructor(public val role: String? = "user", public val parts: List) { @JvmName("addPart") public fun part(data: T): Content.Builder = apply { parts.add(data) } - /** Wraps the provided text inside a [TextPart] and adds it to [parts] list. */ + /** Adds a new [TextPart] with the provided [text] to [parts]. */ @JvmName("addText") public fun text(text: String): Content.Builder = part(TextPart(text)) /** - * Wraps the provided [bytes] and [mimeType] inside a [InlineDataPart] and adds it to the - * [parts] list. 
+ * Adds a new [InlineDataPart] with the provided [bytes], which should be interpreted by the + * model based on the [mimeType], to [parts]. */ @JvmName("addInlineData") public fun inlineData(bytes: ByteArray, mimeType: String): Content.Builder = part(InlineDataPart(bytes, mimeType)) - /** Wraps the provided [image] inside an [ImagePart] and adds it to the [parts] list. */ + /** Adds a new [ImagePart] with the provided [image] to [parts]. */ @JvmName("addImage") public fun image(image: Bitmap): Content.Builder = part(ImagePart(image)) - /** - * Wraps the provided Google Cloud Storage for Firebase [uri] and [mimeType] inside a - * [FileDataPart] and adds it to the [parts] list. - */ + /** Adds a new [FileDataPart] with the provided [uri] and [mimeType] to [parts]. */ @JvmName("addFileData") public fun fileData(uri: String, mimeType: String): Content.Builder = part(FileDataPart(uri, mimeType)) @@ -79,7 +79,7 @@ constructor(public val role: String? = "user", public val parts: List) { } /** - * Function to construct content sent to and received in a DSL-like manner. + * Function to build new [Content] instances in a DSL-like manner. * * Contains a collection of text, image, and binary parts. * diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt index cb8b17009b1..2835deba6f7 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt @@ -17,11 +17,17 @@ package com.google.firebase.vertexai.type /** - * Represents a response measuring model input. + * The model's response to a count tokens request. * - * @property totalTokens A count of the tokens in the input - * @property totalBillableCharacters A count of the characters that are billable in the input, if - * available. 
+ * **Important:** The counters in this class do not include billable image, video or other non-text + * input. See [Vertex AI pricing](https://cloud.google.com/vertex-ai/generative-ai/pricing) for + * details. + * + * @property totalTokens The total number of tokens in the input given to the model as a prompt. + * @property totalBillableCharacters The total number of billable characters in the text input given + * to the model as a prompt. **Important:** this property does not include billable image, video or + * other non-text input. See + * [Vertex AI pricing](https://cloud.google.com/vertex-ai/generative-ai/pricing) for details. */ public class CountTokensResponse( public val totalTokens: Int, diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt index 30021c0fac9..a2ea9b1d01e 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionCallingConfig.kt @@ -17,13 +17,9 @@ package com.google.firebase.vertexai.type /** - * Contains configuration for function calling from the model. This can be used to force function - * calling predictions or disable them. + * The configuration that specifies the function calling behavior. * - * @param mode The function calling mode of the model - * @param allowedFunctionNames Function names to call. Only set when the [Mode.ANY]. Function names - * should match [FunctionDeclaration.name]. With [Mode.ANY], model will predict a function call from - * the set of function names provided. + * See the static methods in the `companion object` for the list of available behaviors. */ public class FunctionCallingConfig internal constructor( @@ -52,7 +48,7 @@ internal constructor( public companion object { /** * The default behavior for function calling. 
The model calls functions to answer queries at its - * discretion + * discretion. */ @JvmStatic public fun auto(): FunctionCallingConfig = FunctionCallingConfig(Mode.AUTO) diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt index 119a36d3eab..672293bb559 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/FunctionDeclaration.kt @@ -17,7 +17,14 @@ package com.google.firebase.vertexai.type /** - * A declared function that a model can be given access to in order to gain info or complete tasks. + * Defines a function that the model can use as a tool. + * + * When generating responses, the model might need external information or require the application + * to perform an action. `FunctionDeclaration` provides the necessary information for the model to + * create a [FunctionCallPart], which instructs the client to execute the corresponding function. + * The client then sends the result back to the model as a [FunctionResponsePart]. + * + * For example * * ``` * val getExchangeRate = FunctionDeclaration( @@ -30,10 +37,17 @@ package com.google.firebase.vertexai.type * ) * ``` * - * @param name The name of the function call, this should be clear and descriptive for the model. - * @param description A description of what the function does and its output. - * @param parameters A list of parameters that the function accepts. - * @param optionalParameters A list of parameters that can be omitted. + * See the + * [Use the Gemini API for function calling](https://firebase.google.com/docs/vertex-ai/function-calling?platform=android) + * guide for more information on function calling. + * + * @param name The name of the function. + * @param description The description of what the function does and its output. 
To improve the + * effectiveness of the model, the description should be clear and detailed. + * @param parameters The map of parameters names to their [Schema] the function accepts as + * arguments. + * @param optionalParameters The list of parameter names that the model can omit when invoking this + * function. * @see Schema */ public class FunctionDeclaration( diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt index 1455bd6584b..85891457b78 100644 --- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt +++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/GenerateContentResponse.kt @@ -17,23 +17,27 @@ package com.google.firebase.vertexai.type /** - * Represents a response from the model. + * A response from the model. * - * @property candidates a list of possible responses generated from the model - * @property promptFeedback optional feedback for the given prompt. When streaming, it's only - * populated in the first response. + * @property candidates The list of [Candidate] responses generated by the model. + * @property promptFeedback Feedback about the prompt send to the model to generate this response. + * When streaming, it's only populated in the first response. + * @property usageMetadata Information about the number of tokens in the prompt and in the response. */ public class GenerateContentResponse( public val candidates: List, public val promptFeedback: PromptFeedback?, public val usageMetadata: UsageMetadata?, ) { - /** Convenience field representing all the text parts in the response, if they exists. */ + /** + * Convenience field representing all the text parts in the response as a single string, if they + * exists. + */ public val text: String? 
by lazy { candidates.first().content.parts.filterIsInstance<TextPart>().joinToString(" ") { it.text } } - /** Convenience field to get all the function call parts in the request, if they exist */ + /** Convenience field to list all the [FunctionCallPart]s in the response, if they exist. */ public val functionCalls: List<FunctionCallPart> by lazy { candidates.first().content.parts.filterIsInstance<FunctionCallPart>() }