Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions firebase-ai/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,10 @@
2.5 series models. (#6990)
* [feature] **Breaking Change**: Add support for Grounding with Google Search (#7042).
* **Action Required:** Update all references of `groundingAttributions`, `webSearchQueries`, `retrievalQueries` in `GroundingMetadata` to be non-optional.
* [changed] Require at least one argument for `generateContent()`, `generateContentStream()` and
`countTokens()`.
* [feature] Added new overloads for `generateContent()`, `generateContentStream()` and
`countTokens()` that take a `List<Content>` parameter.

# 16.2.0
* [changed] Deprecate the `totalBillableCharacters` field (only usable with pre-2.0 models). (#7042)
Expand Down Expand Up @@ -34,3 +38,4 @@

Note: This feature is in Public Preview, which means that it is not subject to any SLA or
deprecation policy and could change in backwards-incompatible ways.

15 changes: 9 additions & 6 deletions firebase-ai/api.txt
Original file line number Diff line number Diff line change
Expand Up @@ -53,14 +53,17 @@ package com.google.firebase.ai {

public final class GenerativeModel {
method public suspend Object? countTokens(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? countTokens(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? countTokens(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? countTokens(java.util.List<com.google.firebase.ai.type.Content> prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? generateContent(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public suspend Object? generateContent(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public suspend Object? generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public suspend Object? generateContent(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public suspend Object? generateContent(java.util.List<com.google.firebase.ai.type.Content> prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(android.graphics.Bitmap prompt);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(String prompt);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(java.util.List<com.google.firebase.ai.type.Content> prompt);
method public com.google.firebase.ai.Chat startChat(java.util.List<com.google.firebase.ai.type.Content> history = emptyList());
}

Expand Down Expand Up @@ -89,10 +92,10 @@ package com.google.firebase.ai.java {
}

public abstract class GenerativeModelFutures {
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content... prompt);
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public static final com.google.firebase.ai.java.GenerativeModelFutures from(com.google.firebase.ai.GenerativeModel model);
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content... prompt);
method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public abstract com.google.firebase.ai.GenerativeModel getGenerativeModel();
method public abstract com.google.firebase.ai.java.ChatFutures startChat();
method public abstract com.google.firebase.ai.java.ChatFutures startChat(java.util.List<com.google.firebase.ai.type.Content> history);
Expand Down
6 changes: 4 additions & 2 deletions firebase-ai/src/main/kotlin/com/google/firebase/ai/Chat.kt
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,8 @@ public class Chat(
prompt.assertComesFromUser()
attemptLock()
try {
val response = model.generateContent(*history.toTypedArray(), prompt)
val fullPrompt = history + prompt
val response = model.generateContent(fullPrompt.first(), *fullPrompt.drop(1).toTypedArray())
history.add(prompt)
history.add(response.candidates.first().content)
return response
Expand Down Expand Up @@ -127,7 +128,8 @@ public class Chat(
prompt.assertComesFromUser()
attemptLock()

val flow = model.generateContentStream(*history.toTypedArray(), prompt)
val fullPrompt = history + prompt
val flow = model.generateContentStream(fullPrompt.first(), *fullPrompt.drop(1).toTypedArray())
val bitmaps = LinkedList<Bitmap>()
val inlineDataParts = LinkedList<InlineDataPart>()
val text = StringBuilder()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,13 +100,48 @@ internal constructor(
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse =
public suspend fun generateContent(
prompt: Content,
vararg prompts: Content
): GenerateContentResponse =
try {
controller.generateContent(constructRequest(*prompt)).toPublic().validate()
controller.generateContent(constructRequest(prompt, *prompts)).toPublic().validate()
} catch (e: Throwable) {
throw FirebaseAIException.from(e)
}

/**
* Generates new content from the input [Content] given to the model as a prompt.
*
* @param prompt The input(s) given to the model as a prompt.
* @return The content generated by the model.
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public suspend fun generateContent(prompt: List<Content>): GenerateContentResponse =
try {
controller.generateContent(constructRequest(prompt)).toPublic().validate()
} catch (e: Throwable) {
throw FirebaseAIException.from(e)
}

/**
* Generates new content as a stream from the input [Content] given to the model as a prompt.
*
* @param prompt The input(s) given to the model as a prompt.
* @return A [Flow] which will emit responses as they are returned by the model.
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public fun generateContentStream(
prompt: Content,
vararg prompts: Content
): Flow<GenerateContentResponse> =
controller
.generateContentStream(constructRequest(prompt, *prompts))
.catch { throw FirebaseAIException.from(it) }
.map { it.toPublic().validate() }

/**
* Generates new content as a stream from the input [Content] given to the model as a prompt.
*
Expand All @@ -115,9 +150,9 @@ internal constructor(
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public fun generateContentStream(vararg prompt: Content): Flow<GenerateContentResponse> =
public fun generateContentStream(prompt: List<Content>): Flow<GenerateContentResponse> =
controller
.generateContentStream(constructRequest(*prompt))
.generateContentStream(constructRequest(prompt))
.catch { throw FirebaseAIException.from(it) }
.map { it.toPublic().validate() }

Expand Down Expand Up @@ -177,9 +212,25 @@ internal constructor(
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public suspend fun countTokens(vararg prompt: Content): CountTokensResponse {
public suspend fun countTokens(prompt: Content, vararg prompts: Content): CountTokensResponse {
try {
return controller.countTokens(constructCountTokensRequest(prompt, *prompts)).toPublic()
} catch (e: Throwable) {
throw FirebaseAIException.from(e)
}
}

/**
* Counts the number of tokens in a prompt using the model's tokenizer.
*
* @param prompt The input(s) given to the model as a prompt.
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public suspend fun countTokens(prompt: List<Content>): CountTokensResponse {
try {
return controller.countTokens(constructCountTokensRequest(*prompt)).toPublic()
return controller.countTokens(constructCountTokensRequest(*prompt.toTypedArray())).toPublic()
} catch (e: Throwable) {
throw FirebaseAIException.from(e)
}
Expand Down Expand Up @@ -232,6 +283,8 @@ internal constructor(
systemInstruction?.copy(role = "system")?.toInternal(),
)

private fun constructRequest(prompt: List<Content>) = constructRequest(*prompt.toTypedArray())

private fun constructCountTokensRequest(vararg prompt: Content) =
when (generativeBackend.backend) {
GenerativeBackendEnum.GOOGLE_AI -> CountTokensRequest.forGoogleAI(constructRequest(*prompt))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,8 @@ public abstract class GenerativeModelFutures internal constructor() {
* @throws [FirebaseAIException] if the request failed.
*/
public abstract fun generateContent(
vararg prompt: Content
prompt: Content,
vararg prompts: Content
): ListenableFuture<GenerateContentResponse>

/**
Expand All @@ -53,7 +54,8 @@ public abstract class GenerativeModelFutures internal constructor() {
* @throws [FirebaseAIException] if the request failed.
*/
public abstract fun generateContentStream(
vararg prompt: Content
prompt: Content,
vararg prompts: Content
): Publisher<GenerateContentResponse>

/**
Expand All @@ -63,7 +65,10 @@ public abstract class GenerativeModelFutures internal constructor() {
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
* @throws [FirebaseAIException] if the request failed.
*/
public abstract fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse>
public abstract fun countTokens(
prompt: Content,
vararg prompts: Content
): ListenableFuture<CountTokensResponse>

/**
* Creates a [ChatFutures] instance which internally tracks the ongoing conversation with the
Expand All @@ -83,15 +88,22 @@ public abstract class GenerativeModelFutures internal constructor() {

private class FuturesImpl(private val model: GenerativeModel) : GenerativeModelFutures() {
override fun generateContent(
vararg prompt: Content
prompt: Content,
vararg prompts: Content
): ListenableFuture<GenerateContentResponse> =
SuspendToFutureAdapter.launchFuture { model.generateContent(*prompt) }

override fun generateContentStream(vararg prompt: Content): Publisher<GenerateContentResponse> =
model.generateContentStream(*prompt).asPublisher()

override fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse> =
SuspendToFutureAdapter.launchFuture { model.countTokens(*prompt) }
SuspendToFutureAdapter.launchFuture { model.generateContent(prompt, *prompts) }

override fun generateContentStream(
prompt: Content,
vararg prompts: Content
): Publisher<GenerateContentResponse> =
model.generateContentStream(prompt, *prompts).asPublisher()

override fun countTokens(
prompt: Content,
vararg prompts: Content
): ListenableFuture<CountTokensResponse> =
SuspendToFutureAdapter.launchFuture { model.countTokens(prompt, *prompts) }

override fun startChat(): ChatFutures = startChat(emptyList())

Expand Down
Loading