Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion firebase-ai/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# Unreleased

* [changed] Require at least one argument for `generateContent()`, `generateContentStream()` and
`countTokens()`.

# 16.2.0
* [changed] Deprecate the `totalBillableCharacters` field (only usable with pre-2.0 models). (#7042)
Expand Down
12 changes: 6 additions & 6 deletions firebase-ai/api.txt
Original file line number Diff line number Diff line change
Expand Up @@ -53,13 +53,13 @@ package com.google.firebase.ai {

public final class GenerativeModel {
method public suspend Object? countTokens(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? countTokens(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? countTokens(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
method public suspend Object? generateContent(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public suspend Object? generateContent(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public suspend Object? generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public suspend Object? generateContent(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(android.graphics.Bitmap prompt);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(String prompt);
method public com.google.firebase.ai.Chat startChat(java.util.List<com.google.firebase.ai.type.Content> history = emptyList());
}
Expand Down Expand Up @@ -89,10 +89,10 @@ package com.google.firebase.ai.java {
}

public abstract class GenerativeModelFutures {
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content... prompt);
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public static final com.google.firebase.ai.java.GenerativeModelFutures from(com.google.firebase.ai.GenerativeModel model);
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content... prompt);
method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
method public abstract com.google.firebase.ai.GenerativeModel getGenerativeModel();
method public abstract com.google.firebase.ai.java.ChatFutures startChat();
method public abstract com.google.firebase.ai.java.ChatFutures startChat(java.util.List<com.google.firebase.ai.type.Content> history);
Expand Down
4 changes: 2 additions & 2 deletions firebase-ai/src/main/kotlin/com/google/firebase/ai/Chat.kt
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ public class Chat(
prompt.assertComesFromUser()
attemptLock()
try {
val response = model.generateContent(*history.toTypedArray(), prompt)
val response = model.generateContent(prompt, *history.toTypedArray())
history.add(prompt)
history.add(response.candidates.first().content)
return response
Expand Down Expand Up @@ -127,7 +127,7 @@ public class Chat(
prompt.assertComesFromUser()
attemptLock()

val flow = model.generateContentStream(*history.toTypedArray(), prompt)
val flow = model.generateContentStream(prompt, *history.toTypedArray())
val bitmaps = LinkedList<Bitmap>()
val inlineDataParts = LinkedList<InlineDataPart>()
val text = StringBuilder()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,9 +100,12 @@ internal constructor(
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse =
public suspend fun generateContent(
prompt: Content,
vararg prompts: Content
): GenerateContentResponse =
try {
controller.generateContent(constructRequest(*prompt)).toPublic().validate()
controller.generateContent(constructRequest(prompt, *prompts)).toPublic().validate()
} catch (e: Throwable) {
throw FirebaseAIException.from(e)
}
Expand All @@ -115,9 +118,12 @@ internal constructor(
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public fun generateContentStream(vararg prompt: Content): Flow<GenerateContentResponse> =
public fun generateContentStream(
prompt: Content,
vararg prompts: Content
): Flow<GenerateContentResponse> =
controller
.generateContentStream(constructRequest(*prompt))
.generateContentStream(constructRequest(prompt, *prompts))
.catch { throw FirebaseAIException.from(it) }
.map { it.toPublic().validate() }

Expand Down Expand Up @@ -177,9 +183,9 @@ internal constructor(
* @throws [FirebaseAIException] if the request failed.
* @see [FirebaseAIException] for types of errors.
*/
public suspend fun countTokens(vararg prompt: Content): CountTokensResponse {
public suspend fun countTokens(prompt: Content, vararg prompts: Content): CountTokensResponse {
try {
return controller.countTokens(constructCountTokensRequest(*prompt)).toPublic()
return controller.countTokens(constructCountTokensRequest(prompt, *prompts)).toPublic()
} catch (e: Throwable) {
throw FirebaseAIException.from(e)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,8 @@ public abstract class GenerativeModelFutures internal constructor() {
* @throws [FirebaseAIException] if the request failed.
*/
public abstract fun generateContent(
vararg prompt: Content
prompt: Content,
vararg prompts: Content
): ListenableFuture<GenerateContentResponse>

/**
Expand All @@ -53,7 +54,8 @@ public abstract class GenerativeModelFutures internal constructor() {
* @throws [FirebaseAIException] if the request failed.
*/
public abstract fun generateContentStream(
vararg prompt: Content
prompt: Content,
vararg prompts: Content
): Publisher<GenerateContentResponse>

/**
Expand All @@ -63,7 +65,10 @@ public abstract class GenerativeModelFutures internal constructor() {
* @return The [CountTokensResponse] of running the model's tokenizer on the input.
* @throws [FirebaseAIException] if the request failed.
*/
public abstract fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse>
public abstract fun countTokens(
prompt: Content,
vararg prompts: Content
): ListenableFuture<CountTokensResponse>

/**
* Creates a [ChatFutures] instance which internally tracks the ongoing conversation with the
Expand All @@ -83,15 +88,22 @@ public abstract class GenerativeModelFutures internal constructor() {

private class FuturesImpl(private val model: GenerativeModel) : GenerativeModelFutures() {
override fun generateContent(
vararg prompt: Content
prompt: Content,
vararg prompts: Content
): ListenableFuture<GenerateContentResponse> =
SuspendToFutureAdapter.launchFuture { model.generateContent(*prompt) }

override fun generateContentStream(vararg prompt: Content): Publisher<GenerateContentResponse> =
model.generateContentStream(*prompt).asPublisher()

override fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse> =
SuspendToFutureAdapter.launchFuture { model.countTokens(*prompt) }
SuspendToFutureAdapter.launchFuture { model.generateContent(prompt, *prompts) }

override fun generateContentStream(
prompt: Content,
vararg prompts: Content
): Publisher<GenerateContentResponse> =
model.generateContentStream(prompt, *prompts).asPublisher()

override fun countTokens(
prompt: Content,
vararg prompts: Content
): ListenableFuture<CountTokensResponse> =
SuspendToFutureAdapter.launchFuture { model.countTokens(prompt, *prompts) }

override fun startChat(): ChatFutures = startChat(emptyList())

Expand Down
Loading