Commit e6b6b54

refactor(ai): required argument for vararg functions
1 parent 696f021 commit e6b6b54

5 files changed, +45 −26 lines changed

firebase-ai/CHANGELOG.md

Lines changed: 2 additions & 1 deletion
@@ -1,5 +1,6 @@
 # Unreleased
-
+* [changed] require at least one argument for `generateContent()`, `generateContentStream()` and
+  `countTokens()`.
 
 # 16.2.0
 * [changed] Deprecate the `totalBillableCharacters` field (only usable with pre-2.0 models). (#7042)
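To make the caller-facing effect concrete, here is a minimal, hypothetical sketch (not part of the commit). It assumes an already configured `GenerativeModel` named `model` and the SDK's `content {}` builder from `com.google.firebase.ai.type`:

```kotlin
import com.google.firebase.ai.GenerativeModel
import com.google.firebase.ai.type.content

// Hypothetical caller code; `model` is assumed to be an already configured GenerativeModel.
suspend fun demo(model: GenerativeModel) {
  // Still valid: one or more Content arguments.
  val single = model.generateContent(content { text("Hello") })
  val several =
    model.generateContent(
      content { text("You are a release-notes assistant.") },
      content { text("Summarize the latest changes.") },
    )

  // No longer compiles after this change: the first Content argument is required.
  // model.generateContent()         // error: no value passed for parameter 'prompt'
  // model.generateContentStream()   // same
  // model.countTokens()             // same

  println(single.text)
  println(several.text)
}
```

Previously a zero-argument call was legal Kotlin (an empty vararg) and could only fail once the request was sent; with a required first parameter the mistake is caught at compile time.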

firebase-ai/api.txt

Lines changed: 6 additions & 6 deletions
@@ -53,13 +53,13 @@ package com.google.firebase.ai {
 
   public final class GenerativeModel {
     method public suspend Object? countTokens(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
-    method public suspend Object? countTokens(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
+    method public suspend Object? countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
     method public suspend Object? countTokens(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.CountTokensResponse>);
     method public suspend Object? generateContent(android.graphics.Bitmap prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
-    method public suspend Object? generateContent(com.google.firebase.ai.type.Content[] prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
+    method public suspend Object? generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content[] prompts, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
     method public suspend Object? generateContent(String prompt, kotlin.coroutines.Continuation<? super com.google.firebase.ai.type.GenerateContentResponse>);
     method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(android.graphics.Bitmap prompt);
-    method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
+    method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
     method public kotlinx.coroutines.flow.Flow<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(String prompt);
     method public com.google.firebase.ai.Chat startChat(java.util.List<com.google.firebase.ai.type.Content> history = emptyList());
   }

@@ -89,10 +89,10 @@ package com.google.firebase.ai.java {
   }
 
   public abstract class GenerativeModelFutures {
-    method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content... prompt);
+    method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.CountTokensResponse> countTokens(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
     method public static final com.google.firebase.ai.java.GenerativeModelFutures from(com.google.firebase.ai.GenerativeModel model);
-    method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content... prompt);
-    method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content... prompt);
+    method public abstract com.google.common.util.concurrent.ListenableFuture<com.google.firebase.ai.type.GenerateContentResponse> generateContent(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
+    method public abstract org.reactivestreams.Publisher<com.google.firebase.ai.type.GenerateContentResponse> generateContentStream(com.google.firebase.ai.type.Content prompt, com.google.firebase.ai.type.Content... prompts);
     method public abstract com.google.firebase.ai.GenerativeModel getGenerativeModel();
     method public abstract com.google.firebase.ai.java.ChatFutures startChat();
     method public abstract com.google.firebase.ai.java.ChatFutures startChat(java.util.List<com.google.firebase.ai.type.Content> history);

firebase-ai/src/main/kotlin/com/google/firebase/ai/Chat.kt

Lines changed: 2 additions & 2 deletions
@@ -66,7 +66,7 @@ public class Chat(
     prompt.assertComesFromUser()
     attemptLock()
     try {
-      val response = model.generateContent(*history.toTypedArray(), prompt)
+      val response = model.generateContent(prompt, *history.toTypedArray())
       history.add(prompt)
       history.add(response.candidates.first().content)
       return response

@@ -127,7 +127,7 @@ public class Chat(
     prompt.assertComesFromUser()
     attemptLock()
 
-    val flow = model.generateContentStream(*history.toTypedArray(), prompt)
+    val flow = model.generateContentStream(prompt, *history.toTypedArray())
     val bitmaps = LinkedList<Bitmap>()
     val inlineDataParts = LinkedList<InlineDataPart>()
     val text = StringBuilder()
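As the Chat.kt hunks show, call sites that previously spread an array straight into the vararg now need an explicit first argument; Chat.kt does this by passing the prompt as the required parameter and spreading the existing history through the vararg. For other call sites that only have a list, a hedged sketch of the same migration (the helper below is illustrative, not part of the SDK):

```kotlin
import com.google.firebase.ai.GenerativeModel
import com.google.firebase.ai.type.Content
import com.google.firebase.ai.type.GenerateContentResponse

// Illustrative helper, not part of the SDK: forwards a non-empty list of Content.
suspend fun sendAll(model: GenerativeModel, contents: List<Content>): GenerateContentResponse {
  require(contents.isNotEmpty()) { "generateContent now needs at least one Content" }
  // Before: model.generateContent(*contents.toTypedArray())
  // After:  the first element fills the required parameter; the rest go through the vararg.
  return model.generateContent(contents.first(), *contents.drop(1).toTypedArray())
}
```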

firebase-ai/src/main/kotlin/com/google/firebase/ai/GenerativeModel.kt

Lines changed: 12 additions & 6 deletions
@@ -100,9 +100,12 @@ internal constructor(
    * @throws [FirebaseAIException] if the request failed.
    * @see [FirebaseAIException] for types of errors.
    */
-  public suspend fun generateContent(vararg prompt: Content): GenerateContentResponse =
+  public suspend fun generateContent(
+    prompt: Content,
+    vararg prompts: Content
+  ): GenerateContentResponse =
     try {
-      controller.generateContent(constructRequest(*prompt)).toPublic().validate()
+      controller.generateContent(constructRequest(prompt, *prompts)).toPublic().validate()
     } catch (e: Throwable) {
       throw FirebaseAIException.from(e)
     }

@@ -115,9 +118,12 @@ internal constructor(
    * @throws [FirebaseAIException] if the request failed.
    * @see [FirebaseAIException] for types of errors.
    */
-  public fun generateContentStream(vararg prompt: Content): Flow<GenerateContentResponse> =
+  public fun generateContentStream(
+    prompt: Content,
+    vararg prompts: Content
+  ): Flow<GenerateContentResponse> =
     controller
-      .generateContentStream(constructRequest(*prompt))
+      .generateContentStream(constructRequest(prompt, *prompts))
       .catch { throw FirebaseAIException.from(it) }
       .map { it.toPublic().validate() }

@@ -177,9 +183,9 @@ internal constructor(
    * @throws [FirebaseAIException] if the request failed.
    * @see [FirebaseAIException] for types of errors.
    */
-  public suspend fun countTokens(vararg prompt: Content): CountTokensResponse {
+  public suspend fun countTokens(prompt: Content, vararg prompts: Content): CountTokensResponse {
     try {
-      return controller.countTokens(constructCountTokensRequest(*prompt)).toPublic()
+      return controller.countTokens(constructCountTokensRequest(prompt, *prompts)).toPublic()
     } catch (e: Throwable) {
       throw FirebaseAIException.from(e)
     }
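The streaming API keeps the same shape: the first `Content` is a named required parameter and anything extra goes through the trailing vararg. A short, hypothetical usage sketch, again assuming a configured `model` and the `content {}` builder:

```kotlin
import com.google.firebase.ai.GenerativeModel
import com.google.firebase.ai.type.content

// Hypothetical caller: the first Content fills the required parameter,
// additional Content goes through the trailing vararg.
suspend fun streamDemo(model: GenerativeModel) {
  model
    .generateContentStream(
      content { text("Write a haiku about API design.") },
      content { text("Keep it to three lines.") },
    )
    .collect { chunk -> print(chunk.text) }
}
```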

firebase-ai/src/main/kotlin/com/google/firebase/ai/java/GenerativeModelFutures.kt

Lines changed: 23 additions & 11 deletions
@@ -42,7 +42,8 @@ public abstract class GenerativeModelFutures internal constructor() {
   * @throws [FirebaseAIException] if the request failed.
   */
  public abstract fun generateContent(
-    vararg prompt: Content
+    prompt: Content,
+    vararg prompts: Content
  ): ListenableFuture<GenerateContentResponse>
 
  /**

@@ -53,7 +54,8 @@ public abstract class GenerativeModelFutures internal constructor() {
   * @throws [FirebaseAIException] if the request failed.
   */
  public abstract fun generateContentStream(
-    vararg prompt: Content
+    prompt: Content,
+    vararg prompts: Content
  ): Publisher<GenerateContentResponse>
 
  /**

@@ -63,7 +65,10 @@ public abstract class GenerativeModelFutures internal constructor() {
   * @return The [CountTokensResponse] of running the model's tokenizer on the input.
   * @throws [FirebaseAIException] if the request failed.
   */
-  public abstract fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse>
+  public abstract fun countTokens(
+    prompt: Content,
+    vararg prompts: Content
+  ): ListenableFuture<CountTokensResponse>
 
  /**
   * Creates a [ChatFutures] instance which internally tracks the ongoing conversation with the

@@ -83,15 +88,22 @@ public abstract class GenerativeModelFutures internal constructor() {
 
  private class FuturesImpl(private val model: GenerativeModel) : GenerativeModelFutures() {
    override fun generateContent(
-      vararg prompt: Content
+      prompt: Content,
+      vararg prompts: Content
    ): ListenableFuture<GenerateContentResponse> =
-      SuspendToFutureAdapter.launchFuture { model.generateContent(*prompt) }
-
-    override fun generateContentStream(vararg prompt: Content): Publisher<GenerateContentResponse> =
-      model.generateContentStream(*prompt).asPublisher()
-
-    override fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse> =
-      SuspendToFutureAdapter.launchFuture { model.countTokens(*prompt) }
+      SuspendToFutureAdapter.launchFuture { model.generateContent(prompt, *prompts) }
+
+    override fun generateContentStream(
+      prompt: Content,
+      vararg prompts: Content
+    ): Publisher<GenerateContentResponse> =
+      model.generateContentStream(prompt, *prompts).asPublisher()
+
+    override fun countTokens(
+      prompt: Content,
+      vararg prompts: Content
+    ): ListenableFuture<CountTokensResponse> =
+      SuspendToFutureAdapter.launchFuture { model.countTokens(prompt, *prompts) }
 
    override fun startChat(): ChatFutures = startChat(emptyList())
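Java-interop callers going through `GenerativeModelFutures` get the same constraint. A hypothetical sketch in Kotlin; the `addListener`/`Runnable::run` combination is just one way to consume the returned `ListenableFuture` and is not part of this diff:

```kotlin
import com.google.firebase.ai.GenerativeModel
import com.google.firebase.ai.java.GenerativeModelFutures
import com.google.firebase.ai.type.content

// Hypothetical caller of the Java-interop surface; `model` is assumed configured.
fun futuresDemo(model: GenerativeModel) {
  val futures = GenerativeModelFutures.from(model)

  // At least one Content is now required here as well:
  val future = futures.countTokens(content { text("How many tokens is this?") })
  // futures.countTokens()  // no longer compiles: 'prompt' has no value

  // Run a simple completion listener on the completing thread.
  future.addListener({ println("countTokens finished") }, Runnable::run)
}
```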
