diff --git a/.changes/generativeai/decision-amount-amusement-bite.json b/.changes/generativeai/decision-amount-amusement-bite.json
new file mode 100644
index 00000000..695f8602
--- /dev/null
+++ b/.changes/generativeai/decision-amount-amusement-bite.json
@@ -0,0 +1 @@
+{"type":"PATCH","changes":["Require at least one argument for functions that take vararg"]}
diff --git a/generativeai/src/main/java/com/google/ai/client/generativeai/Chat.kt b/generativeai/src/main/java/com/google/ai/client/generativeai/Chat.kt
index fbe8f346..14cc220b 100644
--- a/generativeai/src/main/java/com/google/ai/client/generativeai/Chat.kt
+++ b/generativeai/src/main/java/com/google/ai/client/generativeai/Chat.kt
@@ -57,7 +57,7 @@ class Chat(private val model: GenerativeModel, val history: MutableList<Content>
     prompt.assertComesFromUser()
     attemptLock()
     try {
-      val response = model.generateContent(*history.toTypedArray(), prompt)
+      val response = model.generateContent(prompt, *history.toTypedArray())
       history.add(prompt)
       history.add(response.candidates.first().content)
       return response
@@ -100,7 +100,7 @@ class Chat(private val model: GenerativeModel, val history: MutableList<Content>
     prompt.assertComesFromUser()
     attemptLock()

-    val flow = model.generateContentStream(*history.toTypedArray(), prompt)
+    val flow = model.generateContentStream(prompt, *history.toTypedArray())
     val bitmaps = LinkedList<Bitmap>()
     val blobs = LinkedList<BlobPart>()
     val text = StringBuilder()
diff --git a/generativeai/src/main/java/com/google/ai/client/generativeai/GenerativeModel.kt b/generativeai/src/main/java/com/google/ai/client/generativeai/GenerativeModel.kt
index 6a4618a9..633165f8 100644
--- a/generativeai/src/main/java/com/google/ai/client/generativeai/GenerativeModel.kt
+++ b/generativeai/src/main/java/com/google/ai/client/generativeai/GenerativeModel.kt
@@ -108,9 +108,9 @@ internal constructor(
   * @return A [GenerateContentResponse] after some delay. Function should be called within a
   *   suspend context to properly manage concurrency.
   */
-  suspend fun generateContent(vararg prompt: Content): GenerateContentResponse =
+  suspend fun generateContent(prompt: Content, vararg prompts: Content): GenerateContentResponse =
     try {
-      controller.generateContent(constructRequest(*prompt)).toPublic().validate()
+      controller.generateContent(constructRequest(prompt, *prompts)).toPublic().validate()
     } catch (e: Throwable) {
       throw GoogleGenerativeAIException.from(e)
     }
@@ -121,9 +121,12 @@ internal constructor(
   * @param prompt A group of [Content]s to send to the model.
   * @return A [Flow] which will emit responses as they are returned from the model.
   */
-  fun generateContentStream(vararg prompt: Content): Flow<GenerateContentResponse> =
+  fun generateContentStream(
+    prompt: Content,
+    vararg prompts: Content
+  ): Flow<GenerateContentResponse> =
     controller
-      .generateContentStream(constructRequest(*prompt))
+      .generateContentStream(constructRequest(prompt, *prompts))
       .catch { throw GoogleGenerativeAIException.from(it) }
       .map { it.toPublic().validate() }

@@ -174,8 +177,8 @@ internal constructor(
   * @param prompt A group of [Content]s to count tokens of.
   * @return A [CountTokensResponse] containing the number of tokens in the prompt.
   */
-  suspend fun countTokens(vararg prompt: Content): CountTokensResponse {
-    return controller.countTokens(constructCountTokensRequest(*prompt)).toPublic()
+  suspend fun countTokens(prompt: Content, vararg prompts: Content): CountTokensResponse {
+    return controller.countTokens(constructCountTokensRequest(prompt, *prompts)).toPublic()
   }

   /**
diff --git a/generativeai/src/main/java/com/google/ai/client/generativeai/java/GenerativeModelFutures.kt b/generativeai/src/main/java/com/google/ai/client/generativeai/java/GenerativeModelFutures.kt
index 757aeb43..92554c71 100644
--- a/generativeai/src/main/java/com/google/ai/client/generativeai/java/GenerativeModelFutures.kt
+++ b/generativeai/src/main/java/com/google/ai/client/generativeai/java/GenerativeModelFutures.kt
@@ -38,21 +38,30 @@ abstract class GenerativeModelFutures internal constructor() {
   *
   * @param prompt A group of [Content]s to send to the model.
   */
-  abstract fun generateContent(vararg prompt: Content): ListenableFuture<GenerateContentResponse>
+  abstract fun generateContent(
+    prompt: Content,
+    vararg prompts: Content
+  ): ListenableFuture<GenerateContentResponse>

  /**
   * Generates a streaming response from the backend with the provided [Content]s.
   *
   * @param prompt A group of [Content]s to send to the model.
   */
-  abstract fun generateContentStream(vararg prompt: Content): Publisher<GenerateContentResponse>
+  abstract fun generateContentStream(
+    prompt: Content,
+    vararg prompts: Content
+  ): Publisher<GenerateContentResponse>

  /**
   * Counts the number of tokens used in a prompt.
   *
   * @param prompt A group of [Content]s to count tokens of.
   */
-  abstract fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse>
+  abstract fun countTokens(
+    prompt: Content,
+    vararg prompts: Content
+  ): ListenableFuture<CountTokensResponse>

  /** Creates a chat instance which internally tracks the ongoing conversation with the model */
  abstract fun startChat(): ChatFutures
@@ -69,15 +78,22 @@ abstract class GenerativeModelFutures internal constructor() {

  private class FuturesImpl(private val model: GenerativeModel) : GenerativeModelFutures() {
    override fun generateContent(
-      vararg prompt: Content
+      prompt: Content,
+      vararg prompts: Content
    ): ListenableFuture<GenerateContentResponse> =
-      SuspendToFutureAdapter.launchFuture { model.generateContent(*prompt) }
-
-    override fun generateContentStream(vararg prompt: Content): Publisher<GenerateContentResponse> =
-      model.generateContentStream(*prompt).asPublisher()
-
-    override fun countTokens(vararg prompt: Content): ListenableFuture<CountTokensResponse> =
-      SuspendToFutureAdapter.launchFuture { model.countTokens(*prompt) }
+      SuspendToFutureAdapter.launchFuture { model.generateContent(prompt, *prompts) }
+
+    override fun generateContentStream(
+      prompt: Content,
+      vararg prompts: Content
+    ): Publisher<GenerateContentResponse> =
+      model.generateContentStream(prompt, *prompts).asPublisher()
+
+    override fun countTokens(
+      prompt: Content,
+      vararg prompts: Content
+    ): ListenableFuture<CountTokensResponse> =
+      SuspendToFutureAdapter.launchFuture { model.countTokens(prompt, *prompts) }

    override fun startChat(): ChatFutures = startChat(emptyList())
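
Reviewer note (not part of the diff): making the first Content a required
parameter turns an empty call such as model.generateContent() into a
compile-time error instead of a malformed request at runtime, for both Kotlin
and Java callers. A minimal call-site sketch, assuming a placeholder model
name and API key:

    import com.google.ai.client.generativeai.GenerativeModel
    import com.google.ai.client.generativeai.type.content

    suspend fun demo() {
        // Placeholder configuration; substitute a real API key.
        val model = GenerativeModel(modelName = "gemini-pro", apiKey = "YOUR_API_KEY")

        // Build a single user Content with the SDK's content {} builder.
        val prompt = content(role = "user") { text("Why is the sky blue?") }

        // One Content is now mandatory; any extras still flow through the vararg.
        val response = model.generateContent(prompt)
        println(response.text)

        // model.generateContent()  // no longer compiles: first argument is required
    }

Call sites that previously spread an array as the sole argument, as Chat.kt did
with generateContent(*history.toTypedArray(), prompt), must now pass one
concrete Content first and spread the rest through the vararg; note the diff
accordingly reorders the chat request so the new prompt precedes the history.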