@@ -48,7 +48,8 @@ import kotlinx.coroutines.flow.map
4848import kotlinx.coroutines.tasks.await
4949
5050/* *
51- * A controller for communicating with the API of a given multimodal model (for example, Gemini).
51+ * Represents a multimodal model (like Gemini), capable of generating content based on various input
52+ * types.
5253 */
5354public class GenerativeModel
5455internal constructor (
@@ -122,11 +123,12 @@ internal constructor(
122123 )
123124
124125 /* *
125- * Generates a [GenerateContentResponse] from the backend with the provided [Content] .
126+ * Generates new content from the input [Content] given to the model as a prompt.
126127 *
127- * @param prompt [Content] to send to the model.
128- * @return A [GenerateContentResponse]. Function should be called within a suspend context to
129- * properly manage concurrency.
128+ * @param prompt The input(s) given to the model as a prompt.
129+ * @return The content generated by the model.
130+ * @throws [FirebaseVertexAIException] if the request failed.
131+ * @see [FirebaseVertexAIException] for types of errors.
130132 */
131133 public suspend fun generateContent (vararg prompt : Content ): GenerateContentResponse =
132134 try {
@@ -136,10 +138,12 @@ internal constructor(
136138 }
137139
138140 /* *
139- * Generates a streaming response from the backend with the provided [Content].
141+ * Generates new content as a stream from the input [Content] given to the model as a prompt.
140142 *
141- * @param prompt [Content] to send to the model.
142- * @return A [Flow] which will emit responses as they are returned from the model.
143+ * @param prompt The input(s) given to the model as a prompt.
144+ * @return A [Flow] which will emit responses as they are returned by the model.
145+ * @throws [FirebaseVertexAIException] if the request failed.
146+ * @see [FirebaseVertexAIException] for types of errors.
143147 */
144148 public fun generateContentStream (vararg prompt : Content ): Flow <GenerateContentResponse > =
145149 controller
@@ -148,52 +152,60 @@ internal constructor(
148152 .map { it.toPublic().validate() }
149153
150154 /* *
151- * Generates a [GenerateContentResponse] from the backend with the provided text prompt.
155+ * Generates new content from the text input given to the model as a prompt.
152156 *
153- * @param prompt The text to be converted into a single piece of [Content] to send to the model.
154- * @return A [GenerateContentResponse] after some delay. Function should be called within a
155- * suspend context to properly manage concurrency.
157+ * @param prompt The text to be sent to the model as a prompt.
158+ * @return The content generated by the model.
159+ * @throws [FirebaseVertexAIException] if the request failed.
160+ * @see [FirebaseVertexAIException] for types of errors.
156161 */
157162 public suspend fun generateContent (prompt : String ): GenerateContentResponse =
158163 generateContent(content { text(prompt) })
159164
160165 /* *
161- * Generates a streaming response from the backend with the provided text prompt.
166+ * Generates new content as a stream from the text input given to the model as a prompt.
162167 *
163- * @param prompt The text to be converted into a single piece of [Content] to send to the model.
164- * @return A [Flow] which will emit responses as they are returned from the model.
168+ * @param prompt The text to be sent to the model as a prompt.
169+ * @return A [Flow] which will emit responses as they are returned by the model.
170+ * @throws [FirebaseVertexAIException] if the request failed.
171+ * @see [FirebaseVertexAIException] for types of errors.
165172 */
166173 public fun generateContentStream (prompt : String ): Flow <GenerateContentResponse > =
167174 generateContentStream(content { text(prompt) })
168175
169176 /* *
170- * Generates a [GenerateContentResponse] from the backend with the provided image prompt.
177+ * Generates new content from the image input given to the model as a prompt.
171178 *
172179 * @param prompt The image to be converted into a single piece of [Content] to send to the model.
173- * @return A [GenerateContentResponse] after some delay. Function should be called within a
174- * suspend context to properly manage concurrency.
180+ * @return The content generated by the model.
181+ * @throws [FirebaseVertexAIException] if the request failed.
182+ * @see [FirebaseVertexAIException] for types of errors.
175183 */
176184 public suspend fun generateContent (prompt : Bitmap ): GenerateContentResponse =
177185 generateContent(content { image(prompt) })
178186
179187 /* *
180- * Generates a streaming response from the backend with the provided image prompt.
188+ * Generates new content as a stream from the image input given to the model as a prompt.
181189 *
182190 * @param prompt The image to be converted into a single piece of [Content] to send to the model.
183- * @return A [Flow] which will emit responses as they are returned from the model.
191+ * @return A [Flow] which will emit responses as they are returned by the model.
192+ * @throws [FirebaseVertexAIException] if the request failed.
193+ * @see [FirebaseVertexAIException] for types of errors.
184194 */
185195 public fun generateContentStream (prompt : Bitmap ): Flow <GenerateContentResponse > =
186196 generateContentStream(content { image(prompt) })
187197
188- /* * Creates a [Chat] instance which internally tracks the ongoing conversation with the model */
198+ /* * Creates a [Chat] instance using this model with the optionally provided history. */
189199 public fun startChat (history : List <Content > = emptyList()): Chat =
190200 Chat (this , history.toMutableList())
191201
192202 /* *
193- * Counts the amount of tokens in a prompt.
203+ * Counts the number of tokens in a prompt using the model's tokenizer.
194204 *
195- * @param prompt A group of [Content] to count tokens of.
196- * @return A [CountTokensResponse] containing the amount of tokens in the prompt.
205+ * @param prompt The input(s) given to the model as a prompt.
206+ * @return The [CountTokensResponse] of running the model's tokenizer on the input.
207+ * @throws [FirebaseVertexAIException] if the request failed.
208+ * @see [FirebaseVertexAIException] for types of errors.
197209 */
198210 public suspend fun countTokens (vararg prompt : Content ): CountTokensResponse {
199211 try {
@@ -204,20 +216,24 @@ internal constructor(
204216 }
205217
206218 /* *
207- * Counts the amount of tokens in the text prompt.
219+ * Counts the number of tokens in a text prompt using the model's tokenizer.
208220 *
209- * @param prompt The text to be converted to a single piece of [Content] to count the tokens of.
210- * @return A [CountTokensResponse] containing the amount of tokens in the prompt.
221+ * @param prompt The text given to the model as a prompt.
222+ * @return The [CountTokensResponse] of running the model's tokenizer on the input.
223+ * @throws [FirebaseVertexAIException] if the request failed.
224+ * @see [FirebaseVertexAIException] for types of errors.
211225 */
212226 public suspend fun countTokens (prompt : String ): CountTokensResponse {
213227 return countTokens(content { text(prompt) })
214228 }
215229
216230 /* *
217- * Counts the amount of tokens in the image prompt.
231+ * Counts the number of tokens in an image prompt using the model's tokenizer.
218232 *
219- * @param prompt The image to be converted to a single piece of [Content] to count the tokens of.
220- * @return A [CountTokensResponse] containing the amount of tokens in the prompt.
233+ * @param prompt The image given to the model as a prompt.
234+ * @return The [CountTokensResponse] of running the model's tokenizer on the input.
235+ * @throws [FirebaseVertexAIException] if the request failed.
236+ * @see [FirebaseVertexAIException] for types of errors.
221237 */
222238 public suspend fun countTokens (prompt : Bitmap ): CountTokensResponse {
223239 return countTokens(content { image(prompt) })
0 commit comments