@@ -144,17 +144,27 @@ class GenerationConfig:
144
144
Note: The default value varies by model, see the
145
145
`Model.top_k` attribute of the `Model` returned by the
146
146
`genai.get_model` function.
147
-
147
+ seed:
148
+ Optional. Seed used in decoding. If not set, the request uses a randomly generated seed.
148
149
response_mime_type:
149
150
Optional. Output response mimetype of the generated candidate text.
150
151
151
152
Supported mimetype:
152
153
`text/plain`: (default) Text output.
154
+ `text/x-enum`: for use with a string-enum in `response_schema`
153
155
`application/json`: JSON response in the candidates.
154
156
155
157
response_schema:
156
158
Optional. Specifies the format of the JSON requested if response_mime_type is
157
159
`application/json`.
160
+ presence_penalty:
161
+ Optional.
162
+ frequency_penalty:
163
+ Optional.
164
+ response_logprobs:
165
+ Optional. If true, export the `logprobs` results in response.
166
+ logprobs:
167
+ Optional. Number of candidates of log probabilities to return at each step of decoding.
158
168
"""
159
169
160
170
candidate_count : int | None = None
@@ -163,8 +173,13 @@ class GenerationConfig:
163
173
temperature : float | None = None
164
174
top_p : float | None = None
165
175
top_k : int | None = None
176
+ seed : int | None = None
166
177
response_mime_type : str | None = None
167
178
response_schema : protos .Schema | Mapping [str , Any ] | type | None = None
179
+ presence_penalty : float | None = None
180
+ frequency_penalty : float | None = None
181
+ response_logprobs : bool | None = None
182
+ logprobs : int | None = None
168
183
169
184
170
185
GenerationConfigType = Union [protos .GenerationConfig , GenerationConfigDict , GenerationConfig ]
@@ -306,6 +321,7 @@ def _join_code_execution_result(result_1, result_2):
306
321
307
322
308
323
def _join_candidates (candidates : Iterable [protos .Candidate ]):
324
+ """Joins stream chunks of a single candidate."""
309
325
candidates = tuple (candidates )
310
326
311
327
index = candidates [0 ].index # These should all be the same.
@@ -321,6 +337,7 @@ def _join_candidates(candidates: Iterable[protos.Candidate]):
321
337
322
338
323
339
def _join_candidate_lists (candidate_lists : Iterable [list [protos .Candidate ]]):
340
+ """Joins stream chunks where each chunk is a list of candidate chunks."""
324
341
# Assuming that if a candidate ends, it is no longer returned in the list of
325
342
# candidates and that's why candidates have an index
326
343
candidates = collections .defaultdict (list )
@@ -344,10 +361,15 @@ def _join_prompt_feedbacks(
344
361
345
362
def _join_chunks (chunks : Iterable [protos .GenerateContentResponse ]):
346
363
chunks = tuple (chunks )
364
+ if "usage_metadata" in chunks [- 1 ]:
365
+ usage_metadata = chunks [- 1 ].usage_metadata
366
+ else :
367
+ usage_metadata = None
368
+
347
369
return protos .GenerateContentResponse (
348
370
candidates = _join_candidate_lists (c .candidates for c in chunks ),
349
371
prompt_feedback = _join_prompt_feedbacks (c .prompt_feedback for c in chunks ),
350
- usage_metadata = chunks [ - 1 ]. usage_metadata ,
372
+ usage_metadata = usage_metadata ,
351
373
)
352
374
353
375
@@ -412,14 +434,22 @@ def parts(self):
412
434
"""
413
435
candidates = self .candidates
414
436
if not candidates :
415
- raise ValueError (
437
+ msg = (
416
438
"Invalid operation: The `response.parts` quick accessor requires a single candidate, "
417
- "but none were returned. Please check the `response.prompt_feedback` to determine if the prompt was blocked ."
439
+ "but `response.candidates` is empty."
418
440
)
441
+ if self .prompt_feedback :
442
+ raise ValueError (
443
+ msg + "\n This appears to be caused by a blocked prompt, "
444
+ f"see `response.prompt_feedback`: { self .prompt_feedback } "
445
+ )
446
+ else :
447
+ raise ValueError (msg )
448
+
419
449
if len (candidates ) > 1 :
420
450
raise ValueError (
421
- "Invalid operation: The `response.parts` quick accessor requires a single candidate. "
422
- "For multiple candidates, please use `result.candidates[index].text`."
451
+ "Invalid operation: The `response.parts` quick accessor retrieves the parts for a single candidate. "
452
+ "This response contains multiple candidates, please use `result.candidates[index].text`."
423
453
)
424
454
parts = candidates [0 ].content .parts
425
455
return parts
@@ -433,10 +463,53 @@ def text(self):
433
463
"""
434
464
parts = self .parts
435
465
if not parts :
436
- raise ValueError (
437
- "Invalid operation: The `response.text` quick accessor requires the response to contain a valid `Part`, "
438
- "but none were returned. Please check the `candidate.safety_ratings` to determine if the response was blocked."
466
+ candidate = self .candidates [0 ]
467
+
468
+ fr = candidate .finish_reason
469
+ FinishReason = protos .Candidate .FinishReason
470
+
471
+ msg = (
472
+ "Invalid operation: The `response.text` quick accessor requires the response to contain a valid "
473
+ "`Part`, but none were returned. The candidate's "
474
+ f"[finish_reason](https://ai.google.dev/api/generate-content#finishreason) is { fr } ."
439
475
)
476
+ if candidate .finish_message :
477
+ msg += f' The `finish_message` is "{candidate.finish_message}".'
478
+
479
+ if fr is FinishReason .FINISH_REASON_UNSPECIFIED :
480
+ raise ValueError (msg )
481
+ elif fr is FinishReason .STOP :
482
+ raise ValueError (msg )
483
+ elif fr is FinishReason .MAX_TOKENS :
484
+ raise ValueError (msg )
485
+ elif fr is FinishReason .SAFETY :
486
+ raise ValueError (
487
+ msg + f" The candidate's safety_ratings are: { candidate .safety_ratings } ." ,
488
+ candidate .safety_ratings ,
489
+ )
490
+ elif fr is FinishReason .RECITATION :
491
+ raise ValueError (
492
+ msg + " Meaning that the model was reciting from copyrighted material."
493
+ )
494
+ elif fr is FinishReason .LANGUAGE :
495
+ raise ValueError (msg + " Meaning the response was using an unsupported language." )
496
+ elif fr is FinishReason .OTHER :
497
+ raise ValueError (msg )
498
+ elif fr is FinishReason .BLOCKLIST :
499
+ raise ValueError (msg )
500
+ elif fr is FinishReason .PROHIBITED_CONTENT :
501
+ raise ValueError (msg )
502
+ elif fr is FinishReason .SPII :
503
+ raise ValueError (msg + " SPII - Sensitive Personally Identifiable Information." )
504
+ elif fr is FinishReason .MALFORMED_FUNCTION_CALL :
505
+ raise ValueError (
506
+ msg + " Meaning that the model generated a `FunctionCall` that was invalid. "
507
+ "Setting the "
508
+ "[Function calling mode](https://ai.google.dev/gemini-api/docs/function-calling#function_calling_mode) "
509
+ "to `ANY` can fix this because it enables constrained decoding."
510
+ )
511
+ else :
512
+ raise ValueError (msg )
440
513
441
514
texts = []
442
515
for part in parts :
@@ -490,7 +563,8 @@ def __str__(self) -> str:
490
563
_result = _result .replace ("\n " , "\n " )
491
564
492
565
if self ._error :
493
- _error = f",\n error=<{ self ._error .__class__ .__name__ } > { self ._error } "
566
+
567
+ _error = f",\n error={ repr (self ._error )} "
494
568
else :
495
569
_error = ""
496
570
0 commit comments