1313# limitations under the License.
1414
1515import functools
16+ import json
1617import logging
1718import os
1819import time
2122from google .genai .models import AsyncModels , Models
2223from google .genai .types import (
2324 BlockedReason ,
25+ Candidate ,
26+ Content ,
27+ ContentUnion ,
28+ ContentUnionDict ,
2429 ContentListUnion ,
2530 ContentListUnionDict ,
2631 GenerateContentConfigOrDict ,
_logger = logging.getLogger(__name__)


# Constant used to make the absence of content more understandable:
# this placeholder is logged in place of prompt/response content when
# content recording is disabled, so consumers can tell "content was
# withheld" apart from "content was empty".
_CONTENT_ELIDED = "<elided>"
4352class _MethodsSnapshot :
4453 def __init__ (self ):
4554 self ._original_generate_content = Models .generate_content
@@ -177,6 +186,15 @@ def _get_top_p(config: Optional[GenerateContentConfigOrDict]):
177186}
178187
179188
189+ def _to_dict (value : object ):
190+ if isinstance (value , dict ):
191+ return value
192+ if hasattr (value , 'model_dump' ):
193+ return value .model_dump ()
194+ return json .loads (json .dumps (value ))
195+
196+
197+
180198class _GenerateContentInstrumentationHelper :
181199 def __init__ (
182200 self ,
@@ -193,6 +211,8 @@ def __init__(
193211 self ._input_tokens = 0
194212 self ._output_tokens = 0
195213 self ._content_recording_enabled = is_content_recording_enabled ()
214+ self ._response_index = 0
215+ self ._candidate_index = 0
196216
197217 def start_span_as_current_span (self , model_name , function_name ):
198218 return self ._otel_wrapper .start_as_current_span (
@@ -230,6 +250,7 @@ def process_response(self, response: GenerateContentResponse):
230250 self ._maybe_update_token_counts (response )
231251 self ._maybe_update_error_type (response )
232252 self ._maybe_log_response (response )
253+ self ._response_index += 1
233254
234255 def process_error (self , e : Exception ):
235256 self ._error_type = str (e .__class__ .__name__ )
@@ -291,64 +312,145 @@ def _maybe_update_error_type(self, response: GenerateContentResponse):
291312 def _maybe_log_system_instruction (
292313 self , config : Optional [GenerateContentConfigOrDict ] = None
293314 ):
294- if not self ._content_recording_enabled :
295- return
296315 system_instruction = _get_config_property (config , "system_instruction" )
297316 if not system_instruction :
298317 return
318+ attributes = {
319+ gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
320+ }
299321 # TODO: determine if "role" should be reported here or not. It is unclear
300322 # since the caller does not supply a "role" and since this comes through
301323 # a property named "system_instruction" which would seem to align with
302324 # the default "role" that is allowed to be omitted by default.
303325 #
304326 # See also: "TODOS.md"
327+ body = {}
328+ if self ._content_recording_enabled :
329+ body ["content" ] = _to_dict (system_instruction )
330+ else :
331+ body ["content" ] = _CONTENT_ELIDED
305332 self ._otel_wrapper .log_system_prompt (
306- attributes = {
307- gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
308- },
309- body = {
310- "content" : system_instruction ,
311- },
333+ attributes = attributes ,
334+ body = body ,
312335 )
313336
314337 def _maybe_log_user_prompt (
315338 self , contents : Union [ContentListUnion , ContentListUnionDict ]
316339 ):
317- if not self ._content_recording_enabled :
318- return
340+ if isinstance (contents , list ):
341+ total = len (contents )
342+ index = 0
343+ for entry in contents :
344+ self ._maybe_log_single_user_prompt (entry , index = index , total = total )
345+ index += 1
346+ else :
347+ self ._maybe_log_single_user_prompt (contents )
348+
349+ def _maybe_log_single_user_prompt (
350+ self ,
351+ contents : Union [ContentUnion , ContentUnionDict ],
352+ index = 0 ,
353+ total = 1 ):
354+ # TODO: figure out how to report the index in a manner that is
355+ # aligned with the OTel semantic conventions.
356+ attributes = {
357+ gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
358+ }
359+
319360 # TODO: determine if "role" should be reported here or not and, if so,
320361 # what the value ought to be. It is not clear whether there is always
321362 # a role supplied (and it looks like there could be cases where there
322363 # is more than one role present in the supplied contents)?
323364 #
324365 # See also: "TODOS.md"
366+ body = {}
367+ if self ._content_recording_enabled :
368+ logged_contents = contents
369+ if isinstance (contents , list ):
370+ logged_contents = Content (parts = contents )
371+ body ["content" ] = _to_dict (logged_contents )
372+ else :
373+ body ["content" ] = _CONTENT_ELIDED
325374 self ._otel_wrapper .log_user_prompt (
326- attributes = {
327- gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
328- },
329- body = {
330- "content" : contents ,
331- },
375+ attributes = attributes ,
376+ body = body ,
332377 )
333378
379+ def _maybe_log_response_stats (self , response : GenerateContentResponse ):
380+ # TODO: Determine if there is a way that we can log a summary
381+ # of the overall response in a manner that is aligned with
382+ # Semantic Conventions. For example, it would be natural
383+ # to report an event that looks something like:
384+ #
385+ # gen_ai.response.stats {
386+ # response_index: 0,
387+ # candidate_count: 3,
388+ # parts_per_candidate: [
389+ # 3,
390+ # 1,
391+ # 5
392+ # ]
393+ # }
394+ #
395+ pass
396+
397+ def _maybe_log_response_safety_ratings (self , response : GenerateContentResponse ):
398+ # TODO: Determine if there is a way that we can log
399+ # the "prompt_feedback". This would be especially useful
400+ # in the case where the response is blocked.
401+ pass
402+
334403 def _maybe_log_response (self , response : GenerateContentResponse ):
335- if not self ._content_recording_enabled :
404+ self ._maybe_log_response_stats (response )
405+ self ._maybe_log_response_safety_ratings (response )
406+ if not response .candidates :
336407 return
408+ candidate_in_response_index = 0
409+ for candidate in response .candidates :
410+ self ._maybe_log_response_candidate (
411+ candidate ,
412+ flat_candidate_index = self ._candidate_index ,
413+ candidate_in_response_index = candidate_in_response_index ,
414+ response_index = self ._response_index )
415+ self ._candidate_index += 1
416+ candidate_in_response_index += 1
417+
418+ def _maybe_log_response_candidate (
419+ self ,
420+ candidate : Candidate ,
421+ flat_candidate_index : int ,
422+ candidate_in_response_index : int ,
423+ response_index : int ):
424+ # TODO: Determine if there might be a way to report the
425+ # response index and candidate response index.
426+ attributes = {
427+ gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
428+ }
337429 # TODO: determine if "role" should be reported here or not and, if so,
338430 # what the value ought to be.
339431 #
340432 # TODO: extract tool information into a separate tool message.
341433 #
342- # TODO: determine if/when we need to emit a 'gen_ai.choice' event.
434+ # TODO: determine if/when we need to emit a 'gen_ai.assistant.message' event.
435+ #
436+ # TODO: determine how to report other relevant details in the candidate that
437+ # are not presently captured by Semantic Conventions. For example, the
438+ # "citation_metadata", "grounding_metadata", "logprobs_result", etc.
343439 #
344440 # See also: "TODOS.md"
441+ body = {
442+ "index" : flat_candidate_index ,
443+ }
444+ if self ._content_recording_enabled :
445+ if candidate .content :
446+ body ["content" ] = _to_dict (candidate .content )
447+ else :
448+ body ["content" ] = _CONTENT_ELIDED
449+ if candidate .finish_reason is not None :
450+ body ["finish_reason" ] = candidate .finish_reason .name
345451 self ._otel_wrapper .log_response_content (
346- attributes = {
347- gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
348- },
349- body = {
350- "content" : response .model_dump (),
351- },
452+ attributes = attributes ,
453+ body = body ,
352454 )
353455
354456 def _record_token_usage_metric (self ):
0 commit comments