     GenerateContentConfigOrDict,
     GenerateContentResponse,
 )
+
 from opentelemetry import trace
 from opentelemetry._events import Event
 from opentelemetry.instrumentation._semconv import (
@@ -48,13 +49,13 @@
 )
 from opentelemetry.semconv.attributes import error_attributes
 from opentelemetry.trace.span import Span
+from opentelemetry.util.genai.completion_hook import CompletionHook
 from opentelemetry.util.genai.types import (
     ContentCapturingMode,
-    MessagePart,
     InputMessage,
+    MessagePart,
     OutputMessage,
 )
-from opentelemetry.util.genai.completion_hook import CompletionHook

 from .allowlist_util import AllowList
 from .custom_semconv import GCP_GENAI_OPERATION_CONFIG
@@ -165,7 +166,9 @@ def _to_dict(value: object):


 def _add_request_options_to_span(
-    span: Span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList
+    span: Span,
+    config: Optional[GenerateContentConfigOrDict],
+    allow_list: AllowList,
 ):
     if config is None:
         return
@@ -209,7 +212,9 @@ def _add_request_options_to_span(
         },
     )
     for key, value in attributes.items():
-        if key.startswith(GCP_GENAI_OPERATION_CONFIG) and not allow_list.allowed(key):
+        if key.startswith(
+            GCP_GENAI_OPERATION_CONFIG
+        ) and not allow_list.allowed(key):
             # The allowlist is used to control inclusion of the dynamic keys.
             continue
         span.set_attribute(key, value)
@@ -245,7 +250,9 @@ def _wrapped_config_with_tools(
     if not config.tools:
         return config
     result = copy.copy(config)
-    result.tools = [wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools]
+    result.tools = [
+        wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools
+    ]
     return result


@@ -268,10 +275,12 @@ def _create_completion_details_attributes(
 ) -> dict[str, Any]:
     attributes: dict[str, Any] = {
         gen_ai_attributes.GEN_AI_INPUT_MESSAGES: [
-            dataclasses.asdict(input_message) for input_message in input_messages
+            dataclasses.asdict(input_message)
+            for input_message in input_messages
         ],
         gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: [
-            dataclasses.asdict(output_message) for output_message in output_messages
+            dataclasses.asdict(output_message)
+            for output_message in output_messages
         ],
     }
     if system_instructions:
@@ -280,7 +289,10 @@ def _create_completion_details_attributes(
         ]

     if as_str:
-        return {k: json.dumps(v, cls=Base64JsonEncoder) for k, v in attributes.items()}
+        return {
+            k: json.dumps(v, cls=Base64JsonEncoder)
+            for k, v in attributes.items()
+        }

     return attributes

@@ -464,12 +476,17 @@ def _maybe_log_completion_details(
         system_instructions = to_system_instructions(
             content=transformers.t_contents(system_content)[0]
         )
-        input_messages = to_input_messages(contents=transformers.t_contents(request))
-        output_messages = to_output_messages(candidates=response.candidates or [])
+        input_messages = to_input_messages(
+            contents=transformers.t_contents(request)
+        )
+        output_messages = to_output_messages(
+            candidates=response.candidates or []
+        )

         span = trace.get_current_span()
         event = Event(
-            name="gen_ai.client.inference.operation.details", attributes=attributes
+            name="gen_ai.client.inference.operation.details",
+            attributes=attributes,
         )
         self.completion_hook.on_completion(
             inputs=input_messages,
@@ -482,21 +499,25 @@ def _maybe_log_completion_details(
             ContentCapturingMode.SPAN_ONLY,
             ContentCapturingMode.SPAN_AND_EVENT,
         ]:
-            completion_details_attributes = _create_completion_details_attributes(
-                input_messages,
-                output_messages,
-                system_instructions,
-                as_str=True,
+            completion_details_attributes = (
+                _create_completion_details_attributes(
+                    input_messages,
+                    output_messages,
+                    system_instructions,
+                    as_str=True,
+                )
             )
             span.set_attributes(completion_details_attributes)
         if self._content_recording_enabled in [
             ContentCapturingMode.EVENT_ONLY,
             ContentCapturingMode.SPAN_AND_EVENT,
         ]:
-            completion_details_attributes = _create_completion_details_attributes(
-                input_messages,
-                output_messages,
-                system_instructions,
+            completion_details_attributes = (
+                _create_completion_details_attributes(
+                    input_messages,
+                    output_messages,
+                    system_instructions,
+                )
             )
             event.attributes = {
                 **(event.attributes or {}),
@@ -541,7 +562,9 @@ def _maybe_log_user_prompt(
             total = len(contents)
             index = 0
             for entry in contents:
-                self._maybe_log_single_user_prompt(entry, index=index, total=total)
+                self._maybe_log_single_user_prompt(
+                    entry, index=index, total=total
+                )
                 index += 1
         else:
             self._maybe_log_single_user_prompt(contents)
@@ -647,7 +670,9 @@ def _maybe_log_response_stats(self, response: GenerateContentResponse):
         #
         pass

-    def _maybe_log_response_safety_ratings(self, response: GenerateContentResponse):
+    def _maybe_log_response_safety_ratings(
+        self, response: GenerateContentResponse
+    ):
         # TODO: Determine if there is a way that we can log
         # the "prompt_feedback". This would be especially useful
         # in the case where the response is blocked.
@@ -917,13 +942,18 @@ async def _response_async_generator_wrapper():
             with trace.use_span(span, end_on_exit=True):
                 try:
                     async for response in response_async_generator:
-                        if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
+                        if (
+                            helper.sem_conv_opt_in_mode
+                            == _StabilityMode.DEFAULT
+                        ):
                             helper.process_response(response)
                         elif (
                             helper.sem_conv_opt_in_mode
                             == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
                         ):
-                            helper.process_completion(contents, response, config)
+                            helper.process_completion(
+                                contents, response, config
+                            )
                         else:
                             raise ValueError(
                                 f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported."
@@ -969,12 +999,10 @@ def instrument_generate_content(
         completion_hook,
         generate_content_config_key_allowlist=generate_content_config_key_allowlist,
     )
-    AsyncModels.generate_content_stream = (
-        _create_instrumented_async_generate_content_stream(
-            snapshot,
-            otel_wrapper,
-            completion_hook,
-            generate_content_config_key_allowlist=generate_content_config_key_allowlist,
-        )
+    AsyncModels.generate_content_stream = _create_instrumented_async_generate_content_stream(
+        snapshot,
+        otel_wrapper,
+        completion_hook,
+        generate_content_config_key_allowlist=generate_content_config_key_allowlist,
     )
     return snapshot