Skip to content

Commit f199af5

Browse files
committed
Fix a few bugs in gen AI instrumentation
1 parent 34db73e commit f199af5

File tree

5 files changed

+123
-94
lines changed

5 files changed

+123
-94
lines changed

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/dict_util.py

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -189,13 +189,16 @@ def _flatten_compound_value(
189189
flatten_functions=flatten_functions,
190190
)
191191
if hasattr(value, "model_dump"):
192-
return _flatten_dict(
193-
value.model_dump(),
194-
key_prefix=key,
195-
exclude_keys=exclude_keys,
196-
rename_keys=rename_keys,
197-
flatten_functions=flatten_functions,
198-
)
192+
try:
193+
return _flatten_dict(
194+
value.model_dump(),
195+
key_prefix=key,
196+
exclude_keys=exclude_keys,
197+
rename_keys=rename_keys,
198+
flatten_functions=flatten_functions,
199+
)
200+
except TypeError:
201+
return {key: str(value)}
199202
return _flatten_compound_value_using_json(
200203
key,
201204
value,

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 88 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -162,30 +162,24 @@ def _to_dict(value: object):
162162
if isinstance(value, dict):
163163
return value
164164
if hasattr(value, "model_dump"):
165-
return value.model_dump()
165+
try:
166+
return value.model_dump()
167+
except TypeError:
168+
return {"ModelName": str(value)}
169+
166170
return json.loads(json.dumps(value))
167171

168172

169-
def _add_request_options_to_span(
170-
span: Span,
173+
def create_request_attributes(
171174
config: Optional[GenerateContentConfigOrDict],
175+
is_experimental_mode: bool,
172176
allow_list: AllowList,
173-
):
174-
if config is None:
175-
return
176-
span_context = span.get_span_context()
177-
if not span_context.trace_flags.sampled:
178-
# Avoid potentially costly traversal of config
179-
# options if the span will be dropped, anyway.
180-
return
181-
# Automatically derive attributes from the contents of the
182-
# config object. This ensures that all relevant parameters
183-
# are captured in the telemetry data (except for those
184-
# that are excluded via "exclude_keys"). Dynamic attributes (those
185-
# starting with "gcp.gen_ai." instead of simply "gen_ai.request.")
186-
# are filtered with the "allow_list" before inclusion in the span.
177+
) -> dict[str, Any]:
178+
if not config:
179+
return {}
180+
config = _to_dict(config)
187181
attributes = flatten_dict(
188-
_to_dict(config),
182+
config,
189183
# A custom prefix is used, because the names/structure of the
190184
# configuration is likely to be specific to Google Gen AI SDK.
191185
key_prefix=GCP_GENAI_OPERATION_CONFIG,
@@ -212,37 +206,21 @@ def _add_request_options_to_span(
212206
"gcp.gen_ai.operation.config.seed": gen_ai_attributes.GEN_AI_REQUEST_SEED,
213207
},
214208
)
215-
for key, value in attributes.items():
216-
if key.startswith(
217-
GCP_GENAI_OPERATION_CONFIG
218-
) and not allow_list.allowed(key):
219-
# The allowlist is used to control inclusion of the dynamic keys.
220-
continue
221-
span.set_attribute(key, value)
222-
223-
224-
def _get_gen_ai_request_attributes(
225-
config: Union[GenerateContentConfigOrDict, None],
226-
) -> dict[str, Any]:
227-
if not config:
228-
return {}
229-
attributes: dict[str, Any] = {}
230-
config = _coerce_config_to_object(config)
231-
if config.seed:
232-
attributes[gen_ai_attributes.GEN_AI_REQUEST_SEED] = config.seed
233-
if config.candidate_count:
234-
attributes[gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT] = (
235-
config.candidate_count
236-
)
237-
if config.response_mime_type:
238-
if config.response_mime_type == "text/plain":
209+
response_mime_type = config.get("response_mime_type")
210+
if response_mime_type and is_experimental_mode:
211+
if response_mime_type == "text/plain":
239212
attributes[gen_ai_attributes.GEN_AI_OUTPUT_TYPE] = "text"
240-
elif config.response_mime_type == "application/json":
213+
elif response_mime_type == "application/json":
241214
attributes[gen_ai_attributes.GEN_AI_OUTPUT_TYPE] = "json"
242215
else:
243216
attributes[gen_ai_attributes.GEN_AI_OUTPUT_TYPE] = (
244-
config.response_mime_type
217+
response_mime_type
245218
)
219+
for key in list(attributes.keys()):
220+
if key.startswith(
221+
GCP_GENAI_OPERATION_CONFIG
222+
) and not allow_list.allowed(key):
223+
del attributes[key]
246224
return attributes
247225

248226

@@ -372,14 +350,6 @@ def start_span_as_current_span(
372350
end_on_exit=end_on_exit,
373351
)
374352

375-
def add_request_options_to_span(
376-
self, config: Optional[GenerateContentConfigOrDict]
377-
):
378-
span = trace.get_current_span()
379-
_add_request_options_to_span(
380-
span, config, self._generate_content_config_key_allowlist
381-
)
382-
383353
def process_request(
384354
self,
385355
contents: Union[ContentListUnion, ContentListUnionDict],
@@ -393,17 +363,6 @@ def process_response(self, response: GenerateContentResponse):
393363
self._maybe_log_response(response)
394364
self._response_index += 1
395365

396-
def process_completion(
397-
self,
398-
request: Union[ContentListUnion, ContentListUnionDict],
399-
response: GenerateContentResponse,
400-
config: Optional[GenerateContentConfigOrDict] = None,
401-
):
402-
self._update_response(response)
403-
self._maybe_log_completion_details(
404-
request, response.candidates or [], config
405-
)
406-
407366
def process_error(self, e: Exception):
408367
self._error_type = str(e.__class__.__name__)
409368

@@ -488,11 +447,11 @@ def _maybe_update_error_type(self, response: GenerateContentResponse):
488447

489448
def _maybe_log_completion_details(
490449
self,
450+
request_attributes: dict[str, Any],
491451
request: Union[ContentListUnion, ContentListUnionDict],
492452
candidates: list[Candidate],
493453
config: Optional[GenerateContentConfigOrDict] = None,
494454
):
495-
attributes = _get_gen_ai_request_attributes(config)
496455
system_instructions = []
497456
if system_content := _config_to_system_instruction(config):
498457
system_instructions = to_system_instructions(
@@ -506,7 +465,7 @@ def _maybe_log_completion_details(
506465
span = trace.get_current_span()
507466
event = LogRecord(
508467
event_name="gen_ai.client.inference.operation.details",
509-
attributes=attributes,
468+
attributes=request_attributes,
510469
)
511470
self.completion_hook.on_completion(
512471
inputs=input_messages,
@@ -540,7 +499,7 @@ def _maybe_log_completion_details(
540499
for k, v in completion_details_attributes.items()
541500
}
542501
)
543-
span.set_attributes(attributes)
502+
# request attributes were already set on the span.
544503

545504
def _maybe_log_system_instruction(
546505
self, config: Optional[GenerateContentConfigOrDict] = None
@@ -748,17 +707,27 @@ def instrumented_generate_content(
748707
config: Optional[GenerateContentConfigOrDict] = None,
749708
**kwargs: Any,
750709
) -> GenerateContentResponse:
710+
candidates = []
751711
helper = _GenerateContentInstrumentationHelper(
752712
self,
753713
otel_wrapper,
754714
model,
755715
completion_hook,
756716
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
757717
)
718+
is_experimental_mode = (
719+
helper.sem_conv_opt_in_mode
720+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
721+
)
722+
request_attributes = create_request_attributes(
723+
config,
724+
is_experimental_mode,
725+
helper._generate_content_config_key_allowlist,
726+
)
758727
with helper.start_span_as_current_span(
759728
model, "google.genai.Models.generate_content"
760-
):
761-
helper.add_request_options_to_span(config)
729+
) as span:
730+
span.set_attributes(request_attributes)
762731
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
763732
helper.process_request(contents, config)
764733
try:
@@ -771,11 +740,9 @@ def instrumented_generate_content(
771740
)
772741
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
773742
helper.process_response(response)
774-
elif (
775-
helper.sem_conv_opt_in_mode
776-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
777-
):
778-
helper.process_completion(contents, response, config)
743+
elif is_experimental_mode:
744+
candidates = response.candidates
745+
helper._update_response(response)
779746
else:
780747
raise ValueError(
781748
f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported."
@@ -785,6 +752,9 @@ def instrumented_generate_content(
785752
helper.process_error(error)
786753
raise
787754
finally:
755+
helper._maybe_log_completion_details(
756+
request_attributes, contents, candidates, config
757+
)
788758
helper.finalize_processing()
789759

790760
return instrumented_generate_content
@@ -815,10 +785,19 @@ def instrumented_generate_content_stream(
815785
completion_hook,
816786
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
817787
)
788+
is_experimental_mode = (
789+
helper.sem_conv_opt_in_mode
790+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
791+
)
792+
request_attributes = create_request_attributes(
793+
config,
794+
is_experimental_mode,
795+
helper._generate_content_config_key_allowlist,
796+
)
818797
with helper.start_span_as_current_span(
819798
model, "google.genai.Models.generate_content_stream"
820-
):
821-
helper.add_request_options_to_span(config)
799+
) as span:
800+
span.set_attributes(request_attributes)
822801
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
823802
helper.process_request(contents, config)
824803
try:
@@ -831,10 +810,7 @@ def instrumented_generate_content_stream(
831810
):
832811
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
833812
helper.process_response(response)
834-
elif (
835-
helper.sem_conv_opt_in_mode
836-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
837-
):
813+
elif is_experimental_mode:
838814
helper._update_response(response)
839815
if response.candidates:
840816
candidates += response.candidates
@@ -848,7 +824,7 @@ def instrumented_generate_content_stream(
848824
raise
849825
finally:
850826
helper._maybe_log_completion_details(
851-
contents, candidates, config
827+
request_attributes, contents, candidates, config
852828
)
853829
helper.finalize_processing()
854830

@@ -879,10 +855,20 @@ async def instrumented_generate_content(
879855
completion_hook,
880856
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
881857
)
858+
is_experimental_mode = (
859+
helper.sem_conv_opt_in_mode
860+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
861+
)
862+
request_attributes = create_request_attributes(
863+
config,
864+
is_experimental_mode,
865+
helper._generate_content_config_key_allowlist,
866+
)
867+
candidates: list[Candidate] = []
882868
with helper.start_span_as_current_span(
883869
model, "google.genai.AsyncModels.generate_content"
884-
):
885-
helper.add_request_options_to_span(config)
870+
) as span:
871+
span.set_attributes(request_attributes)
886872
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
887873
helper.process_request(contents, config)
888874
try:
@@ -899,7 +885,9 @@ async def instrumented_generate_content(
899885
helper.sem_conv_opt_in_mode
900886
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
901887
):
902-
helper.process_completion(contents, response, config)
888+
helper._update_response(response)
889+
if response.candidates:
890+
candidates += response.candidates
903891
else:
904892
raise ValueError(
905893
f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported."
@@ -909,6 +897,12 @@ async def instrumented_generate_content(
909897
helper.process_error(error)
910898
raise
911899
finally:
900+
helper._maybe_log_completion_details(
901+
request_attributes,
902+
contents,
903+
candidates,
904+
config,
905+
)
912906
helper.finalize_processing()
913907

914908
return instrumented_generate_content
@@ -939,12 +933,21 @@ async def instrumented_generate_content_stream(
939933
completion_hook,
940934
generate_content_config_key_allowlist=generate_content_config_key_allowlist,
941935
)
936+
is_experimental_mode = (
937+
helper.sem_conv_opt_in_mode
938+
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
939+
)
940+
request_attributes = create_request_attributes(
941+
config,
942+
is_experimental_mode,
943+
helper._generate_content_config_key_allowlist,
944+
)
942945
with helper.start_span_as_current_span(
943946
model,
944947
"google.genai.AsyncModels.generate_content_stream",
945948
end_on_exit=False,
946949
) as span:
947-
helper.add_request_options_to_span(config)
950+
span.set_attributes(request_attributes)
948951
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
949952
helper.process_request(contents, config)
950953
try:
@@ -988,7 +991,7 @@ async def _response_async_generator_wrapper():
988991
raise
989992
finally:
990993
helper._maybe_log_completion_details(
991-
contents, candidates, config
994+
request_attributes, contents, candidates, config
992995
)
993996
helper.finalize_processing()
994997

0 commit comments

Comments
 (0)