From 3ac198d048f6beda29271e055a9b96a030aceb9a Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Mon, 22 Sep 2025 17:05:24 +0000 Subject: [PATCH 01/19] Update google genai instrumentation to latest semantic convention. Co-authored-by: Aaron Abbott --- .../instrumentation/google_genai/flags.py | 23 +- .../google_genai/generate_content.py | 287 ++++++++++++++---- .../instrumentation/google_genai/message.py | 141 +++++++++ .../google_genai/message_models.py | 58 ++++ .../google_genai/otel_wrapper.py | 36 ++- .../google_genai/tool_call_wrapper.py | 58 ++-- 6 files changed, 500 insertions(+), 103 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py index 541d9ab48f..6fd404eadf 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/flags.py @@ -12,12 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os +from os import environ +from typing import Union -_CONTENT_RECORDING_ENV_VAR = ( - "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT" +from opentelemetry.instrumentation._semconv import _StabilityMode +from opentelemetry.util.genai.environment_variables import ( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, ) +from opentelemetry.util.genai.types import ContentCapturingMode +from opentelemetry.util.genai.utils import get_content_capturing_mode -def is_content_recording_enabled(): - return os.getenv(_CONTENT_RECORDING_ENV_VAR, "false").lower() == "true" +def is_content_recording_enabled( + mode: _StabilityMode, +) -> Union[bool, ContentCapturingMode]: + if mode == _StabilityMode.DEFAULT: + capture_content = environ.get( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false" + ) + return capture_content.lower() == "true" + if mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL: + return get_content_capturing_mode() + raise RuntimeError(f"{mode} mode not supported") diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 7e85336e56..cf8bc7da67 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -13,6 +13,7 @@ # limitations under the License. 
import copy +import dataclasses import functools import json import logging @@ -21,6 +22,7 @@ from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union from google.genai.models import AsyncModels, Models +from google.genai.models import t as transformers from google.genai.types import ( BlockedReason, Candidate, @@ -33,18 +35,33 @@ GenerateContentConfigOrDict, GenerateContentResponse, ) - from opentelemetry import trace +from opentelemetry._events import Event +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, +) from opentelemetry.semconv._incubating.attributes import ( code_attributes, gen_ai_attributes, ) from opentelemetry.semconv.attributes import error_attributes +from opentelemetry.trace.span import Span +from opentelemetry.util.genai.types import ContentCapturingMode +from opentelemetry.util.genai.upload_hook import load_upload_hook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG from .dict_util import flatten_dict from .flags import is_content_recording_enabled +from .message import ( + InputMessage, + OutputMessage, + to_input_messages, + to_output_messages, + to_system_instruction, +) from .otel_wrapper import OTelWrapper from .tool_call_wrapper import wrapped as wrapped_tool @@ -144,7 +161,7 @@ def _to_dict(value: object): def _add_request_options_to_span( - span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList + span: Span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList ): if config is None: return @@ -188,9 +205,7 @@ def _add_request_options_to_span( }, ) for key, value in attributes.items(): - if key.startswith( - GCP_GENAI_OPERATION_CONFIG - ) and not allow_list.allowed(key): + if key.startswith(GCP_GENAI_OPERATION_CONFIG) and not allow_list.allowed(key): # The allowlist is used to control inclusion of the dynamic keys. 
continue span.set_attribute(key, value) @@ -226,12 +241,42 @@ def _wrapped_config_with_tools( if not config.tools: return config result = copy.copy(config) - result.tools = [ - wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools - ] + result.tools = [wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools] return result +def _config_to_system_instruction( + config: Union[GenerateContentConfigOrDict, None], +) -> Union[ContentUnion, None]: + if not config: + return None + + if isinstance(config, dict): + return GenerateContentConfig.model_validate(config).system_instruction + return config.system_instruction + + +def _create_completion_details_attributes( + input_messages: list[InputMessage], + output_messages: list[OutputMessage], + system_instruction: Union[InputMessage, None], +): + attributes = { + "gen_ai.input.messages": json.dumps( + [dataclasses.asdict(input_message) for input_message in input_messages] + ), + "gen_ai.output.messages": json.dumps( + [dataclasses.asdict(output_message) for output_message in output_messages] + ), + } + if system_instruction: + attributes["gen_ai.system.instructions"] = json.dumps( + dataclasses.asdict(system_instruction) + ) + + return attributes + + class _GenerateContentInstrumentationHelper: def __init__( self, @@ -248,7 +293,12 @@ def __init__( self._error_type = None self._input_tokens = 0 self._output_tokens = 0 - self._content_recording_enabled = is_content_recording_enabled() + self.sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.GEN_AI + ) + self._content_recording_enabled = is_content_recording_enabled( + self.sem_conv_opt_in_mode + ) self._response_index = 0 self._candidate_index = 0 self._generate_content_config_key_allowlist = ( @@ -268,7 +318,7 @@ def wrapped_config( def start_span_as_current_span( self, model_name, function_name, end_on_exit=True - ): + ) -> Span: return 
self._otel_wrapper.start_as_current_span( f"{_GENERATE_CONTENT_OP_NAME} {model_name}", start_time=self._start_time, @@ -281,29 +331,37 @@ def start_span_as_current_span( end_on_exit=end_on_exit, ) - def process_request( - self, - contents: Union[ContentListUnion, ContentListUnionDict], - config: Optional[GenerateContentConfigOrDict], + def add_request_options_to_span( + self, config: Optional[GenerateContentConfigOrDict] ): span = trace.get_current_span() _add_request_options_to_span( span, config, self._generate_content_config_key_allowlist ) + + def process_request( + self, + contents: Union[ContentListUnion, ContentListUnionDict], + config: Optional[GenerateContentConfigOrDict], + ): self._maybe_log_system_instruction(config=config) self._maybe_log_user_prompt(contents) def process_response(self, response: GenerateContentResponse): - # TODO: Determine if there are other response properties that - # need to be reflected back into the span attributes. - # - # See also: TODOS.md. - self._update_finish_reasons(response) - self._maybe_update_token_counts(response) - self._maybe_update_error_type(response) + self._update_response(response) self._maybe_log_response(response) self._response_index += 1 + def process_completion( + self, + request: Union[ContentListUnion, ContentListUnionDict], + response: GenerateContentResponse, + config: Optional[GenerateContentConfigOrDict] = None, + ): + self._update_response(response) + self._maybe_log_completion_details(request, response, config) + self._response_index += 1 + def process_error(self, e: Exception): self._error_type = str(e.__class__.__name__) @@ -322,7 +380,16 @@ def finalize_processing(self): self._record_token_usage_metric() self._record_duration_metric() - def _update_finish_reasons(self, response): + def _update_response(self, response: GenerateContentResponse): + # TODO: Determine if there are other response properties that + # need to be reflected back into the span attributes. + # + # See also: TODOS.md. 
+ self._update_finish_reasons(response) + self._maybe_update_token_counts(response) + self._maybe_update_error_type(response) + + def _update_finish_reasons(self, response: GenerateContentResponse): if not response.candidates: return for candidate in response.candidates: @@ -373,6 +440,56 @@ def _maybe_update_error_type(self, response: GenerateContentResponse): block_reason = response.prompt_feedback.block_reason.name.upper() self._error_type = f"BLOCKED_{block_reason}" + def _maybe_log_completion_details( + self, + request: Union[ContentListUnion, ContentListUnionDict], + response: GenerateContentResponse, + config: Optional[GenerateContentConfigOrDict] = None, + ): + attributes = { + gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system, + } + system_instruction = None + if system_content := _config_to_system_instruction(config): + system_instruction = to_system_instruction( + content=transformers.t_contents(system_content)[0] + ) + input_messages = to_input_messages(contents=transformers.t_contents(request)) + output_messages = to_output_messages(candidates=response.candidates or []) + + completion_details_attributes = _create_completion_details_attributes( + input_messages, output_messages, system_instruction + ) + + span = None + if self._content_recording_enabled in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + span = trace.get_current_span() + span.set_attributes(completion_details_attributes) + if self._content_recording_enabled in [ + ContentCapturingMode.EVENT_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + attributes.update(completion_details_attributes) + event = Event(name="gen_ai.completion.details", attributes=attributes) + hook = load_upload_hook() + hook.upload( + inputs=input_messages, + outputs=output_messages, + system_instruction=( + system_instruction.parts if system_instruction else [] + ), + span=span, + log_record=event, + ) + # TODO Cannot access attribute shutdown for class UploadHook + # 
hook.shutdown() + self._otel_wrapper.log_completion_details( + event=event, + ) + def _maybe_log_system_instruction( self, config: Optional[GenerateContentConfigOrDict] = None ): @@ -410,9 +527,7 @@ def _maybe_log_user_prompt( total = len(contents) index = 0 for entry in contents: - self._maybe_log_single_user_prompt( - entry, index=index, total=total - ) + self._maybe_log_single_user_prompt(entry, index=index, total=total) index += 1 else: self._maybe_log_single_user_prompt(contents) @@ -445,32 +560,6 @@ def _maybe_log_single_user_prompt( body=body, ) - def _maybe_log_response_stats(self, response: GenerateContentResponse): - # TODO: Determine if there is a way that we can log a summary - # of the overall response in a manner that is aligned with - # Semantic Conventions. For example, it would be natural - # to report an event that looks something like: - # - # gen_ai.response.stats { - # response_index: 0, - # candidate_count: 3, - # parts_per_candidate: [ - # 3, - # 1, - # 5 - # ] - # } - # - pass - - def _maybe_log_response_safety_ratings( - self, response: GenerateContentResponse - ): - # TODO: Determine if there is a way that we can log - # the "prompt_feedback". This would be especially useful - # in the case where the response is blocked. - pass - def _maybe_log_response(self, response: GenerateContentResponse): self._maybe_log_response_stats(response) self._maybe_log_response_safety_ratings(response) @@ -526,6 +615,30 @@ def _maybe_log_response_candidate( body=body, ) + def _maybe_log_response_stats(self, response: GenerateContentResponse): + # TODO: Determine if there is a way that we can log a summary + # of the overall response in a manner that is aligned with + # Semantic Conventions. 
For example, it would be natural + # to report an event that looks something like: + # + # gen_ai.response.stats { + # response_index: 0, + # candidate_count: 3, + # parts_per_candidate: [ + # 3, + # 1, + # 5 + # ] + # } + # + pass + + def _maybe_log_response_safety_ratings(self, response: GenerateContentResponse): + # TODO: Determine if there is a way that we can log + # the "prompt_feedback". This would be especially useful + # in the case where the response is blocked. + pass + def _record_token_usage_metric(self): self._otel_wrapper.token_usage_metric.record( self._input_tokens, @@ -587,7 +700,9 @@ def instrumented_generate_content( with helper.start_span_as_current_span( model, "google.genai.Models.generate_content" ): - helper.process_request(contents, config) + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_request(contents, config) try: response = wrapped_func( self, @@ -596,7 +711,17 @@ def instrumented_generate_content( config=helper.wrapped_config(config), **kwargs, ) - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." 
+ ) return response except Exception as error: helper.process_error(error) @@ -632,7 +757,9 @@ def instrumented_generate_content_stream( with helper.start_span_as_current_span( model, "google.genai.Models.generate_content_stream" ): - helper.process_request(contents, config) + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_request(contents, config) try: for response in wrapped_func( self, @@ -641,7 +768,17 @@ def instrumented_generate_content_stream( config=helper.wrapped_config(config), **kwargs, ): - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." + ) yield response except Exception as error: helper.process_error(error) @@ -677,7 +814,9 @@ async def instrumented_generate_content( with helper.start_span_as_current_span( model, "google.genai.AsyncModels.generate_content" ): - helper.process_request(contents, config) + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_request(contents, config) try: response = await wrapped_func( self, @@ -686,7 +825,17 @@ async def instrumented_generate_content( config=helper.wrapped_config(config), **kwargs, ) - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." 
+ ) return response except Exception as error: helper.process_error(error) @@ -725,6 +874,8 @@ async def instrumented_generate_content_stream( "google.genai.AsyncModels.generate_content_stream", end_on_exit=False, ) as span: + helper.add_request_options_to_span(config) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: helper.process_request(contents, config) try: response_async_generator = await wrapped_func( @@ -744,7 +895,17 @@ async def _response_async_generator_wrapper(): with trace.use_span(span, end_on_exit=True): try: async for response in response_async_generator: - helper.process_response(response) + if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + helper.process_response(response) + elif ( + helper.sem_conv_opt_in_mode + == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + ): + helper.process_completion(contents, response, config) + else: + raise ValueError( + f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." + ) yield response except Exception as error: helper.process_error(error) @@ -782,9 +943,11 @@ def instrument_generate_content( otel_wrapper, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) - AsyncModels.generate_content_stream = _create_instrumented_async_generate_content_stream( - snapshot, - otel_wrapper, - generate_content_config_key_allowlist=generate_content_config_key_allowlist, + AsyncModels.generate_content_stream = ( + _create_instrumented_async_generate_content_stream( + snapshot, + otel_wrapper, + generate_content_config_key_allowlist=generate_content_config_key_allowlist, + ) ) return snapshot diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py new file mode 100644 index 0000000000..eaf470892e --- /dev/null +++ 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -0,0 +1,141 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import logging + +from google.genai import types as genai_types +from opentelemetry.util.genai.types import ( + InputMessage, + OutputMessage, + MessagePart, + FinishReason, + Text, + ToolCall, + ToolCallResponse, +) + +from .message_models import ( + # BlobPart, + # FileDataPart, + Role, +) + +_logger = logging.getLogger(__name__) + +def to_input_messages( + *, + contents: list[genai_types.Content], +) -> list[InputMessage]: + return [_to_input_message(content) for content in contents] + +def to_output_messages( + *, + candidates: list[genai_types.Candidate], +) -> list[OutputMessage]: + def content_to_output_message( + candidate: genai_types.Candidate, + ) -> OutputMessage | None: + if not candidate.content: + return None + + message = _to_input_message(candidate.content) + return OutputMessage( + finish_reason=_to_finish_reason(candidate.finish_reason), + role=message.role, + parts=message.parts, + ) + + messages = ( + content_to_output_message(candidate) for candidate in candidates + ) + return [message for message in messages if message is not None] + +def to_system_instruction( + *, + content: genai_types.Content, +) -> InputMessage: + return _to_input_message(content) + +def _to_input_message( + 
content: genai_types.Content, +) -> InputMessage: + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) + return InputMessage( + role=_to_role(content.role), + # filter Nones + parts=[part for part in parts if part is not None], + ) + +def _to_part(part: genai_types.Part, idx: int) -> MessagePart | None: + def tool_call_id(name: str | None) -> str: + if name: + return f"{name}_{idx}" + return f"{idx}" + + if (text := part.text) is not None: + return Text(content=text) + + # if data := part.inline_data: # TODO ??? + # return BlobPart(mime_type=data.mime_type or "", data=data.data or b"") + + # if data := part.file_data: # TODO ??? + # return FileDataPart( + # mime_type=data.mime_type or "", file_uri=data.file_uri or "" + # ) + + if call := part.function_call: + return ToolCall( + id=call.id or tool_call_id(call.name), # TODO ??? + name=call.name or "", + arguments=call.args, + ) + + if response := part.function_response: + return ToolCallResponse( + id=response.id or tool_call_id(response.name), # TODO ??? 
+ response=response.response, + ) + + _logger.info("Unknown part dropped from telemetry %s", part) + return None + +def _to_role(role: str | None) -> Role | str: + if role == "user": + return Role.USER + if role == "model": + return Role.ASSISTANT + return "" + + +def _to_finish_reason( + finish_reason: genai_types.FinishReason | None, +) -> FinishReason | str: + if finish_reason is None: + return "" + if ( + finish_reason is genai_types.FinishReason.FINISH_REASON_UNSPECIFIED + or finish_reason is genai_types.FinishReason.OTHER + ): + return "error" + if finish_reason is genai_types.FinishReason.STOP: + return "stop" + if finish_reason is genai_types.FinishReason.MAX_TOKENS: + return "length" + + # If there is no 1:1 mapping to an OTel preferred enum value, use the exact vertex reason + return finish_reason.name diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py new file mode 100644 index 0000000000..bf5b804ba7 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py @@ -0,0 +1,58 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Copied and adapted from +# https://gist.github.com/lmolkova/09ba0de7f68280f1eac27a6acfd9b1a6?permalink_comment_id=5578799#gistcomment-5578799 + +from enum import Enum +from typing import Annotated, Literal + +from pydantic import Base64Encoder, BaseModel, EncodedBytes + + +class Base64OneWayEncoder(Base64Encoder): + @classmethod + def decode(cls, data: bytes) -> bytes: + """NoOp""" + return data + + +Base64EncodedBytes = Annotated[ + bytes, EncodedBytes(encoder=Base64OneWayEncoder) +] + + +class Role(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + TOOL = "tool" + + +class BlobPart(BaseModel): + type: Literal["blob"] = "blob" + mime_type: str + data: Base64EncodedBytes + + class Config: + extra = "allow" + + +class FileDataPart(BaseModel): + type: Literal["file_data"] = "file_data" + mime_type: str + file_uri: str + + class Config: + extra = "allow" diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py index b7dbb5de41..3d6a5a41a2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations import logging +from typing import Any import google.genai - -from opentelemetry._events import Event +from opentelemetry._events import Event, EventLogger, EventLoggerProvider +from opentelemetry.metrics import Meter, MeterProvider from opentelemetry.semconv._incubating.metrics import gen_ai_metrics from opentelemetry.semconv.schemas import Schemas +from opentelemetry.trace import Tracer, TracerProvider from .version import __version__ as _LIBRARY_VERSION @@ -36,19 +39,23 @@ class OTelWrapper: - def __init__(self, tracer, event_logger, meter): + def __init__(self, tracer: Tracer, event_logger: EventLogger, meter: Meter): self._tracer = tracer self._event_logger = event_logger self._meter = meter self._operation_duration_metric = ( gen_ai_metrics.create_gen_ai_client_operation_duration(meter) ) - self._token_usage_metric = ( - gen_ai_metrics.create_gen_ai_client_token_usage(meter) + self._token_usage_metric = gen_ai_metrics.create_gen_ai_client_token_usage( + meter ) @staticmethod - def from_providers(tracer_provider, event_logger_provider, meter_provider): + def from_providers( + tracer_provider: TracerProvider, + event_logger_provider: EventLoggerProvider, + meter_provider: MeterProvider, + ): return OTelWrapper( tracer_provider.get_tracer( _SCOPE_NAME, _LIBRARY_VERSION, _SCHEMA_URL, _SCOPE_ATTRIBUTES @@ -72,21 +79,30 @@ def operation_duration_metric(self): def token_usage_metric(self): return self._token_usage_metric - def log_system_prompt(self, attributes, body): + def log_system_prompt(self, attributes: dict[str, str], body: dict[str, Any]): _logger.debug("Recording system prompt.") event_name = "gen_ai.system.message" self._log_event(event_name, attributes, body) - def log_user_prompt(self, attributes, body): + def log_user_prompt(self, attributes: dict[str, str], body: dict[str, Any]): _logger.debug("Recording user prompt.") event_name = "gen_ai.user.message" self._log_event(event_name, attributes, body) - def 
log_response_content(self, attributes, body): + def log_response_content(self, attributes: dict[str, str], body: dict[str, Any]): _logger.debug("Recording response.") event_name = "gen_ai.choice" self._log_event(event_name, attributes, body) - def _log_event(self, event_name, attributes, body): + def _log_event( + self, event_name: str, attributes: dict[str, str], body: dict[str, Any] + ): event = Event(event_name, body=body, attributes=attributes) self._event_logger.emit(event) + + def log_completion_details( + self, + event: Event, + ) -> None: + _logger.debug("Recording completion details event.") + self._event_logger.emit(event) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py index 7b4cc1924a..80d83ea3ae 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py @@ -27,6 +27,13 @@ from opentelemetry.semconv._incubating.attributes import ( code_attributes, ) +from opentelemetry.instrumentation._semconv import ( + _StabilityMode, + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, +) + +from opentelemetry.util.genai.types import ContentCapturingMode from .flags import is_content_recording_enabled from .otel_wrapper import OTelWrapper @@ -45,9 +52,7 @@ def _to_otel_value(python_value): if isinstance(python_value, list): return [_to_otel_value(x) for x in python_value] if isinstance(python_value, dict): - return { - key: _to_otel_value(val) for (key, val) in python_value.items() - } + return {key: _to_otel_value(val) for (key, val) in python_value.items()} if hasattr(python_value, 
"model_dump"): return python_value.model_dump() if hasattr(python_value, "__dict__"): @@ -76,6 +81,21 @@ def _to_otel_attribute(python_value): return json.dumps(otel_value) +def _is_capture_content_enabled() -> bool: + mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode( + _OpenTelemetryStabilitySignalType.GEN_AI + ) + if mode == _StabilityMode.DEFAULT: + return bool(is_content_recording_enabled(mode)) + if mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL: + capturing_mode = is_content_recording_enabled(mode) + return capturing_mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ] + raise RuntimeError(f"{mode} mode not supported") + + def _create_function_span_name(wrapped_function): """Constructs the span name for a given local function tool call.""" function_name = wrapped_function.__name__ @@ -100,9 +120,7 @@ def _create_function_span_attributes( return result -def _record_function_call_argument( - span, param_name, param_value, include_values -): +def _record_function_call_argument(span, param_name, param_value, include_values): attribute_prefix = f"code.function.parameters.{param_name}" type_attribute = f"{attribute_prefix}.type" span.set_attribute(type_attribute, type(param_value).__name__) @@ -115,7 +133,7 @@ def _record_function_call_arguments( otel_wrapper, wrapped_function, function_args, function_kwargs ): """Records the details about a function invocation as span attributes.""" - include_values = is_content_recording_enabled() + include_values = _is_capture_content_enabled() span = trace.get_current_span() signature = inspect.signature(wrapped_function) params = list(signature.parameters.values()) @@ -130,13 +148,11 @@ def _record_function_call_arguments( def _record_function_call_result(otel_wrapper, wrapped_function, result): """Records the details about a function result as span attributes.""" - include_values = is_content_recording_enabled() + include_values = 
_is_capture_content_enabled() span = trace.get_current_span() span.set_attribute("code.function.return.type", type(result).__name__) if include_values: - span.set_attribute( - "code.function.return.value", _to_otel_attribute(result) - ) + span.set_attribute("code.function.return.value", _to_otel_attribute(result)) def _wrap_sync_tool_function( @@ -151,12 +167,8 @@ def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, extra_span_attributes ) - with otel_wrapper.start_as_current_span( - span_name, attributes=attributes - ): - _record_function_call_arguments( - otel_wrapper, tool_function, args, kwargs - ) + with otel_wrapper.start_as_current_span(span_name, attributes=attributes): + _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) result = tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -176,12 +188,8 @@ async def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, extra_span_attributes ) - with otel_wrapper.start_as_current_span( - span_name, attributes=attributes - ): - _record_function_call_arguments( - otel_wrapper, tool_function, args, kwargs - ) + with otel_wrapper.start_as_current_span(span_name, attributes=attributes): + _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) result = await tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -207,9 +215,7 @@ def wrapped( if tool_or_tools is None: return None if isinstance(tool_or_tools, list): - return [ - wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools - ] + return [wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools] if isinstance(tool_or_tools, dict): return { key: wrapped(value, otel_wrapper, **kwargs) From 9ea22781a190e651f9d66700430e01b8705f45d7 Mon Sep 17 00:00:00 2001 From: Artur 
Gajowniczek Date: Thu, 25 Sep 2025 11:55:20 +0000 Subject: [PATCH 02/19] fix: log roles as str, event attrs as objects. --- .../google_genai/generate_content.py | 53 +++++++++---------- .../instrumentation/google_genai/message.py | 17 +++--- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index cf8bc7da67..fa47ac2a45 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -48,7 +48,7 @@ ) from opentelemetry.semconv.attributes import error_attributes from opentelemetry.trace.span import Span -from opentelemetry.util.genai.types import ContentCapturingMode +from opentelemetry.util.genai.types import ContentCapturingMode, MessagePart from opentelemetry.util.genai.upload_hook import load_upload_hook from .allowlist_util import AllowList @@ -60,7 +60,7 @@ OutputMessage, to_input_messages, to_output_messages, - to_system_instruction, + to_system_instructions, ) from .otel_wrapper import OTelWrapper from .tool_call_wrapper import wrapped as wrapped_tool @@ -259,20 +259,18 @@ def _config_to_system_instruction( def _create_completion_details_attributes( input_messages: list[InputMessage], output_messages: list[OutputMessage], - system_instruction: Union[InputMessage, None], -): - attributes = { - "gen_ai.input.messages": json.dumps( - [dataclasses.asdict(input_message) for input_message in input_messages] - ), - "gen_ai.output.messages": json.dumps( - [dataclasses.asdict(output_message) for output_message in output_messages] - ), + system_instructions: list[MessagePart], 
+ as_str: bool = False, +) -> dict[str, Any]: + attributes: dict[str, Any] = { + "gen_ai.input.messages": [dataclasses.asdict(input_message) for input_message in input_messages], + "gen_ai.output.messages": [dataclasses.asdict(output_message) for output_message in output_messages], } - if system_instruction: - attributes["gen_ai.system.instructions"] = json.dumps( - dataclasses.asdict(system_instruction) - ) + if system_instructions: + attributes["gen_ai.system.instructions"] = [dataclasses.asdict(sys_instr) for sys_instr in system_instructions] + + if as_str: + return {k: json.dumps(v) for k, v in attributes.items()} return attributes @@ -449,46 +447,45 @@ def _maybe_log_completion_details( attributes = { gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system, } - system_instruction = None + system_instructions = [] if system_content := _config_to_system_instruction(config): - system_instruction = to_system_instruction( + system_instructions = to_system_instructions( content=transformers.t_contents(system_content)[0] ) input_messages = to_input_messages(contents=transformers.t_contents(request)) output_messages = to_output_messages(candidates=response.candidates or []) - completion_details_attributes = _create_completion_details_attributes( - input_messages, output_messages, system_instruction - ) + span = None if self._content_recording_enabled in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: + completion_details_attributes = _create_completion_details_attributes( + input_messages, output_messages, system_instructions, as_str=True, + ) span = trace.get_current_span() span.set_attributes(completion_details_attributes) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: + completion_details_attributes = _create_completion_details_attributes( + input_messages, output_messages, system_instructions, + ) attributes.update(completion_details_attributes) event = 
Event(name="gen_ai.completion.details", attributes=attributes) hook = load_upload_hook() hook.upload( inputs=input_messages, outputs=output_messages, - system_instruction=( - system_instruction.parts if system_instruction else [] - ), + system_instruction=system_instructions, span=span, log_record=event, ) - # TODO Cannot access attribute shutdown for class UploadHook - # hook.shutdown() - self._otel_wrapper.log_completion_details( - event=event, - ) + hook.shutdown() + self._otel_wrapper.log_completion_details(event=event) def _maybe_log_system_instruction( self, config: Optional[GenerateContentConfigOrDict] = None diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index eaf470892e..2f56e0d484 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -39,7 +39,7 @@ def to_input_messages( *, contents: list[genai_types.Content], ) -> list[InputMessage]: - return [_to_input_message(content) for content in contents]) + return [_to_input_message(content) for content in contents] def to_output_messages( *, @@ -63,11 +63,14 @@ def content_to_output_message( ) return [message for message in messages if message is not None] -def to_system_instruction( +def to_system_instructions( *, content: genai_types.Content, -) -> InputMessage: - return _to_input_message(content) +) -> list[MessagePart]: + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) + return [part for part in parts if part is not None] def _to_input_message( content: genai_types.Content, @@ -114,11 +117,11 @@ def tool_call_id(name: str | None) -> str: 
_logger.info("Unknown part dropped from telemetry %s", part) return None -def _to_role(role: str | None) -> Role | str: +def _to_role(role: str | None) -> str: if role == "user": - return Role.USER + return Role.USER.value if role == "model": - return Role.ASSISTANT + return Role.ASSISTANT.value return "" From eeed7d5b7695313be88f0ed174707e5471bd9a6f Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 11:55:39 +0000 Subject: [PATCH 03/19] fix: proper event name --- .../instrumentation/google_genai/generate_content.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index fa47ac2a45..9b653d4a2c 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -475,7 +475,7 @@ def _maybe_log_completion_details( input_messages, output_messages, system_instructions, ) attributes.update(completion_details_attributes) - event = Event(name="gen_ai.completion.details", attributes=attributes) + event = Event(name="gen_ai.client.inference.operation.details", attributes=attributes) hook = load_upload_hook() hook.upload( inputs=input_messages, From e1738c2a6d89d2ff00e6dce1e9477ce6c5a37886 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 12:52:30 +0000 Subject: [PATCH 04/19] refactor: remove unused message models. 
--- .../instrumentation/google_genai/message.py | 47 +++++++-------- .../google_genai/message_models.py | 58 ------------------- 2 files changed, 21 insertions(+), 84 deletions(-) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index 2f56e0d484..760589d1f8 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -15,32 +15,37 @@ from __future__ import annotations import logging +from enum import Enum from google.genai import types as genai_types from opentelemetry.util.genai.types import ( + FinishReason, InputMessage, - OutputMessage, MessagePart, - FinishReason, + OutputMessage, Text, ToolCall, ToolCallResponse, ) -from .message_models import ( - # BlobPart, - # FileDataPart, - Role, -) + +class Role(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + TOOL = "tool" + _logger = logging.getLogger(__name__) + def to_input_messages( *, contents: list[genai_types.Content], ) -> list[InputMessage]: return [_to_input_message(content) for content in contents] + def to_output_messages( *, candidates: list[genai_types.Candidate], @@ -58,32 +63,29 @@ def content_to_output_message( parts=message.parts, ) - messages = ( - content_to_output_message(candidate) for candidate in candidates - ) + messages = (content_to_output_message(candidate) for candidate in candidates) return [message for message in messages if message is not None] + def to_system_instructions( *, content: 
genai_types.Content, ) -> list[MessagePart]: - parts = ( - _to_part(part, idx) for idx, part in enumerate(content.parts or []) - ) + parts = (_to_part(part, idx) for idx, part in enumerate(content.parts or [])) return [part for part in parts if part is not None] + def _to_input_message( content: genai_types.Content, ) -> InputMessage: - parts = ( - _to_part(part, idx) for idx, part in enumerate(content.parts or []) - ) + parts = (_to_part(part, idx) for idx, part in enumerate(content.parts or [])) return InputMessage( role=_to_role(content.role), # filter Nones parts=[part for part in parts if part is not None], ) + def _to_part(part: genai_types.Part, idx: int) -> MessagePart | None: def tool_call_id(name: str | None) -> str: if name: @@ -93,30 +95,23 @@ def tool_call_id(name: str | None) -> str: if (text := part.text) is not None: return Text(content=text) - # if data := part.inline_data: # TODO ??? - # return BlobPart(mime_type=data.mime_type or "", data=data.data or b"") - - # if data := part.file_data: # TODO ??? - # return FileDataPart( - # mime_type=data.mime_type or "", file_uri=data.file_uri or "" - # ) - if call := part.function_call: return ToolCall( - id=call.id or tool_call_id(call.name), # TODO ??? + id=call.id or tool_call_id(call.name), name=call.name or "", arguments=call.args, ) if response := part.function_response: return ToolCallResponse( - id=response.id or tool_call_id(response.name), # TODO ??? 
+ id=response.id or tool_call_id(response.name), response=response.response, ) _logger.info("Unknown part dropped from telemetry %s", part) return None + def _to_role(role: str | None) -> str: if role == "user": return Role.USER.value diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py deleted file mode 100644 index bf5b804ba7..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message_models.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Copied and adapted from -# https://gist.github.com/lmolkova/09ba0de7f68280f1eac27a6acfd9b1a6?permalink_comment_id=5578799#gistcomment-5578799 - -from enum import Enum -from typing import Annotated, Literal - -from pydantic import Base64Encoder, BaseModel, EncodedBytes - - -class Base64OneWayEncoder(Base64Encoder): - @classmethod - def decode(cls, data: bytes) -> bytes: - """NoOp""" - return data - - -Base64EncodedBytes = Annotated[ - bytes, EncodedBytes(encoder=Base64OneWayEncoder) -] - - -class Role(str, Enum): - SYSTEM = "system" - USER = "user" - ASSISTANT = "assistant" - TOOL = "tool" - - -class BlobPart(BaseModel): - type: Literal["blob"] = "blob" - mime_type: str - data: Base64EncodedBytes - - class Config: - extra = "allow" - - -class FileDataPart(BaseModel): - type: Literal["file_data"] = "file_data" - mime_type: str - file_uri: str - - class Config: - extra = "allow" From f1feaf7373b9d988e2e4a8c43094f2fdcd3fa454 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 12:54:31 +0000 Subject: [PATCH 05/19] refactor: use OTel imported semconv attributes. 
--- .../google_genai/generate_content.py | 36 +++++++++++++------ 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 9b653d4a2c..a786abf2b9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -48,7 +48,12 @@ ) from opentelemetry.semconv.attributes import error_attributes from opentelemetry.trace.span import Span -from opentelemetry.util.genai.types import ContentCapturingMode, MessagePart +from opentelemetry.util.genai.types import ( + ContentCapturingMode, + MessagePart, + InputMessage, + OutputMessage, +) from opentelemetry.util.genai.upload_hook import load_upload_hook from .allowlist_util import AllowList @@ -56,8 +61,6 @@ from .dict_util import flatten_dict from .flags import is_content_recording_enabled from .message import ( - InputMessage, - OutputMessage, to_input_messages, to_output_messages, to_system_instructions, @@ -263,11 +266,17 @@ def _create_completion_details_attributes( as_str: bool = False, ) -> dict[str, Any]: attributes: dict[str, Any] = { - "gen_ai.input.messages": [dataclasses.asdict(input_message) for input_message in input_messages], - "gen_ai.output.messages": [dataclasses.asdict(output_message) for output_message in output_messages], + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: [ + dataclasses.asdict(input_message) for input_message in input_messages + ], + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: [ + dataclasses.asdict(output_message) for output_message in output_messages + ], } if system_instructions: - 
attributes["gen_ai.system.instructions"] = [dataclasses.asdict(sys_instr) for sys_instr in system_instructions] + attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS] = [ + dataclasses.asdict(sys_instr) for sys_instr in system_instructions + ] if as_str: return {k: json.dumps(v) for k, v in attributes.items()} @@ -455,15 +464,16 @@ def _maybe_log_completion_details( input_messages = to_input_messages(contents=transformers.t_contents(request)) output_messages = to_output_messages(candidates=response.candidates or []) - - span = None if self._content_recording_enabled in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: completion_details_attributes = _create_completion_details_attributes( - input_messages, output_messages, system_instructions, as_str=True, + input_messages, + output_messages, + system_instructions, + as_str=True, ) span = trace.get_current_span() span.set_attributes(completion_details_attributes) @@ -472,10 +482,14 @@ def _maybe_log_completion_details( ContentCapturingMode.SPAN_AND_EVENT, ]: completion_details_attributes = _create_completion_details_attributes( - input_messages, output_messages, system_instructions, + input_messages, + output_messages, + system_instructions, ) attributes.update(completion_details_attributes) - event = Event(name="gen_ai.client.inference.operation.details", attributes=attributes) + event = Event( + name="gen_ai.client.inference.operation.details", attributes=attributes + ) hook = load_upload_hook() hook.upload( inputs=input_messages, From ee911f570cf067f775ccda6ad4d697d2f54ca3b8 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 25 Sep 2025 13:28:45 +0000 Subject: [PATCH 06/19] refactor: Inject upload_hook in Instrumentor. 
--- .../google_genai/generate_content.py | 21 +++++++++++++++---- .../google_genai/instrumentor.py | 3 +++ 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index a786abf2b9..73c178c855 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -54,7 +54,7 @@ InputMessage, OutputMessage, ) -from opentelemetry.util.genai.upload_hook import load_upload_hook +from opentelemetry.util.genai.upload_hook import UploadHook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG @@ -290,12 +290,14 @@ def __init__( models_object: Union[Models, AsyncModels], otel_wrapper: OTelWrapper, model: str, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): self._start_time = time.time_ns() self._otel_wrapper = otel_wrapper self._genai_system = _determine_genai_system(models_object) self._genai_request_model = model + self.upload_hook = upload_hook self._finish_reasons_set = set() self._error_type = None self._input_tokens = 0 @@ -490,15 +492,13 @@ def _maybe_log_completion_details( event = Event( name="gen_ai.client.inference.operation.details", attributes=attributes ) - hook = load_upload_hook() - hook.upload( + self.upload_hook.upload( inputs=input_messages, outputs=output_messages, system_instruction=system_instructions, span=span, log_record=event, ) - hook.shutdown() self._otel_wrapper.log_completion_details(event=event) def _maybe_log_system_instruction( @@ -689,6 +689,7 @@ def 
_record_duration_metric(self): def _create_instrumented_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content @@ -706,6 +707,7 @@ def instrumented_generate_content( self, otel_wrapper, model, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -746,6 +748,7 @@ def instrumented_generate_content( def _create_instrumented_generate_content_stream( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content_stream @@ -763,6 +766,7 @@ def instrumented_generate_content_stream( self, otel_wrapper, model, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -803,6 +807,7 @@ def instrumented_generate_content_stream( def _create_instrumented_async_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.async_generate_content @@ -820,6 +825,7 @@ async def instrumented_generate_content( self, otel_wrapper, model, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -861,6 +867,7 @@ async def instrumented_generate_content( def _create_instrumented_async_generate_content_stream( # type: ignore snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.async_generate_content_stream @@ -878,6 +885,7 @@ async def instrumented_generate_content_stream( self, otel_wrapper, model, + upload_hook, 
generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -936,28 +944,33 @@ def uninstrument_generate_content(snapshot: object): def instrument_generate_content( otel_wrapper: OTelWrapper, + upload_hook: UploadHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ) -> object: snapshot = _MethodsSnapshot() Models.generate_content = _create_instrumented_generate_content( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) Models.generate_content_stream = _create_instrumented_generate_content_stream( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content = _create_instrumented_async_generate_content( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content_stream = ( _create_instrumented_async_generate_content_stream( snapshot, otel_wrapper, + upload_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py index 8a3f792651..5983b18b5a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py @@ -18,6 +18,7 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider +from opentelemetry.util.genai.upload_hook import 
load_upload_hook from .allowlist_util import AllowList from .generate_content import ( @@ -58,8 +59,10 @@ def _instrument(self, **kwargs: Any): event_logger_provider=event_logger_provider, meter_provider=meter_provider, ) + upload_hook = kwargs.get("upload_hook") or load_upload_hook() self._generate_content_snapshot = instrument_generate_content( otel_wrapper, + upload_hook, generate_content_config_key_allowlist=self._generate_content_config_key_allowlist, ) From 8583327e8c785c8453c6c6d9472f000f3ec84d4a Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Mon, 29 Sep 2025 09:23:16 +0000 Subject: [PATCH 07/19] refactor: rename upload hook to completion hook. --- .../google_genai/generate_content.py | 34 +++++++++---------- .../google_genai/instrumentor.py | 6 ++-- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 73c178c855..971a0670cc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -54,7 +54,7 @@ InputMessage, OutputMessage, ) -from opentelemetry.util.genai.upload_hook import UploadHook +from opentelemetry.util.genai.completion_hook import CompletionHook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG @@ -290,14 +290,14 @@ def __init__( models_object: Union[Models, AsyncModels], otel_wrapper: OTelWrapper, model: str, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): self._start_time = time.time_ns() self._otel_wrapper = 
otel_wrapper self._genai_system = _determine_genai_system(models_object) self._genai_request_model = model - self.upload_hook = upload_hook + self.completion_hook = completion_hook self._finish_reasons_set = set() self._error_type = None self._input_tokens = 0 @@ -492,7 +492,7 @@ def _maybe_log_completion_details( event = Event( name="gen_ai.client.inference.operation.details", attributes=attributes ) - self.upload_hook.upload( + self.completion_hook.on_completion( inputs=input_messages, outputs=output_messages, system_instruction=system_instructions, @@ -689,7 +689,7 @@ def _record_duration_metric(self): def _create_instrumented_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content @@ -707,7 +707,7 @@ def instrumented_generate_content( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -748,7 +748,7 @@ def instrumented_generate_content( def _create_instrumented_generate_content_stream( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.generate_content_stream @@ -766,7 +766,7 @@ def instrumented_generate_content_stream( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -807,7 +807,7 @@ def instrumented_generate_content_stream( def _create_instrumented_async_generate_content( snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): 
wrapped_func = snapshot.async_generate_content @@ -825,7 +825,7 @@ async def instrumented_generate_content( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -867,7 +867,7 @@ async def instrumented_generate_content( def _create_instrumented_async_generate_content_stream( # type: ignore snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ): wrapped_func = snapshot.async_generate_content_stream @@ -885,7 +885,7 @@ async def instrumented_generate_content_stream( self, otel_wrapper, model, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) with helper.start_span_as_current_span( @@ -944,33 +944,33 @@ def uninstrument_generate_content(snapshot: object): def instrument_generate_content( otel_wrapper: OTelWrapper, - upload_hook: UploadHook, + completion_hook: CompletionHook, generate_content_config_key_allowlist: Optional[AllowList] = None, ) -> object: snapshot = _MethodsSnapshot() Models.generate_content = _create_instrumented_generate_content( snapshot, otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) Models.generate_content_stream = _create_instrumented_generate_content_stream( snapshot, otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content = _create_instrumented_async_generate_content( snapshot, otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) AsyncModels.generate_content_stream = ( _create_instrumented_async_generate_content_stream( snapshot, otel_wrapper, - upload_hook, + completion_hook, 
generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py index 5983b18b5a..c1110663ff 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py @@ -18,7 +18,7 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider -from opentelemetry.util.genai.upload_hook import load_upload_hook +from opentelemetry.util.genai.completion_hook import load_completion_hook from .allowlist_util import AllowList from .generate_content import ( @@ -59,10 +59,10 @@ def _instrument(self, **kwargs: Any): event_logger_provider=event_logger_provider, meter_provider=meter_provider, ) - upload_hook = kwargs.get("upload_hook") or load_upload_hook() + completion_hook = kwargs.get("completion_hook") or load_completion_hook() self._generate_content_snapshot = instrument_generate_content( otel_wrapper, - upload_hook, + completion_hook, generate_content_config_key_allowlist=self._generate_content_config_key_allowlist, ) From 47d57289e0d04fb8d006238b4c5bcaaba0987aae Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Tue, 30 Sep 2025 16:07:28 +0000 Subject: [PATCH 08/19] test: add tests for non streaming case and tool_call_wrapper. 
--- .../generate_content/nonstreaming_base.py | 68 +++++++++++++------ .../tests/requirements.latest.txt | 5 +- .../tests/utils/test_tool_call_wrapper.py | 46 +++++++++++-- 3 files changed, 91 insertions(+), 28 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 39f1dfe927..7b79c40594 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -13,8 +13,15 @@ # limitations under the License. import json -import os import unittest +from unittest.mock import patch + +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, +) +from opentelemetry.util.genai.types import ContentCapturingMode from .base import TestCase @@ -111,10 +118,8 @@ def test_generated_span_counts_tokens(self): self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123) self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456) + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_records_system_prompt_as_log(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) config = {"system_instruction": "foo"} self.configure_valid_response() self.generate_content( @@ -125,10 +130,8 @@ def test_records_system_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "foo") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): - 
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "false" - ) config = {"system_instruction": "foo"} self.configure_valid_response() self.generate_content( @@ -139,20 +142,16 @@ def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present( self, ): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_does_not_have_event_named("gen_ai.system.message") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_records_user_prompt_as_log(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.user.message") @@ -160,10 +159,8 @@ def test_records_user_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "Some input") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "false" - ) self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.user.message") @@ -171,10 +168,8 @@ def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") 
self.assertEqual(event_record.body["content"], "") + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) def test_records_response_as_log(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "true" - ) self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.choice") @@ -184,10 +179,8 @@ def test_records_response_as_log(self): "Some response content", json.dumps(event_record.body["content"]) ) + @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) def test_does_not_record_response_as_log_if_disabled_by_env(self): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( - "false" - ) self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.choice") @@ -195,6 +188,37 @@ def test_does_not_record_response_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") + def test_new_semconv_record_response_as_log(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + self.configure_valid_response(text="Some response content") + self.generate_content(model="gemini-2.0-flash", contents="Some input") + + if mode in [ + 
ContentCapturingMode.NO_CONTENT, + ContentCapturingMode.SPAN_ONLY, + ]: + self.otel.assert_does_not_have_event_named("gen_ai.client.inference.operation.details") + else: + self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") + + self.tearDown() + def test_records_metrics_data(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt index 32cf3422f5..3ee7b91536 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.latest.txt @@ -41,9 +41,10 @@ pytest-asyncio==0.21.0 pytest-vcr==1.0.2 google-auth==2.38.0 -google-genai==1.0.0 +google-genai==1.32.0 # Install locally from the folder. This path is relative to the # root directory, given invocation from "tox" at root level. 
-e opentelemetry-instrumentation --e instrumentation-genai/opentelemetry-instrumentation-google-genai \ No newline at end of file +-e instrumentation-genai/opentelemetry-instrumentation-google-genai +-e util/opentelemetry-util-genai diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py index 3c8aee3f70..61d4799913 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py @@ -17,14 +17,16 @@ from unittest.mock import patch from google.genai import types as genai_types - from opentelemetry._events import get_event_logger_provider -from opentelemetry.instrumentation.google_genai import ( - otel_wrapper, - tool_call_wrapper, +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, ) +from opentelemetry.instrumentation.google_genai import otel_wrapper, tool_call_wrapper from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider +from opentelemetry.util.genai.types import ContentCapturingMode from ..common import otel_mocker @@ -278,3 +280,39 @@ def somefunction(arg=None): span.attributes["code.function.parameters.arg.value"], '[123, "abc"]', ) + + def test_handle_with_new_sem_conv(self): + def somefunction(arg=None): + pass + + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + 
_OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + wrapped_somefunction = self.wrap(somefunction) + wrapped_somefunction(12345) + + span = self.otel.get_span_named("execute_tool somefunction") + + print(mode) + if mode in [ + ContentCapturingMode.NO_CONTENT, + ContentCapturingMode.EVENT_ONLY, + ]: + self.assertNotIn("code.function.parameters.arg.value", span.attributes) + else: + self.assertIn("code.function.parameters.arg.value", span.attributes) + self.tearDown() From b3a6efba26f6899fa287cb2acbedae8ca850eed3 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Tue, 30 Sep 2025 17:35:18 +0000 Subject: [PATCH 09/19] test: add tool call instrumentation tests and nonstreaming recording in spans test. --- .../generate_content/nonstreaming_base.py | 40 +++++- .../test_tool_call_instrumentation.py | 129 ++++++++++++++++++ 2 files changed, 167 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 7b79c40594..8b919ffe62 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -16,6 +16,7 @@ import unittest from unittest.mock import patch +from google.genai.types import GenerateContentConfig from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, @@ -188,7 +189,7 @@ def test_does_not_record_response_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") 
self.assertEqual(event_record.body["content"], "") - def test_new_semconv_record_response_as_log(self): + def test_new_semconv_record_completion_as_log(self): for mode in ContentCapturingMode: patched_environ = patch.dict( "os.environ", @@ -218,7 +219,42 @@ def test_new_semconv_record_response_as_log(self): self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") self.tearDown() - + + def test_new_semconv_record_completion_in_span(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + self.configure_valid_response(text="Some response content") + self.generate_content(model="gemini-2.0-flash", contents="Some input", config=GenerateContentConfig(system_instruction="System instruction")) + span = self.otel.get_span_named("generate_content gemini-2.0-flash") + if mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + self.assertEqual(span.attributes["gen_ai.input.messages"], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') + self.assertEqual(span.attributes["gen_ai.output.messages"], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') + self.assertEqual(span.attributes["gen_ai.system_instructions"], '[{"content": "System instruction", "type": "text"}]') + else: + self.assertNotIn("gen_ai.input.messages", span.attributes) + self.assertNotIn("gen_ai.output.messages", span.attributes) + 
self.assertNotIn("gen_ai.system_instructions", span.attributes) + + self.tearDown() + def test_records_metrics_data(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py index 7e06422812..7439944e37 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py @@ -15,6 +15,12 @@ from unittest.mock import patch import google.genai.types as genai_types +from opentelemetry.instrumentation._semconv import ( + _OpenTelemetrySemanticConventionStability, + _OpenTelemetryStabilitySignalType, + _StabilityMode, +) +from opentelemetry.util.genai.types import ContentCapturingMode from .base import TestCase @@ -275,3 +281,126 @@ def somefunction(x, y=2): self.assertNotIn( "code.function.return.value", generated_span.attributes ) + + def test_new_semconv_tool_calls_record_parameter_values(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + calls = [] + + def handle(*args, **kwargs): + calls.append((args, kwargs)) + return "some result" + + def 
somefunction(someparam, otherparam=2): + print("someparam=%s, otherparam=%s", someparam, otherparam) + + self.mock_generate_content.side_effect = handle + self.client.models.generate_content( + model="some-model-name", + contents="Some content", + config={ + "tools": [somefunction], + }, + ) + self.assertEqual(len(calls), 1) + config = calls[0][1]["config"] + tools = config.tools + wrapped_somefunction = tools[0] + wrapped_somefunction(123, otherparam="abc") + self.otel.assert_has_span_named("execute_tool somefunction") + generated_span = self.otel.get_span_named("execute_tool somefunction") + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.someparam.type" + ], + "int", + ) + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.otherparam.type" + ], + "str", + ) + if mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + self.assertEqual(generated_span.attributes["code.function.parameters.someparam.value"], 123) + self.assertEqual(generated_span.attributes["code.function.parameters.otherparam.value"], "abc") + else: + self.assertNotIn("code.function.parameters.someparam.value", generated_span.attributes) + self.assertNotIn("code.function.parameters.otherparam.value", generated_span.attributes) + self.tearDown() + + def test_new_semconv_tool_calls_record_return_values(self): + for mode in ContentCapturingMode: + patched_environ = patch.dict( + "os.environ", + { + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": mode.name, + "OTEL_SEMCONV_STABILITY_OPT_IN": "gen_ai_latest_experimental", + }, + ) + patched_otel_mapping = patch.dict( + _OpenTelemetrySemanticConventionStability._OTEL_SEMCONV_STABILITY_SIGNAL_MAPPING, + { + _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL + }, + ) + with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + self.setUp() + with patched_environ, patched_otel_mapping: + calls = [] + + def handle(*args, 
**kwargs): + calls.append((args, kwargs)) + return "some result" + + def somefunction(x, y=2): + return x + y + + self.mock_generate_content.side_effect = handle + self.client.models.generate_content( + model="some-model-name", + contents="Some content", + config={ + "tools": [somefunction], + }, + ) + self.assertEqual(len(calls), 1) + config = calls[0][1]["config"] + tools = config.tools + wrapped_somefunction = tools[0] + wrapped_somefunction(123) + self.otel.assert_has_span_named("execute_tool somefunction") + generated_span = self.otel.get_span_named("execute_tool somefunction") + self.assertEqual( + generated_span.attributes["code.function.return.type"], "int" + ) + if mode in [ + ContentCapturingMode.SPAN_ONLY, + ContentCapturingMode.SPAN_AND_EVENT, + ]: + self.assertIn( + "code.function.return.value", generated_span.attributes + ) + else: + self.assertNotIn( + "code.function.return.value", generated_span.attributes + ) + self.tearDown() From 2eefc39b039b55a55a0768d1ca7e489e3417bfae Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 1 Oct 2025 12:39:36 +0000 Subject: [PATCH 10/19] fix: remove print --- .../tests/utils/test_tool_call_wrapper.py | 1 - 1 file changed, 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py index 61d4799913..6bfa53cf5e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py @@ -307,7 +307,6 @@ def somefunction(arg=None): span = self.otel.get_span_named("execute_tool somefunction") - print(mode) if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.EVENT_ONLY, From 166c08188229dc598efba71710a52ec94966ec34 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Wed, 1 
Oct 2025 14:18:14 +0000 Subject: [PATCH 11/19] feature: add blobpart and filepart to message handling --- .../instrumentation/google_genai/message.py | 12 ++++++++++++ .../src/opentelemetry/util/genai/types.py | 17 +++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index 760589d1f8..d888efb4a7 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -19,6 +19,8 @@ from google.genai import types as genai_types from opentelemetry.util.genai.types import ( + BlobPart, + FileDataPart, FinishReason, InputMessage, MessagePart, @@ -95,6 +97,16 @@ def tool_call_id(name: str | None) -> str: if (text := part.text) is not None: return Text(content=text) + if data := part.inline_data: + return BlobPart( + mime_type=data.mime_type or "", data=data.data or b"" + ) + + if data := part.file_data: + return FileDataPart( + mime_type=data.mime_type or "", uri=data.file_uri or "" + ) + if call := part.function_call: return ToolCall( id=call.id or tool_call_id(call.name), diff --git a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py index 569e7e7e00..4a3952d16e 100644 --- a/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py +++ b/util/opentelemetry-util-genai/src/opentelemetry/util/genai/types.py @@ -55,6 +55,23 @@ class Text: type: Literal["text"] = "text" +@dataclass +class BlobPart: + data: bytes + mime_type: str + type: Literal["blob"] = "blob" + + +@dataclass +class FileDataPart: + mime_type: str + uri: str + 
type: Literal["file_data"] = "file_data" + + class Config: + extra = "allow" + + MessagePart = Union[Text, ToolCall, ToolCallResponse, Any] From ae4da6428ebeafb84cb77ddf7c78f9581f6d4cfa Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 11:06:49 +0000 Subject: [PATCH 12/19] fix: encode bytes as base64 when dumping to json string. --- .../instrumentation/google_genai/generate_content.py | 3 ++- .../instrumentation/google_genai/message.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 971a0670cc..07cdd0d439 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -61,6 +61,7 @@ from .dict_util import flatten_dict from .flags import is_content_recording_enabled from .message import ( + Base64JsonEncoder, to_input_messages, to_output_messages, to_system_instructions, @@ -279,7 +280,7 @@ def _create_completion_details_attributes( ] if as_str: - return {k: json.dumps(v) for k, v in attributes.items()} + return {k: json.dumps(v, cls=Base64JsonEncoder) for k, v in attributes.items()} return attributes diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index d888efb4a7..ff94d864e4 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -14,8 +14,11 @@ from __future__ import annotations +from base64 import b64encode +import json import logging from enum import Enum +from typing import Any from google.genai import types as genai_types from opentelemetry.util.genai.types import ( @@ -38,6 +41,13 @@ class Role(str, Enum): TOOL = "tool" +class Base64JsonEncoder(json.JSONEncoder): + def default(self, o: Any) -> Any: + if isinstance(o, bytes): + return b64encode(o).decode() + return super().default(o) + + _logger = logging.getLogger(__name__) From 13b6f2ea7a6089a063aac8f6c6538eddfc1154cf Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 13:12:59 +0000 Subject: [PATCH 13/19] fix: always call completion hook, independently of recording settings. --- .../google_genai/generate_content.py | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 07cdd0d439..185a9d36fc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -467,7 +467,17 @@ def _maybe_log_completion_details( input_messages = to_input_messages(contents=transformers.t_contents(request)) output_messages = to_output_messages(candidates=response.candidates or []) - span = None + span = trace.get_current_span() + event = Event( + name="gen_ai.client.inference.operation.details", attributes=attributes + ) + self.completion_hook.on_completion( + inputs=input_messages, + 
outputs=output_messages, + system_instruction=system_instructions, + span=span, + log_record=event, + ) if self._content_recording_enabled in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, @@ -478,7 +488,6 @@ def _maybe_log_completion_details( system_instructions, as_str=True, ) - span = trace.get_current_span() span.set_attributes(completion_details_attributes) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, @@ -489,18 +498,11 @@ def _maybe_log_completion_details( output_messages, system_instructions, ) - attributes.update(completion_details_attributes) - event = Event( - name="gen_ai.client.inference.operation.details", attributes=attributes - ) - self.completion_hook.on_completion( - inputs=input_messages, - outputs=output_messages, - system_instruction=system_instructions, - span=span, - log_record=event, - ) - self._otel_wrapper.log_completion_details(event=event) + event.attributes = { + **(event.attributes or {}), + **completion_details_attributes, + } + self._otel_wrapper.log_completion_details(event=event) def _maybe_log_system_instruction( self, config: Optional[GenerateContentConfigOrDict] = None From 058da13430eaa2566075cb621b7533cdacbf6007 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 13:23:19 +0000 Subject: [PATCH 14/19] test: update requirements for instrumentation-google-genai oldest env --- .../tests/requirements.oldest.txt | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt index 50fc45f39f..19f08bfecc 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt @@ -21,11 +21,12 @@ pytest-vcr==1.0.2 google-auth==2.15.0 
google-genai==1.0.0 -opentelemetry-api==1.31.1 -opentelemetry-sdk==1.31.1 -opentelemetry-semantic-conventions==0.52b1 -opentelemetry-instrumentation==0.52b1 +opentelemetry-api==1.37.0 +opentelemetry-sdk==1.37.0 +opentelemetry-semantic-conventions==0.58b0 +opentelemetry-instrumentation==0.58b0 # Install locally from the folder. This path is relative to the # root directory, given invocation from "tox" at root level. -e instrumentation-genai/opentelemetry-instrumentation-google-genai +-e util/opentelemetry-util-genai From 17b9c0aa4e5aeda13711bf7ca9d89a14dba04260 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Thu, 2 Oct 2025 14:33:16 +0000 Subject: [PATCH 15/19] test: bump google-genai lib version in -oldest test env. --- .../tests/requirements.oldest.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt index 19f08bfecc..2228363418 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/requirements.oldest.txt @@ -20,7 +20,7 @@ pytest-asyncio==0.21.0 pytest-vcr==1.0.2 google-auth==2.15.0 -google-genai==1.0.0 +google-genai==1.32.0 opentelemetry-api==1.37.0 opentelemetry-sdk==1.37.0 opentelemetry-semantic-conventions==0.58b0 From 057d956206c426b8c915fdfd1581248629c6de29 Mon Sep 17 00:00:00 2001 From: Artur Gajowniczek Date: Fri, 3 Oct 2025 12:08:02 +0000 Subject: [PATCH 16/19] test: fix event recording test. 
--- .../generate_content/nonstreaming_base.py | 43 +++++++++++++------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 8b919ffe62..039d077b72 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -17,11 +17,15 @@ from unittest.mock import patch from google.genai.types import GenerateContentConfig +from opentelemetry._events import Event from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, _StabilityMode, ) +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes, +) from opentelemetry.util.genai.types import ContentCapturingMode from .base import TestCase @@ -204,20 +208,33 @@ def test_new_semconv_record_completion_as_log(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + content = "Some input" + output = "Some response content" + sys_instr = "System instruction" + with self.subTest(f"mode: {mode}", patched_environ=patched_environ): self.setUp() with patched_environ, patched_otel_mapping: - self.configure_valid_response(text="Some response content") - self.generate_content(model="gemini-2.0-flash", contents="Some input") - + self.configure_valid_response(text=output) + self.generate_content(model="gemini-2.0-flash", contents=content, config=GenerateContentConfig(system_instruction=sys_instr)) + self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") + event = 
self.otel.get_event_named("gen_ai.client.inference.operation.details") if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.SPAN_ONLY, ]: - self.otel.assert_does_not_have_event_named("gen_ai.client.inference.operation.details") + self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, event.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, event.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, event.attributes) else: - self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") - + attrs = { + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: ({"role": "user", "parts": ({"content": content, "type": "text"},)},), + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: ({"role": "assistant", "parts": ({"content": output, "type": "text"},), "finish_reason": ""},), + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: ({"content": sys_instr, "type": "text"},) + } + expected_event = Event("gen_ai.client.inference.operation.details", attributes=attrs) + self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES]) + self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES]) + self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], expected_event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS]) self.tearDown() def test_new_semconv_record_completion_in_span(self): @@ -245,13 +262,13 @@ def test_new_semconv_record_completion_in_span(self): ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - self.assertEqual(span.attributes["gen_ai.input.messages"], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') - self.assertEqual(span.attributes["gen_ai.output.messages"], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') - 
self.assertEqual(span.attributes["gen_ai.system_instructions"], '[{"content": "System instruction", "type": "text"}]') + self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') + self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') + self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], '[{"content": "System instruction", "type": "text"}]') else: - self.assertNotIn("gen_ai.input.messages", span.attributes) - self.assertNotIn("gen_ai.output.messages", span.attributes) - self.assertNotIn("gen_ai.system_instructions", span.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, span.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, span.attributes) + self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, span.attributes) self.tearDown() From 486796a98dff23f53ec635946fea1f55dcd1337d Mon Sep 17 00:00:00 2001 From: Dylan Russell Date: Mon, 6 Oct 2025 14:22:03 +0000 Subject: [PATCH 17/19] Update FakeCredentials --- .../tests/common/auth.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py index 88831a3e9a..f46de7ca3a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py @@ -16,5 +16,8 @@ class FakeCredentials(google.auth.credentials.AnonymousCredentials): + def __init__(self): + self.token = 'a' + self._quota_project_id = 'a' def refresh(self, request): pass From 205aa8873c1dbad4ee76af2c505608ef31260c40 Mon Sep 17 00:00:00 2001 From: Dylan Russell Date: Mon, 6 Oct 
2025 15:53:17 +0000 Subject: [PATCH 18/19] Fix tests --- .../google_genai/generate_content.py | 90 ++++++--- .../google_genai/instrumentor.py | 4 +- .../instrumentation/google_genai/message.py | 19 +- .../google_genai/otel_wrapper.py | 21 ++- .../google_genai/tool_call_wrapper.py | 41 ++-- .../tests/common/auth.py | 5 +- ...mini-1.5-flash-002-vertexaiapi-async].yaml | 94 --------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 94 --------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 ++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 102 ++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 94 --------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 94 --------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 ++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 102 ++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 97 ---------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 102 ---------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 102 ++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 109 +++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 99 ---------- ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 99 ---------- ...t-gemini-2.5-flash-vertexaiapi-async].yaml | 123 ++++++++++++ ...nt-gemini-2.5-flash-vertexaiapi-sync].yaml | 109 +++++++++++ .../generate_content/nonstreaming_base.py | 178 +++++++++++++++--- .../tests/generate_content/test_e2e.py | 14 +- .../test_tool_call_instrumentation.py | 64 +++++-- .../tests/utils/test_tool_call_wrapper.py | 24 ++- 26 files changed, 1200 insertions(+), 884 deletions(-) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 
100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml delete mode 100644 
instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 185a9d36fc..60d6136e4e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -35,6 +35,7 @@ GenerateContentConfigOrDict, GenerateContentResponse, ) + from opentelemetry import trace from opentelemetry._events import Event from opentelemetry.instrumentation._semconv import ( @@ -48,13 +49,13 @@ ) from opentelemetry.semconv.attributes import error_attributes from opentelemetry.trace.span import Span +from opentelemetry.util.genai.completion_hook import CompletionHook from opentelemetry.util.genai.types import ( ContentCapturingMode, - MessagePart, InputMessage, + MessagePart, OutputMessage, ) -from opentelemetry.util.genai.completion_hook import CompletionHook from .allowlist_util import AllowList from .custom_semconv import GCP_GENAI_OPERATION_CONFIG 
@@ -165,7 +166,9 @@ def _to_dict(value: object): def _add_request_options_to_span( - span: Span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList + span: Span, + config: Optional[GenerateContentConfigOrDict], + allow_list: AllowList, ): if config is None: return @@ -209,7 +212,9 @@ def _add_request_options_to_span( }, ) for key, value in attributes.items(): - if key.startswith(GCP_GENAI_OPERATION_CONFIG) and not allow_list.allowed(key): + if key.startswith( + GCP_GENAI_OPERATION_CONFIG + ) and not allow_list.allowed(key): # The allowlist is used to control inclusion of the dynamic keys. continue span.set_attribute(key, value) @@ -245,7 +250,9 @@ def _wrapped_config_with_tools( if not config.tools: return config result = copy.copy(config) - result.tools = [wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools] + result.tools = [ + wrapped_tool(tool, otel_wrapper, **kwargs) for tool in config.tools + ] return result @@ -268,10 +275,12 @@ def _create_completion_details_attributes( ) -> dict[str, Any]: attributes: dict[str, Any] = { gen_ai_attributes.GEN_AI_INPUT_MESSAGES: [ - dataclasses.asdict(input_message) for input_message in input_messages + dataclasses.asdict(input_message) + for input_message in input_messages ], gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: [ - dataclasses.asdict(output_message) for output_message in output_messages + dataclasses.asdict(output_message) + for output_message in output_messages ], } if system_instructions: @@ -280,7 +289,10 @@ def _create_completion_details_attributes( ] if as_str: - return {k: json.dumps(v, cls=Base64JsonEncoder) for k, v in attributes.items()} + return { + k: json.dumps(v, cls=Base64JsonEncoder) + for k, v in attributes.items() + } return attributes @@ -464,12 +476,17 @@ def _maybe_log_completion_details( system_instructions = to_system_instructions( content=transformers.t_contents(system_content)[0] ) - input_messages = to_input_messages(contents=transformers.t_contents(request)) 
- output_messages = to_output_messages(candidates=response.candidates or []) + input_messages = to_input_messages( + contents=transformers.t_contents(request) + ) + output_messages = to_output_messages( + candidates=response.candidates or [] + ) span = trace.get_current_span() event = Event( - name="gen_ai.client.inference.operation.details", attributes=attributes + name="gen_ai.client.inference.operation.details", + attributes=attributes, ) self.completion_hook.on_completion( inputs=input_messages, @@ -482,21 +499,25 @@ def _maybe_log_completion_details( ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - completion_details_attributes = _create_completion_details_attributes( - input_messages, - output_messages, - system_instructions, - as_str=True, + completion_details_attributes = ( + _create_completion_details_attributes( + input_messages, + output_messages, + system_instructions, + as_str=True, + ) ) span.set_attributes(completion_details_attributes) if self._content_recording_enabled in [ ContentCapturingMode.EVENT_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - completion_details_attributes = _create_completion_details_attributes( - input_messages, - output_messages, - system_instructions, + completion_details_attributes = ( + _create_completion_details_attributes( + input_messages, + output_messages, + system_instructions, + ) ) event.attributes = { **(event.attributes or {}), @@ -541,7 +562,9 @@ def _maybe_log_user_prompt( total = len(contents) index = 0 for entry in contents: - self._maybe_log_single_user_prompt(entry, index=index, total=total) + self._maybe_log_single_user_prompt( + entry, index=index, total=total + ) index += 1 else: self._maybe_log_single_user_prompt(contents) @@ -647,7 +670,9 @@ def _maybe_log_response_stats(self, response: GenerateContentResponse): # pass - def _maybe_log_response_safety_ratings(self, response: GenerateContentResponse): + def _maybe_log_response_safety_ratings( + self, response: 
GenerateContentResponse + ): # TODO: Determine if there is a way that we can log # the "prompt_feedback". This would be especially useful # in the case where the response is blocked. @@ -917,13 +942,18 @@ async def _response_async_generator_wrapper(): with trace.use_span(span, end_on_exit=True): try: async for response in response_async_generator: - if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT: + if ( + helper.sem_conv_opt_in_mode + == _StabilityMode.DEFAULT + ): helper.process_response(response) elif ( helper.sem_conv_opt_in_mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL ): - helper.process_completion(contents, response, config) + helper.process_completion( + contents, response, config + ) else: raise ValueError( f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported." @@ -969,12 +999,10 @@ def instrument_generate_content( completion_hook, generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) - AsyncModels.generate_content_stream = ( - _create_instrumented_async_generate_content_stream( - snapshot, - otel_wrapper, - completion_hook, - generate_content_config_key_allowlist=generate_content_config_key_allowlist, - ) + AsyncModels.generate_content_stream = _create_instrumented_async_generate_content_stream( + snapshot, + otel_wrapper, + completion_hook, + generate_content_config_key_allowlist=generate_content_config_key_allowlist, ) return snapshot diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py index c1110663ff..ed7d4638a3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/instrumentor.py @@ 
-59,7 +59,9 @@ def _instrument(self, **kwargs: Any): event_logger_provider=event_logger_provider, meter_provider=meter_provider, ) - completion_hook = kwargs.get("completion_hook") or load_completion_hook() + completion_hook = ( + kwargs.get("completion_hook") or load_completion_hook() + ) self._generate_content_snapshot = instrument_generate_content( otel_wrapper, completion_hook, diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py index ff94d864e4..6a515163b6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/message.py @@ -14,13 +14,14 @@ from __future__ import annotations -from base64 import b64encode import json import logging +from base64 import b64encode from enum import Enum from typing import Any from google.genai import types as genai_types + from opentelemetry.util.genai.types import ( BlobPart, FileDataPart, @@ -75,7 +76,9 @@ def content_to_output_message( parts=message.parts, ) - messages = (content_to_output_message(candidate) for candidate in candidates) + messages = ( + content_to_output_message(candidate) for candidate in candidates + ) return [message for message in messages if message is not None] @@ -83,14 +86,18 @@ def to_system_instructions( *, content: genai_types.Content, ) -> list[MessagePart]: - parts = (_to_part(part, idx) for idx, part in enumerate(content.parts or [])) + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) return [part for part in parts if part is not None] def _to_input_message( content: genai_types.Content, ) -> InputMessage: - parts = (_to_part(part, idx) for idx, part in 
enumerate(content.parts or [])) + parts = ( + _to_part(part, idx) for idx, part in enumerate(content.parts or []) + ) return InputMessage( role=_to_role(content.role), # filter Nones @@ -108,9 +115,7 @@ def tool_call_id(name: str | None) -> str: return Text(content=text) if data := part.inline_data: - return BlobPart( - mime_type=data.mime_type or "", data=data.data or b"" - ) + return BlobPart(mime_type=data.mime_type or "", data=data.data or b"") if data := part.file_data: return FileDataPart( diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py index 3d6a5a41a2..675c84b855 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py @@ -17,6 +17,7 @@ from typing import Any import google.genai + from opentelemetry._events import Event, EventLogger, EventLoggerProvider from opentelemetry.metrics import Meter, MeterProvider from opentelemetry.semconv._incubating.metrics import gen_ai_metrics @@ -39,15 +40,17 @@ class OTelWrapper: - def __init__(self, tracer: Tracer, event_logger: EventLogger, meter: Meter): + def __init__( + self, tracer: Tracer, event_logger: EventLogger, meter: Meter + ): self._tracer = tracer self._event_logger = event_logger self._meter = meter self._operation_duration_metric = ( gen_ai_metrics.create_gen_ai_client_operation_duration(meter) ) - self._token_usage_metric = gen_ai_metrics.create_gen_ai_client_token_usage( - meter + self._token_usage_metric = ( + gen_ai_metrics.create_gen_ai_client_token_usage(meter) ) @staticmethod @@ -79,17 +82,23 @@ def operation_duration_metric(self): def token_usage_metric(self): return 
self._token_usage_metric - def log_system_prompt(self, attributes: dict[str, str], body: dict[str, Any]): + def log_system_prompt( + self, attributes: dict[str, str], body: dict[str, Any] + ): _logger.debug("Recording system prompt.") event_name = "gen_ai.system.message" self._log_event(event_name, attributes, body) - def log_user_prompt(self, attributes: dict[str, str], body: dict[str, Any]): + def log_user_prompt( + self, attributes: dict[str, str], body: dict[str, Any] + ): _logger.debug("Recording user prompt.") event_name = "gen_ai.user.message" self._log_event(event_name, attributes, body) - def log_response_content(self, attributes: dict[str, str], body: dict[str, Any]): + def log_response_content( + self, attributes: dict[str, str], body: dict[str, Any] + ): _logger.debug("Recording response.") event_name = "gen_ai.choice" self._log_event(event_name, attributes, body) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py index 80d83ea3ae..f4303306e3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/tool_call_wrapper.py @@ -24,15 +24,14 @@ ) from opentelemetry import trace -from opentelemetry.semconv._incubating.attributes import ( - code_attributes, -) from opentelemetry.instrumentation._semconv import ( - _StabilityMode, _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, + _StabilityMode, +) +from opentelemetry.semconv._incubating.attributes import ( + code_attributes, ) - from opentelemetry.util.genai.types import ContentCapturingMode from .flags import is_content_recording_enabled @@ -52,7 +51,9 
@@ def _to_otel_value(python_value): if isinstance(python_value, list): return [_to_otel_value(x) for x in python_value] if isinstance(python_value, dict): - return {key: _to_otel_value(val) for (key, val) in python_value.items()} + return { + key: _to_otel_value(val) for (key, val) in python_value.items() + } if hasattr(python_value, "model_dump"): return python_value.model_dump() if hasattr(python_value, "__dict__"): @@ -120,7 +121,9 @@ def _create_function_span_attributes( return result -def _record_function_call_argument(span, param_name, param_value, include_values): +def _record_function_call_argument( + span, param_name, param_value, include_values +): attribute_prefix = f"code.function.parameters.{param_name}" type_attribute = f"{attribute_prefix}.type" span.set_attribute(type_attribute, type(param_value).__name__) @@ -152,7 +155,9 @@ def _record_function_call_result(otel_wrapper, wrapped_function, result): span = trace.get_current_span() span.set_attribute("code.function.return.type", type(result).__name__) if include_values: - span.set_attribute("code.function.return.value", _to_otel_attribute(result)) + span.set_attribute( + "code.function.return.value", _to_otel_attribute(result) + ) def _wrap_sync_tool_function( @@ -167,8 +172,12 @@ def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, extra_span_attributes ) - with otel_wrapper.start_as_current_span(span_name, attributes=attributes): - _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) + with otel_wrapper.start_as_current_span( + span_name, attributes=attributes + ): + _record_function_call_arguments( + otel_wrapper, tool_function, args, kwargs + ) result = tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -188,8 +197,12 @@ async def wrapped_function(*args, **kwargs): attributes = _create_function_span_attributes( tool_function, args, kwargs, 
extra_span_attributes ) - with otel_wrapper.start_as_current_span(span_name, attributes=attributes): - _record_function_call_arguments(otel_wrapper, tool_function, args, kwargs) + with otel_wrapper.start_as_current_span( + span_name, attributes=attributes + ): + _record_function_call_arguments( + otel_wrapper, tool_function, args, kwargs + ) result = await tool_function(*args, **kwargs) _record_function_call_result(otel_wrapper, tool_function, result) return result @@ -215,7 +228,9 @@ def wrapped( if tool_or_tools is None: return None if isinstance(tool_or_tools, list): - return [wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools] + return [ + wrapped(item, otel_wrapper, **kwargs) for item in tool_or_tools + ] if isinstance(tool_or_tools, dict): return { key: wrapped(value, otel_wrapper, **kwargs) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py index f46de7ca3a..5719ddf0f8 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py @@ -17,7 +17,8 @@ class FakeCredentials(google.auth.credentials.AnonymousCredentials): def __init__(self): - self.token = 'a' - self._quota_project_id = 'a' + self.token = "a" + self._quota_project_id = "a" + def refresh(self, request): pass diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index c251cc104b..0000000000 --- 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark, inscrutable ways,\nTo trace a request through hazy days.\nOpen Telemetry, a beacon bright,\nIlluminates the path, both day and night.\n\nFrom metrics gathered, a clear display,\nOf latency's dance, and errors' sway.\nTraces unwind, a silken thread,\nShowing the journey, from start to head.\n\nLogs interweave, a richer hue,\nContextual clues, for me and you.\nNo vendor lock-in, a freedom's call,\nTo choose your tools, to stand up tall.\n\nExporters aplenty, a varied choice,\nTo send your data, amplify your voice.\nJaeger, Zipkin, Prometheus' might,\nAll integrate, a glorious sight.\n\nWith spans and attributes, a detailed scene,\nOf how your system works, both sleek and keen.\nPerformance bottlenecks, now laid bare,\nOpen Telemetry, beyond compare.\n\nSo embrace the light, let darkness flee,\nWith Open Telemetry, set your systems free.\nObserve, and learn, and optimize with grace,\nA brighter future, in this digital space.\n" - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -0.3303731600443522 - } - ], - 
"usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 240, - "totalTokenCount": 248, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 240 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:18.083091Z", - "responseId": "5nDLZ5OJBdyY3NoPiZGx0Ag" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 3ae84308bf..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark logs, a cryptic, hidden trace,\nOf failing systems, lost in time and space.\nOpenTelemetry, a beacon shining bright,\nIlluminating paths, both dark and light.\n\nFrom microservices, a sprawling, tangled mesh,\nTo monolithic beasts, put to the test,\nIt gathers traces, spans, and metrics too,\nA holistic view, for me and you.\n\nWith signals clear, from every single node,\nPerformance bottlenecks, instantly bestowed.\nDistributed tracing, paints a vivid scene,\nWhere latency lurks, and slowdowns intervene.\n\nExporters rise, to send the data forth,\nTo dashboards grand, of proven, measured worth.\nPrometheus, Grafana, Jaeger, fluent streams,\nVisualizing insights, fulfilling data dreams.\n\nFrom Jaeger's diagrams, a branching, flowing art,\nTo Grafana's charts, that play a vital part,\nThe mysteries unravel, hidden deep inside,\nWhere errors slumber, and slow responses hide.\n\nSo hail OpenTelemetry, a gift to all who code,\nA brighter future, on a well-lit road.\nNo more guesswork, no more fruitless chase,\nJust clear observability, in time and space.\n" - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -0.45532724261283875 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 256, - "totalTokenCount": 264, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ 
- { - "modality": "TEXT", - "tokenCount": 256 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:15.268428Z", - "responseId": "43DLZ4yxEM6F3NoPzaTkiQU" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..68fe1978e2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In tangled clouds, where microservices roam,\nAnd data streams find a circuitous home,\nA dark abyss, where errors often hide,\nWith mysteries deep, there's nowhere to confide.\n\nBut from the void, a standard takes its flight,\nTo bring the hidden processes to light.\nOpenTelemetry, a unified design,\nA beacon in the digital divine.\n\nWith traces keen, a journey we embark,\nEach operation leaves its vital mark.\nFrom service entry to a database's keep,\nContext propagated, secrets now don't sleep.\n\nThen metrics gleam, in charts and graphs displayed,\nResponse times measured, no detail delayed.\nThrough counters, gauges, histograms so precise,\nPerformance whispered, at a measurable price.\n\nAnd logs, the tales of moments as they pass,\nDiscrete events within the digital glass.\nWith structured wisdom, messages unfold,\nA narrative of what the systems hold.\n\nThrough instrumentation, code begins to speak,\nA common language that all systems seek.\nThe Collector waits, a sentinel so grand,\nTo process, batch, and send across the land.\n\nNo vendor binds, no proprietary chain,\nBut open standards, free from selfish gain.\nA universal tongue, for insight to arise,\nReflecting truth before observing eyes.\n\nFrom scattered signals, patterns now emerge,\nTo quell the chaos, stem the error's surge.\nWith deep 
observability, we understand,\nThe intricate dance across the digital sand.\n\nSo hail this standard, built with thoughtful grace,\nTo light the corners of computing space.\nOpenTelemetry, a steadfast, vital guide,\nWhere understanding and precision ride." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -2.298871321801128 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 349, + "totalTokenCount": 2326, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 349 + } + ], + "thoughtsTokenCount": 1969 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:52.185046Z", + "responseId": "XOXjaNalC4-nnvgPsaPcuAw" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:51:05 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..5f8608e42d --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In realms where code profoundly weaves its might,\nAnd services converse in hurried streams,\nA common tongue emerges, clear and bright,\nTo shed its light upon our digital dreams.\n\nNo longer hidden, lost in labyrinthine ways,\nThe secret life of systems, now revealed,\nThrough OpenTelemetry's enlightened gaze,\nA universal language, fairly wielded.\n\nA **Trace** begins, a journey's golden thread,\nFrom distant client to the deepest core,\nEach tiny \"span,\" a word that has been said,\nMapping the path, from shore to server's shore.\nContext propagated, like a gentle breeze,\nAcross the network, through the busy maze,\nConnecting dots with elegant, calm ease,\nUnveiling time, in swift or lingering phases.\n\nThen **Metrics** pulse, a steady, rhythmic beat,\nThe CPU's hum, the memory's steady climb,\nLatency's dance, both bitter and so sweet,\nA quantitative pulse, defying time.\nCounters tally, gauges hold their ground,\nHistograms paint distributions, clear and bold,\nThe health of systems, constantly profound,\nA silent story, in their numbers told.\n\nAnd finally, the **Logs**, a detailed scroll,\nEach event marked, a narrative precise,\nWhat happened where, beyond our own control,\nA key to debug, a moment's wise advice.\nStructured records, searchable and plain,\nThey whisper errors, or triumphant news,\nA 
chronological, unwavering refrain,\nUnlocking insights, banishing confusions.\n\nFrom humble agents, nestled in the code,\nTo collectors standing, vigilant and keen,\nThe data flows, a precious, heavy load,\nTo any backend, for display and screen.\nVendor-neutral, open, strong, and free,\nIt breaks the silos, tears the walls apart,\nFor true observability's decree,\nTo understand the system's beating heart.\n\nSo hail this standard, built with thoughtful hand,\nFor those who build, and those who seek to mend,\nOpenTelemetry, across the land,\nOur guiding light, until the journey's end." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -2.4816789437603477 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 453, + "totalTokenCount": 2468, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 453 + } + ], + "thoughtsTokenCount": 2007 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:35.682132Z", + "responseId": "S-XjaJTRKdO6nvgPnMuyoQs" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:50:51 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index 77e985bf28..0000000000 --- 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark, mysterious traces,\nNo more guessing, in empty spaces.\nOpenTelemetry's light now shines,\nIlluminating all our designs.\n\nFrom microservices, small and fleet,\nTo monolithic beasts, hard to beat,\nIt weaves a net, both fine and strong,\nWhere metrics flow, where logs belong.\n\nTraces dance, a vibrant hue,\nShowing journeys, old and new.\nSpans unfold, a story told,\nOf requests handled, brave and bold.\n\nMetrics hum, a steady beat,\nLatency, errors, can't be beat.\nDistribution charts, a clear display,\nGuiding us along the way.\n\nLogs provide a detailed view,\nOf what happened, me and you.\nContext rich, with helpful clues,\nDebugging woes, it quickly subdues.\n\nWith exporters wise, a thoughtful choice,\nTo Prometheus, Jaeger, or Zipkin's voice,\nOur data flows, a precious stream,\nReal-time insights, a waking dream.\n\nSo hail to OpenTelemetry's might,\nBringing clarity to our darkest night.\nObservability's champion, bold and true,\nA brighter future, for me and you.\n" - } - ] - }, - "finishReason": "STOP", - 
"avgLogprobs": -0.4071464086238575 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 253, - "totalTokenCount": 261, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 253 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:12.443989Z", - "responseId": "4HDLZ9WMG6SK698Pr5uZ2Qw" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 7d3d7a56b2..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,94 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent - response: - body: - string: |- - { - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": "No more dark, mysterious traces,\nOf failing systems, hidden spaces.\nOpen Telemetry's light shines bright,\nGuiding us through the darkest night.\n\nFrom metrics gathered, finely spun,\nTo logs that tell of tasks undone,\nAnd traces linking every call,\nIt answers questions, standing tall.\n\nDistributed systems, complex and vast,\nTheir hidden flaws, no longer cast\nIn shadows deep, beyond our view,\nOpen Telemetry sees them through.\n\nWith spans and attributes, it weaves a tale,\nOf requests flowing, never frail.\nIt pinpoints bottlenecks, slow and grim,\nAnd helps us optimize, system trim.\n\nAcross languages, a common ground,\nWhere data's shared, and insights found.\nExporters whisper, collectors hum,\nA symphony of data, overcome.\n\nSo raise a glass, to this open source,\nA shining beacon, a powerful force.\nOpen Telemetry, a guiding star,\nRevealing secrets, near and far.\n" - } - ] - }, - "finishReason": "STOP", - "avgLogprobs": -0.3586180628193498 - } - ], - "usageMetadata": { - "promptTokenCount": 8, - "candidatesTokenCount": 211, - "totalTokenCount": 219, - "promptTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 8 - } - ], - "candidatesTokensDetails": [ - { - "modality": "TEXT", - "tokenCount": 211 - } - ] - }, - "modelVersion": "gemini-1.5-flash-002", - "createTime": "2025-03-07T22:19:09.936326Z", - "responseId": 
"3XDLZ4aTOZSpnvgPn-e0qQk" - } - headers: - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=UTF-8 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..e717acd482 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In realms of code, where microservices gleam,\nA hidden flow, a silent, complex stream.\nThe journey lost, where data takes its flight,\nA developer's plea for dawning light.\n\nThen from the cloud, a standard takes its stand,\nWith open heart, across the digital land.\nOpenTelemetry, a beacon strong and true,\nTo pierce the fog, and bring the facts to view.\n\nA Trace begins, a story to unfold,\nThrough every service, brave and strong and bold.\nWith Spans entwined, a parent and its child,\nThe call graph painted, nevermore beguiled.\n\nThen Metrics rise, in numbers clear and plain,\nThe count of errors, or the joyful gain.\nA gauge of load, a histogram's wide spread,\nThe health of systems, clearly to be read.\n\nAnd Logs arrive, with context rich and deep,\nThe structured message, secrets they don't keep.\nEach line a moment, with data to attest,\nA vital clue, to pass the crucial test.\n\nNo vendor lock, no proprietary chain,\nBut open hands, a collaborative domain.\nA unified approach, for all to understand,\nThe data flowing, free across the land.\n\nWith SDKs bright, in languages diverse,\nYou instrument your code, a crucial verse.\nAttach the agents, where the queries run,\nThe observation journey, has begun.\n\nThe Collector waits, a gatherer so keen,\nTo process data, clean and sharp and lean.\nTransform and 
batch, then send it far away,\nTo chosen backend, come what may.\n\nSo dark no longer, where the errors hide,\nBut clarity shines, your systems open wide.\nThe 'why' revealed, the latency explained,\nWith OTel's power, mastery is gained.\n\nFor engineers who seek to truly see,\nThe inner workings, wild and fiercely free.\nEmbrace the standard, let its power ignite,\nOpenTelemetry, your guiding, digital light." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -1.5871007582720589 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 425, + "totalTokenCount": 2470, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 425 + } + ], + "thoughtsTokenCount": 2037 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:21.557646Z", + "responseId": "PeXjaM6EItur3NoPkYPI2A4" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:50:34 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..09a734361f --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,102 @@ +interactions: 
+- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "In lands of code, where microservices roam,\nAnd hidden pathways lead our data home,\nWhere errors lurk and mysteries reside,\nA guiding light we desperately confide.\n\nThen from the open source, a beacon gleams,\nUnifying visions, waking dormant dreams.\nNo longer bound by vendor's watchful hand,\nOpenTelemetry, across the digital land.\n\nIt weaves the **Traces**, threads of golden light,\nThrough every service, in the darkest night.\nEach **Span** a step, a function's brief embrace,\nRevealing journeys, time, and hidden space.\n\nThen **Metrics** rise, in counters and in gauges,\nPerformance stories, told on countless pages.\nCPU hum, latency's slow tide,\nThe health of systems, nowhere left to hide.\n\nAnd **Logs**, the whispers from each busy node,\nA chronicle of actions, burdens freely showed.\nFrom info messages to errors stark and plain,\nContextual wisdom, banishing the pain.\n\nWith SDKs and agents, code aware it grows,\nThrough sturdy Collectors, the data swiftly flows.\nTo chosen backends, be they new or old,\nA unified stream, a story to unfold.\n\nNo longer blind, to whispers in the dark,\nWe find the culprits, leaving their dark mark.\nUnderstand the flow, the logic and the strain,\nAnd build 
with confidence, again and again.\n\nSo hail the standard, vibrant, strong, and free,\nFor every system, for you, for me.\nOpenTelemetry, a power understood,\nIlluminating darkness, for the common good." + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -1.8066022087545957 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 340, + "totalTokenCount": 2275, + "trafficType": "ON_DEMAND", + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 340 + } + ], + "thoughtsTokenCount": 1927 + }, + "modelVersion": "gemini-2.5-flash", + "createTime": "2025-10-06T15:50:08.533649Z", + "responseId": "MOXjaJHJIPP02PgP1dnn0Qo" + } + headers: + Accept-Ranges: + - none + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 06 Oct 2025 15:50:20 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - X-Origin + - Referer + - Origin,Accept-Encoding + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index a946911c36..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,97 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" longer dark, the tracing's light,\\nOpen Telemetry, shining\ - \ bright\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \".\\nA beacon in the coding night,\\nRevealing paths, both\ - \ dark\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ - 2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" and bright.\\n\\nFrom microservice to sprawling beast,\\\ - nIts watchful eye, a silent priest.\\nObserving calls, both small and vast,\\\ - nPerformance\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" flaws, revealed at last.\\n\\nWith metrics gleaned and logs\ - \ 
aligned,\\nA clearer picture, you will find.\\nOf latency, and errors dire,\\\ - n\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\ - ,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"And bottlenecks\ - \ that set afire.\\n\\nIt spans the clouds, a network wide,\\nWhere data streams,\ - \ a surging tide.\\nCollecting traces, rich and deep,\\nWhile slumbering apps\ - \ their secrets keep.\\n\\nJaeger, Zip\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"kin, the tools it holds,\\nA tapestry of stories told.\\nOf\ - \ requests flowing, swift and free,\\nOr tangled knots, for all to see.\\\ - n\\nSo embrace the power, understand,\\nThe vital role, across the\"}]}}],\"\ - modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\ - ,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" land.\\nOpen\ - \ Telemetry, a guiding star,\\nTo navigate the digital afar.\\n\"}]},\"finishReason\"\ - : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ - : 212,\"totalTokenCount\": 220,\"promptTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 212}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\ - createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ - }\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: 
- - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 647a76b80a..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,102 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"The\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" black box whispers, secrets deep,\\nOf failing systems, promises\ - \ to keep.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: 
{\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\nBut tracing's light, a guiding hand,\\nReveals the path\"\ - }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", across the\ - \ land.\\n\\nOpen Telemetry, a beacon bright,\\nIlluminating pathways, day\ - \ and night.\\nFrom spans and traces, stories told,\"}]}}],\"modelVersion\"\ - : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nOf requests\ - \ flowing, brave and bold.\\n\\nThe metrics rise, a vibrant chart,\\nDisplaying\ - \ latency, a work of art.\\nEach request'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"s journey, clearly shown,\\nWhere bottlenecks slumber, seeds\ - \ are sown.\\n\\nWith logs appended, context clear,\\nThe root of problems,\ - \ drawing near.\\nObservability's embrace, so wide,\\nUnraveling mysteries,\"\ - }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" deep inside.\\\ - n\\nFrom simple apps to complex weaves,\\nOpen Telemetry's power achieves,\\\ - nA unified vision, strong and true,\\nMonitoring systems, old and new.\\n\\\ - nNo vendor lock-in, free to roam,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - 
}\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\nAcross the clouds, and find your home.\\nA standard rising,\ - \ strong and bold,\\nA future brighter, to behold.\\n\\nSo let the traces\ - \ flow and gleam,\\nOpen Telemetry, a vibrant dream.\\nOf healthy systems,\ - \ running free,\\nFor all to see, for all to be.\"}]}}],\"modelVersion\":\ - \ \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ - ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\n\"}]},\"\ - finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"\ - candidatesTokenCount\": 258,\"totalTokenCount\": 266,\"promptTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ - : [{\"modality\": \"TEXT\",\"tokenCount\": 258}]},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ - }\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..b09dd31662 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,102 @@ 
+interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In realms of code, where microservices bloom,\\nA tangled\ + \ web, obscured by digital gloom.\\nWhen errors hide, and latency takes hold,\\\ + nA story lost, a mystery untold.\\n\\nThen from the depths, a guiding light\ + \ appears,\\nTo banish doubt and quell developers' fears.\\n**OpenTelemetry**,\"\ + }]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"\ + responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a standard, bold\ + \ and free,\\nUnifying sight for all the world to see.\\n\\nThrough vast distributed\ + \ lands, it weaves a thread,\\nFrom start to finish, where each packet's led.\\\ + nWith **spans** and parentage, a call stack clear,\\nThe journey's path, dismissing\ + \ every fear.\\n\\n\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\"\ + ,\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"Then 
**metrics**\ + \ rise, a pulse, a steady beat,\\nOf CPU, memory, and every server's heat.\\\ + nLatency's dance, throughput's flowing tide,\\nIn charts and graphs, where\ + \ vital truths reside.\\n\\nAnd **logs**, the tales of action, word by word,\\\ + n\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"\ + responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \"Each event recorded,\ + \ faithfully heard.\\n**Context attached**, correlation strong,\\nWhere truth\ + \ unfolds, and nothing can go wrong.\\n\\nNo vendor lock, no proprietary chain,\\\ + nJust **open standards**, easing every pain.\\nWith SDKs to **instrument**\ + \ your plea,\\nAnd **collectors** gathering, for all to see.\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:43.615903Z\",\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\n\\nSo clarity descends, where once was night,\\nA system's\ + \ health, revealed in shining light.\\nFor dev and ops, a shared and common\ + \ tongue,\\n**OpenTelemetry**, its virtues widely sung.\"}]},\"finishReason\"\ + : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ + : 309,\"totalTokenCount\": 1779,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 309}],\"thoughtsTokenCount\": 1462},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:43.615903Z\"\ + ,\"responseId\": \"j-XjaN_LJdO6nvgPnMuyoQs\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 
15:51:52 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..4f98f0102d --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,109 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In realms of code, where logic weaves its way,\\nAnd microservices\ + \ in silent dance hold sway,\\nA shadowed maze, where errors often hide,\\\ + nAnd mysteries within the darkness ride.\\n\\nThen from the cloud, a guiding\ + \ star takes flight,\\nOpenTelemetry, bringing forth the light.\\nA universal\ + \ tongue, for systems\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ + ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" to embrace,\\\ + nTo chart the journey, through time and space.\\n\\nIt paints the **Traces**,\ + \ a thread of golden gleam,\\nEach request's passage, a detailed, vital dream.\\\ + nFrom source to endpoint, through every hop it flows,\\nThe parent-child relationships,\ + \ every service knows.\\n\\nThen\"}]}}],\"usageMetadata\": {\"trafficType\"\ + : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ + 2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" **Metrics** rise, with numbers sharp and clear,\\nThe CPU's\ + \ heartbeat, the latency we 
fear.\\nCounts, gauges, histograms, precise and\ + \ true,\\nPerformance secrets, brought right into view.\\n\\nAnd **Logs**,\ + \ the tales each crucial moment tells,\\nContextual details, from the deepest\ + \ cells.\\nWith\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ + ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" trace IDs\ + \ tied, a story now complete,\\nNo isolated entry, bitter-sweet.\\n\\nThrough\ + \ agents calm, the Collector takes its stand,\\nGathering wisdom, from every\ + \ corner of the land.\\nVendor-neutral standard, a freedom it bestows,\\nOn\ + \ how to see, and where the knowledge goes.\\n\\nNo\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" longer guessing, in the dark we grope,\\nBut paths illuminated,\ + \ sparking future hope.\\nFor engineers and SREs, a tool so grand,\\nTo understand\ + \ the landscape, fully in command.\\n\\nSo hail to OTel, its vision strong\ + \ and wide,\\nOur system's heartbeat, can no longer hide\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:32.145041Z\",\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \".\\nA canvas drawn, where every pixel gleams,\\nThe living\ + \ tapestry of all our digital dreams.\"}]},\"finishReason\": \"STOP\"}],\"\ + usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\": 353,\"\ + totalTokenCount\": 1932,\"trafficType\": 
\"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 353}],\"thoughtsTokenCount\": 1571},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:32.145041Z\"\ + ,\"responseId\": \"hOXjaJHtCPP02PgP1dnn0Qo\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 15:51:41 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml deleted file mode 100644 index 9a068aae89..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml +++ /dev/null @@ -1,99 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." 
- } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" more dark logs, a cryptic, silent scream,\\nNo more the hunt\ - \ for\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ - 2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" errors, a lost, fading dream.\\nOpen Telemetry, a beacon\ - \ in\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ - 2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" the night,\\nShining forth its data, clear and burning bright.\\\ - n\\nFrom traces spanning systems, a flowing, silver thread,\\nMetrics pulse\ - \ and measure,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" insights finely spread.\\nLogs enriched with context, a 
story\ - \ they unfold,\\nOf requests and responses, both brave and bold.\\n\\nObservability's\ - \ promise\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \", a future now at hand,\\nWith vendors interoperable, a collaborative\ - \ band.\\nNo longer vendor lock-in, a restrictive, iron cage,\\nBut freedom\ - \ of selection, turning a new page.\\n\\nFrom microservices humming,\"}]}}],\"\ - modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ - ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a symphony\ - \ of calls,\\nTo monolithic giants, answering their thralls,\\nOpen Telemetry\ - \ watches, with keen and watchful eye,\\nDetecting the anomalies, before they\ - \ rise and fly.\\n\\nSo let the data flow freely, a\"}]}}],\"modelVersion\"\ - : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ - ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" river strong\ - \ and deep,\\nIts secrets it will whisper, while the systems sleep.\\nOpen\ - \ Telemetry's power, a force that we can wield,\\nTo build more stable systems,\ - \ in the digital field.\\n\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\ - : {\"promptTokenCount\": 8,\"candidatesTokenCount\": 238,\"totalTokenCount\"\ - : 246,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"\ - candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 238}]},\"\ - modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ - ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\n" - headers: - Content-Disposition: - - attachment - 
Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml deleted file mode 100644 index 669f1af93b..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml +++ /dev/null @@ -1,99 +0,0 @@ -interactions: -- request: - body: |- - { - "contents": [ - { - "parts": [ - { - "text": "Create a poem about Open Telemetry." - } - ], - "role": "user" - } - ] - } - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '92' - Content-Type: - - application/json - user-agent: - - google-genai-sdk/1.0.0 gl-python/3.12.8 - x-goog-api-client: - - - x-goog-user-project: - - - method: POST - uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse - response: - body: - string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \" more dark, mysterious traces,\\nNo more guessing, in time\ - \ and spaces.\"}]}}],\"modelVersion\": 
\"gemini-1.5-flash-002\",\"createTime\"\ - : \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"\\nOpen Telemetry's light shines bright,\\nIlluminating the\ - \ code'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\":\ - \ \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"s dark night.\\n\\nFrom spans and metrics, a story told,\\\ - nOf requests flowing, both brave and bold.\\nTraces weaving, a tapestry grand,\"\ - }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\ - ,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nShowing\ - \ performance, across the land.\\n\\nLogs and metrics, a perfect blend,\\\ - nInformation's flow, without end.\\nObservability's promise\"}]}}],\"modelVersion\"\ - : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\ - ,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\ - : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", clear and\ - \ true,\\nInsights revealed, for me and you.\\n\\nJaeger, Zipkin, a chorus\ - \ sings,\\nWith exporters ready, for all the things.\\nFrom simple apps to\ - \ systems vast,\\nOpen Telemetry'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ - ,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ - : [{\"text\": \"s power will last.\\n\\nNo vendor lock-in, a freedom sweet,\\\ - nOpen source glory, can't be beat.\\nSo let us embrace, this modern way,\\\ - nTo monitor systems, come what may.\\n\\nFrom\"}]}}],\"modelVersion\": \"\ - 
gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"\ - responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\": [{\"\ - content\": {\"role\": \"model\",\"parts\": [{\"text\": \" microservices, small\ - \ and slight,\\nTo monolithic giants, shining bright,\\nOpen Telemetry shows\ - \ the path,\\nTo understand, and fix the wrath,\\nOf latency demons, lurking\ - \ near,\\nBringing clarity, year after year.\\n\"}]},\"finishReason\": \"\ - STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ - : 242,\"totalTokenCount\": 250,\"promptTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\ - TEXT\",\"tokenCount\": 242}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\ - createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ - }\r\n\r\n" - headers: - Content-Disposition: - - attachment - Content-Type: - - text/event-stream - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml new file mode 100644 index 0000000000..e06f1689d3 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-async].yaml @@ -0,0 +1,123 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In realms of code, where microservices roam,\\nA digital maze,\ + \ far from a single home.\\nWhere calls interweave, and logic softly hums,\\\ + nHow know the path, when trouble swiftly comes?\\nA black box deep, with secrets\ + \ held inside,\\nWhere errors lurk, and issues try to hide\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \".\\n\\nThen a beacon shines, a guiding, steady hand,\\nA common\ + \ tongue, across the digital land.\\n**OpenTelemetry**, its promise clear,\ + \ for all to see,\\nA standard set, for observability.\\nNo vendor lock, but\ + \ freedom's open gate,\\nTo understand, before it is too late.\\n\\n\"}]}}],\"\ + usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"A **trace** begins, a thread of light unfurled,\\nThrough\ + \ services vast, across the cyber world.\\nEach interaction, a **span** along\ + \ the 
way,\\nFrom user's click, to final, rendered day.\\nA parent's call,\ + \ a child's reply so swift,\\nA\"}]}}],\"usageMetadata\": {\"trafficType\"\ + : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ + 2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" narrative woven, an insightful gift.\\n\\nThe **metrics**\ + \ hum, the system's steady beat,\\nA pulse of health, or looming, cold defeat.\\\ + nCPU's quick breath, memory's deep embrace,\\nRequest rates soar, or slow\ + \ to crawling pace.\\nCounters tick, and gauges softly sway,\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\nRevealing truths, at close of every day.\\n\\nAnd **logs**\ + \ record, the whispers from the core,\\nEach critical event, and nothing less,\ + \ or more.\\nA timestamped tale, of what transpired within,\\nWhere errors\ + \ bloomed, or victories did win.\\nThe full context shared, with every\"}]}}],\"\ + usageMetadata\": {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" single line,\\nA detailed story, perfectly aligned.\\n\\\ + nThrough **context propagation**, threads entwine,\\nAcross boundaries, making\ + \ the design\\nCoherent, whole, where once were gaping cracks,\\nThe full\ + \ transaction, on its varied tracks.\\nThe **Collector** stands, a guardian\ + \ at the gate,\\nTo gather\"}]}}],\"usageMetadata\": {\"trafficType\": \"\ + ON_DEMAND\"},\"modelVersion\": 
\"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:18.506243Z\"\ + ,\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" streams, before\ + \ it is too late.\\nTransform, enrich, then send where insights bloom,\\nDispelling\ + \ shadows, chasing back the gloom.\\n\\nSo gaze upon the truth, in light so\ + \ clear,\\nNo longer guessing, banishing all fear.\\nWith OpenTelemetry, the\ + \ path is known,\\nThe digital future, gracefully is sown\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:18.506243Z\",\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \".\\nFor those who build, and those who bravely run,\\nA deeper\ + \ vision, 'til the work is done.\\nThe complexity tamed, the chaos brought\ + \ to heel,\\nThe heart of systems, finally revealed.\"}]},\"finishReason\"\ + : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ + : 514,\"totalTokenCount\": 2270,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 514}],\"thoughtsTokenCount\": 1748},\"\ + modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:18.506243Z\"\ + ,\"responseId\": \"duXjaIPzHqimnvgPtM-joQw\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 15:51:29 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..8b63b05da6 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-2.5-flash-vertexaiapi-sync].yaml @@ -0,0 +1,109 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + accept: + - '*/*' + accept-encoding: + - identity + connection: + - keep-alive + content-length: + - '92' + content-type: + - application/json + host: + - us-central1-aiplatform.googleapis.com + user-agent: + - google-genai-sdk/1.32.0 gl-python/3.11.9 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-2.5-flash:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"In labyrinthine code, where shadows creep,\\nA million microservices,\ + \ secrets keep.\\nPerformance woes, a phantom's silent hand,\\nAcross the\ + \ digital, a darkened land.\\n\\nThen from the mist, a beacon starts to gleam,\\\ + nOpenTelemetry, a waking dream.\\nA common tongue, for data,\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" vast and deep,\\nThe 
promises of clarity to keep.\\n\\nThe\ + \ trace unfurls, a golden thread so fine,\\nFrom user click, to database divine.\\\ + nEach span a step, a function's hurried breath,\\nUnveiling latency, and coding's\ + \ death.\\n\\nWith metrics gathered, pulses start to\"}]}}],\"usageMetadata\"\ + : {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" beat,\\nCPU hums, and memory's hot seat.\\nRequests per second,\ + \ errors on the rise,\\nA living dashboard, open to our eyes.\\n\\nAnd humble\ + \ logs, with details stark and clear,\\nEvents recorded, banishing all fear.\\\ + nThe story told, of what transpired within,\\nA\"}]}}],\"usageMetadata\":\ + \ {\"trafficType\": \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\"\ + ,\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" whispered secret, where issues begin.\\n\\nThrough context\ + \ woven, links are forged anew,\\nA tapestry of truth, for me and you.\\nNo\ + \ isolated fragment, lost and lone,\\nBut part of history, clearly shown.\\\ + n\\nThe root cause found, where once was dark despair,\\nPerformance tuned,\ + \ beyond all earthly care\"}]}}],\"usageMetadata\": {\"trafficType\": \"ON_DEMAND\"\ + },\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:06.600201Z\"\ + ,\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \".\\nA system\ + \ known, its whispers and its roar,\\nObservability, forevermore.\\n\\nNo\ + \ vendor locked, no single path to take,\\nAn open standard, for all our systems'\ + \ sake.\\nA community's embrace, robust and wide,\\nWhere 
wisdom flows, with\ + \ nothing left to hide.\\n\\nSo\"}]}}],\"usageMetadata\": {\"trafficType\"\ + : \"ON_DEMAND\"},\"modelVersion\": \"gemini-2.5-flash\",\"createTime\": \"\ + 2025-10-06T15:51:06.600201Z\",\"responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" hail the tools, that bring the light to bear,\\nOn tangled\ + \ webs, and burdens we must share.\\nOpenTelemetry, a guide so true,\\nRevealing\ + \ insights, shining ever new.\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\ + : {\"promptTokenCount\": 8,\"candidatesTokenCount\": 370,\"totalTokenCount\"\ + : 1904,\"trafficType\": \"ON_DEMAND\",\"promptTokensDetails\": [{\"modality\"\ + : \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\"\ + : \"TEXT\",\"tokenCount\": 370}],\"thoughtsTokenCount\": 1526},\"modelVersion\"\ + : \"gemini-2.5-flash\",\"createTime\": \"2025-10-06T15:51:06.600201Z\",\"\ + responseId\": \"auXjaInRJPP02PgP1dnn0Qo\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Mon, 06 Oct 2025 15:51:16 GMT + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 039d077b72..5c7e683456 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -17,6 +17,7 @@ from unittest.mock import patch from 
google.genai.types import GenerateContentConfig + from opentelemetry._events import Event from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, @@ -123,7 +124,10 @@ def test_generated_span_counts_tokens(self): self.assertEqual(span.attributes["gen_ai.usage.input_tokens"], 123) self.assertEqual(span.attributes["gen_ai.usage.output_tokens"], 456) - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_records_system_prompt_as_log(self): config = {"system_instruction": "foo"} self.configure_valid_response() @@ -135,7 +139,10 @@ def test_records_system_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "foo") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}, + ) def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): config = {"system_instruction": "foo"} self.configure_valid_response() @@ -147,7 +154,10 @@ def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present( self, ): @@ -155,7 +165,10 @@ def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present( self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_does_not_have_event_named("gen_ai.system.message") - @patch.dict("os.environ", 
{"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_records_user_prompt_as_log(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -164,7 +177,10 @@ def test_records_user_prompt_as_log(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "Some input") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}, + ) def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): self.configure_valid_response() self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -173,7 +189,10 @@ def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self): self.assertEqual(event_record.attributes["gen_ai.system"], "gemini") self.assertEqual(event_record.body["content"], "") - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true"}, + ) def test_records_response_as_log(self): self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -184,7 +203,10 @@ def test_records_response_as_log(self): "Some response content", json.dumps(event_record.body["content"]) ) - @patch.dict("os.environ", {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}) + @patch.dict( + "os.environ", + {"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "false"}, + ) def test_does_not_record_response_as_log_if_disabled_by_env(self): self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") @@ -211,30 +233,92 @@ def 
test_new_semconv_record_completion_as_log(self): content = "Some input" output = "Some response content" sys_instr = "System instruction" - with self.subTest(f"mode: {mode}", patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: self.configure_valid_response(text=output) - self.generate_content(model="gemini-2.0-flash", contents=content, config=GenerateContentConfig(system_instruction=sys_instr)) - self.otel.assert_has_event_named("gen_ai.client.inference.operation.details") - event = self.otel.get_event_named("gen_ai.client.inference.operation.details") + self.generate_content( + model="gemini-2.0-flash", + contents=content, + config=GenerateContentConfig( + system_instruction=sys_instr + ), + ) + self.otel.assert_has_event_named( + "gen_ai.client.inference.operation.details" + ) + event = self.otel.get_event_named( + "gen_ai.client.inference.operation.details" + ) if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.SPAN_ONLY, ]: - self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, event.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, event.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, event.attributes) + self.assertNotIn( + gen_ai_attributes.GEN_AI_INPUT_MESSAGES, + event.attributes, + ) + self.assertNotIn( + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, + event.attributes, + ) + self.assertNotIn( + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, + event.attributes, + ) else: attrs = { - gen_ai_attributes.GEN_AI_INPUT_MESSAGES: ({"role": "user", "parts": ({"content": content, "type": "text"},)},), - gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: ({"role": "assistant", "parts": ({"content": output, "type": "text"},), "finish_reason": ""},), - gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: ({"content": sys_instr, "type": "text"},) + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: ( + { + "role": 
"user", + "parts": ( + {"content": content, "type": "text"}, + ), + }, + ), + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES: ( + { + "role": "assistant", + "parts": ( + {"content": output, "type": "text"}, + ), + "finish_reason": "", + }, + ), + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: ( + {"content": sys_instr, "type": "text"}, + ), } - expected_event = Event("gen_ai.client.inference.operation.details", attributes=attrs) - self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES]) - self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], expected_event.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES]) - self.assertEqual(event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], expected_event.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS]) + expected_event = Event( + "gen_ai.client.inference.operation.details", + attributes=attrs, + ) + self.assertEqual( + event.attributes[ + gen_ai_attributes.GEN_AI_INPUT_MESSAGES + ], + expected_event.attributes[ + gen_ai_attributes.GEN_AI_INPUT_MESSAGES + ], + ) + self.assertEqual( + event.attributes[ + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES + ], + expected_event.attributes[ + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES + ], + ) + self.assertEqual( + event.attributes[ + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS + ], + expected_event.attributes[ + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS + ], + ) self.tearDown() def test_new_semconv_record_completion_in_span(self): @@ -252,23 +336,57 @@ def test_new_semconv_record_completion_in_span(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: self.configure_valid_response(text="Some response content") - 
self.generate_content(model="gemini-2.0-flash", contents="Some input", config=GenerateContentConfig(system_instruction="System instruction")) - span = self.otel.get_span_named("generate_content gemini-2.0-flash") + self.generate_content( + model="gemini-2.0-flash", + contents="Some input", + config=GenerateContentConfig( + system_instruction="System instruction" + ), + ) + span = self.otel.get_span_named( + "generate_content gemini-2.0-flash" + ) if mode in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_INPUT_MESSAGES], '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]') - self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES], '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]') - self.assertEqual(span.attributes[gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS], '[{"content": "System instruction", "type": "text"}]') + self.assertEqual( + span.attributes[ + gen_ai_attributes.GEN_AI_INPUT_MESSAGES + ], + '[{"role": "user", "parts": [{"content": "Some input", "type": "text"}]}]', + ) + self.assertEqual( + span.attributes[ + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES + ], + '[{"role": "assistant", "parts": [{"content": "Some response content", "type": "text"}], "finish_reason": ""}]', + ) + self.assertEqual( + span.attributes[ + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS + ], + '[{"content": "System instruction", "type": "text"}]', + ) else: - self.assertNotIn(gen_ai_attributes.GEN_AI_INPUT_MESSAGES, span.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, span.attributes) - self.assertNotIn(gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, span.attributes) + self.assertNotIn( + gen_ai_attributes.GEN_AI_INPUT_MESSAGES, + span.attributes, + ) + self.assertNotIn( + gen_ai_attributes.GEN_AI_OUTPUT_MESSAGES, + span.attributes, + ) + self.assertNotIn( + 
gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS, + span.attributes, + ) self.tearDown() diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index afe4dbfe6b..15fdd3dd3e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -35,6 +35,7 @@ import google.genai import pytest import yaml +from google.genai import types from vcr.record_mode import RecordMode from opentelemetry.instrumentation.google_genai import ( @@ -389,7 +390,13 @@ def fixture_gcloud_api_key(gemini_api_key): @pytest.fixture(name="nonvertex_client_factory") def fixture_nonvertex_client_factory(gemini_api_key): def _factory(): - return google.genai.Client(api_key=gemini_api_key, vertexai=False) + return google.genai.Client( + api_key=gemini_api_key, + vertexai=False, + http_options=types.HttpOptions( + headers={"accept-encoding": "identity"} + ), + ) return _factory @@ -404,6 +411,9 @@ def _factory(): project=gcloud_project, location=gcloud_location, credentials=gcloud_credentials, + http_options=types.HttpOptions( + headers={"accept-encoding": "identity"} + ), ) return _factory @@ -435,7 +445,7 @@ def fixture_is_async(request): return request.param == "async" -@pytest.fixture(name="model", params=["gemini-1.5-flash-002"]) +@pytest.fixture(name="model", params=["gemini-2.5-flash"]) def fixture_model(request): return request.param diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py index 7439944e37..501519d4ff 100644 --- 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py @@ -15,6 +15,7 @@ from unittest.mock import patch import google.genai.types as genai_types + from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, @@ -297,7 +298,9 @@ def test_new_semconv_tool_calls_record_parameter_values(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: calls = [] @@ -307,7 +310,11 @@ def handle(*args, **kwargs): return "some result" def somefunction(someparam, otherparam=2): - print("someparam=%s, otherparam=%s", someparam, otherparam) + print( + "someparam=%s, otherparam=%s", + someparam, + otherparam, + ) self.mock_generate_content.side_effect = handle self.client.models.generate_content( @@ -322,8 +329,12 @@ def somefunction(someparam, otherparam=2): tools = config.tools wrapped_somefunction = tools[0] wrapped_somefunction(123, otherparam="abc") - self.otel.assert_has_span_named("execute_tool somefunction") - generated_span = self.otel.get_span_named("execute_tool somefunction") + self.otel.assert_has_span_named( + "execute_tool somefunction" + ) + generated_span = self.otel.get_span_named( + "execute_tool somefunction" + ) self.assertEqual( generated_span.attributes[ "code.function.parameters.someparam.type" @@ -340,11 +351,27 @@ def somefunction(someparam, otherparam=2): ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: - self.assertEqual(generated_span.attributes["code.function.parameters.someparam.value"], 123) - 
self.assertEqual(generated_span.attributes["code.function.parameters.otherparam.value"], "abc") + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.someparam.value" + ], + 123, + ) + self.assertEqual( + generated_span.attributes[ + "code.function.parameters.otherparam.value" + ], + "abc", + ) else: - self.assertNotIn("code.function.parameters.someparam.value", generated_span.attributes) - self.assertNotIn("code.function.parameters.otherparam.value", generated_span.attributes) + self.assertNotIn( + "code.function.parameters.someparam.value", + generated_span.attributes, + ) + self.assertNotIn( + "code.function.parameters.otherparam.value", + generated_span.attributes, + ) self.tearDown() def test_new_semconv_tool_calls_record_return_values(self): @@ -362,7 +389,9 @@ def test_new_semconv_tool_calls_record_return_values(self): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: calls = [] @@ -387,20 +416,27 @@ def somefunction(x, y=2): tools = config.tools wrapped_somefunction = tools[0] wrapped_somefunction(123) - self.otel.assert_has_span_named("execute_tool somefunction") - generated_span = self.otel.get_span_named("execute_tool somefunction") + self.otel.assert_has_span_named( + "execute_tool somefunction" + ) + generated_span = self.otel.get_span_named( + "execute_tool somefunction" + ) self.assertEqual( - generated_span.attributes["code.function.return.type"], "int" + generated_span.attributes["code.function.return.type"], + "int", ) if mode in [ ContentCapturingMode.SPAN_ONLY, ContentCapturingMode.SPAN_AND_EVENT, ]: self.assertIn( - "code.function.return.value", generated_span.attributes + "code.function.return.value", + generated_span.attributes, ) else: self.assertNotIn( - 
"code.function.return.value", generated_span.attributes + "code.function.return.value", + generated_span.attributes, ) self.tearDown() diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py index 6bfa53cf5e..7f92aade7e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/utils/test_tool_call_wrapper.py @@ -17,13 +17,17 @@ from unittest.mock import patch from google.genai import types as genai_types + from opentelemetry._events import get_event_logger_provider from opentelemetry.instrumentation._semconv import ( _OpenTelemetrySemanticConventionStability, _OpenTelemetryStabilitySignalType, _StabilityMode, ) -from opentelemetry.instrumentation.google_genai import otel_wrapper, tool_call_wrapper +from opentelemetry.instrumentation.google_genai import ( + otel_wrapper, + tool_call_wrapper, +) from opentelemetry.metrics import get_meter_provider from opentelemetry.trace import get_tracer_provider from opentelemetry.util.genai.types import ContentCapturingMode @@ -299,19 +303,29 @@ def somefunction(arg=None): _OpenTelemetryStabilitySignalType.GEN_AI: _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL }, ) - with self.subTest(f'mode: {mode}', patched_environ=patched_environ): + with self.subTest( + f"mode: {mode}", patched_environ=patched_environ + ): self.setUp() with patched_environ, patched_otel_mapping: wrapped_somefunction = self.wrap(somefunction) wrapped_somefunction(12345) - span = self.otel.get_span_named("execute_tool somefunction") + span = self.otel.get_span_named( + "execute_tool somefunction" + ) if mode in [ ContentCapturingMode.NO_CONTENT, ContentCapturingMode.EVENT_ONLY, ]: - self.assertNotIn("code.function.parameters.arg.value", span.attributes) + 
self.assertNotIn( + "code.function.parameters.arg.value", + span.attributes, + ) else: - self.assertIn("code.function.parameters.arg.value", span.attributes) + self.assertIn( + "code.function.parameters.arg.value", + span.attributes, + ) self.tearDown() From 05bed52c7775463226c026efcc5d4734850f94e0 Mon Sep 17 00:00:00 2001 From: Dylan Russell Date: Mon, 6 Oct 2025 16:06:30 +0000 Subject: [PATCH 19/19] fix lint issues --- .../instrumentation/google_genai/generate_content.py | 1 + .../tests/generate_content/test_tool_call_instrumentation.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py index 60d6136e4e..796c267cff 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=too-many-lines import copy import dataclasses diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py index 501519d4ff..78eb642c60 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_tool_call_instrumentation.py @@ -285,6 +285,7 @@ def somefunction(x, y=2): def test_new_semconv_tool_calls_record_parameter_values(self): for mode in ContentCapturingMode: + calls = [] patched_environ = patch.dict( "os.environ", { @@ -303,7 +304,6 @@ def test_new_semconv_tool_calls_record_parameter_values(self): ): self.setUp() with patched_environ, patched_otel_mapping: - calls = [] def handle(*args, **kwargs): calls.append((args, kwargs)) @@ -376,6 +376,7 @@ def somefunction(someparam, otherparam=2): def test_new_semconv_tool_calls_record_return_values(self): for mode in ContentCapturingMode: + calls = [] patched_environ = patch.dict( "os.environ", { @@ -394,7 +395,6 @@ def test_new_semconv_tool_calls_record_return_values(self): ): self.setUp() with patched_environ, patched_otel_mapping: - calls = [] def handle(*args, **kwargs): calls.append((args, kwargs))