src/google/adk/runners.py (3 changes: 2 additions & 1 deletion)

@@ -203,7 +203,8 @@ async def run_async(
     async def _run_with_trace(
         new_message: types.Content,
     ) -> AsyncGenerator[Event, None]:
-      with tracer.start_as_current_span('invocation'):
+      with tracer.start_as_current_span('invocation') as span:
+        span.set_attribute('gen_ai.operation.name', 'invoke_agent')
         session = await self.session_service.get_session(
             app_name=self.app_name, user_id=user_id, session_id=session_id
         )
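The runners.py change captures the span handle so the invocation span can be tagged as an agent invocation. A minimal standalone sketch of the same pattern (the tracer name is illustrative, not ADK's):

from opentelemetry import trace

tracer = trace.get_tracer('illustrative.tracer')

# Tag the span per the GenAI semantic conventions; with no SDK configured,
# the API returns a no-op tracer, so this sketch is safe to run as-is.
with tracer.start_as_current_span('invocation') as span:
  span.set_attribute('gen_ai.operation.name', 'invoke_agent')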
src/google/adk/telemetry.py (81 changes: 75 additions & 6 deletions)

@@ -70,10 +70,13 @@ def trace_tool_call(
     function_response_event: The event with the function response details.
   """
   span = trace.get_current_span()
-  span.set_attribute('gen_ai.system', 'gcp.vertex.agent')
+
+  # Standard OpenTelemetry GenAI attributes as of OTel SemConv v1.36.0 for Agents and Frameworks
+  span.set_attribute('gen_ai.system', 'gcp.vertex_ai')
   span.set_attribute('gen_ai.operation.name', 'execute_tool')
   span.set_attribute('gen_ai.tool.name', tool.name)
   span.set_attribute('gen_ai.tool.description', tool.description)
+
   tool_call_id = '<not specified>'
   tool_response = '<not specified>'
   if function_response_event.content.parts:
@@ -86,6 +89,7 @@ def trace_tool_call(
 
   span.set_attribute('gen_ai.tool.call.id', tool_call_id)
 
+  # Vendor-specific attributes (moved from gen_ai.* to gcp.vertex.agent.*)
   if not isinstance(tool_response, dict):
     tool_response = {'result': tool_response}
   span.set_attribute(
@@ -121,12 +125,15 @@ def trace_merged_tool_calls(
"""

span = trace.get_current_span()
span.set_attribute('gen_ai.system', 'gcp.vertex.agent')

# Standard OpenTelemetry GenAI attributes
span.set_attribute('gen_ai.system', 'gcp.vertex_ai')
span.set_attribute('gen_ai.operation.name', 'execute_tool')
span.set_attribute('gen_ai.tool.name', '(merged tools)')
span.set_attribute('gen_ai.tool.description', '(merged tools)')
span.set_attribute('gen_ai.tool.call.id', response_event_id)

# Vendor-specific attributes
span.set_attribute('gcp.vertex.agent.tool_call_args', 'N/A')
span.set_attribute('gcp.vertex.agent.event_id', response_event_id)
try:
@@ -167,23 +174,38 @@ def trace_call_llm(
     llm_response: The LLM response object.
   """
   span = trace.get_current_span()
-  # Special standard Open Telemetry GenaI attributes that indicate
-  # that this is a span related to a Generative AI system.
-  span.set_attribute('gen_ai.system', 'gcp.vertex.agent')
+
+  # Standard OpenTelemetry GenAI attributes
+  span.set_attribute('gen_ai.system', 'gcp.vertex_ai')
+  span.set_attribute('gen_ai.operation.name', 'chat')
   span.set_attribute('gen_ai.request.model', llm_request.model)
+
+  if hasattr(llm_response, 'id') and llm_response.id:
+    span.set_attribute('gen_ai.response.id', llm_response.id)
+
+  # Set response model if different from request model
+  if (
+      hasattr(llm_response, 'model')
+      and llm_response.model
+      and llm_response.model != llm_request.model
+  ):
+    span.set_attribute('gen_ai.response.model', llm_response.model)
Comment on lines +187 to +192

Reviewer: What's the reasoning for only setting gen_ai.response.model when it's different from gen_ai.request.model? Is this behavior specified somewhere, so it would be reasonable to assume both are the same if the response model is missing?

Author: SemConv marks gen_ai.request.model as "Conditionally Required (if available)" and gen_ai.response.model as "Recommended," not required, so it might make sense to include it only when it provides additional information. I have a vague recollection of this practice from one of the many semconv GitHub issues discussing this, but cannot find it now.

Comment on lines +183 to +193

Reviewer: Where did you find the llm_response.model and llm_response.id fields? They are not part of the pydantic model: https://github.com/google/adk-python/blob/main/src/google/adk/models/llm_response.py#L26

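A defensive alternative that would sidestep this concern (a sketch, not part of the PR): read the fields with getattr and a None default, which behaves the same whether or not LlmResponse ever declares them. Names mirror the diff above.

# Sketch only; a drop-in alternative for the lines under review.
response_id = getattr(llm_response, 'id', None)
if response_id:
  span.set_attribute('gen_ai.response.id', response_id)

response_model = getattr(llm_response, 'model', None)
if response_model and response_model != llm_request.model:
  span.set_attribute('gen_ai.response.model', response_model)
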
   span.set_attribute(
       'gcp.vertex.agent.invocation_id', invocation_context.invocation_id
   )
   span.set_attribute(
       'gcp.vertex.agent.session_id', invocation_context.session.id
   )
   span.set_attribute('gcp.vertex.agent.event_id', event_id)
 
   # Consider removing once GenAI SDK provides a way to record this info.
   span.set_attribute(
       'gcp.vertex.agent.llm_request',
       _safe_json_serialize(_build_llm_request_for_trace(llm_request)),
   )
-  # Consider removing once GenAI SDK provides a way to record this info.
+
+  # Standard GenAI request attributes
   if llm_request.config:
     if llm_request.config.top_p:
       span.set_attribute(
@@ -195,6 +217,14 @@ def trace_call_llm(
           'gen_ai.request.max_tokens',
           llm_request.config.max_output_tokens,
       )
+    if (
+        hasattr(llm_request.config, 'temperature')
+        and llm_request.config.temperature is not None
+    ):
+      span.set_attribute(
+          'gen_ai.request.temperature',
+          llm_request.config.temperature,
+      )
 
   try:
     llm_response_json = llm_response.model_dump_json(exclude_none=True)
@@ -206,6 +236,7 @@ def trace_call_llm(
       llm_response_json,
   )
 
+  # Standard GenAI usage and response attributes
   if llm_response.usage_metadata is not None:
     span.set_attribute(
         'gen_ai.usage.input_tokens',
@@ -286,3 +317,41 @@ def _build_llm_request_for_trace(llm_request: LlmRequest) -> dict[str, Any]:
       )
   )
   return result
+
+
+def _create_span_name(operation_name: str, model_name: str) -> str:
+  """Creates a span name following OpenTelemetry GenAI conventions.
+
+  Args:
+    operation_name: The GenAI operation name (e.g., 'generate_content', 'execute_tool').
+    model_name: The model name being used.
+
+  Returns:
+    A span name in the format '{operation_name} {model_name}'.
+  """
+  return f'{operation_name} {model_name}'
+
+
+def add_genai_prompt_event(span: trace.Span, prompt_content: str):
+  """Adds a GenAI prompt event to the span following OpenTelemetry conventions.
+
+  Args:
+    span: The OpenTelemetry span to add the event to.
+    prompt_content: The prompt content as a JSON string.
+  """
+  span.add_event(
+      name='gen_ai.content.prompt', attributes={'gen_ai.prompt': prompt_content}
+  )
+
+
+def add_genai_completion_event(span: trace.Span, completion_content: str):
+  """Adds a GenAI completion event to the span following OpenTelemetry conventions.
+
+  Args:
+    span: The OpenTelemetry span to add the event to.
+    completion_content: The completion content as a JSON string.
+  """
+  span.add_event(
+      name='gen_ai.content.completion',
+      attributes={'gen_ai.completion': completion_content},
+  )
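
A hypothetical example of the three new helpers used together (the model name and payloads below are placeholders, not taken from the PR):

import json

from opentelemetry import trace

tracer = trace.get_tracer(__name__)

# '{operation_name} {model_name}' yields a span name like 'chat gemini-2.0-flash'.
with tracer.start_as_current_span(_create_span_name('chat', 'gemini-2.0-flash')) as span:
  add_genai_prompt_event(span, json.dumps({'role': 'user', 'text': 'Hi'}))
  add_genai_completion_event(span, json.dumps({'role': 'model', 'text': 'Hello!'}))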
tests/unittests/test_telemetry.py (15 changes: 8 additions & 7 deletions)

@@ -114,14 +114,15 @@ async def test_trace_call_llm(monkeypatch, mock_span_fixture):
   trace_call_llm(invocation_context, 'test_event_id', llm_request, llm_response)
 
   expected_calls = [
-      mock.call('gen_ai.system', 'gcp.vertex.agent'),
+      mock.call('gen_ai.system', 'gcp.vertex_ai'),
+      mock.call('gen_ai.operation.name', 'chat'),
       mock.call('gen_ai.request.top_p', 0.95),
       mock.call('gen_ai.request.max_tokens', 1024),
       mock.call('gen_ai.usage.input_tokens', 50),
       mock.call('gen_ai.usage.output_tokens', 50),
       mock.call('gen_ai.response.finish_reasons', ['stop']),
   ]
-  assert mock_span_fixture.set_attribute.call_count == 12
+  assert mock_span_fixture.set_attribute.call_count == 13
   mock_span_fixture.set_attribute.assert_has_calls(
       expected_calls, any_order=True
   )
@@ -173,9 +174,9 @@ async def test_trace_call_llm_with_binary_content(

   # Verify basic telemetry attributes are set
   expected_calls = [
-      mock.call('gen_ai.system', 'gcp.vertex.agent'),
+      mock.call('gen_ai.system', 'gcp.vertex_ai'),
   ]
-  assert mock_span_fixture.set_attribute.call_count == 7
+  assert mock_span_fixture.set_attribute.call_count == 8
   mock_span_fixture.set_attribute.assert_has_calls(expected_calls)
 
   # Verify binary content is replaced with '<not serializable>' in JSON
@@ -230,7 +231,7 @@ def test_trace_tool_call_with_scalar_response(
   # Assert
   assert mock_span_fixture.set_attribute.call_count == 10
   expected_calls = [
-      mock.call('gen_ai.system', 'gcp.vertex.agent'),
+      mock.call('gen_ai.system', 'gcp.vertex_ai'),
       mock.call('gen_ai.operation.name', 'execute_tool'),
       mock.call('gen_ai.tool.name', mock_tool_fixture.name),
       mock.call('gen_ai.tool.description', mock_tool_fixture.description),
@@ -289,7 +290,7 @@ def test_trace_tool_call_with_dict_response(

   # Assert
   expected_calls = [
-      mock.call('gen_ai.system', 'gcp.vertex.agent'),
+      mock.call('gen_ai.system', 'gcp.vertex_ai'),
       mock.call('gen_ai.operation.name', 'execute_tool'),
       mock.call('gen_ai.tool.name', mock_tool_fixture.name),
       mock.call('gen_ai.tool.description', mock_tool_fixture.description),
@@ -328,7 +329,7 @@ def test_trace_merged_tool_calls_sets_correct_attributes(
   )
 
   expected_calls = [
-      mock.call('gen_ai.system', 'gcp.vertex.agent'),
+      mock.call('gen_ai.system', 'gcp.vertex_ai'),
       mock.call('gen_ai.operation.name', 'execute_tool'),
       mock.call('gen_ai.tool.name', '(merged tools)'),
       mock.call('gen_ai.tool.description', '(merged tools)'),
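
The tests above rely on a mock_span_fixture that the diff does not show; a plausible shape for it (an assumption on my part, the real fixture lives elsewhere in the test suite) is:

from unittest import mock

import pytest
from opentelemetry import trace

@pytest.fixture
def mock_span_fixture(monkeypatch):
  # Patch trace.get_current_span so every traced attribute lands on a mock
  # whose set_attribute calls the tests can count and inspect.
  span = mock.MagicMock()
  monkeypatch.setattr(trace, 'get_current_span', lambda: span)
  return span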