Commit eca1beb

feat: agent run span

1 parent 5fb7aae commit eca1beb

4 files changed (+20 −22 lines)

veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py

Lines changed: 1 addition & 1 deletion
@@ -561,7 +561,7 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
     "gen_ai.is_streaming": llm_gen_ai_is_streaming,
     # -> 1.4. span kind
     "gen_ai.operation.name": llm_gen_ai_operation_name,
-    "gen_ai.span.kind": llm_gen_ai_span_kind, # apmplus required
+    "gen_ai.span.kind": llm_gen_ai_span_kind, # apmplus required
     # -> 1.5. inputs
     "gen_ai.prompt": llm_gen_ai_prompt,
     # -> 1.6. outputs
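
For context, these extractor tables map an OpenTelemetry attribute name to a callable that derives its value from the span's parameters. A minimal sketch of how such a table could be consumed is below; apply_extractors and params are illustrative names only, and the sketch assumes each callable yields a plain attribute value rather than VeADK's actual ExtractorResponse wrapper.

from opentelemetry import trace

def apply_extractors(span: trace.Span, extractors: dict, params) -> None:
    # Hypothetical sketch, not VeADK's real consumption code: walk the
    # name -> extractor table and write each derived value onto the span.
    for attr_name, extractor in extractors.items():
        value = extractor(params)  # assumed to return a primitive attribute value
        if value is not None:
            span.set_attribute(attr_name, value)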

veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py

Lines changed: 3 additions & 3 deletions
@@ -76,7 +76,7 @@ def tool_gen_ai_tool_output(params: ToolAttributesParams) -> ExtractorResponse:
     "gen_ai.tool.output": tool_gen_ai_tool_output, # TLS required
     "cozeloop.input": tool_gen_ai_tool_input, # CozeLoop required
     "cozeloop.output": tool_gen_ai_tool_output, # CozeLoop required
-    "gen_ai.span.kind": tool_gen_ai_span_kind, # apmplus required
-    "gen_ai.input": tool_gen_ai_tool_input, # apmplus required
-    "gen_ai.output": tool_gen_ai_tool_output, # apmplus required
+    "gen_ai.span.kind": tool_gen_ai_span_kind, # apmplus required
+    "gen_ai.input": tool_gen_ai_tool_input, # apmplus required
+    "gen_ai.output": tool_gen_ai_tool_output, # apmplus required
 }

veadk/tracing/telemetry/exporters/inmemory_exporter.py

Lines changed: 6 additions & 4 deletions
@@ -77,18 +77,20 @@ def __init__(self, exporter: _InMemoryExporter) -> None:
         super().__init__(exporter)
 
     def on_start(self, span, parent_context) -> None:
-        if span.name.startswith("invocation") or span.name.startswith("invoke") :
+        if span.name.startswith("invocation"):
             span.set_attribute("gen_ai.operation.name", "chain")
             span.set_attribute("gen_ai.span.kind", "workflow")
             span.set_attribute("gen_ai.usage.total_tokens", 0)
             ctx = set_value("invocation_span_instance", span, context=parent_context)
-            # suppress instrumentation for llm from apmplus, such as openai
-            ctx = set_value("suppress_language_model_instrumentation", True, context=ctx)
+            # suppress instrumentation for llm to avoid auto instrument from apmplus, such as openai
+            ctx = set_value(
+                "suppress_language_model_instrumentation", True, context=ctx
+            )
 
             token = attach(ctx) # mount context on `invocation` root span in Google ADK
             setattr(span, "_invocation_token", token) # for later detach
 
-        if span.name.startswith("agent_run"):
+        if span.name.startswith("agent_run") or span.name.startswith("invoke_agent"):
             span.set_attribute("gen_ai.operation.name", "agent")
             span.set_attribute("gen_ai.span.kind", "agent")
 
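The `_invocation_token` saved on the span implies a matching detach when the root span ends; that on_end handler is not part of this diff. A rough sketch of the counterpart, assuming only the standard OpenTelemetry attach/detach token API:

from opentelemetry.context import detach

def on_end(self, span) -> None:
    # Hypothetical counterpart to on_start above: release the context token so the
    # suppression flag and span references do not leak past the invocation span.
    token = getattr(span, "_invocation_token", None)
    if token is not None:
        detach(token)
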
veadk/tracing/telemetry/telemetry.py

Lines changed: 10 additions & 14 deletions
@@ -20,10 +20,9 @@
 from google.adk.models.llm_response import LlmResponse
 from google.adk.tools import BaseTool
 from opentelemetry import trace
-from opentelemetry.context import get_value, set_value
+from opentelemetry.context import get_value
 from opentelemetry.sdk.trace import Span, _Span
 
-from veadk.utils.misc import safe_json_serialize
 from veadk.tracing.telemetry.attributes.attributes import ATTRIBUTES
 from veadk.tracing.telemetry.attributes.extractors.types import (
     ExtractorResponse,
@@ -93,8 +92,7 @@ def _set_agent_input_attribute(
     user_content = invocation_context.user_content
     if user_content and user_content.parts:
         # set gen_ai.input attribute required by APMPlus
-        # TODO: optimize gen_ai.input; it currently cannot be serialized
-        span.set_attribute("gen_ai.input", safe_json_serialize(user_content.parts))
+        span.set_attribute("gen_ai.input", user_content.model_dump(exclude_none=True))
 
         span.add_event(
             "gen_ai.user.message",
@@ -132,15 +130,9 @@ def _set_agent_input_attribute(
 
 def _set_agent_output_attribute(span: Span, llm_response: LlmResponse) -> None:
     content = llm_response.content
-    print(f"### response: {llm_response}")
-    print(f"### output_transcription: {llm_response.output_transcription}")
-    print(f"### input_transcription: {llm_response.input_transcription}")
-
-
     if content and content.parts:
         # set gen_ai.output attribute required by APMPlus
-        span.set_attribute("gen_ai.output", safe_json_serialize(content.parts))
-        print("### parts: ", content.parts)
+        span.set_attribute("gen_ai.output", content.model_dump(exclude_none=True))
         for idx, part in enumerate(content.parts):
             if part.text:
                 span.add_event(
@@ -163,7 +155,7 @@ def set_common_attributes_on_model_span(
     invocation_span: Span = get_value("invocation_span_instance") # type: ignore
     agent_run_span: Span = get_value("agent_run_span_instance") # type: ignore
 
-    if invocation_span and (invocation_span.name.startswith("invocation") or invocation_span.name.startswith("invoke")):
+    if invocation_span and invocation_span.name.startswith("invocation"):
         _set_agent_input_attribute(invocation_span, invocation_context)
         _set_agent_output_attribute(invocation_span, llm_response)
         for attr_name, attr_extractor in common_attributes.items():
@@ -187,10 +179,14 @@ def set_common_attributes_on_model_span(
             ) # we can ignore this warning, cause we manually set the attribute to int before
             invocation_span.set_attribute(
                 # record input/output token usage?
-                "gen_ai.usage.total_tokens", accumulated_total_token_usage
+                "gen_ai.usage.total_tokens",
+                accumulated_total_token_usage,
             )
 
-    if agent_run_span and agent_run_span.name.startswith("agent_run"):
+    if agent_run_span and (
+        agent_run_span.name.startswith("agent_run")
+        or agent_run_span.name.startswith("invoke_agent")
+    ):
         _set_agent_input_attribute(agent_run_span, invocation_context)
         _set_agent_output_attribute(agent_run_span, llm_response)
         for attr_name, attr_extractor in common_attributes.items():
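
The move from safe_json_serialize(content.parts) to content.model_dump(exclude_none=True) leans on the fact that google.genai content objects are Pydantic models, so they dump to plain dicts. A small illustration, assuming the google-genai types API; the example values are made up:

from google.genai import types

content = types.Content(role="user", parts=[types.Part(text="hello")])
# model_dump(exclude_none=True) yields a JSON-serializable dict rather than Part objects
print(content.model_dump(exclude_none=True))
# e.g. {'role': 'user', 'parts': [{'text': 'hello'}]}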
