Skip to content

Commit 5fb7aae

Browse files
committed
feat: add gen_ai input
1 parent 66df2b0 commit 5fb7aae

File tree

4 files changed

+31
-4
lines changed

4 files changed

+31
-4
lines changed

veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -324,6 +324,8 @@ def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
324324
def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract the `gen_ai.operation.name` attribute for an LLM span.

    Every LLM span produced by this extractor represents a chat-style
    completion, so the operation name is a fixed constant.
    """
    operation = "chat"
    return ExtractorResponse(content=operation)
326326

327+
def llm_gen_ai_span_kind(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract the `gen_ai.span.kind` attribute for an LLM span.

    Always reports the constant kind "llm"; required by the APMPlus backend.
    """
    kind = "llm"
    return ExtractorResponse(content=kind)
327329

328330
# def llm_gen_ai_system_message(params: LLMAttributesParams) -> ExtractorResponse:
329331
# event_attributes = {
@@ -559,6 +561,7 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorRespon
559561
"gen_ai.is_streaming": llm_gen_ai_is_streaming,
560562
# -> 1.4. span kind
561563
"gen_ai.operation.name": llm_gen_ai_operation_name,
564+
"gen_ai.span.kind": llm_gen_ai_span_kind, # apmplus required
562565
# -> 1.5. inputs
563566
"gen_ai.prompt": llm_gen_ai_prompt,
564567
# -> 1.6. outputs

veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,9 @@
2222
def tool_gen_ai_operation_name(params: ToolAttributesParams) -> ExtractorResponse:
    """Extract the `gen_ai.operation.name` attribute for a tool span.

    Tool spans always represent a tool execution, so the operation name
    is the fixed constant "execute_tool".
    """
    operation = "execute_tool"
    return ExtractorResponse(content=operation)
2424

25+
def tool_gen_ai_span_kind(params: ToolAttributesParams) -> ExtractorResponse:
    """Extract the `gen_ai.span.kind` attribute for a tool span.

    Always reports the constant kind "tool"; required by the APMPlus backend.
    """
    kind = "tool"
    return ExtractorResponse(content=kind)
27+
2528

2629
def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
2730
tool_input = {
@@ -73,4 +76,7 @@ def tool_gen_ai_tool_output(params: ToolAttributesParams) -> ExtractorResponse:
7376
"gen_ai.tool.output": tool_gen_ai_tool_output, # TLS required
7477
"cozeloop.input": tool_gen_ai_tool_input, # CozeLoop required
7578
"cozeloop.output": tool_gen_ai_tool_output, # CozeLoop required
79+
"gen_ai.span.kind": tool_gen_ai_span_kind, # apmplus required
80+
"gen_ai.input": tool_gen_ai_tool_input, # apmplus required
81+
"gen_ai.output": tool_gen_ai_tool_output, # apmplus required
7682
}

veadk/tracing/telemetry/exporters/inmemory_exporter.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,16 +77,20 @@ def __init__(self, exporter: _InMemoryExporter) -> None:
7777
super().__init__(exporter)
7878

7979
def on_start(self, span, parent_context) -> None:
80-
if span.name.startswith("invocation"):
80+
if span.name.startswith("invocation") or span.name.startswith("invoke") :
8181
span.set_attribute("gen_ai.operation.name", "chain")
82+
span.set_attribute("gen_ai.span.kind", "workflow")
8283
span.set_attribute("gen_ai.usage.total_tokens", 0)
83-
8484
ctx = set_value("invocation_span_instance", span, context=parent_context)
85+
# suppress instrumentation for llm from apmplus, such as openai
86+
ctx = set_value("suppress_language_model_instrumentation", True, context=ctx)
87+
8588
token = attach(ctx) # mount context on `invocation` root span in Google ADK
8689
setattr(span, "_invocation_token", token) # for later detach
8790

8891
if span.name.startswith("agent_run"):
8992
span.set_attribute("gen_ai.operation.name", "agent")
93+
span.set_attribute("gen_ai.span.kind", "agent")
9094

9195
ctx = set_value("agent_run_span_instance", span, context=parent_context)
9296
token = attach(ctx)

veadk/tracing/telemetry/telemetry.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,10 @@
2020
from google.adk.models.llm_response import LlmResponse
2121
from google.adk.tools import BaseTool
2222
from opentelemetry import trace
23-
from opentelemetry.context import get_value
23+
from opentelemetry.context import get_value, set_value
2424
from opentelemetry.sdk.trace import Span, _Span
2525

26+
from veadk.utils.misc import safe_json_serialize
2627
from veadk.tracing.telemetry.attributes.attributes import ATTRIBUTES
2728
from veadk.tracing.telemetry.attributes.extractors.types import (
2829
ExtractorResponse,
@@ -91,6 +92,10 @@ def _set_agent_input_attribute(
9192

9293
user_content = invocation_context.user_content
9394
if user_content and user_content.parts:
95+
# set gen_ai.input attribute required by APMPlus
96+
# TODO: improve gen_ai.input — the parts currently cannot be serialized reliably
97+
span.set_attribute("gen_ai.input", safe_json_serialize(user_content.parts))
98+
9499
span.add_event(
95100
"gen_ai.user.message",
96101
{
@@ -127,7 +132,15 @@ def _set_agent_input_attribute(
127132

128133
def _set_agent_output_attribute(span: Span, llm_response: LlmResponse) -> None:
129134
content = llm_response.content
135+
print(f"### response: {llm_response}")
136+
print(f"### output_transcription: {llm_response.output_transcription}")
137+
print(f"### input_transcription: {llm_response.input_transcription}")
138+
139+
130140
if content and content.parts:
141+
# set gen_ai.output attribute required by APMPlus
142+
span.set_attribute("gen_ai.output", safe_json_serialize(content.parts))
143+
print("### parts: ", content.parts)
131144
for idx, part in enumerate(content.parts):
132145
if part.text:
133146
span.add_event(
@@ -150,7 +163,7 @@ def set_common_attributes_on_model_span(
150163
invocation_span: Span = get_value("invocation_span_instance") # type: ignore
151164
agent_run_span: Span = get_value("agent_run_span_instance") # type: ignore
152165

153-
if invocation_span and invocation_span.name.startswith("invocation"):
166+
if invocation_span and (invocation_span.name.startswith("invocation") or invocation_span.name.startswith("invoke")):
154167
_set_agent_input_attribute(invocation_span, invocation_context)
155168
_set_agent_output_attribute(invocation_span, llm_response)
156169
for attr_name, attr_extractor in common_attributes.items():
@@ -173,6 +186,7 @@ def set_common_attributes_on_model_span(
173186
current_step_token_usage + int(prev_total_token_usage) # type: ignore
174187
) # we can ignore this warning, cause we manually set the attribute to int before
175188
invocation_span.set_attribute(
189+
# record input/output token usage?
176190
"gen_ai.usage.total_tokens", accumulated_total_token_usage
177191
)
178192

0 commit comments

Comments
 (0)