Skip to content

Commit f6e964e

Browse files
authored
feat(tracing): support APMPlus session (#218)
* feat: add gen_ai input * feat: agent run span * feat: format
1 parent c20e5ab commit f6e964e

File tree

4 files changed

+33
-4
lines changed

4 files changed

+33
-4
lines changed

veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -325,6 +325,10 @@ def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
325325
return ExtractorResponse(content="chat")
326326

327327

328+
def llm_gen_ai_span_kind(params: LLMAttributesParams) -> ExtractorResponse:
329+
return ExtractorResponse(content="llm")
330+
331+
328332
# def llm_gen_ai_system_message(params: LLMAttributesParams) -> ExtractorResponse:
329333
# event_attributes = {
330334
# "content": str(params.llm_request.config.system_instruction),
@@ -559,6 +563,7 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorRespon
559563
"gen_ai.is_streaming": llm_gen_ai_is_streaming,
560564
# -> 1.4. span kind
561565
"gen_ai.operation.name": llm_gen_ai_operation_name,
566+
"gen_ai.span.kind": llm_gen_ai_span_kind, # apmplus required
562567
# -> 1.5. inputs
563568
"gen_ai.prompt": llm_gen_ai_prompt,
564569
# -> 1.6. outputs

veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,10 @@ def tool_gen_ai_operation_name(params: ToolAttributesParams) -> ExtractorRespons
2323
return ExtractorResponse(content="execute_tool")
2424

2525

26+
def tool_gen_ai_span_kind(params: ToolAttributesParams) -> ExtractorResponse:
27+
return ExtractorResponse(content="tool")
28+
29+
2630
def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
2731
tool_input = {
2832
"role": "tool",
@@ -73,4 +77,7 @@ def tool_gen_ai_tool_output(params: ToolAttributesParams) -> ExtractorResponse:
7377
"gen_ai.tool.output": tool_gen_ai_tool_output, # TLS required
7478
"cozeloop.input": tool_gen_ai_tool_input, # CozeLoop required
7579
"cozeloop.output": tool_gen_ai_tool_output, # CozeLoop required
80+
"gen_ai.span.kind": tool_gen_ai_span_kind, # apmplus required
81+
"gen_ai.input": tool_gen_ai_tool_input, # apmplus required
82+
"gen_ai.output": tool_gen_ai_tool_output, # apmplus required
7683
}

veadk/tracing/telemetry/exporters/inmemory_exporter.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -79,14 +79,20 @@ def __init__(self, exporter: _InMemoryExporter) -> None:
7979
def on_start(self, span, parent_context) -> None:
8080
if span.name.startswith("invocation"):
8181
span.set_attribute("gen_ai.operation.name", "chain")
82+
span.set_attribute("gen_ai.span.kind", "workflow")
8283
span.set_attribute("gen_ai.usage.total_tokens", 0)
83-
8484
ctx = set_value("invocation_span_instance", span, context=parent_context)
85+
# suppress instrumentation for llm to avoid auto instrument from apmplus, such as openai
86+
ctx = set_value(
87+
"suppress_language_model_instrumentation", True, context=ctx
88+
)
89+
8590
token = attach(ctx) # mount context on `invocation` root span in Google ADK
8691
setattr(span, "_invocation_token", token) # for later detach
8792

88-
if span.name.startswith("agent_run"):
93+
if span.name.startswith("agent_run") or span.name.startswith("invoke_agent"):
8994
span.set_attribute("gen_ai.operation.name", "agent")
95+
span.set_attribute("gen_ai.span.kind", "agent")
9096

9197
ctx = set_value("agent_run_span_instance", span, context=parent_context)
9298
token = attach(ctx)

veadk/tracing/telemetry/telemetry.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,9 @@ def _set_agent_input_attribute(
9191

9292
user_content = invocation_context.user_content
9393
if user_content and user_content.parts:
94+
# set gen_ai.input attribute required by APMPlus
95+
span.set_attribute("gen_ai.input", user_content.model_dump(exclude_none=True))
96+
9497
span.add_event(
9598
"gen_ai.user.message",
9699
{
@@ -128,6 +131,9 @@ def _set_agent_input_attribute(
128131
def _set_agent_output_attribute(span: Span, llm_response: LlmResponse) -> None:
129132
content = llm_response.content
130133
if content and content.parts:
134+
# set gen_ai.output attribute required by APMPlus
135+
span.set_attribute("gen_ai.output", content.model_dump(exclude_none=True))
136+
131137
for idx, part in enumerate(content.parts):
132138
if part.text:
133139
span.add_event(
@@ -173,10 +179,15 @@ def set_common_attributes_on_model_span(
173179
current_step_token_usage + int(prev_total_token_usage) # type: ignore
174180
) # we can ignore this warning, cause we manually set the attribute to int before
175181
invocation_span.set_attribute(
176-
"gen_ai.usage.total_tokens", accumulated_total_token_usage
182+
# record input/output token usage?
183+
"gen_ai.usage.total_tokens",
184+
accumulated_total_token_usage,
177185
)
178186

179-
if agent_run_span and agent_run_span.name.startswith("agent_run"):
187+
if agent_run_span and (
188+
agent_run_span.name.startswith("agent_run")
189+
or agent_run_span.name.startswith("invoke_agent")
190+
):
180191
_set_agent_input_attribute(agent_run_span, invocation_context)
181192
_set_agent_output_attribute(agent_run_span, llm_response)
182193
for attr_name, attr_extractor in common_attributes.items():

0 commit comments

Comments (0)