
Commit b8b81f1

update attributions
1 parent 1db9d5b commit b8b81f1

File tree

7 files changed (+92, -45 lines)


veadk/tracing/telemetry/attributes/attributes.py

Lines changed: 2 additions & 12 deletions

@@ -6,11 +6,7 @@
     common_gen_ai_user_id,
 )
 from veadk.tracing.telemetry.attributes.extractors.llm_attributes_extrators import (
-    llm_gen_ai_completion,
-    llm_gen_ai_prompt,
-    llm_gen_ai_request_model,
-    llm_gen_ai_request_type,
-    llm_gen_ai_response_model,
+    LLM_ATTRIBUTES,
 )
 
 ATTRIBUTES = {
@@ -21,12 +17,6 @@
         "gen_ai.user.id": common_gen_ai_user_id,
         "gen_ai.session.id": common_gen_ai_session_id,
     },
-    "llm": {
-        "gen_ai.request.model": llm_gen_ai_request_model,
-        "gen_ai.request.type": llm_gen_ai_request_type,
-        "gen_ai.response.model": llm_gen_ai_response_model,
-        "gen_ai.prompt": llm_gen_ai_prompt,
-        "gen_ai.completion": llm_gen_ai_completion,
-    },
+    "llm": LLM_ATTRIBUTES,
     "tool": ...,
 }
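
The net effect is that attributes.py no longer enumerates individual extractors; it imports the whole LLM_ATTRIBUTES registry and plugs it in under the "llm" key. A minimal sketch of that registry pattern, using simplified stand-in types (Params and request_model are illustrative, not the actual veadk API):

from dataclasses import dataclass
from typing import Any, Callable

@dataclass
class Params:
    model: str  # hypothetical stand-in for the LLMAttributesParams bundle

def request_model(params: Params) -> str | None:
    # one extractor per attribute: take the params bundle, return a value
    return params.model

# the registry: OTel attribute name -> extractor callable
LLM_ATTRIBUTES: dict[str, Callable[[Params], Any]] = {
    "gen_ai.request.model": request_model,
}

ATTRIBUTES = {"llm": LLM_ATTRIBUTES}

for name, extractor in ATTRIBUTES["llm"].items():
    value = extractor(Params(model="doubao-pro"))
    if value is not None:
        print(name, value)  # in veadk this becomes span.set_attribute(...)

Adding a new span attribute then means adding one function and one registry entry in the extractors module, with no edits to attributes.py.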

veadk/tracing/telemetry/attributes/extractors/llm_attributes_extrators.py

Lines changed: 62 additions & 2 deletions

@@ -58,8 +58,68 @@ def llm_gen_ai_completion(params: LLMAttributesParams) -> list[dict] | None:
 
 
 def llm_gen_ai_response_stop_reason(params: LLMAttributesParams) -> str | None:
-    return params.llm_response.stop_reason
+    return "<no_stop_reason_provided>"
 
 
 def llm_gen_ai_response_finish_reason(params: LLMAttributesParams) -> str | None:
-    return params.llm_response.finish_reason
+    # TODO: update to google-adk v1.12.0
+    return None
+
+
+def llm_gen_ai_usage_input_tokens(params: LLMAttributesParams) -> int | None:
+    if params.llm_response.usage_metadata:
+        return params.llm_response.usage_metadata.prompt_token_count
+    return None
+
+
+def llm_gen_ai_usage_output_tokens(params: LLMAttributesParams) -> int | None:
+    if params.llm_response.usage_metadata:
+        return params.llm_response.usage_metadata.candidates_token_count
+    return None
+
+
+def llm_gen_ai_usage_total_tokens(params: LLMAttributesParams) -> int | None:
+    if params.llm_response.usage_metadata:
+        return params.llm_response.usage_metadata.total_token_count
+    return None
+
+
+def llm_gen_ai_usage_cache_creation_input_tokens(
+    params: LLMAttributesParams,
+) -> int | None:
+    if params.llm_response.usage_metadata:
+        return None
+        # return params.llm_response.usage_metadata.cached_content_token_count
+    return None
+
+
+def llm_gen_ai_usage_cache_read_input_tokens(params: LLMAttributesParams) -> int | None:
+    if params.llm_response.usage_metadata:
+        return None
+        # return params.llm_response.usage_metadata.prompt_token_count
+    return None
+
+
+def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> bool | None:
+    # return params.llm_request.stream
+    return None
+
+
+LLM_ATTRIBUTES = {
+    "gen_ai.request.model": llm_gen_ai_request_model,
+    "gen_ai.request.type": llm_gen_ai_request_type,
+    "gen_ai.response.model": llm_gen_ai_response_model,
+    "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
+    "gen_ai.request.temperature": llm_gen_ai_request_temperature,
+    "gen_ai.request.top_p": llm_gen_ai_request_top_p,
+    "gen_ai.prompt": llm_gen_ai_prompt,
+    "gen_ai.completion": llm_gen_ai_completion,
+    "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
+    "gen_ai.response.finish_reason": llm_gen_ai_response_finish_reason,
+    "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
+    "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
+    "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
+    "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
+    "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
+    "gen_ai.is_streaming": llm_gen_ai_is_streaming,
+}
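
The new usage extractors all follow the same guard-then-read shape against the response's usage_metadata. A runnable sketch of that shape, assuming a google-genai-style usage object with the field names shown in the diff (the dataclasses here are stand-ins, not veadk types):

from dataclasses import dataclass

@dataclass
class UsageMetadata:
    prompt_token_count: int | None = None
    candidates_token_count: int | None = None
    total_token_count: int | None = None

@dataclass
class LLMResponse:
    usage_metadata: UsageMetadata | None = None

def usage_input_tokens(resp: LLMResponse) -> int | None:
    # mirrors llm_gen_ai_usage_input_tokens: guard on usage_metadata first,
    # so a response without usage data yields None instead of raising
    if resp.usage_metadata:
        return resp.usage_metadata.prompt_token_count
    return None

resp = LLMResponse(UsageMetadata(prompt_token_count=42,
                                 candidates_token_count=7,
                                 total_token_count=49))
assert usage_input_tokens(resp) == 42
assert usage_input_tokens(LLMResponse()) is None

The cache and streaming extractors keep the same shape but return None for now, with the intended field access left as a comment until it is supported.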

veadk/tracing/telemetry/exporters/apmplus_exporter.py

Lines changed: 4 additions & 2 deletions

@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Any
+
 from opentelemetry import metrics
 from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
@@ -48,10 +50,10 @@ class APMPlusExporterConfig(BaseModel):
     )
 
 
-class APMPlusExporter(BaseModel, BaseExporter):
+class APMPlusExporter(BaseExporter):
    config: APMPlusExporterConfig = Field(default_factory=APMPlusExporterConfig)
 
-    def model_post_init(self) -> None:
+    def model_post_init(self, context: Any) -> None:
         headers = {
             "x-byteapm-appkey": self.config.app_key,
         }
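
The signature change matters because pydantic v2 invokes model_post_init with a context argument after validation; an override that omits the parameter raises a TypeError at instantiation. A minimal sketch (app_key and the header name here are illustrative, not the real config):

from typing import Any
from pydantic import BaseModel, Field

class Exporter(BaseModel):
    app_key: str = "demo"  # illustrative field, not the real APMPlus config
    headers: dict = Field(default_factory=dict)

    def model_post_init(self, context: Any) -> None:
        # runs once validation has finished; a safe place to derive headers
        self.headers = {"x-byteapm-appkey": self.app_key}

print(Exporter().headers)  # {'x-byteapm-appkey': 'demo'}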

veadk/tracing/telemetry/exporters/base_exporter.py

Lines changed: 8 additions & 10 deletions

@@ -12,21 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from abc import ABC
-
 from opentelemetry.sdk.trace import SpanProcessor
 from opentelemetry.sdk.trace.export import SpanExporter
+from pydantic import BaseModel, ConfigDict, Field
+
 
+class BaseExporter(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
 
-class BaseExporter(ABC):
-    def __init__(
-        self, resource_attributes: dict | None = None, headers: dict | None = None
-    ) -> None:
-        self.resource_attributes = resource_attributes or {}
-        self.headers = headers or {}
+    resource_attributes: dict = Field(default_factory=dict)
+    headers: dict = Field(default_factory=dict)
 
-        self._exporter: SpanExporter | None = None
-        self.processor: SpanProcessor | None = None
+    _exporter: SpanExporter | None = None
+    processor: SpanProcessor | None = None
 
     def export(self) -> None:
         """Force export of telemetry data."""

veadk/tracing/telemetry/exporters/cozeloop_exporter.py

Lines changed: 1 addition & 1 deletion

@@ -41,7 +41,7 @@ class CozeloopExporterConfig(BaseModel):
     )
 
 
-class CozeloopExporter(BaseModel, BaseExporter):
+class CozeloopExporter(BaseExporter):
     config: CozeloopExporterConfig = Field(default_factory=CozeloopExporterConfig)
 
     def model_post_init(self) -> None:

veadk/tracing/telemetry/exporters/tls_exporter.py

Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@ class TLSExporterConfig(BaseModel):
     secret_key: str = Field(default_factory=lambda: getenv("VOLCENGINE_SECRET_KEY"))
 
 
-class TLSExporter(BaseModel, BaseExporter):
+class TLSExporter(BaseExporter):
     config: TLSExporterConfig = Field(default_factory=TLSExporterConfig)
 
     def model_post_init(self) -> None:
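
With BaseExporter now a pydantic model itself, the concrete exporters drop the BaseModel mixin and subclass it directly. A minimal sketch of the resulting pattern (the config field and endpoint are illustrative); note it uses the two-argument model_post_init from the APMPlus fix, which the CozeloopExporter and TLSExporter overrides shown above would also need under pydantic v2:

from typing import Any
from pydantic import BaseModel, ConfigDict, Field

class BaseExporter(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
    headers: dict = Field(default_factory=dict)

class TLSExporterConfig(BaseModel):
    endpoint: str = "https://tls.example.invalid"  # hypothetical default

class TLSExporter(BaseExporter):  # single base, no BaseModel mixin needed
    config: TLSExporterConfig = Field(default_factory=TLSExporterConfig)

    def model_post_init(self, context: Any) -> None:
        # derive connection headers from the validated config
        self.headers = {"endpoint": self.config.endpoint}

print(TLSExporter().headers)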

veadk/tracing/telemetry/telemetry.py

Lines changed: 14 additions & 17 deletions

@@ -9,6 +9,9 @@
 from opentelemetry import trace
 
 from veadk.tracing.telemetry.attributes.attributes import ATTRIBUTES
+from veadk.tracing.telemetry.attributes.extractors.llm_attributes_extrators import (
+    LLMAttributesParams,
+)
 from veadk.utils.logger import get_logger
 
 logger = get_logger(__name__)
@@ -41,25 +44,19 @@ def trace_call_llm(
 ) -> None:
     span = trace.get_current_span()
 
-    # common_attributes = ATTRIBUTES.get("common", {})
     llm_attributes = ATTRIBUTES.get("llm", {})
 
-    # for attr_name, attr_extractor in common_attributes.items():
-    #     # set attribute anyway
-    #     span.set_attribute(
-    #         attr_name,
-    #         attr_extractor(invocation_context, event_id, llm_request, llm_response),
-    #     )
-
     for attr_name, attr_extractor in llm_attributes.items():
+        params = LLMAttributesParams(
+            invocation_context, event_id, llm_request, llm_response
+        )
         # set attribute anyway
-        value = attr_extractor(invocation_context, event_id, llm_request, llm_response)
-        if isinstance(value, dict):
-            for key, val in value.items():
-                # gen_ai. and gen_ai_
-                span.set_attribute(f"{attr_name}{key}", val)
+        value = attr_extractor(params)
+        if isinstance(value, list):
+            for _value in value:
+                for key, val in _value.items():
+                    # gen_ai. and gen_ai_
+                    logger.debug(f"Set attribute {attr_name}{key} = {val}")
+                    span.set_attribute(f"{attr_name}{key}", val)
         else:
-            span.set_attribute(
-                attr_name,
-                attr_extractor(invocation_context, event_id, llm_request, llm_response),
-            )
+            span.set_attribute(attr_name, value)
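
The loop now handles extractors that return a list of dicts: each dict's keys are treated as suffixes appended to the attribute-name prefix, which is how structured values like prompts fan out into flat span attributes. A self-contained sketch of that flattening, writing into a plain dict instead of an OTel span (the ".0.role"-style suffixes are an assumed shape, not confirmed by the diff):

def set_span_attributes(span_attrs: dict, attr_name: str, value) -> None:
    if isinstance(value, list):
        # list of dicts: each key is a suffix glued onto the prefix
        for item in value:
            for key, val in item.items():
                span_attrs[f"{attr_name}{key}"] = val
    else:
        # scalar: set the attribute under its own name
        span_attrs[attr_name] = value

attrs: dict = {}
set_span_attributes(attrs, "gen_ai.request.model", "doubao-pro")
set_span_attributes(attrs, "gen_ai.prompt",
                    [{".0.role": "user", ".0.content": "hi"}])
print(attrs)
# {'gen_ai.request.model': 'doubao-pro',
#  'gen_ai.prompt.0.role': 'user', 'gen_ai.prompt.0.content': 'hi'}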
