|
14 | 14 |
|
15 | 15 | from typing import Any |
16 | 16 |
|
| 17 | +from google.adk.models.llm_request import LlmRequest |
| 18 | +from google.adk.models.llm_response import LlmResponse |
17 | 20 | from opentelemetry import metrics as metrics_api |
18 | 21 | from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter |
19 | 22 | from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter |
| 23 | +from opentelemetry.metrics import Meter |
20 | 24 | from opentelemetry.sdk import metrics as metrics_sdk |
21 | 25 | from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader |
22 | 26 | from opentelemetry.sdk.resources import Resource |
|
26 | 30 |
|
27 | 31 | from veadk.config import getenv |
28 | 32 | from veadk.tracing.telemetry.exporters.base_exporter import BaseExporter |
29 | | -from veadk.tracing.telemetry.metrics.opentelemetry_metrics import MeterUploader |
30 | 33 | from veadk.utils.logger import get_logger |
31 | 34 |
|
32 | 35 | logger = get_logger(__name__) |
33 | 36 |
|
34 | 37 |
|
| 38 | +class MeterUploader: |
| 39 | + def __init__( |
| 40 | + self, name: str, endpoint: str, headers: dict, resource_attributes: dict |
| 41 | + ) -> None: |
| 42 | +        # The metrics pipeline mirrors the tracing pipeline: |
| 43 | +        #   meter provider ~ tracer provider, OTLP metric exporter ~ OTLP span exporter, |
| 44 | +        #   periodic metric reader ~ batch span processor |
| 45 | + global_metrics_provider = metrics_api.get_meter_provider() |
| 46 | + |
| 47 | + # 1. init resource |
| 48 | + if hasattr(global_metrics_provider, "_sdk_config"): |
| 49 | + global_resource = global_metrics_provider._sdk_config.resource # type: ignore |
| 50 | + else: |
| 51 | + global_resource = Resource.create() |
| 52 | + |
| 53 | + resource = global_resource.merge(Resource.create(resource_attributes)) |
| 54 | + |
| 55 | + # 2. init exporter and reader |
| 56 | + exporter = OTLPMetricExporter(endpoint=endpoint, headers=headers) |
| 57 | + metric_reader = PeriodicExportingMetricReader(exporter) |
| 58 | + |
| 59 | + metrics_api.set_meter_provider( |
| 60 | + metrics_sdk.MeterProvider(metric_readers=[metric_reader], resource=resource) |
| 61 | + ) |
| 62 | + |
| 63 | + # 3. init meter |
| 64 | +        self.meter: Meter = metrics_api.get_meter(name=name) |
| 65 | + |
| 66 | + # create meter attributes |
| 67 | + self.llm_invoke_counter = self.meter.create_counter( |
| 68 | + name="gen_ai.chat.count", |
| 69 | + description="Number of LLM invocations", |
| 70 | + unit="count", |
| 71 | + ) |
| 72 | + self.token_usage = self.meter.create_histogram( |
| 73 | + name="gen_ai.client.token.usage", |
| 74 | + description="Token consumption of LLM invocations", |
| 75 | + unit="count", |
| 76 | + ) |
| 77 | + |
| 78 | + def record(self, llm_request: LlmRequest, llm_response: LlmResponse) -> None: |
| 79 | + attributes = { |
| 80 | + "gen_ai_system": "volcengine", |
| 81 | + "gen_ai_response_model": llm_request.model, |
| 82 | + "gen_ai_operation_name": "chat_completions", |
| 83 | + "stream": "false", |
| 84 | + "server_address": "api.volcengine.com", |
| 85 | + } # required by Volcengine APMPlus |
| 86 | + |
| 87 | + if llm_response.usage_metadata: |
| 88 | + # llm invocation number += 1 |
| 89 | + self.llm_invoke_counter.add(1, attributes) |
| 90 | + |
| 91 | + # upload token usage |
| 92 | + input_token = llm_response.usage_metadata.prompt_token_count |
| 93 | + output_token = llm_response.usage_metadata.candidates_token_count |
| 94 | + |
| 95 | + if input_token: |
| 96 | + token_attributes = {**attributes, "gen_ai_token_type": "input"} |
| 97 | + self.token_usage.record(input_token, attributes=token_attributes) |
| 98 | + if output_token: |
| 99 | + token_attributes = {**attributes, "gen_ai_token_type": "output"} |
| 100 | + self.token_usage.record(output_token, attributes=token_attributes) |
| 101 | + |
| 102 | + |
35 | 103 | class APMPlusExporterConfig(BaseModel): |
36 | 104 | endpoint: str = Field( |
37 | 105 | default_factory=lambda: getenv( |
@@ -69,25 +137,13 @@ def model_post_init(self, context: Any) -> None: |
69 | 137 | endpoint=self.config.endpoint, insecure=True, headers=self.headers |
70 | 138 | ) |
71 | 139 | self.processor = BatchSpanProcessor(self._exporter) |
72 | | - self.meter_uploader = self._init_meter_uploader(exporter_id="apmplus") |
73 | | - |
74 | | - def _init_meter_uploader(self, exporter_id: str) -> MeterUploader: |
75 | | - global_metrics_provider = metrics_api.get_meter_provider() |
76 | | - if hasattr(global_metrics_provider, "_sdk_config"): |
77 | | - global_resource = global_metrics_provider._sdk_config.resource |
78 | | - else: |
79 | | - global_resource = Resource.create() |
80 | | - resource = global_resource.merge(Resource.create(self.resource_attributes)) |
81 | | - |
82 | | - exporter = OTLPMetricExporter( |
83 | | - endpoint=self.config.endpoint, headers=self.headers |
84 | | - ) |
85 | | - metric_reader = PeriodicExportingMetricReader(exporter) |
86 | 140 |
|
87 | | - metrics_api.set_meter_provider( |
88 | | - metrics_sdk.MeterProvider(metric_readers=[metric_reader], resource=resource) |
| 141 | + self.meter_uploader = MeterUploader( |
| 142 | + name="apmplus_meter", |
| 143 | + endpoint=self.config.endpoint, |
| 144 | + headers=self.headers, |
| 145 | + resource_attributes=self.resource_attributes, |
89 | 146 | ) |
90 | | - return MeterUploader(exporter_id=exporter_id) |
91 | 147 |
|
92 | 148 | @override |
93 | 149 | def export(self) -> None: |
|
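For reference, a minimal smoke-test sketch of the new `MeterUploader`. This is not part of the change; the endpoint, headers, resource attributes, model name, and token counts are placeholders, and in practice the endpoint/headers come from `veadk.config.getenv` while the request/response pair comes from the agent's actual model call:

```python
# Hypothetical smoke test; assumes MeterUploader (added in this diff) is
# importable from the APMPlus exporter module in your checkout.
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse
from google.genai import types

uploader = MeterUploader(
    name="apmplus_meter",
    endpoint="localhost:4317",   # placeholder OTLP/gRPC endpoint
    headers={},                  # placeholder; APMPlus requires real auth headers
    resource_attributes={"service.name": "demo-agent"},  # illustrative
)

# Hand-built request/response pair; in a running agent these come from the
# model invocation. Model name and token counts are made up.
llm_request = LlmRequest(model="doubao-pro-32k")
llm_response = LlmResponse(
    usage_metadata=types.GenerateContentResponseUsageMetadata(
        prompt_token_count=128,
        candidates_token_count=64,
        total_token_count=192,
    )
)

# One call increments gen_ai.chat.count and records two gen_ai.client.token.usage
# points, tagged gen_ai_token_type=input and gen_ai_token_type=output.
uploader.record(llm_request, llm_response)
```

Note that `PeriodicExportingMetricReader` exports on a fixed interval (60 s by default), so a short-lived script like this may need to flush or shut down the global meter provider before exiting for the recorded points to reach the backend.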