# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14-
15- import time
16-
14+ from google .adk .models import LlmResponse
15+ from opentelemetry import metrics
1716from opentelemetry .metrics ._internal import Meter
18- from opentelemetry .sdk .metrics import MeterProvider
19- from opentelemetry .sdk .metrics .export import PeriodicExportingMetricReader
2017
2118from veadk .config import getenv
2219
23-
24- class MeterContext :
25- def __init__ (
26- self ,
27- meter : Meter ,
28- provider : MeterProvider ,
29- reader : PeriodicExportingMetricReader ,
30- ):
31- self .meter = meter
32- self .provider = provider
33- self .reader = reader
# Name template for the OTel meter owned by one exporter; filled in with the
# exporter's id in MeterUploader.__init__ (e.g. "veadk.<exporter_id>.meter").
METER_NAME_TEMPLATE = "veadk.{exporter_id}.meter"
3421
3522
3623class MeterUploader :
37- def __init__ (self , meter_context : MeterContext ):
38- self .meter = meter_context . meter
39- self . provider = meter_context . provider
40- self . reader = meter_context . reader
24+ def __init__ (self , exporter_id : str ):
25+ self .meter : Meter = metrics . get_meter (
26+ METER_NAME_TEMPLATE . format ( exporter_id = exporter_id )
27+ )
4128
4229 self .base_attributes = {
4330 "gen_ai_system" : "volcengine" ,
@@ -57,17 +44,11 @@ def __init__(self, meter_context: MeterContext):
5744 unit = "count" ,
5845 )
5946
60- def record (self , prompt_tokens : list [int ], completion_tokens : list [int ]):
61- self .llm_invoke_counter .add (len (completion_tokens ), self .base_attributes )
62-
63- for prompt_token in prompt_tokens :
64- token_attributes = {** self .base_attributes , "gen_ai_token_type" : "input" }
65- self .token_usage .record (prompt_token , attributes = token_attributes )
66- for completion_token in completion_tokens :
67- token_attributes = {** self .base_attributes , "gen_ai_token_type" : "output" }
68- self .token_usage .record (completion_token , attributes = token_attributes )
69-
70- def close (self ):
71- time .sleep (0.05 )
72- self .reader .force_flush ()
73- self .provider .shutdown ()
47+ def record (self , llm_response : LlmResponse ):
48+ input_token = llm_response .usage_metadata .prompt_token_count
49+ output_token = llm_response .usage_metadata .candidates_token_count
50+ self .llm_invoke_counter .add (1 , self .base_attributes )
51+ token_attributes = {** self .base_attributes , "gen_ai_token_type" : "input" }
52+ self .token_usage .record (input_token , attributes = token_attributes )
53+ token_attributes = {** self .base_attributes , "gen_ai_token_type" : "output" }
54+ self .token_usage .record (output_token , attributes = token_attributes )