1212# See the License for the specific language governing permissions and
1313# limitations under the License.
1414
15- from typing import Union , Optional , Iterator , AsyncIterator , Awaitable
16-
15+ import functools
1716import logging
18- import time
1917import os
20- import functools
21-
18+ import time
19+ from typing import AsyncIterator , Awaitable , Iterator , Optional , Union
2220
23- from google .genai .models import Models , AsyncModels
21+ from google .genai .models import AsyncModels , Models
2422from google .genai .types import (
23+ BlockedReason ,
2524 ContentListUnion ,
2625 ContentListUnionDict ,
2726 GenerateContentConfigOrDict ,
2827 GenerateContentResponse ,
2928)
30- from .flags import is_content_recording_enabled
31- from .otel_wrapper import OTelWrapper
3229
3330from opentelemetry import trace
3431from opentelemetry .semconv ._incubating .attributes import gen_ai_attributes
3532from opentelemetry .semconv .attributes import error_attributes
3633
34+ from .flags import is_content_recording_enabled
35+ from .otel_wrapper import OTelWrapper
3736
3837_logger = logging .getLogger (__name__ )
3938
@@ -84,20 +83,20 @@ def _get_gemini_system_name():
8483
8584
def _guess_genai_system_from_env():
    """Infer which GenAI backend is in use from the environment.

    Fallback heuristic for when the client object does not reveal whether
    it targets Vertex AI: the ``GOOGLE_GENAI_USE_VERTEXAI`` environment
    variable (case-insensitive ``"true"`` or ``"1"``) selects Vertex AI;
    anything else selects the Gemini API.
    """
    use_vertexai = os.environ.get("GOOGLE_GENAI_USE_VERTEXAI", "0")
    if use_vertexai.lower() in ("true", "1"):
        return _get_vertexai_system_name()
    return _get_gemini_system_name()
9392
9493
9594
9695def _determine_genai_system (models_object : Union [Models , AsyncModels ]):
97- client = getattr (models_object , ' _api_client' )
96+ client = getattr (models_object , " _api_client" )
9897 if not client :
9998 return _guess_genai_system_from_env ()
100- vertexai_attr = getattr (client , ' vertexai' )
99+ vertexai_attr = getattr (client , " vertexai" )
101100 if vertexai_attr is None :
102101 return _guess_genai_system_from_env ()
103102 if vertexai_attr :
@@ -111,7 +110,7 @@ def _get_config_property(
111110 path : str ):
112111 if config is None :
113112 return None
114- path_segments = path .split ('.' )
113+ path_segments = path .split ("." )
115114 current_context = config
116115 for path_segment in path_segments :
117116 if current_context is None :
@@ -126,7 +125,7 @@ def _get_config_property(
126125def _get_response_property (
127126 response : GenerateContentResponse ,
128127 path : str ):
129- path_segments = path .split ('.' )
128+ path_segments = path .split ("." )
130129 current_context = response
131130 for path_segment in path_segments :
132131 if current_context is None :
@@ -139,15 +138,15 @@ def _get_response_property(
139138
140139
def _get_temperature(config: Optional[GenerateContentConfigOrDict]):
    """Return the ``temperature`` setting from *config*, or ``None`` if absent."""
    return _get_config_property(config, "temperature")
143142
144143
def _get_top_k(config: Optional[GenerateContentConfigOrDict]):
    """Return the ``top_k`` setting from *config*, or ``None`` if absent."""
    return _get_config_property(config, "top_k")
147146
148147
def _get_top_p(config: Optional[GenerateContentConfigOrDict]):
    """Return the ``top_p`` setting from *config*, or ``None`` if absent."""
    return _get_config_property(config, "top_p")
151150
152151
153152_SPAN_ATTRIBUTE_TO_CONFIG_EXTRACTOR = {
@@ -181,7 +180,7 @@ def start_span_as_current_span(self, name):
181180 attributes = {
182181 gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
183182 gen_ai_attributes .GEN_AI_REQUEST_MODEL : self ._genai_request_model ,
184- gen_ai_attributes .GEN_AI_OPERATION_NAME : ' GenerateContent' ,
183+ gen_ai_attributes .GEN_AI_OPERATION_NAME : " GenerateContent" ,
185184 }
186185 )
187186
@@ -196,7 +195,7 @@ def process_request(
196195 span .set_attribute (attribute_key , attribute_value )
197196 self ._maybe_log_system_instruction (config = config )
198197 self ._maybe_log_user_prompt (contents )
199-
198+
200199
201200 def process_response (self , response : GenerateContentResponse ):
202201 self ._maybe_update_token_counts (response )
@@ -216,8 +215,8 @@ def finalize_processing(self):
216215 self ._otel_wrapper .done ()
217216
218217 def _maybe_update_token_counts (self , response : GenerateContentResponse ):
219- input_tokens = _get_response_property (response , ' usage_metadata.prompt_token_count' )
220- output_tokens = _get_response_property (response , ' usage_metadata.candidates_token_count' )
218+ input_tokens = _get_response_property (response , " usage_metadata.prompt_token_count" )
219+ output_tokens = _get_response_property (response , " usage_metadata.candidates_token_count" )
221220 if input_tokens :
222221 self ._input_tokens += input_tokens
223222 if output_tokens :
@@ -228,26 +227,26 @@ def _maybe_update_error_type(self, response: GenerateContentResponse):
228227 return
229228 if ((not response .prompt_feedback ) or
230229 (not response .prompt_feedback .block_reason ) or
231- (block_reason == genai_types . BlockedReason .BLOCKED_REASON_UNSPECIFIED )):
232- self ._error_type = ' NO_CANDIDATES'
230+ (response . prompt_feedback . block_reason == BlockedReason .BLOCKED_REASON_UNSPECIFIED )):
231+ self ._error_type = " NO_CANDIDATES"
233232 return
234233 block_reason = response .prompt_feedback .block_reason
235- self ._error_type = ' BLOCKED_{}' .format (block_reason .name )
234+ self ._error_type = " BLOCKED_{}" .format (block_reason .name )
236235
237236 def _maybe_log_system_instruction (self , config : Optional [GenerateContentConfigOrDict ]= None ):
238237 if not self ._content_recording_enabled :
239238 return
240- system_instruction = _get_config_property (config , ' system_instruction' )
239+ system_instruction = _get_config_property (config , " system_instruction" )
241240 if not system_instruction :
242241 return
243242 self ._otel_wrapper .log_system_prompt (
244243 attributes = {
245244 gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
246245 },
247246 body = {
248- ' content' : system_instruction ,
247+ " content" : system_instruction ,
249248 })
250-
249+
251250 def _maybe_log_user_prompt (self , contents : Union [ContentListUnion , ContentListUnionDict ]):
252251 if not self ._content_recording_enabled :
253252 return
@@ -256,7 +255,7 @@ def _maybe_log_user_prompt(self, contents: Union[ContentListUnion, ContentListUn
256255 gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
257256 },
258257 body = {
259- ' content' : contents ,
258+ " content" : contents ,
260259 })
261260
262261 def _maybe_log_response (self , response : GenerateContentResponse ):
@@ -267,34 +266,34 @@ def _maybe_log_response(self, response: GenerateContentResponse):
267266 gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
268267 },
269268 body = {
270- ' content' : response .model_dump (),
269+ " content" : response .model_dump (),
271270 })
272271
273272 def _record_token_usage_metric (self ):
274273 self ._otel_wrapper .token_usage_metric .record (
275274 self ._input_tokens ,
276275 attributes = {
277- gen_ai_attributes .GEN_AI_TOKEN_TYPE : ' input' ,
276+ gen_ai_attributes .GEN_AI_TOKEN_TYPE : " input" ,
278277 gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
279278 gen_ai_attributes .GEN_AI_REQUEST_MODEL : self ._genai_request_model ,
280- gen_ai_attributes .GEN_AI_OPERATION_NAME : ' GenerateContent' ,
279+ gen_ai_attributes .GEN_AI_OPERATION_NAME : " GenerateContent" ,
281280 }
282281 )
283282 self ._otel_wrapper .token_usage_metric .record (
284283 self ._output_tokens ,
285284 attributes = {
286- gen_ai_attributes .GEN_AI_TOKEN_TYPE : ' output' ,
285+ gen_ai_attributes .GEN_AI_TOKEN_TYPE : " output" ,
287286 gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
288287 gen_ai_attributes .GEN_AI_REQUEST_MODEL : self ._genai_request_model ,
289- gen_ai_attributes .GEN_AI_OPERATION_NAME : ' GenerateContent' ,
288+ gen_ai_attributes .GEN_AI_OPERATION_NAME : " GenerateContent" ,
290289 }
291290 )
292-
291+
293292 def _record_duration_metric (self ):
294293 attributes = {
295294 gen_ai_attributes .GEN_AI_SYSTEM : self ._genai_system ,
296295 gen_ai_attributes .GEN_AI_REQUEST_MODEL : self ._genai_request_model ,
297- gen_ai_attributes .GEN_AI_OPERATION_NAME : ' GenerateContent' ,
296+ gen_ai_attributes .GEN_AI_OPERATION_NAME : " GenerateContent" ,
298297 }
299298 if self ._error_type is not None :
300299 attributes [error_attributes .ERROR_TYPE ] = self ._error_type
@@ -319,7 +318,7 @@ def instrumented_generate_content(
319318 contents : Union [ContentListUnion , ContentListUnionDict ],
320319 config : Optional [GenerateContentConfigOrDict ] = None ) -> GenerateContentResponse :
321320 helper = _GenerateContentInstrumentationHelper (self , otel_wrapper , model )
322- with helper .start_span_as_current_span (' google.genai.Models.generate_content' ):
321+ with helper .start_span_as_current_span (" google.genai.Models.generate_content" ):
323322 helper .process_request (contents , config )
324323 try :
325324 response = wrapped_func (self , model = model , contents = contents , config = config )
@@ -348,7 +347,7 @@ def instrumented_generate_content_stream(
348347 contents : Union [ContentListUnion , ContentListUnionDict ],
349348 config : Optional [GenerateContentConfigOrDict ] = None ) -> Iterator [GenerateContentResponse ]:
350349 helper = _GenerateContentInstrumentationHelper (self , otel_wrapper , model )
351- with helper .start_span_as_current_span (' google.genai.Models.generate_content_stream' ):
350+ with helper .start_span_as_current_span (" google.genai.Models.generate_content_stream" ):
352351 helper .process_request (contents , config )
353352 try :
354353 for response in wrapped_func (self , model = model , contents = contents , config = config ):
@@ -377,7 +376,7 @@ async def instrumented_generate_content(
377376 contents : Union [ContentListUnion , ContentListUnionDict ],
378377 config : Optional [GenerateContentConfigOrDict ] = None ) -> GenerateContentResponse :
379378 helper = _GenerateContentInstrumentationHelper (self , otel_wrapper , model )
380- with helper .start_span_as_current_span (' google.genai.AsyncModels.generate_content' ):
379+ with helper .start_span_as_current_span (" google.genai.AsyncModels.generate_content" ):
381380 helper .process_request (contents , config )
382381 try :
383382 response = await wrapped_func (self , model = model , contents = contents , config = config )
@@ -406,7 +405,7 @@ async def instrumented_generate_content_stream(
406405 contents : Union [ContentListUnion , ContentListUnionDict ],
407406 config : Optional [GenerateContentConfigOrDict ] = None ) -> Awaitable [AsyncIterator [GenerateContentResponse ]]:
408407 helper = _GenerateContentInstrumentationHelper (self , otel_wrapper , model )
409- with helper .start_span_as_current_span (' google.genai.AsyncModels.generate_content_stream' ):
408+ with helper .start_span_as_current_span (" google.genai.AsyncModels.generate_content_stream" ):
410409 helper .process_request (contents , config )
411410 try :
412411 async for response in wrapped_func (self , model = model , contents = contents , config = config ):
@@ -431,4 +430,4 @@ def instrument_generate_content(otel_wrapper: OTelWrapper) -> object:
431430 Models .generate_content_stream = _create_instrumented_generate_content_stream (snapshot , otel_wrapper )
432431 AsyncModels .generate_content = _create_instrumented_async_generate_content (snapshot , otel_wrapper )
433432 AsyncModels .generate_content_stream = _create_instrumented_async_generate_content_stream (snapshot , otel_wrapper )
434- return snapshot
433+ return snapshot
0 commit comments