4141
4242from .flags import is_content_recording_enabled
4343from .otel_wrapper import OTelWrapper
44+ from .dict_util import flatten_dict
45+ from .custom_semconv import CUSTOM_LLM_REQUEST_PREFIX
4446
4547_logger = logging .getLogger (__name__ )
4648
@@ -129,21 +131,45 @@ def _determine_genai_system(models_object: Union[Models, AsyncModels]):
129131 return _get_gemini_system_name ()
130132
131133
132- def _get_config_property (
133- config : Optional [GenerateContentConfigOrDict ], path : str
134- ) -> Any :
134+ def _to_dict (value : object ):
135+ if isinstance (value , dict ):
136+ return value
137+ if hasattr (value , "model_dump" ):
138+ return value .model_dump ()
139+ return json .loads (json .dumps (value ))
140+
141+
def _add_request_options_to_span(
    span, config: Optional[GenerateContentConfigOrDict]
):
    """Record the generation request configuration as span attributes.

    Flattens ``config`` under the custom request prefix and attaches the
    result to ``span``. Does nothing when there is no config or when the
    span is unsampled (attributes would be dropped anyway).
    """
    if config is None:
        return
    if not span.get_span_context().trace_flags.sampled:
        return
    # System instruction can be overly long for a span attribute and is
    # recorded as an event (log) instead; headers could include sensitive
    # information, so neither is recorded here.
    omitted_keys = [
        "gen_ai.gcp.request.system_instruction",
        "gen_ai.gcp.request.http_options.headers",
    ]
    # Map GCP-specific keys onto the generalized GenAI semantic
    # conventions.
    # TODO: add more entries here as more semantic conventions are
    # generalized to cover more of the available config options.
    renamed_keys = {
        "gen_ai.gcp.request.temperature": "gen_ai.request.temperature",
        "gen_ai.gcp.request.top_k": "gen_ai.request.top_k",
        "gen_ai.gcp.request.top_p": "gen_ai.request.top_p",
        "gen_ai.gcp.request.candidate_count": "gen_ai.request.choice.count",
        "gen_ai.gcp.request.max_output_tokens": "gen_ai.request.max_tokens",
        "gen_ai.gcp.request.stop_sequences": "gen_ai.request.stop_sequences",
        "gen_ai.gcp.request.presence_penalty": "gen_ai.request.presence_penalty",
        "gen_ai.gcp.request.seed": "gen_ai.request.seed",
    }
    span.set_attributes(
        flatten_dict(
            _to_dict(config),
            key_prefix=CUSTOM_LLM_REQUEST_PREFIX,
            exclude_keys=omitted_keys,
            rename_keys=renamed_keys,
        )
    )
147173
148174
149175def _get_response_property (response : GenerateContentResponse , path : str ):
@@ -159,44 +185,6 @@ def _get_response_property(response: GenerateContentResponse, path: str):
159185 return current_context
160186
161187
162- def _get_temperature (config : Optional [GenerateContentConfigOrDict ]):
163- return _get_config_property (config , "temperature" )
164-
165-
166- def _get_top_k (config : Optional [GenerateContentConfigOrDict ]):
167- return _get_config_property (config , "top_k" )
168-
169-
170- def _get_top_p (config : Optional [GenerateContentConfigOrDict ]):
171- return _get_config_property (config , "top_p" )
172-
173-
174- # A map from define attributes to the function that can obtain
175- # the relevant information from the request object.
176- #
177- # TODO: expand this to cover a larger set of the available
178- # span attributes from GenAI semantic conventions.
179- #
180- # TODO: define semantic conventions for attributes that
181- # are relevant for the Google GenAI SDK which are not
182- # currently covered by the existing semantic conventions.
183- #
184- # See also: TODOS.md
185- _SPAN_ATTRIBUTE_TO_CONFIG_EXTRACTOR = {
186- gen_ai_attributes .GEN_AI_REQUEST_TEMPERATURE : _get_temperature ,
187- gen_ai_attributes .GEN_AI_REQUEST_TOP_K : _get_top_k ,
188- gen_ai_attributes .GEN_AI_REQUEST_TOP_P : _get_top_p ,
189- }
190-
191-
192- def _to_dict (value : object ):
193- if isinstance (value , dict ):
194- return value
195- if hasattr (value , "model_dump" ):
196- return value .model_dump ()
197- return json .loads (json .dumps (value ))
198-
199-
200188class _GenerateContentInstrumentationHelper :
201189 def __init__ (
202190 self ,
@@ -237,13 +225,7 @@ def process_request(
237225 config : Optional [GenerateContentConfigOrDict ],
238226 ):
239227 span = trace .get_current_span ()
240- for (
241- attribute_key ,
242- extractor ,
243- ) in _SPAN_ATTRIBUTE_TO_CONFIG_EXTRACTOR .items ():
244- attribute_value = extractor (config )
245- if attribute_value is not None :
246- span .set_attribute (attribute_key , attribute_value )
228+ _add_request_options_to_span (span , config )
247229 self ._maybe_log_system_instruction (config = config )
248230 self ._maybe_log_user_prompt (contents )
249231
@@ -317,7 +299,12 @@ def _maybe_update_error_type(self, response: GenerateContentResponse):
317299 def _maybe_log_system_instruction (
318300 self , config : Optional [GenerateContentConfigOrDict ] = None
319301 ):
320- system_instruction = _get_config_property (config , "system_instruction" )
302+ system_instruction = None
303+ if config is not None :
304+ if isinstance (config , dict ):
305+ system_instruction = config ["system_instruction" ]
306+ else :
307+ system_instruction = config .system_instruction
321308 if not system_instruction :
322309 return
323310 attributes = {
0 commit comments