|
17 | 17 | import json |
18 | 18 | import logging |
19 | 19 | import os |
| 20 | +from re import S |
20 | 21 | import time |
21 | 22 | from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union |
22 | 23 |
|
23 | | -from google.genai.models import AsyncModels, Models |
| 24 | +from google.genai.models import AsyncModels, Models, t as transformers |
24 | 25 | from google.genai.types import ( |
25 | 26 | BlockedReason, |
26 | 27 | Candidate, |
27 | 28 | Content, |
28 | 29 | ContentListUnion, |
| 30 | + GenerateContentConfig, |
| 31 | + ContentUnion, |
29 | 32 | ContentListUnionDict, |
| 33 | + Content, |
30 | 34 | ContentUnion, |
31 | 35 | ContentUnionDict, |
32 | 36 | GenerateContentConfig, |
|
47 | 51 | from .flags import is_content_recording_enabled |
48 | 52 | from .otel_wrapper import OTelWrapper |
49 | 53 | from .tool_call_wrapper import wrapped as wrapped_tool |
| 54 | +from .message import ( |
| 55 | + ContentUnion, |
| 56 | + to_input_messages, |
| 57 | + to_output_message, |
| 58 | + to_system_instruction, |
| 59 | +) |
50 | 60 |
|
51 | 61 | _logger = logging.getLogger(__name__) |
52 | 62 |
|
@@ -143,6 +153,17 @@ def _to_dict(value: object): |
143 | 153 | return json.loads(json.dumps(value)) |
144 | 154 |
|
145 | 155 |
|
def _config_to_system_instruction(
    config: GenerateContentConfigOrDict | None,
) -> ContentUnion | None:
    """Return the ``system_instruction`` carried by *config*, if any.

    Accepts either a ``GenerateContentConfig`` instance or its dict form;
    a dict is first normalized through pydantic validation so the field
    access below is uniform. Returns ``None`` for a missing/empty config.
    """
    if not config:
        return None
    # Normalize the dict form into a validated config object so both
    # input shapes are handled by a single attribute access.
    if isinstance(config, dict):
        config = GenerateContentConfig.model_validate(config)
    return config.system_instruction
146 | 167 | def _add_request_options_to_span( |
147 | 168 | span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList |
148 | 169 | ): |
@@ -242,6 +263,7 @@ def __init__( |
242 | 263 | ): |
243 | 264 | self._start_time = time.time_ns() |
244 | 265 | self._otel_wrapper = otel_wrapper |
| 266 | + self._models_object = models_object |
245 | 267 | self._genai_system = _determine_genai_system(models_object) |
246 | 268 | self._genai_request_model = model |
247 | 269 | self._finish_reasons_set = set() |
@@ -290,14 +312,21 @@ def process_request( |
290 | 312 | _add_request_options_to_span( |
291 | 313 | span, config, self._generate_content_config_key_allowlist |
292 | 314 | ) |
293 | | - self._maybe_log_system_instruction(config=config) |
294 | | - self._maybe_log_user_prompt(contents) |
295 | 315 |
|
296 | | - def process_response(self, response: GenerateContentResponse): |
| 316 | + def process_completion( |
| 317 | + self, |
| 318 | + *, |
| 319 | + config: Optional[GenerateContentConfigOrDict], |
| 320 | + request: Union[ContentListUnion, ContentListUnionDict], |
| 321 | + response: GenerateContentResponse, |
| 322 | + ): |
297 | 323 | # TODO: Determine if there are other response properties that |
298 | 324 | # need to be reflected back into the span attributes. |
299 | 325 | # |
300 | 326 | # See also: TODOS.md. |
| 327 | + self._maybe_log_completion_details( |
| 328 | + config=config, request=request, response=response |
| 329 | + ) |
301 | 330 | self._update_finish_reasons(response) |
302 | 331 | self._maybe_update_token_counts(response) |
303 | 332 | self._maybe_update_error_type(response) |
@@ -373,6 +402,45 @@ def _maybe_update_error_type(self, response: GenerateContentResponse): |
373 | 402 | block_reason = response.prompt_feedback.block_reason.name.upper() |
374 | 403 | self._error_type = f"BLOCKED_{block_reason}" |
375 | 404 |
|
    def _maybe_log_completion_details(
        self,
        *,
        config: Optional[GenerateContentConfigOrDict],
        request: Union[ContentListUnion, ContentListUnionDict],
        response: GenerateContentResponse,
    ) -> None:
        """Emit the full request/response payload as a completion-details log.

        Converts the system instruction (from *config*), the request contents,
        and the response candidates into the instrumentation's message types,
        then forwards them to the OTel wrapper together with the gen_ai.system
        attribute.

        NOTE(review): in the streaming wrappers this method runs once per
        streamed chunk (via process_completion), so a single streamed call
        appears to produce multiple completion-details records — confirm this
        is intended.
        """

        def _transform_content(
            content: Union[
                ContentListUnion, ContentListUnionDict, Content, None
            ],
        ) -> list[Content]:
            # Normalize any accepted content shape (single item, dict form,
            # or list) into a list of Content objects; None becomes [].
            if content is None:
                return []
            # NOTE(review): reaches into SDK internals — `t` (aliased as
            # `transformers`) and `_api_client` are private google-genai
            # surfaces and may break on SDK upgrades.
            return transformers.t_contents(
                self._models_object._api_client, content
            )

        # Span/log attributes; currently only the provider system
        # (e.g. which GenAI backend) is recorded.
        attributes = {
            gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
        }

        # System instruction may be absent: _config_to_system_instruction
        # returns None for a missing/empty config, yielding [] here.
        system_instruction = to_system_instruction(
            contents=_transform_content(_config_to_system_instruction(config))
        )
        input_messages = to_input_messages(
            contents=_transform_content(request)
        )
        # An errored/blocked response may have no candidates; default to [].
        output_message = to_output_message(
            candidates=response.candidates or []
        )

        self._otel_wrapper.log_completion_details(
            system_instructions=system_instruction,
            input_messages=input_messages,
            output_messages=output_message,
            attributes=attributes,
        )
| 443 | + |
376 | 444 | def _maybe_log_system_instruction( |
377 | 445 | self, config: Optional[GenerateContentConfigOrDict] = None |
378 | 446 | ): |
@@ -596,7 +664,9 @@ def instrumented_generate_content( |
596 | 664 | config=helper.wrapped_config(config), |
597 | 665 | **kwargs, |
598 | 666 | ) |
599 | | - helper.process_response(response) |
| 667 | + helper.process_completion( |
| 668 | + config=config, request=contents, response=response |
| 669 | + ) |
600 | 670 | return response |
601 | 671 | except Exception as error: |
602 | 672 | helper.process_error(error) |
@@ -641,7 +711,9 @@ def instrumented_generate_content_stream( |
641 | 711 | config=helper.wrapped_config(config), |
642 | 712 | **kwargs, |
643 | 713 | ): |
644 | | - helper.process_response(response) |
| 714 | + helper.process_completion( |
| 715 | + config=config, request=contents, response=response |
| 716 | + ) |
645 | 717 | yield response |
646 | 718 | except Exception as error: |
647 | 719 | helper.process_error(error) |
@@ -686,7 +758,10 @@ async def instrumented_generate_content( |
686 | 758 | config=helper.wrapped_config(config), |
687 | 759 | **kwargs, |
688 | 760 | ) |
689 | | - helper.process_response(response) |
| 761 | + helper.process_completion( |
| 762 | + config=config, request=contents, response=response |
| 763 | + ) |
| 764 | + |
690 | 765 | return response |
691 | 766 | except Exception as error: |
692 | 767 | helper.process_error(error) |
@@ -744,7 +819,9 @@ async def _response_async_generator_wrapper(): |
744 | 819 | with trace.use_span(span, end_on_exit=True): |
745 | 820 | try: |
746 | 821 | async for response in response_async_generator: |
747 | | - helper.process_response(response) |
| 822 | + helper.process_completion( |
| 823 | + config=config, request=contents, response=response |
| 824 | + ) |
748 | 825 | yield response |
749 | 826 | except Exception as error: |
750 | 827 | helper.process_error(error) |
|
0 commit comments