Commit be446f1

[WIP] Prototype of semconv open-telemetry#2179 (event format)
1 parent 29dfd56 commit be446f1

File tree: 6 files changed, +463 −11 lines changed


instrumentation-genai/opentelemetry-instrumentation-google-genai/pyproject.toml

Lines changed: 5 additions & 0 deletions
@@ -77,3 +77,8 @@ stubPath = "types"
 reportMissingImports = "error"
 reportMissingTypeStubs = false
 pythonVersion = "3.9"
+
+[dependency-groups]
+dev = [
+    "pillow>=11.2.1",
+]

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 83 additions & 8 deletions
@@ -17,16 +17,20 @@
 import json
 import logging
 import os
+from re import S
 import time
 from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union
 
-from google.genai.models import AsyncModels, Models
+from google.genai.models import AsyncModels, Models, t as transformers
 from google.genai.types import (
     BlockedReason,
     Candidate,
     Content,
     ContentListUnion,
+    GenerateContentConfig,
+    ContentUnion,
     ContentListUnionDict,
+    Content,
     ContentUnion,
     ContentUnionDict,
     GenerateContentConfig,
@@ -47,6 +51,12 @@
 from .flags import is_content_recording_enabled
 from .otel_wrapper import OTelWrapper
 from .tool_call_wrapper import wrapped as wrapped_tool
+from .message import (
+    ContentUnion,
+    to_input_messages,
+    to_output_message,
+    to_system_instruction,
+)
 
 _logger = logging.getLogger(__name__)
 
@@ -143,6 +153,17 @@ def _to_dict(value: object):
     return json.loads(json.dumps(value))
 
 
+def _config_to_system_instruction(
+    config: GenerateContentConfigOrDict | None,
+) -> ContentUnion | None:
+    if not config:
+        return None
+
+    if isinstance(config, dict):
+        return GenerateContentConfig.model_validate(config).system_instruction
+    return config.system_instruction
+
+
 def _add_request_options_to_span(
     span, config: Optional[GenerateContentConfigOrDict], allow_list: AllowList
 ):
@@ -242,6 +263,7 @@ def __init__(
     ):
         self._start_time = time.time_ns()
         self._otel_wrapper = otel_wrapper
+        self._models_object = models_object
         self._genai_system = _determine_genai_system(models_object)
         self._genai_request_model = model
         self._finish_reasons_set = set()
@@ -290,14 +312,21 @@ def process_request(
         _add_request_options_to_span(
             span, config, self._generate_content_config_key_allowlist
         )
-        self._maybe_log_system_instruction(config=config)
-        self._maybe_log_user_prompt(contents)
 
-    def process_response(self, response: GenerateContentResponse):
+    def process_completion(
+        self,
+        *,
+        config: Optional[GenerateContentConfigOrDict],
+        request: Union[ContentListUnion, ContentListUnionDict],
+        response: GenerateContentResponse,
+    ):
         # TODO: Determine if there are other response properties that
         # need to be reflected back into the span attributes.
         #
         # See also: TODOS.md.
+        self._maybe_log_completion_details(
+            config=config, request=request, response=response
+        )
         self._update_finish_reasons(response)
         self._maybe_update_token_counts(response)
         self._maybe_update_error_type(response)
@@ -373,6 +402,43 @@ def _maybe_update_error_type(self, response: GenerateContentResponse):
         block_reason = response.prompt_feedback.block_reason.name.upper()
         self._error_type = f"BLOCKED_{block_reason}"
 
+    def _maybe_log_completion_details(
+        self,
+        *,
+        config: Optional[GenerateContentConfigOrDict],
+        request: Union[ContentListUnion, ContentListUnionDict],
+        response: GenerateContentResponse,
+    ) -> None:
+        def _transform_content(
+            content: Union[
+                ContentListUnion, ContentListUnionDict, Content, None
+            ],
+        ) -> list[Content]:
+            if content is None:
+                return []
+            return transformers.t_contents(content)
+
+        attributes = {
+            gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+        }
+
+        system_instruction = to_system_instruction(
+            contents=_transform_content(_config_to_system_instruction(config))
+        )
+        input_messages = to_input_messages(
+            contents=_transform_content(request)
+        )
+        output_message = to_output_message(
+            candidates=response.candidates or []
+        )
+
+        self._otel_wrapper.log_completion_details(
+            system_instructions=system_instruction,
+            input_messages=input_messages,
+            output_messages=output_message,
+            attributes=attributes,
+        )
+
     def _maybe_log_system_instruction(
         self, config: Optional[GenerateContentConfigOrDict] = None
     ):
@@ -596,7 +662,9 @@ def instrumented_generate_content(
                     config=helper.wrapped_config(config),
                     **kwargs,
                 )
-                helper.process_response(response)
+                helper.process_completion(
+                    config=config, request=contents, response=response
+                )
                 return response
             except Exception as error:
                 helper.process_error(error)
@@ -641,7 +709,9 @@ def instrumented_generate_content_stream(
                     config=helper.wrapped_config(config),
                     **kwargs,
                 ):
-                    helper.process_response(response)
+                    helper.process_completion(
+                        config=config, request=contents, response=response
+                    )
                     yield response
             except Exception as error:
                 helper.process_error(error)
@@ -686,7 +756,10 @@ async def instrumented_generate_content(
                     config=helper.wrapped_config(config),
                     **kwargs,
                 )
-                helper.process_response(response)
+                helper.process_completion(
+                    config=config, request=contents, response=response
+                )
+
                 return response
             except Exception as error:
                 helper.process_error(error)
@@ -744,7 +817,9 @@ async def _response_async_generator_wrapper():
             with trace.use_span(span, end_on_exit=True):
                 try:
                     async for response in response_async_generator:
-                        helper.process_response(response)
+                        helper.process_completion(
+                            config=config, request=contents, response=response
+                        )
                         yield response
                 except Exception as error:
                     helper.process_error(error)
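
The new _config_to_system_instruction helper above accepts either a typed GenerateContentConfig or its plain-dict form. A minimal sketch of that equivalence (illustrative only, not part of this commit; it uses only the public google-genai types already imported above):

# Illustrative sketch, not part of the commit: both config forms normalize to
# the same system_instruction value.
from google.genai.types import GenerateContentConfig

typed_config = GenerateContentConfig(system_instruction="You are a terse assistant.")
dict_config = {"system_instruction": "You are a terse assistant."}

# Typed path: the attribute is read directly.
assert typed_config.system_instruction == "You are a terse assistant."
# Dict path: the dict is validated into GenerateContentConfig first.
validated = GenerateContentConfig.model_validate(dict_config)
assert validated.system_instruction == "You are a terse assistant."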
Lines changed: 118 additions & 0 deletions
@@ -0,0 +1,118 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from .message_models import (
+    BlobPart,
+    ChatMessage,
+    Choice,
+    FileDataPart,
+    InputMessages,
+    MessagePart,
+    SystemMessage,
+    OutputMessages,
+    TextPart,
+    ToolCallPart,
+    ToolCallResponsePart,
+    UnknownPart,
+)
+
+from google.genai.models import t as transfomers
+
+from google.genai.types import (
+    BlockedReason,
+    Candidate,
+    Content,
+    ContentListUnion,
+    ContentListUnionDict,
+    ContentUnion,
+    GenerateContentConfig,
+    ContentUnionDict,
+    GenerateContentConfig,
+    GenerateContentConfigOrDict,
+    GenerateContentResponse,
+    Part,
+    PartUnion,
+)
+
+_logger = logging.getLogger(__name__)
+
+
+def to_input_messages(
+    *,
+    contents: list[Content],
+) -> InputMessages:
+    return InputMessages(
+        messages=[_to_chat_message(content) for content in contents]
+    )
+
+
+def to_output_message(
+    *,
+    candidates: list[Candidate],
+) -> OutputMessages:
+    choices = [
+        Choice(
+            finish_reason=candidate.finish_reason or "",
+            message=_to_chat_message(candidate.content)
+            if candidate.content
+            else None,
+        )
+        for candidate in candidates
+    ]
+    return OutputMessages(choices=choices)
+
+
+def to_system_instruction(
+    *,
+    contents: list[Content],
+) -> SystemMessage | None:
+    return SystemMessage(
+        messages=[_to_chat_message(content) for content in contents]
+    )
+
+
+def _to_chat_message(
+    content: Content,
+) -> ChatMessage:
+    parts = content.parts or []
+    return ChatMessage(role="system", parts=[_to_part(part) for part in parts])
+
+
+def _to_part(part: Part) -> MessagePart:
+    if (text := part.text) is not None:
+        return TextPart(content=text)
+
+    if data := part.inline_data:
+        return BlobPart(mime_type=data.mime_type or "", data=data.data or b"")
+
+    if data := part.file_data:
+        return FileDataPart(
+            mime_type=data.mime_type or "", file_uri=data.file_uri or ""
+        )
+
+    if call := part.function_call:
+        return ToolCallPart(
+            id=call.id or "", name=call.name or "", arguments=call.args
+        )
+
+    if response := part.function_response:
+        return ToolCallResponsePart(
+            id=response.id or "",
+            name=response.name or "",
+            result=response.response,
+        )
+
+    _logger.info("Unknown part dropped from telemetry %s", part)
+    return UnknownPart()
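
A rough usage sketch of the conversion helpers in this new module (illustrative only; the import path is inferred from the "from .message import ..." line in generate_content.py, and the message_models shapes may differ in the final version):

# Rough sketch, not part of the commit: converting google-genai request and
# response objects into the prototype event-format message models.
from google.genai.types import Candidate, Content, Part

from opentelemetry.instrumentation.google_genai.message import (  # inferred path
    to_input_messages,
    to_output_message,
)

request = [Content(role="user", parts=[Part(text="What is OpenTelemetry?")])]
inputs = to_input_messages(contents=request)
# -> InputMessages whose single ChatMessage wraps a TextPart; note that the
#    WIP _to_chat_message helper currently hardcodes role="system".

candidates = [
    Candidate(content=Content(role="model", parts=[Part(text="An observability framework.")]))
]
outputs = to_output_message(candidates=candidates)
# -> OutputMessages with one Choice; finish_reason falls back to "" when unset.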
