Skip to content

Commit b41e035

Browse files
committed
Vertex response gen_ai.choice events
1 parent 9b581e7 commit b41e035

File tree

3 files changed

+101
-5
lines changed
  • instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai

3 files changed

+101
-5
lines changed

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/events.py

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@
2020
schematized in YAML and the Weaver tool supports it.
2121
"""
2222

23+
from __future__ import annotations
24+
25+
from dataclasses import asdict, dataclass
26+
from typing import Literal
27+
2328
from opentelemetry._events import Event
2429
from opentelemetry.semconv._incubating.attributes import gen_ai_attributes
2530
from opentelemetry.util.types import AnyValue
@@ -89,3 +94,42 @@ def system_event(
8994
},
9095
body=body,
9196
)
97+
98+
99+
@dataclass
class ChoiceMessage:
    """The message field for a gen_ai.choice event.

    Mirrors the ``message`` body field of the semconv gen_ai.choice event:
    https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#event-gen_aichoice
    """

    # Message content as an event-body AnyValue; None when no content is
    # attached (e.g. content capture disabled — TODO confirm against callers).
    content: AnyValue = None
    # Author role of the message. Defaults to the semconv "assistant";
    # Vertex responses use "model" instead (see response_to_events).
    role: str = "assistant"
105+
106+
107+
# Finish reasons defined by the OTel gen_ai semantic conventions for the
# gen_ai.choice event; plain strings outside this closed set are also
# accepted where a Vertex-specific reason has no semconv equivalent.
FinishReason = Literal[
    "content_filter", "error", "length", "stop", "tool_calls"
]
110+
111+
112+
# TODO add tool calls
# https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3216
def choice_event(
    *,
    finish_reason: FinishReason | str,
    index: int,
    message: ChoiceMessage,
) -> Event:
    """Create a gen_ai.choice event describing one Gen AI response candidate.

    Args:
        finish_reason: why the model stopped generating this candidate.
        index: zero-based position of the candidate in the response.
        message: the candidate's message content and role.

    https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#event-gen_aichoice
    """
    # Event bodies carry plain AnyValue structures, so the dataclass is
    # flattened into a dict before being attached.
    event_body: dict[str, AnyValue] = {
        "finish_reason": finish_reason,
        "index": index,
        "message": asdict(message),
    }
    return Event(
        name="gen_ai.choice",
        attributes={
            gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value,
        },
        body=event_body,
    )

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
get_server_attributes,
3030
get_span_name,
3131
request_to_events,
32+
response_to_events,
3233
)
3334
from opentelemetry.trace import SpanKind, Tracer
3435

@@ -131,10 +132,11 @@ def traced_method(
131132

132133
if span.is_recording():
133134
span.set_attributes(get_genai_response_attributes(response))
134-
# TODO: add response attributes and events
135-
# _set_response_attributes(
136-
# span, result, event_logger, capture_content
137-
# )
135+
for event in response_to_events(
136+
response=response, capture_content=capture_content
137+
):
138+
event_logger.emit(event)
139+
138140
return response
139141

140142
return traced_method

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py

Lines changed: 51 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,10 @@
2828

2929
from opentelemetry._events import Event
3030
from opentelemetry.instrumentation.vertexai.events import (
31+
ChoiceMessage,
32+
FinishReason,
3133
assistant_event,
34+
choice_event,
3235
system_event,
3336
user_event,
3437
)
@@ -55,6 +58,9 @@
5558
)
5659

5760

61+
# Role name Vertex AI uses for model-authored content, in place of the
# semconv "assistant" role (see response_to_events / request_to_events).
_MODEL = "model"
62+
63+
5864
@dataclass(frozen=True)
5965
class GenerateContentParams:
6066
model: str
@@ -204,7 +210,7 @@ def request_to_events(
204210

205211
for content in params.contents or []:
206212
# Assistant message
207-
if content.role == "model":
213+
if content.role == _MODEL:
208214
request_content = _parts_to_any_value(
209215
capture_content=capture_content, parts=content.parts
210216
)
@@ -218,6 +224,27 @@ def request_to_events(
218224
yield user_event(role=content.role, content=request_content)
219225

220226

227+
def response_to_events(
    *,
    response: prediction_service.GenerateContentResponse
    | prediction_service_v1beta1.GenerateContentResponse,
    capture_content: bool,
) -> Iterable[Event]:
    """Yield one gen_ai.choice event per candidate in *response*.

    Args:
        response: the Vertex GenerateContent response (v1 or v1beta1).
        capture_content: whether message content is included in event bodies.
    """
    for candidate in response.candidates:
        candidate_content = candidate.content
        # Vertex labels model-authored turns "model" rather than the semconv
        # "assistant", so fall back to that when the role is unset.
        message = ChoiceMessage(
            role=candidate_content.role or _MODEL,
            content=_parts_to_any_value(
                capture_content=capture_content,
                parts=candidate_content.parts,
            ),
        )
        yield choice_event(
            finish_reason=_map_finish_reason(candidate.finish_reason),
            index=candidate.index,
            message=message,
        )
246+
247+
221248
def _parts_to_any_value(
222249
*,
223250
capture_content: bool,
@@ -230,3 +257,26 @@ def _parts_to_any_value(
230257
cast("dict[str, AnyValue]", type(part).to_dict(part)) # type: ignore[reportUnknownMemberType]
231258
for part in parts
232259
]
260+
261+
262+
def _map_finish_reason(
263+
finish_reason: content.Candidate.FinishReason
264+
| content_v1beta1.Candidate.FinishReason,
265+
) -> FinishReason | str:
266+
EnumType = type(finish_reason)
267+
if (
268+
finish_reason is EnumType.FINISH_REASON_UNSPECIFIED
269+
or finish_reason is EnumType.OTHER
270+
):
271+
return "error"
272+
if finish_reason is EnumType.STOP:
273+
return "stop"
274+
if finish_reason is EnumType.MAX_TOKENS:
275+
return "length"
276+
277+
# There are a lot of specific enum values from Vertex that would map to "content_filter".
278+
# I'm worried trying to map the enum obfuscates the telemetry because 1) it over
279+
# generalizes and 2) half of the values are from the OTel enum and others from the vertex
280+
# enum. See for reference
281+
# https://github.com/googleapis/python-aiplatform/blob/c5023698c7068e2f84523f91b824641c9ef2d694/google/cloud/aiplatform_v1/types/content.py#L786-L822
282+
return finish_reason.name.lower()

0 commit comments

Comments
 (0)