Skip to content

Commit be2c377

Browse files
authored
Merge branch 'main' into openai-example-fixes
2 parents fcebe85 + c0bc2c9 commit be2c377

File tree

10 files changed

+247
-28
lines changed

10 files changed

+247
-28
lines changed

CHANGELOG.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1111
1212
## Unreleased
1313

14+
### Fixed
15+
- `opentelemetry-instrumentation-redis` Add missing entry in doc string for `def _instrument`
16+
([#3247](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3247))
17+
1418
## Version 1.30.0/0.51b0 (2025-02-03)
1519

1620
### Added

instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ To uninstrument clients, call the uninstrument method:
7777
7878
References
7979
----------
80-
* `OpenTelemetry OpenAI Instrumentation <https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/openai/openai.html>`_
80+
* `OpenTelemetry OpenAI Instrumentation <https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation-genai/openai.html>`_
8181
* `OpenTelemetry Project <https://opentelemetry.io/>`_
8282
* `OpenTelemetry Python Examples <https://github.com/open-telemetry/opentelemetry-python/tree/main/docs/examples>`_
8383

instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,3 +15,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
1515
([#3208](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3208))
1616
- VertexAI emit user, system, and assistant events
1717
([#3203](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3203))
18+
- Add Vertex gen AI response span attributes
19+
([#3227](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3227))

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/events.py

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@
2020
schematized in YAML and the Weaver tool supports it.
2121
"""
2222

23+
from __future__ import annotations
24+
25+
from dataclasses import asdict, dataclass
26+
from typing import Literal
27+
2328
from opentelemetry._events import Event
2429
from opentelemetry.semconv._incubating.attributes import gen_ai_attributes
2530
from opentelemetry.util.types import AnyValue
@@ -89,3 +94,46 @@ def system_event(
8994
},
9095
body=body,
9196
)
97+
98+
99+
@dataclass
class ChoiceMessage:
    """The message field for a gen_ai.choice event.

    https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#event-gen_aichoice
    """

    # Message body; unset (None) fields are filtered out when the event is built.
    content: AnyValue = None
    # Producer role; semconv default is "assistant".
    role: str = "assistant"
105+
106+
107+
# Well-known finish_reason values from the GenAI semantic conventions.
# Callers may also pass provider-specific strings (APIs below accept
# FinishReason | str), so this Literal is advisory, not exhaustive.
FinishReason = Literal[
    "content_filter", "error", "length", "stop", "tool_calls"
]
110+
111+
112+
# TODO add tool calls
# https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3216
def choice_event(
    *,
    finish_reason: FinishReason | str,
    index: int,
    message: ChoiceMessage,
) -> Event:
    """Build a gen_ai.choice event describing one Gen AI response message.

    https://github.com/open-telemetry/semantic-conventions/blob/v1.28.0/docs/gen-ai/gen-ai-events.md#event-gen_aichoice
    """

    def _drop_nones(pairs) -> dict[str, AnyValue]:
        # Omit unset (None) dataclass fields from the serialized message.
        return {key: value for key, value in pairs if value is not None}

    event_body: dict[str, AnyValue] = {
        "finish_reason": finish_reason,
        "index": index,
        "message": asdict(message, dict_factory=_drop_nones),
    }
    return Event(
        name="gen_ai.choice",
        attributes={
            gen_ai_attributes.GEN_AI_SYSTEM: gen_ai_attributes.GenAiSystemValues.VERTEX_AI.value,
        },
        body=event_body,
    )

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,11 @@
2525
from opentelemetry.instrumentation.vertexai.utils import (
2626
GenerateContentParams,
2727
get_genai_request_attributes,
28+
get_genai_response_attributes,
2829
get_server_attributes,
2930
get_span_name,
3031
request_to_events,
32+
response_to_events,
3133
)
3234
from opentelemetry.trace import SpanKind, Tracer
3335

@@ -113,25 +115,28 @@ def traced_method(
113115
name=span_name,
114116
kind=SpanKind.CLIENT,
115117
attributes=span_attributes,
116-
) as _span:
118+
) as span:
117119
for event in request_to_events(
118120
params=params, capture_content=capture_content
119121
):
120122
event_logger.emit(event)
121123

122124
# TODO: set error.type attribute
123125
# https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md
124-
result = wrapped(*args, **kwargs)
126+
response = wrapped(*args, **kwargs)
125127
# TODO: handle streaming
126128
# if is_streaming(kwargs):
127129
# return StreamWrapper(
128130
# result, span, event_logger, capture_content
129131
# )
130132

131-
# TODO: add response attributes and events
132-
# _set_response_attributes(
133-
# span, result, event_logger, capture_content
134-
# )
135-
return result
133+
if span.is_recording():
134+
span.set_attributes(get_genai_response_attributes(response))
135+
for event in response_to_events(
136+
response=response, capture_content=capture_content
137+
):
138+
event_logger.emit(event)
139+
140+
return response
136141

137142
return traced_method

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py

Lines changed: 73 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,10 @@
2828

2929
from opentelemetry._events import Event
3030
from opentelemetry.instrumentation.vertexai.events import (
31+
ChoiceMessage,
32+
FinishReason,
3133
assistant_event,
34+
choice_event,
3235
system_event,
3336
user_event,
3437
)
@@ -39,15 +42,25 @@
3942
from opentelemetry.util.types import AnyValue, AttributeValue
4043

4144
if TYPE_CHECKING:
42-
from google.cloud.aiplatform_v1.types import content, tool
45+
from google.cloud.aiplatform_v1.types import (
46+
content,
47+
prediction_service,
48+
tool,
49+
)
4350
from google.cloud.aiplatform_v1beta1.types import (
4451
content as content_v1beta1,
4552
)
53+
from google.cloud.aiplatform_v1beta1.types import (
54+
prediction_service as prediction_service_v1beta1,
55+
)
4656
from google.cloud.aiplatform_v1beta1.types import (
4757
tool as tool_v1beta1,
4858
)
4959

5060

61+
# Role name Vertex AI uses for model-authored messages (instead of "assistant").
_MODEL = "model"
62+
63+
5164
@dataclass(frozen=True)
5265
class GenerateContentParams:
5366
model: str
@@ -137,6 +150,24 @@ def get_genai_request_attributes(
137150
return attributes
138151

139152

153+
def get_genai_response_attributes(
    response: prediction_service.GenerateContentResponse
    | prediction_service_v1beta1.GenerateContentResponse,
) -> dict[str, AttributeValue]:
    """Extract gen_ai.response.* and gen_ai.usage.* span attributes from a
    Vertex GenerateContentResponse."""
    mapped_reasons: list[str] = []
    for candidate in response.candidates:
        mapped_reasons.append(_map_finish_reason(candidate.finish_reason))

    usage = response.usage_metadata
    attributes: dict[str, AttributeValue] = {
        GenAIAttributes.GEN_AI_RESPONSE_MODEL: response.model_version,
        GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS: mapped_reasons,
    }
    attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] = (
        usage.prompt_token_count
    )
    attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] = (
        usage.candidates_token_count
    )
    # TODO: add gen_ai.response.id once available in the python client
    # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3246
    return attributes
169+
170+
140171
_MODEL_STRIP_RE = re.compile(
141172
r"^projects/(.*)/locations/(.*)/publishers/google/models/"
142173
)
@@ -182,7 +213,7 @@ def request_to_events(
182213

183214
for content in params.contents or []:
184215
# Assistant message
185-
if content.role == "model":
216+
if content.role == _MODEL:
186217
request_content = _parts_to_any_value(
187218
capture_content=capture_content, parts=content.parts
188219
)
@@ -196,6 +227,27 @@ def request_to_events(
196227
yield user_event(role=content.role, content=request_content)
197228

198229

230+
def response_to_events(
    *,
    response: prediction_service.GenerateContentResponse
    | prediction_service_v1beta1.GenerateContentResponse,
    capture_content: bool,
) -> Iterable[Event]:
    """Yield one gen_ai.choice event per candidate in the response."""
    for candidate in response.candidates:
        candidate_content = candidate.content
        # Vertex reports the role as "model"; fall back to it when unset,
        # rather than the semconv default of "assistant".
        message = ChoiceMessage(
            role=candidate_content.role or _MODEL,
            content=_parts_to_any_value(
                capture_content=capture_content,
                parts=candidate_content.parts,
            ),
        )
        yield choice_event(
            finish_reason=_map_finish_reason(candidate.finish_reason),
            index=candidate.index,
            message=message,
        )
249+
250+
199251
def _parts_to_any_value(
200252
*,
201253
capture_content: bool,
@@ -208,3 +260,22 @@ def _parts_to_any_value(
208260
cast("dict[str, AnyValue]", type(part).to_dict(part)) # type: ignore[reportUnknownMemberType]
209261
for part in parts
210262
]
263+
264+
265+
def _map_finish_reason(
266+
finish_reason: content.Candidate.FinishReason
267+
| content_v1beta1.Candidate.FinishReason,
268+
) -> FinishReason | str:
269+
EnumType = type(finish_reason) # pylint: disable=invalid-name
270+
if (
271+
finish_reason is EnumType.FINISH_REASON_UNSPECIFIED
272+
or finish_reason is EnumType.OTHER
273+
):
274+
return "error"
275+
if finish_reason is EnumType.STOP:
276+
return "stop"
277+
if finish_reason is EnumType.MAX_TOKENS:
278+
return "length"
279+
280+
# If there is no 1:1 mapping to an OTel preferred enum value, use the exact vertex reason
281+
return finish_reason.name

0 commit comments

Comments
 (0)