Skip to content

Commit e43c9f2

Browse files
committed
Fix all tests
1 parent 28a8d27 commit e43c9f2

File tree

5 files changed

+569
-288
lines changed

5 files changed

+569
-288
lines changed

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py

Lines changed: 47 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ def handle_response(
150150
)
151151
self.event_logger.emit(
152152
create_operation_details_event(
153-
api_endpoint = api_endpoint,
153+
api_endpoint=api_endpoint,
154154
params=params,
155155
capture_content=self.capture_content,
156156
response=response,
@@ -225,13 +225,30 @@ def generate_content(
225225
| prediction_service_v1beta1.GenerateContentResponse
226226
):
227227
if self.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
228-
_instrumentation = self._with_default_instrumentation
228+
with self._with_default_instrumentation(
229+
instance, args, kwargs
230+
) as handle_response:
231+
response = wrapped(*args, **kwargs)
232+
handle_response(response)
233+
return response
229234
else:
230-
_instrumentation = self._with_new_instrumentation
231-
with _instrumentation(instance, args, kwargs) as handle_response:
232-
response = wrapped(*args, **kwargs)
233-
handle_response(response)
234-
return response
235+
with self._with_new_instrumentation(
236+
instance, args, kwargs
237+
) as handle_response:
238+
try:
239+
response = wrapped(*args, **kwargs)
240+
except Exception as e:
241+
self.event_logger.emit(
242+
create_operation_details_event(
243+
params=_extract_params(*args, **kwargs),
244+
response=None,
245+
capture_content=self.capture_content,
246+
api_endpoint=instance.api_endpoint,
247+
)
248+
)
249+
raise e
250+
handle_response(response)
251+
return response
235252

236253
async def agenerate_content(
237254
self,
@@ -251,10 +268,27 @@ async def agenerate_content(
251268
| prediction_service_v1beta1.GenerateContentResponse
252269
):
253270
if self.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
254-
_instrumentation = self._with_default_instrumentation
271+
with self._with_default_instrumentation(
272+
instance, args, kwargs
273+
) as handle_response:
274+
response = await wrapped(*args, **kwargs)
275+
handle_response(response)
276+
return response
255277
else:
256-
_instrumentation = self._with_new_instrumentation
257-
with _instrumentation(instance, args, kwargs) as handle_response:
258-
response = await wrapped(*args, **kwargs)
259-
handle_response(response)
260-
return response
278+
with self._with_new_instrumentation(
279+
instance, args, kwargs
280+
) as handle_response:
281+
try:
282+
response = await wrapped(*args, **kwargs)
283+
except Exception as e:
284+
self.event_logger.emit(
285+
create_operation_details_event(
286+
params=_extract_params(*args, **kwargs),
287+
response=None,
288+
capture_content=self.capture_content,
289+
api_endpoint=instance.api_endpoint,
290+
)
291+
)
292+
raise e
293+
handle_response(response)
294+
return response

instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py

Lines changed: 27 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,8 @@ def get_genai_request_attributes(
129129
generation_config.temperature
130130
)
131131
if "top_p" in generation_config:
132+
# There is also a top_k parameter (the maximum number of tokens to consider when sampling),
133+
# but no semconv yet exists for it.
132134
attributes[GenAIAttributes.GEN_AI_REQUEST_TOP_P] = (
133135
generation_config.top_p
134136
)
@@ -144,14 +146,28 @@ def get_genai_request_attributes(
144146
attributes[GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY] = (
145147
generation_config.frequency_penalty
146148
)
147-
if "seed" in generation_config and use_latest_semconvs:
148-
attributes[GenAIAttributes.GEN_AI_REQUEST_SEED] = (
149-
generation_config.seed
150-
)
151149
if "stop_sequences" in generation_config:
152150
attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = (
153151
generation_config.stop_sequences
154152
)
153+
if use_latest_semconvs:
154+
if "seed" in generation_config:
155+
attributes[GenAIAttributes.GEN_AI_REQUEST_SEED] = (
156+
generation_config.seed
157+
)
158+
if "candidate_count" in generation_config:
159+
attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = (
160+
generation_config.candidate_count
161+
)
162+
if "response_mime_type" in generation_config:
163+
if generation_config.response_mime_type == "text/plain":
164+
attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = "text"
165+
elif generation_config.response_mime_type == "application/json":
166+
attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = "json"
167+
else:
168+
attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = (
169+
generation_config.response_mime_type
170+
)
155171

156172
return attributes
157173

@@ -164,8 +180,6 @@ def get_genai_response_attributes(
164180
_map_finish_reason(candidate.finish_reason)
165181
for candidate in response.candidates
166182
]
167-
# TODO: add gen_ai.response.id once available in the python client
168-
# https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3246
169183
return {
170184
GenAIAttributes.GEN_AI_RESPONSE_MODEL: response.model_version,
171185
GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS: finish_reasons,
@@ -262,14 +276,16 @@ def create_operation_details_event(
262276
*,
263277
api_endpoint: str,
264278
response: prediction_service.GenerateContentResponse
265-
| prediction_service_v1beta1.GenerateContentResponse,
279+
| prediction_service_v1beta1.GenerateContentResponse
280+
| None,
266281
params: GenerateContentParams,
267282
capture_content: bool,
268283
) -> Event:
269284
event = Event(name="gen_ai.client.inference.operation.details")
270285
attributes: dict[str, AnyValue] = {
271286
**get_genai_request_attributes(True, params),
272287
**get_server_attributes(api_endpoint),
288+
**(get_genai_response_attributes(response) if response else {}),
273289
}
274290
event.attributes = attributes
275291
if not capture_content:
@@ -287,7 +303,7 @@ def create_operation_details_event(
287303
attributes["gen_ai.input.messages"] = [
288304
_convert_content_to_message(content) for content in params.contents
289305
]
290-
if response.candidates:
306+
if response and response.candidates:
291307
attributes["gen_ai.output.messages"] = (
292308
_convert_response_to_output_messages(response)
293309
)
@@ -310,13 +326,13 @@ def _convert_content_to_message(content: content.Content) -> dict:
310326
message = {}
311327
message["role"] = content.role
312328
message["parts"] = []
313-
for part in content.parts:
329+
for idx, part in enumerate(content.parts):
314330
if "function_response" in part:
315331
part = part.function_response
316332
message["parts"].append(
317333
{
318334
"type": "tool_call_response",
319-
"id": part.id,
335+
"id": f"{part.name}_{idx}",
320336
"response": json_format.MessageToDict(part._pb.response),
321337
}
322338
)
@@ -325,9 +341,8 @@ def _convert_content_to_message(content: content.Content) -> dict:
325341
message["parts"].append(
326342
{
327343
"type": "tool_call",
328-
"id": part.id,
344+
"id": f"{part.name}_{idx}",
329345
"name": part.name,
330-
# TODO: support partial_args/streaming here?
331346
"response": json_format.MessageToDict(
332347
part._pb.args,
333348
),

instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/conftest.py

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010
Generator,
1111
Mapping,
1212
MutableMapping,
13-
Optional,
1413
Protocol,
1514
TypeVar,
1615
)
@@ -33,6 +32,7 @@
3332

3433
from opentelemetry.instrumentation._semconv import (
3534
OTEL_SEMCONV_STABILITY_OPT_IN,
35+
_OpenTelemetrySemanticConventionStability,
3636
)
3737
from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
3838
from opentelemetry.instrumentation.vertexai.utils import (
@@ -122,13 +122,17 @@ def vertexai_init(vcr: VCR) -> None:
122122
def instrument_no_content(
123123
tracer_provider, event_logger_provider, meter_provider, request
124124
):
125+
# Reset global state.
126+
_OpenTelemetrySemanticConventionStability._initialized = False
125127
os.environ.update(
126128
{OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "False"}
127129
)
128130
if request.param:
129131
os.environ.update(
130132
{OTEL_SEMCONV_STABILITY_OPT_IN: "gen_ai_latest_experimental"}
131133
)
134+
else:
135+
os.environ.update({OTEL_SEMCONV_STABILITY_OPT_IN: "stable"})
132136

133137
instrumentor = VertexAIInstrumentor()
134138
instrumentor.instrument(
@@ -150,13 +154,17 @@ def instrument_no_content(
150154
def instrument_with_content(
151155
tracer_provider, event_logger_provider, meter_provider, request
152156
):
157+
# Reset global state.
158+
_OpenTelemetrySemanticConventionStability._initialized = False
153159
os.environ.update(
154160
{OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "True"}
155161
)
156162
if request.param:
157163
os.environ.update(
158164
{OTEL_SEMCONV_STABILITY_OPT_IN: "gen_ai_latest_experimental"}
159165
)
166+
else:
167+
os.environ.update({OTEL_SEMCONV_STABILITY_OPT_IN: "stable"})
160168
instrumentor = VertexAIInstrumentor()
161169
instrumentor.instrument(
162170
tracer_provider=tracer_provider,
@@ -304,7 +312,6 @@ def __call__(self): ...
304312
def fixture_generate_content(
305313
request: pytest.FixtureRequest,
306314
vcr: VCR,
307-
cassette_name: Optional[str] = None,
308315
) -> Generator[GenerateContentFixture, None, None]:
309316
"""This fixture parameterizes tests that use it to test calling both
310317
GenerativeModel.generate_content() and GenerativeModel.generate_content_async().
@@ -322,6 +329,6 @@ def wrapper(model: GenerativeModel, *args, **kwargs) -> None:
322329
return model.generate_content(*args, **kwargs)
323330

324331
with vcr.use_cassette(
325-
cassette_name or request.node.originalname, allow_playback_repeats=True
332+
request.node.originalname, allow_playback_repeats=True
326333
):
327334
yield wrapper

0 commit comments

Comments
 (0)