diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md
index cba7e190fa..63ea5f6859 100644
--- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md
@@ -9,7 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Update instrumentation to use the latest semantic convention changes made in https://github.com/open-telemetry/semantic-conventions/pull/2179.
 Now only a single event and span (`gen_ai.client.inference.operation.details`) are used to capture prompt and response content. These changes are opt-in,
-users will need to set the environment variable OTEL_SEMCONV_STABILITY_OPT_IN to `gen_ai_latest_experimental` to see them ([#3799](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3799)) and ([#3709](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3709)).
+users will need to set the environment variable OTEL_SEMCONV_STABILITY_OPT_IN to `gen_ai_latest_experimental` to see them ([#3799](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3799)) and ([#3709](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3709)). Update instrumentation to call upload hook.
 - Implement uninstrument for `opentelemetry-instrumentation-vertexai` ([#3328](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3328))
 - VertexAI support for async calling
diff --git a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py
index e11ea6e8b6..4ab006eb92 100644
--- a/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py
+++ b/instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py
@@ -172,6 +172,9 @@ def handle_response(
                 | prediction_service_v1beta1.GenerateContentResponse
                 | None,
             ) -> None:
+                event = LogRecord(
+                    event_name="gen_ai.client.inference.operation.details",
+                )
                 attributes = (
                     get_server_attributes(instance.api_endpoint)  # type: ignore[reportUnknownMemberType]
                     | request_attributes
@@ -203,6 +206,13 @@ def handle_response(
                     )
                     for candidate in response.candidates
                 ]
+                self.completion_hook.on_completion(
+                    inputs=inputs,
+                    outputs=outputs,
+                    system_instruction=system_instructions,
+                    span=span,
+                    log_record=event,
+                )
                 content_attributes = {
                     k: [asdict(x) for x in v]
                     for k, v in [
@@ -227,9 +237,6 @@
                             for k, v in content_attributes.items()
                         }
                     )
-                event = LogRecord(
-                    event_name="gen_ai.client.inference.operation.details",
-                )
                 event.attributes = attributes
                 if capture_content in (
                     ContentCapturingMode.SPAN_AND_EVENT,
@@ -237,13 +244,6 @@
                 ):
                     event.attributes |= content_attributes
                 self.logger.emit(event)
-                self.completion_hook.on_completion(
-                    inputs=inputs,
-                    outputs=outputs,
-                    system_instruction=system_instructions,
-                    span=span,
-                    log_record=event,
-                )

             yield handle_response
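
Not part of the patch itself, but for context: the reordering above creates the `gen_ai.client.inference.operation.details` `LogRecord` up front and calls `completion_hook.on_completion(...)` before `self.logger.emit(event)` rather than after it, so a hook now sees the span and the log record while they can still be influenced. The sketch below is a minimal, hypothetical completion hook illustrating that ordering only; the class name and the `example.upload.ref` attribute are assumptions, how such a hook would be registered is not shown, and only the `on_completion` keyword arguments (`inputs`, `outputs`, `system_instruction`, `span`, `log_record`) are taken from the diff.

```python
from typing import Any


class AnnotatingCompletionHook:
    """Hypothetical hook that annotates the operation-details event before it is emitted."""

    def on_completion(
        self,
        *,
        inputs: Any = None,
        outputs: Any = None,
        system_instruction: Any = None,
        span: Any = None,
        log_record: Any = None,
        **kwargs: Any,
    ) -> None:
        # With this patch the instrumentation invokes the hook before
        # self.logger.emit(event), so attributes attached here end up on the
        # emitted gen_ai.client.inference.operation.details event.
        if log_record is not None:
            log_record.attributes = {
                **(log_record.attributes or {}),
                "example.upload.ref": "file:///tmp/prompt.json",  # hypothetical reference attribute
            }
```

Whether mutating `log_record.attributes` inside a hook is the intended extension point depends on the upload-hook implementation in the GenAI utils package; the sketch only demonstrates the emit-ordering change introduced by this diff.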