Commit 51bd151

Resolve merge conflict.

2 parents: 1415f2c + 194f0b0
22 files changed: +738 −159 lines

.github/workflows/package-prepare-patch-release.yml

Lines changed: 3 additions & 0 deletions

@@ -9,8 +9,11 @@ on:
           - opentelemetry-resource-detector-azure
           - opentelemetry-sdk-extension-aws
           - opentelemetry-instrumentation-openai-v2
+          - opentelemetry-instrumentation-vertexai
         description: 'Package to be released'
         required: true
+run-name: "[Package][${{ inputs.package }}] Prepare patch release"
+
 jobs:
   prepare-patch-release:
     runs-on: ubuntu-latest

.github/workflows/package-prepare-release.yml

Lines changed: 2 additions & 0 deletions

@@ -9,9 +9,11 @@ on:
           - opentelemetry-resource-detector-azure
           - opentelemetry-sdk-extension-aws
           - opentelemetry-instrumentation-openai-v2
+          - opentelemetry-instrumentation-vertexai
         description: 'Package to be released'
         required: true
 
+run-name: "[Package][${{ inputs.package }}] Prepare release"
 jobs:
   prereqs:
     runs-on: ubuntu-latest

.github/workflows/package-release.yml

Lines changed: 2 additions & 0 deletions

@@ -9,8 +9,10 @@ on:
           - opentelemetry-resource-detector-azure
           - opentelemetry-sdk-extension-aws
           - opentelemetry-instrumentation-openai-v2
+          - opentelemetry-instrumentation-vertexai
         description: 'Package to be released'
         required: true
+run-name: "[Package][${{ inputs.package }}] Release"
 jobs:
   release:
     runs-on: ubuntu-latest

CHANGELOG.md

Lines changed: 4 additions & 0 deletions

@@ -19,6 +19,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   ([#3258](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3258))
 - `opentelemetry-instrumentation-botocore` Add support for GenAI system events
   ([#3266](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3266))
+- `opentelemetry-instrumentation-botocore` Add support for GenAI choice events
+  ([#3275](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3275))
+- `opentelemetry-instrumentation` make it simpler to initialize auto-instrumentation programmatically
+  ([#3273](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3273))
 
 ### Fixed
 
RELEASING.md

Lines changed: 1 addition & 0 deletions

@@ -21,6 +21,7 @@
 > - opentelemetry-resource-detector-azure
 > - opentelemetry-sdk-extension-aws
 > - opentelemetry-instrumentation-openai-v2
+> - opentelemetry-instrumentation-vertexai
 >
 > These libraries are also excluded from the general release.
 
instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 126 additions & 24 deletions

@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import functools
+import json
 import logging
 import os
 import time
@@ -21,6 +22,10 @@
 from google.genai.models import AsyncModels, Models
 from google.genai.types import (
     BlockedReason,
+    Candidate,
+    Content,
+    ContentUnion,
+    ContentUnionDict,
     ContentListUnion,
     ContentListUnionDict,
     GenerateContentConfigOrDict,
@@ -40,6 +45,10 @@
 _logger = logging.getLogger(__name__)
 
 
+# Constant used to make the absence of content more understandable.
+_CONTENT_ELIDED = "<elided>"
+
+
 class _MethodsSnapshot:
     def __init__(self):
         self._original_generate_content = Models.generate_content
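The `_CONTENT_ELIDED` placeholder is used whenever content recording is disabled: events are still emitted, but their bodies carry `"<elided>"` instead of prompt or response text. A minimal sketch of how such a gate is typically derived from the `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` environment variable exercised by the tests further down; the package's actual `is_content_recording_enabled()` helper lives elsewhere and may parse the flag differently:

```python
import os

# Hypothetical stand-in for the package's is_content_recording_enabled().
def _capture_content_enabled() -> bool:
    value = os.environ.get(
        "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT", "false"
    )
    return value.strip().lower() == "true"

# When disabled, events keep their shape but the body content is replaced
# by the "<elided>" placeholder instead of being dropped entirely.
```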
@@ -177,6 +186,15 @@ def _get_top_p(config: Optional[GenerateContentConfigOrDict]):
     }
 
 
+def _to_dict(value: object):
+    if isinstance(value, dict):
+        return value
+    if hasattr(value, 'model_dump'):
+        return value.model_dump()
+    return json.loads(json.dumps(value))
+
+
+
 class _GenerateContentInstrumentationHelper:
     def __init__(
         self,
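The new `_to_dict` helper normalizes whatever the SDK hands back (plain dicts, pydantic-style objects that expose `model_dump()`, or other JSON-serializable values) into something an event body can carry. A standalone sketch of the same idea; `FakeContent` is purely illustrative and stands in for the SDK's pydantic-style types such as `google.genai.types.Content`:

```python
import json

# Illustrative only: a pydantic-like object exposing model_dump().
class FakeContent:
    def __init__(self, parts):
        self.parts = parts

    def model_dump(self):
        return {"parts": self.parts}

def to_dict(value: object):
    # Same shape as the _to_dict helper added in this commit.
    if isinstance(value, dict):
        return value
    if hasattr(value, "model_dump"):
        return value.model_dump()
    return json.loads(json.dumps(value))

print(to_dict({"role": "user"}))            # dicts pass through unchanged
print(to_dict(FakeContent(["hi there"])))   # {'parts': ['hi there']}
print(to_dict(["plain", "json", "value"]))  # round-trips through json
```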
@@ -193,6 +211,8 @@ def __init__(
         self._input_tokens = 0
         self._output_tokens = 0
         self._content_recording_enabled = is_content_recording_enabled()
+        self._response_index = 0
+        self._candidate_index = 0
 
     def start_span_as_current_span(self, model_name, function_name):
         return self._otel_wrapper.start_as_current_span(
@@ -230,6 +250,7 @@ def process_response(self, response: GenerateContentResponse):
         self._maybe_update_token_counts(response)
         self._maybe_update_error_type(response)
         self._maybe_log_response(response)
+        self._response_index += 1
 
     def process_error(self, e: Exception):
         self._error_type = str(e.__class__.__name__)
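`process_response` may run more than once per instrumented call, which appears to be aimed at streaming, where each chunk is a separate response. The helper therefore tracks both a per-call response index and a flat candidate index that keeps growing across responses. A toy sketch of that bookkeeping, using made-up candidate lists rather than real `GenerateContentResponse` objects:

```python
# Hypothetical stream: the first response carries 2 candidates, the second 1.
responses = [["candidate-a", "candidate-b"], ["candidate-c"]]

response_index = 0   # mirrors self._response_index
candidate_index = 0  # mirrors self._candidate_index (flat, across responses)

for candidates in responses:
    for in_response_index, candidate in enumerate(candidates):
        print(
            f"response={response_index} "
            f"candidate_in_response={in_response_index} "
            f"flat_candidate={candidate_index} -> {candidate}"
        )
        candidate_index += 1
    response_index += 1
```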
@@ -291,64 +312,145 @@ def _maybe_update_error_type(self, response: GenerateContentResponse):
     def _maybe_log_system_instruction(
         self, config: Optional[GenerateContentConfigOrDict] = None
     ):
-        if not self._content_recording_enabled:
-            return
         system_instruction = _get_config_property(config, "system_instruction")
         if not system_instruction:
             return
+        attributes = {
+            gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+        }
         # TODO: determine if "role" should be reported here or not. It is unclear
         # since the caller does not supply a "role" and since this comes through
         # a property named "system_instruction" which would seem to align with
         # the default "role" that is allowed to be omitted by default.
         #
         # See also: "TODOS.md"
+        body = {}
+        if self._content_recording_enabled:
+            body["content"] = _to_dict(system_instruction)
+        else:
+            body["content"] = _CONTENT_ELIDED
         self._otel_wrapper.log_system_prompt(
-            attributes={
-                gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
-            },
-            body={
-                "content": system_instruction,
-            },
+            attributes=attributes,
+            body=body,
         )
 
     def _maybe_log_user_prompt(
         self, contents: Union[ContentListUnion, ContentListUnionDict]
     ):
-        if not self._content_recording_enabled:
-            return
+        if isinstance(contents, list):
+            total=len(contents)
+            index=0
+            for entry in contents:
+                self._maybe_log_single_user_prompt(entry, index=index, total=total)
+                index += 1
+        else:
+            self._maybe_log_single_user_prompt(contents)
+
+    def _maybe_log_single_user_prompt(
+        self,
+        contents: Union[ContentUnion, ContentUnionDict],
+        index=0,
+        total=1):
+        # TODO: figure out how to report the index in a manner that is
+        # aligned with the OTel semantic conventions.
+        attributes = {
+            gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+        }
+
         # TODO: determine if "role" should be reported here or not and, if so,
         # what the value ought to be. It is not clear whether there is always
         # a role supplied (and it looks like there could be cases where there
         # is more than one role present in the supplied contents)?
         #
         # See also: "TODOS.md"
+        body = {}
+        if self._content_recording_enabled:
+            logged_contents = contents
+            if isinstance(contents, list):
+                logged_contents = Content(parts=contents)
+            body["content"] = _to_dict(logged_contents)
+        else:
+            body["content"] = _CONTENT_ELIDED
         self._otel_wrapper.log_user_prompt(
-            attributes={
-                gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
-            },
-            body={
-                "content": contents,
-            },
+            attributes=attributes,
+            body=body,
         )
 
+    def _maybe_log_response_stats(self, response: GenerateContentResponse):
+        # TODO: Determine if there is a way that we can log a summary
+        # of the overall response in a manner that is aligned with
+        # Semantic Conventions. For example, it would be natural
+        # to report an event that looks something like:
+        #
+        #   gen_ai.response.stats {
+        #       response_index: 0,
+        #       candidate_count: 3,
+        #       parts_per_candidate: [
+        #           3,
+        #           1,
+        #           5
+        #       ]
+        #   }
+        #
+        pass
+
+    def _maybe_log_response_safety_ratings(self, response: GenerateContentResponse):
+        # TODO: Determine if there is a way that we can log
+        # the "prompt_feedback". This would be especially useful
+        # in the case where the response is blocked.
+        pass
+
     def _maybe_log_response(self, response: GenerateContentResponse):
-        if not self._content_recording_enabled:
+        self._maybe_log_response_stats(response)
+        self._maybe_log_response_safety_ratings(response)
+        if not response.candidates:
             return
+        candidate_in_response_index = 0
+        for candidate in response.candidates:
+            self._maybe_log_response_candidate(
+                candidate,
+                flat_candidate_index=self._candidate_index,
+                candidate_in_response_index=candidate_in_response_index,
+                response_index=self._response_index)
+            self._candidate_index += 1
+            candidate_in_response_index += 1
+
+    def _maybe_log_response_candidate(
+        self,
+        candidate: Candidate,
+        flat_candidate_index: int,
+        candidate_in_response_index: int,
+        response_index: int):
+        # TODO: Determine if there might be a way to report the
+        # response index and candidate response index.
+        attributes={
+            gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
+        }
         # TODO: determine if "role" should be reported here or not and, if so,
         # what the value ought to be.
         #
         # TODO: extract tool information into a separate tool message.
         #
-        # TODO: determine if/when we need to emit a 'gen_ai.choice' event.
+        # TODO: determine if/when we need to emit a 'gen_ai.assistant.message' event.
+        #
+        # TODO: determine how to report other relevant details in the candidate that
+        # are not presently captured by Semantic Conventions. For example, the
+        # "citation_metadata", "grounding_metadata", "logprobs_result", etc.
         #
         # See also: "TODOS.md"
+        body={
+            "index": flat_candidate_index,
+        }
+        if self._content_recording_enabled:
+            if candidate.content:
+                body["content"] = _to_dict(candidate.content)
+        else:
+            body["content"] = _CONTENT_ELIDED
+        if candidate.finish_reason is not None:
+            body["finish_reason"] = candidate.finish_reason.name
         self._otel_wrapper.log_response_content(
-            attributes={
-                gen_ai_attributes.GEN_AI_SYSTEM: self._genai_system,
-            },
-            body={
-                "content": response.model_dump(),
-            },
+            attributes=attributes,
+            body=body,
         )
 
     def _record_token_usage_metric(self):
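Putting the pieces together: each candidate now yields one `gen_ai.choice` event whose body always carries the flat candidate index, carries `finish_reason` when the SDK supplies one, and carries either the serialized content or the `<elided>` placeholder. A self-contained sketch of that body assembly, using a hypothetical `FakeCandidate` and a trimmed `FinishReason` enum instead of the real `google.genai.types` classes:

```python
from dataclasses import dataclass
from enum import Enum
from typing import Optional

class FinishReason(Enum):
    # Illustrative subset; the real enum lives in google.genai.types.
    STOP = "STOP"

@dataclass
class FakeCandidate:
    # Hypothetical stand-in for google.genai.types.Candidate.
    content: Optional[dict] = None
    finish_reason: Optional[FinishReason] = None

def choice_event_body(candidate, flat_candidate_index, content_recording_enabled):
    # Mirrors the body built by _maybe_log_response_candidate in this commit.
    body = {"index": flat_candidate_index}
    if content_recording_enabled:
        if candidate.content:
            body["content"] = candidate.content  # _to_dict(...) in the real code
    else:
        body["content"] = "<elided>"
    if candidate.finish_reason is not None:
        body["finish_reason"] = candidate.finish_reason.name
    return body

candidate = FakeCandidate(
    content={"parts": ["Some response"]}, finish_reason=FinishReason.STOP
)
print(choice_event_body(candidate, 0, True))   # full content
print(choice_event_body(candidate, 0, False))  # content replaced by "<elided>"
```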

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/otel_wrapper.py

Lines changed: 1 addition & 1 deletion

@@ -84,7 +84,7 @@ def log_user_prompt(self, attributes, body):
 
     def log_response_content(self, attributes, body):
         _logger.debug("Recording response.")
-        event_name = "gen_ai.assistant.message"
+        event_name = "gen_ai.choice"
         self._log_event(event_name, attributes, body)
 
     def _log_event(self, event_name, attributes, body):
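This renames the response event from `gen_ai.assistant.message` to `gen_ai.choice`, the GenAI event name used for model responses elsewhere in the repo (compare the botocore "choice events" entry in the CHANGELOG above). A minimal sketch of a wrapper in the same shape; `SketchOtelWrapper` and its printing `_log_event` are stand-ins, not the package's real OTel plumbing:

```python
import logging

_logger = logging.getLogger(__name__)

class SketchOtelWrapper:
    # Simplified stand-in for the package's OTelWrapper: only the event
    # naming is shown, not the real span/metric/event emission.
    def log_system_prompt(self, attributes, body):
        self._log_event("gen_ai.system.message", attributes, body)

    def log_user_prompt(self, attributes, body):
        self._log_event("gen_ai.user.message", attributes, body)

    def log_response_content(self, attributes, body):
        _logger.debug("Recording response.")
        self._log_event("gen_ai.choice", attributes, body)  # was gen_ai.assistant.message

    def _log_event(self, event_name, attributes, body):
        # The real implementation forwards to an OpenTelemetry event logger.
        print(event_name, attributes, body)
```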

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py

Lines changed: 14 additions & 5 deletions

@@ -134,7 +134,10 @@ def test_does_not_record_system_prompt_as_log_if_disabled_by_env(self):
         self.generate_content(
             model="gemini-2.0-flash", contents="Some input", config=config
         )
-        self.otel.assert_does_not_have_event_named("gen_ai.system.message")
+        self.otel.assert_has_event_named("gen_ai.system.message")
+        event_record = self.otel.get_event_named("gen_ai.system.message")
+        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+        self.assertEqual(event_record.body["content"], "<elided>")
 
     def test_does_not_record_system_prompt_as_log_if_no_system_prompt_present(
         self,

@@ -163,16 +166,19 @@ def test_does_not_record_user_prompt_as_log_if_disabled_by_env(self):
         )
         self.configure_valid_response()
         self.generate_content(model="gemini-2.0-flash", contents="Some input")
-        self.otel.assert_does_not_have_event_named("gen_ai.user.message")
+        self.otel.assert_has_event_named("gen_ai.user.message")
+        event_record = self.otel.get_event_named("gen_ai.user.message")
+        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+        self.assertEqual(event_record.body["content"], "<elided>")
 
     def test_records_response_as_log(self):
         os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = (
             "true"
         )
         self.configure_valid_response(response_text="Some response content")
         self.generate_content(model="gemini-2.0-flash", contents="Some input")
-        self.otel.assert_has_event_named("gen_ai.assistant.message")
-        event_record = self.otel.get_event_named("gen_ai.assistant.message")
+        self.otel.assert_has_event_named("gen_ai.choice")
+        event_record = self.otel.get_event_named("gen_ai.choice")
         self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
         self.assertIn(
             "Some response content", json.dumps(event_record.body["content"])

@@ -184,7 +190,10 @@ def test_does_not_record_response_as_log_if_disabled_by_env(self):
         )
         self.configure_valid_response(response_text="Some response content")
         self.generate_content(model="gemini-2.0-flash", contents="Some input")
-        self.otel.assert_does_not_have_event_named("gen_ai.assistant.message")
+        self.otel.assert_has_event_named("gen_ai.choice")
+        event_record = self.otel.get_event_named("gen_ai.choice")
+        self.assertEqual(event_record.attributes["gen_ai.system"], "gemini")
+        self.assertEqual(event_record.body["content"], "<elided>")
 
     def test_records_metrics_data(self):
         self.configure_valid_response()
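These tests pin down the new contract: with `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` unset or false, the `gen_ai.system.message`, `gen_ai.user.message`, and `gen_ai.choice` events are still emitted, but their bodies carry `"<elided>"` rather than the real text. A rough standalone check of that expectation using plain asserts; the suite's harness helpers (`assert_has_event_named`, `get_event_named`, `configure_valid_response`) are not reproduced here:

```python
import os

def expected_content(real_content: str) -> str:
    # Events are always emitted; only the body content changes with the flag.
    capture = (
        os.environ.get(
            "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT", "false"
        ).lower()
        == "true"
    )
    return real_content if capture else "<elided>"

os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "false"
assert expected_content("Some response content") == "<elided>"

os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"
assert expected_content("Some response content") == "Some response content"
```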

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py

Lines changed: 0 additions & 1 deletion

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import json
 import os
 import unittest
 
instrumentation-genai/opentelemetry-instrumentation-vertexai/CHANGELOG.md

Lines changed: 2 additions & 0 deletions

@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## Unreleased
 
+## Version 2.0b0 (2025-02-21)
+
 - Added Vertex AI spans for request parameters
   ([#3192](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3192))
 - Initial VertexAI instrumentation
