Skip to content

Commit 3ed2432

Browse files
committed
Make a lot of changes
1 parent f199af5 commit 3ed2432

File tree

6 files changed

+202
-108
lines changed

6 files changed

+202
-108
lines changed

instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## Unreleased
99

10+
- Fix bugs in semconv opt-in mode handling and span attribute finalization
11+
1012
## Version 0.4b0 (2025-10-16)
1113

1214
- Implement the new semantic convention changes made in https://github.com/open-telemetry/semantic-conventions/pull/2179.

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 107 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -350,6 +350,18 @@ def start_span_as_current_span(
350350
end_on_exit=end_on_exit,
351351
)
352352

353+
def create_final_attributes(self) -> dict[str, Any]:
354+
final_attributes = {
355+
gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS: self._input_tokens,
356+
gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS: self._output_tokens,
357+
gen_ai_attributes.GEN_AI_RESPONSE_FINISH_REASONS: sorted(
358+
self._finish_reasons_set
359+
),
360+
}
361+
if self._error_type:
362+
final_attributes[error_attributes.ERROR_TYPE] = self._error_type
363+
return final_attributes
364+
353365
def process_request(
354366
self,
355367
contents: Union[ContentListUnion, ContentListUnionDict],
@@ -366,25 +378,6 @@ def process_response(self, response: GenerateContentResponse):
366378
def process_error(self, e: Exception):
367379
self._error_type = str(e.__class__.__name__)
368380

369-
def finalize_processing(self):
370-
span = trace.get_current_span()
371-
span.set_attribute(
372-
gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS, self._input_tokens
373-
)
374-
span.set_attribute(
375-
gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS, self._output_tokens
376-
)
377-
span.set_attribute(
378-
gen_ai_attributes.GEN_AI_RESPONSE_FINISH_REASONS,
379-
sorted(self._finish_reasons_set),
380-
)
381-
if self.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
382-
span.set_attribute(
383-
gen_ai_attributes.GEN_AI_SYSTEM, self._genai_system
384-
)
385-
self._record_token_usage_metric()
386-
self._record_duration_metric()
387-
388381
def _update_response(self, response: GenerateContentResponse):
389382
# TODO: Determine if there are other response properties that
390383
# need to be reflected back into the span attributes.
@@ -448,10 +441,15 @@ def _maybe_update_error_type(self, response: GenerateContentResponse):
448441
def _maybe_log_completion_details(
449442
self,
450443
request_attributes: dict[str, Any],
444+
final_attributes: dict[str, Any],
445+
is_experimental_mode: bool,
451446
request: Union[ContentListUnion, ContentListUnionDict],
452447
candidates: list[Candidate],
453448
config: Optional[GenerateContentConfigOrDict] = None,
454449
):
450+
if not is_experimental_mode:
451+
print("not experimental mode?")
452+
return
455453
system_instructions = []
456454
if system_content := _config_to_system_instruction(config):
457455
system_instructions = to_system_instructions(
@@ -465,7 +463,7 @@ def _maybe_log_completion_details(
465463
span = trace.get_current_span()
466464
event = LogRecord(
467465
event_name="gen_ai.client.inference.operation.details",
468-
attributes=request_attributes,
466+
attributes=request_attributes | final_attributes,
469467
)
470468
self.completion_hook.on_completion(
471469
inputs=input_messages,
@@ -487,6 +485,7 @@ def _maybe_log_completion_details(
487485
**(event.attributes or {}),
488486
**completion_details_attributes,
489487
}
488+
print("writing completion event..")
490489
self._otel_wrapper.log_completion_details(event=event)
491490

492491
if self._content_recording_enabled in [
@@ -707,6 +706,7 @@ def instrumented_generate_content(
707706
config: Optional[GenerateContentConfigOrDict] = None,
708707
**kwargs: Any,
709708
) -> GenerateContentResponse:
709+
print("in instrumented code..")
710710
candidates = []
711711
helper = _GenerateContentInstrumentationHelper(
712712
self,
@@ -719,6 +719,7 @@ def instrumented_generate_content(
719719
helper.sem_conv_opt_in_mode
720720
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
721721
)
722+
print(f"opt in mode: {helper.sem_conv_opt_in_mode}")
722723
request_attributes = create_request_attributes(
723724
config,
724725
is_experimental_mode,
@@ -730,32 +731,45 @@ def instrumented_generate_content(
730731
span.set_attributes(request_attributes)
731732
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
732733
helper.process_request(contents, config)
734+
span.set_attribute(
735+
gen_ai_attributes.GEN_AI_SYSTEM, helper._genai_system
736+
)
733737
try:
738+
print("trying to get resp..")
734739
response = wrapped_func(
735740
self,
736741
model=model,
737742
contents=contents,
738743
config=helper.wrapped_config(config),
739744
**kwargs,
740745
)
741-
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
742-
helper.process_response(response)
743-
elif is_experimental_mode:
744-
candidates = response.candidates
746+
print("resp over..")
747+
if is_experimental_mode:
745748
helper._update_response(response)
749+
if response.candidates:
750+
candidates += response.candidates
751+
746752
else:
747-
raise ValueError(
748-
f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported."
749-
)
753+
helper.process_response(response)
750754
return response
751755
except Exception as error:
756+
print("EXCEPTION RAISED.. PROCESSING ERROR>>>")
752757
helper.process_error(error)
753758
raise
754759
finally:
760+
print("in the finally block..")
761+
final_attributes = helper.create_final_attributes()
762+
span.set_attributes(final_attributes)
755763
helper._maybe_log_completion_details(
756-
request_attributes, contents, candidates, config
764+
request_attributes,
765+
final_attributes,
766+
is_experimental_mode,
767+
contents,
768+
candidates,
769+
config,
757770
)
758-
helper.finalize_processing()
771+
helper._record_token_usage_metric()
772+
helper._record_duration_metric()
759773

760774
return instrumented_generate_content
761775

@@ -800,6 +814,9 @@ def instrumented_generate_content_stream(
800814
span.set_attributes(request_attributes)
801815
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
802816
helper.process_request(contents, config)
817+
span.set_attribute(
818+
gen_ai_attributes.GEN_AI_SYSTEM, helper._genai_system
819+
)
803820
try:
804821
for response in wrapped_func(
805822
self,
@@ -808,25 +825,30 @@ def instrumented_generate_content_stream(
808825
config=helper.wrapped_config(config),
809826
**kwargs,
810827
):
811-
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
812-
helper.process_response(response)
813-
elif is_experimental_mode:
828+
if is_experimental_mode:
814829
helper._update_response(response)
815830
if response.candidates:
816831
candidates += response.candidates
832+
817833
else:
818-
raise ValueError(
819-
f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported."
820-
)
834+
helper.process_response(response)
821835
yield response
822836
except Exception as error:
823837
helper.process_error(error)
824838
raise
825839
finally:
840+
final_attributes = helper.create_final_attributes()
841+
span.set_attributes(final_attributes)
826842
helper._maybe_log_completion_details(
827-
request_attributes, contents, candidates, config
843+
request_attributes,
844+
final_attributes,
845+
is_experimental_mode,
846+
contents,
847+
candidates,
848+
config,
828849
)
829-
helper.finalize_processing()
850+
helper._record_token_usage_metric()
851+
helper._record_duration_metric()
830852

831853
return instrumented_generate_content_stream
832854

@@ -871,6 +893,9 @@ async def instrumented_generate_content(
871893
span.set_attributes(request_attributes)
872894
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
873895
helper.process_request(contents, config)
896+
span.set_attribute(
897+
gen_ai_attributes.GEN_AI_SYSTEM, helper._genai_system
898+
)
874899
try:
875900
response = await wrapped_func(
876901
self,
@@ -879,31 +904,29 @@ async def instrumented_generate_content(
879904
config=helper.wrapped_config(config),
880905
**kwargs,
881906
)
882-
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
883-
helper.process_response(response)
884-
elif (
885-
helper.sem_conv_opt_in_mode
886-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
887-
):
907+
if is_experimental_mode:
888908
helper._update_response(response)
889909
if response.candidates:
890910
candidates += response.candidates
891911
else:
892-
raise ValueError(
893-
f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported."
894-
)
912+
helper.process_response(response)
895913
return response
896914
except Exception as error:
897915
helper.process_error(error)
898916
raise
899917
finally:
918+
final_attributes = helper.create_final_attributes()
919+
span.set_attributes(final_attributes)
900920
helper._maybe_log_completion_details(
901921
request_attributes,
922+
final_attributes,
923+
is_experimental_mode,
902924
contents,
903925
candidates,
904926
config,
905927
)
906-
helper.finalize_processing()
928+
helper._record_token_usage_metric()
929+
helper._record_duration_metric()
907930

908931
return instrumented_generate_content
909932

@@ -948,8 +971,11 @@ async def instrumented_generate_content_stream(
948971
end_on_exit=False,
949972
) as span:
950973
span.set_attributes(request_attributes)
951-
if helper.sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
974+
if not is_experimental_mode:
952975
helper.process_request(contents, config)
976+
span.set_attribute(
977+
gen_ai_attributes.GEN_AI_SYSTEM, helper._genai_system
978+
)
953979
try:
954980
response_async_generator = await wrapped_func(
955981
self,
@@ -960,7 +986,18 @@ async def instrumented_generate_content_stream(
960986
)
961987
except Exception as error: # pylint: disable=broad-exception-caught
962988
helper.process_error(error)
963-
helper.finalize_processing()
989+
helper._record_token_usage_metric()
990+
final_attributes = helper.create_final_attributes()
991+
span.set_attributes(final_attributes)
992+
helper._maybe_log_completion_details(
993+
request_attributes,
994+
final_attributes,
995+
is_experimental_mode,
996+
contents,
997+
[],
998+
config,
999+
)
1000+
helper._record_duration_metric()
9641001
with trace.use_span(span, end_on_exit=True):
9651002
raise
9661003

@@ -969,31 +1006,30 @@ async def _response_async_generator_wrapper():
9691006
with trace.use_span(span, end_on_exit=True):
9701007
try:
9711008
async for response in response_async_generator:
972-
if (
973-
helper.sem_conv_opt_in_mode
974-
== _StabilityMode.DEFAULT
975-
):
976-
helper.process_response(response)
977-
elif (
978-
helper.sem_conv_opt_in_mode
979-
== _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
980-
):
1009+
if is_experimental_mode:
9811010
helper._update_response(response)
9821011
if response.candidates:
9831012
candidates += response.candidates
1013+
9841014
else:
985-
raise ValueError(
986-
f"Sem Conv opt in mode {helper.sem_conv_opt_in_mode} not supported."
987-
)
1015+
helper.process_response(response)
9881016
yield response
9891017
except Exception as error:
9901018
helper.process_error(error)
9911019
raise
9921020
finally:
1021+
final_attributes = helper.create_final_attributes()
1022+
span.set_attributes(final_attributes)
9931023
helper._maybe_log_completion_details(
994-
request_attributes, contents, candidates, config
1024+
request_attributes,
1025+
final_attributes,
1026+
is_experimental_mode,
1027+
contents,
1028+
candidates,
1029+
config,
9951030
)
996-
helper.finalize_processing()
1031+
helper._record_token_usage_metric()
1032+
helper._record_duration_metric()
9971033

9981034
return _response_async_generator_wrapper()
9991035

@@ -1010,6 +1046,14 @@ def instrument_generate_content(
10101046
completion_hook: CompletionHook,
10111047
generate_content_config_key_allowlist: Optional[AllowList] = None,
10121048
) -> object:
1049+
opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
1050+
_OpenTelemetryStabilitySignalType.GEN_AI
1051+
)
1052+
if (
1053+
opt_in_mode != _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL
1054+
and opt_in_mode != _StabilityMode.DEFAULT
1055+
):
1056+
raise ValueError(f"Sem Conv opt in mode {opt_in_mode} not supported.")
10131057
snapshot = _MethodsSnapshot()
10141058
Models.generate_content = _create_instrumented_generate_content(
10151059
snapshot,

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,16 +14,31 @@
1414

1515
import os
1616
import unittest
17+
from unittest.mock import patch
1718

1819
import google.genai
1920

21+
from opentelemetry.instrumentation._semconv import (
22+
_OpenTelemetrySemanticConventionStability,
23+
)
24+
2025
from .auth import FakeCredentials
2126
from .instrumentation_context import InstrumentationContext
2227
from .otel_mocker import OTelMocker
2328

2429

2530
class TestCase(unittest.TestCase):
2631
def setUp(self):
32+
# Most tests want this environment variable setup. Need to figure out a less hacky way of doing this.
33+
with patch.dict(
34+
"os.environ",
35+
{
36+
"OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT": "true",
37+
"OTEL_SEMCONV_STABILITY_OPT_IN": "default",
38+
},
39+
):
40+
_OpenTelemetrySemanticConventionStability._initialized = False
41+
_OpenTelemetrySemanticConventionStability._initialize()
2742
self._otel = OTelMocker()
2843
self._otel.install()
2944
self._instrumentation_context = None

0 commit comments

Comments
 (0)