Skip to content

Commit 1415f2c

Browse files
committed
Checkpoint current state.
1 parent 4adef35 commit 1415f2c

File tree

9 files changed

+214
-271
lines changed

9 files changed

+214
-271
lines changed

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

Lines changed: 24 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -40,11 +40,6 @@
4040
_logger = logging.getLogger(__name__)
4141

4242

43-
# Enable these after these cases are fully vetted and tested
44-
_INSTRUMENT_STREAMING = False
45-
_INSTRUMENT_ASYNC = False
46-
47-
4843
class _MethodsSnapshot:
4944
def __init__(self):
5045
self._original_generate_content = Models.generate_content
@@ -436,10 +431,6 @@ def _create_instrumented_generate_content_stream(
436431
snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
437432
):
438433
wrapped_func = snapshot.generate_content_stream
439-
if not _INSTRUMENT_STREAMING:
440-
# TODO: remove once this case has been fully tested
441-
return wrapped_func
442-
443434
@functools.wraps(wrapped_func)
444435
def instrumented_generate_content_stream(
445436
self: Models,
@@ -479,10 +470,6 @@ def _create_instrumented_async_generate_content(
479470
snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
480471
):
481472
wrapped_func = snapshot.async_generate_content
482-
if not _INSTRUMENT_ASYNC:
483-
# TODO: remove once this case has been fully tested
484-
return wrapped_func
485-
486473
@functools.wraps(wrapped_func)
487474
async def instrumented_generate_content(
488475
self: AsyncModels,
@@ -523,10 +510,6 @@ def _create_instrumented_async_generate_content_stream( # pyright: ignore
523510
snapshot: _MethodsSnapshot, otel_wrapper: OTelWrapper
524511
):
525512
wrapped_func = snapshot.async_generate_content_stream
526-
if not _INSTRUMENT_ASYNC or not _INSTRUMENT_STREAMING:
527-
# TODO: remove once this case has been fully tested
528-
return wrapped_func
529-
530513
@functools.wraps(wrapped_func)
531514
async def instrumented_generate_content_stream(
532515
self: AsyncModels,
@@ -539,25 +522,30 @@ async def instrumented_generate_content_stream(
539522
helper = _GenerateContentInstrumentationHelper(
540523
self, otel_wrapper, model
541524
)
542-
with helper.start_span_as_current_span(
543-
model, "google.genai.AsyncModels.generate_content_stream"
544-
):
545-
helper.process_request(contents, config)
546-
try:
547-
async for response in await wrapped_func(
548-
self,
549-
model=model,
550-
contents=contents,
551-
config=config,
552-
**kwargs,
553-
): # pyright: ignore
554-
helper.process_response(response)
555-
yield response # pyright: ignore
556-
except Exception as error:
557-
helper.process_error(error)
558-
raise
559-
finally:
560-
helper.finalize_processing()
525+
async def _internal_generator():
526+
with helper.start_span_as_current_span(
527+
model, "google.genai.AsyncModels.generate_content_stream"
528+
):
529+
helper.process_request(contents, config)
530+
try:
531+
async for response in await wrapped_func(
532+
self,
533+
model=model,
534+
contents=contents,
535+
config=config,
536+
**kwargs,
537+
): # pyright: ignore
538+
helper.process_response(response)
539+
yield response # pyright: ignore
540+
except Exception as error:
541+
helper.process_error(error)
542+
raise
543+
finally:
544+
helper.finalize_processing()
545+
class _GeneratorProvider:
546+
def __aiter__(self):
547+
return _internal_generator()
548+
return _GeneratorProvider()
561549

562550
return instrumented_generate_content_stream
563551

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,27 @@ def generate_response_from_dict(args):
123123
raise ValueError(f"Unsupported response type: {type(response)}")
124124

125125

126+
def _to_stream_response_generator(response_generators):
127+
if len(response_generators) == 1:
128+
return response_generators[0]
129+
def combined_generator(args):
130+
first_response = response_generators[0](args)
131+
if first_response.status_code != 200:
132+
return first_response
133+
result = requests.Response()
134+
result.status_code = 200
135+
result.headers['content-type'] = 'application/json'
136+
result.encoding = 'utf-8'
137+
result.headers['transfer-encoding'] = 'chunked'
138+
contents = []
139+
for generator in response_generators:
140+
response = generator(args)
141+
if response.status_code != 200:
142+
continue
143+
# TODO: ...
144+
return combined_generator
145+
146+
126147
class RequestsMocker:
127148
def __init__(self):
128149
self._original_send = requests.sessions.Session.send
@@ -159,6 +180,37 @@ def _do_send(
159180
session: requests.sessions.Session,
160181
request: requests.PreparedRequest,
161182
**kwargs,
183+
):
184+
stream=kwargs.get('stream', False)
185+
if not stream:
186+
return _do_send_non_streaming(session, request, **kwargs)
187+
return _do_send_streaming(session, request, **kwargs)
188+
189+
def _do_send_streaming(
190+
self,
191+
session: requests.sessions.Session,
192+
request: requests.PreparedRequest,
193+
**kwargs,
194+
):
195+
response_generators = []
196+
for matcher, response_generator in self._handlers:
197+
if matcher is None:
198+
response_generators.append(response_generator)
199+
elif matcher(args):
200+
response_generators.append(response_generator)
201+
if not response_generators:
202+
response_generators.append(_return_404)
203+
args = RequestsCallArgs(session, request, **kwargs)
204+
response_generator = _to_stream_response_generator(response_generators)
205+
result = call.response
206+
self._calls.append(call)
207+
return result
208+
209+
def _do_send_non_streaming(
210+
self,
211+
session: requests.sessions.Session,
212+
request: requests.PreparedRequest,
213+
**kwargs,
162214
):
163215
args = RequestsCallArgs(session, request, **kwargs)
164216
response_generator = self._lookup_response_generator(args)

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py

Lines changed: 5 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -17,31 +17,7 @@
1717
import unittest
1818

1919
from ..common.base import TestCase
20-
21-
22-
def create_valid_response(
23-
response_text="The model response", input_tokens=10, output_tokens=20
24-
):
25-
return {
26-
"modelVersion": "gemini-2.0-flash-test123",
27-
"usageMetadata": {
28-
"promptTokenCount": input_tokens,
29-
"candidatesTokenCount": output_tokens,
30-
"totalTokenCount": input_tokens + output_tokens,
31-
},
32-
"candidates": [
33-
{
34-
"content": {
35-
"role": "model",
36-
"parts": [
37-
{
38-
"text": response_text,
39-
}
40-
],
41-
}
42-
}
43-
],
44-
}
20+
from .util import create_valid_response
4521

4622

4723
class NonStreamingTestCase(TestCase):
@@ -56,22 +32,17 @@ def setUp(self): # pylint: disable=invalid-name
5632
def generate_content(self, *args, **kwargs):
5733
raise NotImplementedError("Must implement 'generate_content'.")
5834

35+
@property
5936
def expected_function_name(self):
6037
raise NotImplementedError("Must implement 'expected_function_name'.")
6138

6239
def configure_valid_response(
6340
self,
64-
response_text="The model_response",
65-
input_tokens=10,
66-
output_tokens=20,
41+
*args,
42+
**kwargs
6743
):
6844
self.requests.add_response(
69-
create_valid_response(
70-
response_text=response_text,
71-
input_tokens=input_tokens,
72-
output_tokens=output_tokens,
73-
)
74-
)
45+
create_valid_response(*args, **kwargs))
7546

7647
def test_instrumentation_does_not_break_core_functionality(self):
7748
self.configure_valid_response(response_text="Yep, it works!")
Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
# Copyright The OpenTelemetry Authors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import json
16+
import os
17+
import unittest
18+
19+
from ..common.base import TestCase
20+
from .util import create_valid_response
21+
22+
23+
class StreamingTestCase(TestCase):
    """Shared checks for the streaming generate_content code paths.

    Concrete subclasses provide the client invocation via ``generate_content``
    and identify the instrumented entry point via ``expected_function_name``.
    """

    # "setUp" is the name mandated by unittest.TestCase, hence the pylint
    # suppression for the non-snake_case name.
    def setUp(self):  # pylint: disable=invalid-name
        super().setUp()
        # The base class itself is scaffolding, not a runnable test case.
        if self.__class__ == StreamingTestCase:
            raise unittest.SkipTest("Skipping testcase base.")

    def generate_content(self, *args, **kwargs):
        raise NotImplementedError("Must implement 'generate_content'.")

    @property
    def expected_function_name(self):
        raise NotImplementedError("Must implement 'expected_function_name'.")

    def configure_valid_response(self, *args, **kwargs):
        # Forward everything to the shared fixture helper.
        self.requests.add_response(create_valid_response(*args, **kwargs))

    def test_instrumentation_does_not_break_core_functionality(self):
        self.configure_valid_response(response_text="Yep, it works!")
        responses = self.generate_content(
            model="gemini-2.0-flash", contents="Does this work?"
        )
        self.assertEqual(len(responses), 1)
        self.assertEqual(responses[0].text, "Yep, it works!")

instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_async_nonstreaming.py

Lines changed: 6 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -12,76 +12,18 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
16-
# TODO: Once the async non-streaming case has been fully implemented,
17-
# reimplement this in terms of "nonstreaming_base.py".
18-
1915
import asyncio
20-
import logging
21-
import unittest
22-
23-
from ..common.base import TestCase
2416

17+
from .nonstreaming_base import NonStreamingTestCase
2518

26-
def create_valid_response(
27-
response_text="The model response", input_tokens=10, output_tokens=20
28-
):
29-
return {
30-
"modelVersion": "gemini-2.0-flash-test123",
31-
"usageMetadata": {
32-
"promptTokenCount": input_tokens,
33-
"candidatesTokenCount": output_tokens,
34-
"totalTokenCount": input_tokens + output_tokens,
35-
},
36-
"candidates": [
37-
{
38-
"content": {
39-
"role": "model",
40-
"parts": [
41-
{
42-
"text": response_text,
43-
}
44-
],
45-
}
46-
}
47-
],
48-
}
49-
50-
51-
# Temporary test fixture just to ensure that the in-progress work to
52-
# implement this case doesn't break the original code.
53-
class TestGenerateContentAsyncNonstreaming(TestCase):
54-
def configure_valid_response(
55-
self,
56-
response_text="The model_response",
57-
input_tokens=10,
58-
output_tokens=20,
59-
):
60-
self.requests.add_response(
61-
create_valid_response(
62-
response_text=response_text,
63-
input_tokens=input_tokens,
64-
output_tokens=output_tokens,
65-
)
66-
)
6719

20+
class TestGenerateContentAsyncNonstreaming(NonStreamingTestCase):
    """Runs the shared non-streaming checks against the async client surface."""

    def generate_content(self, *args, **kwargs):
        # Drive the coroutine to completion so the shared synchronous
        # assertions can inspect the result directly.
        coroutine = self.client.aio.models.generate_content(*args, **kwargs)  # pylint: disable=missing-kwoa
        return asyncio.run(coroutine)

    @property
    def expected_function_name(self):
        return 'google.genai.AsyncModels.generate_content'

0 commit comments

Comments
 (0)