Commit 7fcf00e

Add test case for a longer streamGenerateContent request
1 parent ccc964b commit 7fcf00e

File tree

2 files changed: +322 -0 lines changed

Lines changed: 273 additions & 0 deletions
@@ -0,0 +1,273 @@
interactions:
- request:
    body: |-
      {
        "contents": [
          {
            "role": "user",
            "parts": [
              {
                "text": "Explain verbosely that this is a test in 5 paragraphs."
              }
            ]
          }
        ]
      }
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '177'
      Content-Type:
      - application/json
      User-Agent:
      - python-requests/2.32.3
    method: POST
    uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?%24alt=json%3Benum-encoding%3Dint
  response:
    body:
      string: |-
        [
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": "This"
                    }
                  ]
                }
              }
            ],
            "usageMetadata": {},
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " communication serves as a formal notification that the following interaction constitutes a test. The"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " purpose of this test is to evaluate the capabilities and limitations of a particular system,"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " in this case, a language processing model. It is not intended to be a genuine request for information or assistance in any real-world scenario. Please"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " understand that any responses generated are purely for the purposes of analysis and should not be interpreted as factual, comprehensive, or suitable for application outside of this specific testing environment"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": ". Consider this a controlled experiment, designed to measure performance and identify areas for potential improvement.\n\n\nThe nature of this test involves prompting the system with a variety of inputs designed to elicit diverse responses. These inputs may range from straightforward questions to"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " complex narratives, requiring nuanced understanding and creative generation of text. The system's output will be carefully examined and scrutinized to assess its ability to process information accurately, generate coherent and grammatically correct text, and adapt its response style to"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " different types of prompts. This comprehensive analysis will contribute valuable data for refining and enhancing the system's overall functionality and effectiveness.\n\n\nIt is crucial to remember that this test is entirely artificial and devoid of real-world consequences. Any information provided within the test should be treated as hypothetical and not indicative of genuine knowledge or expertise"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " on the part of the system. No actions or decisions should be taken based on the responses generated during this evaluation. The focus remains solely on the technical aspects of the system's performance, including its capacity for reasoning, problem-solving, and language generation within a predetermined testing framework.\n\n\nThe results obtained from this"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " test will be used internally to improve the system's capabilities and address any identified weaknesses. This is an ongoing process of refinement and enhancement. The data collected will contribute to the iterative development and improvement of the system's algorithms and underlying architecture, ultimately aiming to achieve more accurate, reliable, and efficient processing of information and"
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " generation of text. Consider this a vital stage in the continuous evolution of this technology.\n\n\nIn conclusion, this explicit declaration serves to reiterate that the current exchange is purely a test. All interactions are strictly confined within this testing context and should not be interpreted as indicative of any operational readiness or real-world application."
                    }
                  ]
                }
              }
            ],
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          },
          {
            "candidates": [
              {
                "content": {
                  "role": "model",
                  "parts": [
                    {
                      "text": " The primary objective is to assess performance characteristics and inform future development efforts. Thank you for your understanding and participation in this essential evaluation process.\n"
                    }
                  ]
                },
                "finishReason": 1
              }
            ],
            "usageMetadata": {
              "promptTokenCount": 13,
              "candidatesTokenCount": 478,
              "totalTokenCount": 491,
              "promptTokensDetails": [
                {
                  "modality": 1,
                  "tokenCount": 13
                }
              ],
              "candidatesTokensDetails": [
                {
                  "modality": 1,
                  "tokenCount": 478
                }
              ]
            },
            "modelVersion": "gemini-1.5-flash-002",
            "createTime": "2025-03-05T16:13:46.266718Z",
            "responseId": "OnjIZ96jEJ3A-O4P68OL4AY"
          }
        ]
    headers:
      Content-Type:
      - application/json; charset=UTF-8
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      content-length:
      - '6856'
    status:
      code: 200
      message: OK
version: 1
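
For reference, each element of the recorded response array above is one streamed chunk of the reply: every chunk carries a partial text part, and only the final chunk carries the finishReason and the aggregate usageMetadata. The sketch below is not part of the commit; it only shows how such a cassette could be read back and its chunks stitched into the full five-paragraph reply. It assumes PyYAML is available, and cassette_path is a placeholder, since the cassette's file name is not visible in this view.

# Sketch only: reassemble the streamed reply recorded in the cassette above.
# Assumes PyYAML is installed; `cassette_path` is a hypothetical placeholder.
import json

import yaml


def reassemble(cassette_path: str) -> str:
    with open(cassette_path) as f:
        cassette = yaml.safe_load(f)
    interaction = cassette["interactions"][0]
    # The recorded body is a JSON array with one element per streamed chunk.
    chunks = json.loads(interaction["response"]["body"]["string"])
    # Every chunk holds one partial text part; concatenating them reproduces
    # the full reply that the test below exercises.
    return "".join(
        chunk["candidates"][0]["content"]["parts"][0]["text"] for chunk in chunks
    )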

instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_stream_chat_completions.py

Lines changed: 49 additions & 0 deletions
@@ -101,6 +101,55 @@ def test_stream_generate_content(
    }


@pytest.mark.vcr
def test_stream_generate_content_long(
    span_exporter: InMemorySpanExporter,
    log_exporter: InMemoryLogExporter,
    instrument_with_content: VertexAIInstrumentor,
):
    model = GenerativeModel("gemini-1.5-flash-002")
    list(
        model.generate_content(
            [
                Content(
                    role="user",
                    parts=[
                        Part.from_text(
                            "Explain verbosely that this is a test in 5 paragraphs."
                        )
                    ],
                ),
            ],
            stream=True,
        )
    )

    # Emits span
    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    assert spans[0].name == "chat gemini-1.5-flash-002"
    assert dict(spans[0].attributes) == {
        "gen_ai.operation.name": "chat",
        "gen_ai.request.model": "gemini-1.5-flash-002",
        "gen_ai.response.finish_reasons": ("stop",),
        "gen_ai.response.model": "gemini-1.5-flash-002",
        "gen_ai.system": "vertex_ai",
        "gen_ai.usage.input_tokens": 13,
        "gen_ai.usage.output_tokens": 478,
        "server.address": "us-central1-aiplatform.googleapis.com",
        "server.port": 443,
    }

    # Emits user and multiple choice events
    logs = log_exporter.get_finished_logs()
    assert len(logs) == 13
    user_log, *choice_logs = [log_data.log_record for log_data in logs]

    assert user_log.attributes["event.name"] == "gen_ai.user.message"
    for choice_log in choice_logs:
        assert choice_log.attributes["event.name"] == "gen_ai.choice"


@pytest.mark.vcr
@pytest.mark.skip(
    "Bug in client library "
