Skip to content

Commit 0dd1f25

Browse files
committed
Working sanitized VCR for tests, but missing some assertions
1 parent 11a6dc2 commit 0dd1f25

File tree

4 files changed

+169
-53
lines changed

4 files changed

+169
-53
lines changed
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
interactions:
2+
- request:
3+
body: "{\n \"contents\": [\n {\n \"role\": \"user\",\n \"parts\":
4+
[\n {\n \"fileData\": {\n \"mimeType\": \"image/jpeg\",\n
5+
\ \"fileUri\": \"gs://generativeai-downloads/images/scones.jpg\"\n
6+
\ }\n },\n {\n \"text\": \"what is shown in this
7+
image?\"\n }\n ]\n }\n ]\n}"
8+
headers:
9+
Accept:
10+
- '*/*'
11+
Accept-Encoding:
12+
- gzip, deflate
13+
Connection:
14+
- keep-alive
15+
Content-Length:
16+
- '317'
17+
Content-Type:
18+
- application/json
19+
User-Agent:
20+
- python-requests/2.32.3
21+
method: POST
22+
uri: https://us-central1-aiplatform.googleapis.com/v1beta1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-pro-vision:generateContent?%24alt=json%3Benum-encoding%3Dint
23+
response:
24+
body:
25+
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\":
26+
\"model\",\n \"parts\": [\n {\n \"text\": \" The
27+
image shows a table with a cup of coffee, a bowl of blueberries, and several
28+
blueberry scones. There are also pink flowers on the table.\"\n }\n
29+
\ ]\n },\n \"finishReason\": 1,\n \"safetyRatings\":
30+
[\n {\n \"category\": 1,\n \"probability\": 1,\n
31+
\ \"probabilityScore\": 0.024780273,\n \"severity\": 1,\n
32+
\ \"severityScore\": 0.072753906\n },\n {\n \"category\":
33+
2,\n \"probability\": 1,\n \"probabilityScore\": 0.025512695,\n
34+
\ \"severity\": 1,\n \"severityScore\": 0.06738281\n },\n
35+
\ {\n \"category\": 3,\n \"probability\": 1,\n \"probabilityScore\":
36+
0.040283203,\n \"severity\": 1,\n \"severityScore\": 0.03515625\n
37+
\ },\n {\n \"category\": 4,\n \"probability\":
38+
1,\n \"probabilityScore\": 0.07910156,\n \"severity\": 1,\n
39+
\ \"severityScore\": 0.083984375\n }\n ],\n \"avgLogprobs\":
40+
-0.068832365671793613\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
41+
265,\n \"candidatesTokenCount\": 30,\n \"totalTokenCount\": 295\n },\n
42+
\ \"modelVersion\": \"gemini-pro-vision\"\n}\n"
43+
headers:
44+
Cache-Control:
45+
- private
46+
Content-Encoding:
47+
- gzip
48+
Content-Type:
49+
- application/json; charset=UTF-8
50+
Transfer-Encoding:
51+
- chunked
52+
Vary:
53+
- Origin
54+
- X-Origin
55+
- Referer
56+
status:
57+
code: 200
58+
message: OK
59+
version: 1

instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py

Lines changed: 60 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,11 @@
11
"""Unit tests configuration module."""
22

3+
from os import replace
4+
import re
5+
from typing import Any, Mapping, MutableMapping
6+
37
import pytest
8+
from google.auth.credentials import AnonymousCredentials
49

510
from opentelemetry import trace
611
from opentelemetry.instrumentation.vertexai_v2 import VertexAIInstrumentor
@@ -12,6 +17,11 @@
1217

1318
pytest_plugins = []
1419

20+
import vertexai
21+
from vcr import VCR
22+
from vcr.record_mode import RecordMode
23+
from vcr.request import Request
24+
1525

1626
@pytest.fixture(scope="session")
1727
def exporter():
@@ -32,6 +42,55 @@ def clear_exporter(exporter):
3242
exporter.clear()
3343

3444

45+
@pytest.fixture(autouse=True)
def vertexai_init(vcr: VCR) -> None:
    """Re-run ``vertexai.init()`` before every test.

    The vertexai SDK keeps a single global config which its submodules
    reference directly
    (https://github.com/googleapis/python-aiplatform/blob/v1.74.0/google/cloud/aiplatform/initializer.py#L687),
    so there is no clean way to reset it per test; calling ``init()`` each
    time prevents config from one test leaking into the next.

    When cassettes are only played back (CI), anonymous credentials are
    supplied so no application-default-credential lookup, metadata-server
    call, or OAuth token exchange happens -- auth is not the interesting
    part of the instrumentation under test.
    """
    playback_only = vcr.record_mode is RecordMode.NONE
    if playback_only:
        vertexai.init(credentials=AnonymousCredentials())
    else:
        vertexai.init()
3561
@pytest.fixture(scope="module")
def vcr_config():
    """VCR configuration that sanitizes cassettes as they are recorded.

    Strips auth and other environment-specific headers from both request
    and response, rewrites the real GCP project id in request URIs to a
    placeholder, and skips recording the OAuth token exchange entirely.
    """
    # NOTE: matched with re.match(), which anchors at the start only -- so
    # "Server" also filters e.g. "Server-Timing". That is fine here; all of
    # these are environment/timing noise we never want in a cassette.
    filter_header_regexes = [
        r"X-.*",
        "Server",
        "Date",
        "Expires",
        "Authorization",
    ]

    def filter_headers(headers: Mapping[str, str]) -> Mapping[str, str]:
        # Keep only headers that match none of the regexes (case-insensitive).
        return {
            key: val
            for key, val in headers.items()
            if not any(
                re.match(filter_re, key, re.IGNORECASE)
                for filter_re in filter_header_regexes
            )
        }

    def before_record_cb(request: Request):
        # Sanitize the outgoing request before it is written to the cassette.
        request.headers = filter_headers(request.headers)
        # Replace the real project id so cassettes are environment-agnostic.
        # (Plain string, not an f-string -- there is nothing to interpolate.)
        request.uri = re.sub(
            r"/projects/[^/]+/", "/projects/fake-project/", request.uri
        )
        return request

    def before_response_cb(response: MutableMapping[str, Any]):
        # Sanitize recorded response headers the same way.
        response["headers"] = filter_headers(response["headers"])
        return response

    return {
        "before_record_request": before_record_cb,
        "before_record_response": before_response_cb,
        # Never record the token exchange: it carries credentials and is not
        # part of the instrumentation being tested.
        "ignore_hosts": ["oauth2.googleapis.com"],
    }

instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py

Lines changed: 0 additions & 52 deletions
This file was deleted.
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
import pytest
2+
from vertexai.preview.generative_models import GenerativeModel, Part
3+
4+
# from opentelemetry.semconv_ai import SpanAttributes
5+
6+
7+
@pytest.mark.vcr
def test_vertexai_generate_content(exporter):
    """A multimodal generate_content call emits exactly one completion span."""
    multimodal_model = GenerativeModel("gemini-pro-vision")
    # `response` is only needed by the TODO assertions below; kept so they
    # can be re-enabled without touching the call site.
    response = multimodal_model.generate_content(
        [
            Part.from_uri(
                "gs://generativeai-downloads/images/scones.jpg",
                mime_type="image/jpeg",
            ),
            "what is shown in this image?",
        ]
    )

    spans = exporter.get_finished_spans()
    # Check the count BEFORE indexing spans[0] so an empty list fails with a
    # clear assertion message instead of an IndexError.
    assert len(spans) == 1
    assert [span.name for span in spans] == [
        "text_completion gemini-pro-vision"
    ]

    vertexai_span = spans[0]
    # TODO: re-enable once the SpanAttributes import (currently commented out
    # at the top of this file) is restored and the instrumentation emits
    # these attributes.
    # assert (
    #     "what is shown in this image?"
    #     in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
    #     == "gemini-pro-vision"
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
    #     == response._raw_response.usage_metadata.total_token_count
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]
    #     == response._raw_response.usage_metadata.prompt_token_count
    # )
    # assert (
    #     vertexai_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]
    #     == response._raw_response.usage_metadata.candidates_token_count
    # )
    # assert (
    #     vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
    #     == response.text
    # )

0 commit comments

Comments
 (0)