Commit 8f617ab: Run ruff

1 parent: 6b70752

4 files changed: +60, -26 lines
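The diffs below are the mechanical output of running ruff over the package: imports are regrouped and alphabetized, and statements longer than the configured line length are wrapped in parentheses. As a rough sketch, the import ordering visible in the diff looks like the following (the grouping is inferred from the diff itself; the project's actual ruff/isort configuration is not part of this commit):

# Sketch only: the import layout that ruff's isort-style rules produce here
# (assumed defaults; the exact ruff configuration is not shown in this commit).

# Standard-library imports, alphabetized
import os
import types
from typing import Collection

# Third-party imports
from wrapt import wrap_function_wrapper

# opentelemetry imports, kept as their own group and sorted by module path
from opentelemetry import context as context_api
from opentelemetry.trace import SpanKind, get_tracer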


instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/src/opentelemetry/instrumentation/vertexai_v2/__init__.py

Lines changed: 7 additions & 8 deletions
@@ -18,26 +18,25 @@
 import os
 import types
 from typing import Collection
-from opentelemetry.instrumentation.vertexai.config import Config
-from opentelemetry.instrumentation.vertexai.utils import dont_throw
+
 from wrapt import wrap_function_wrapper
 
 from opentelemetry import context as context_api
-from opentelemetry.trace import get_tracer, SpanKind
-from opentelemetry.trace.status import Status, StatusCode
-
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from opentelemetry.instrumentation.utils import (
     _SUPPRESS_INSTRUMENTATION_KEY,
     unwrap,
 )
-
+from opentelemetry.instrumentation.vertexai.config import Config
+from opentelemetry.instrumentation.vertexai.utils import dont_throw
+from opentelemetry.instrumentation.vertexai.version import __version__
 from opentelemetry.semconv_ai import (
     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
-    SpanAttributes,
     LLMRequestTypeValues,
+    SpanAttributes,
 )
-from opentelemetry.instrumentation.vertexai.version import __version__
+from opentelemetry.trace import SpanKind, get_tracer
+from opentelemetry.trace.status import Status, StatusCode
 
 logger = logging.getLogger(__name__)
 
instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/conftest.py

Lines changed: 5 additions & 2 deletions
@@ -1,11 +1,14 @@
 """Unit tests configuration module."""
 
 import pytest
+
 from opentelemetry import trace
+from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
 from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
 from opentelemetry.sdk.trace.export import SimpleSpanProcessor
-from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+    InMemorySpanExporter,
+)
 
 pytest_plugins = []

instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_bison.py

Lines changed: 46 additions & 15 deletions
@@ -2,8 +2,13 @@
 
 import pytest
 import vertexai
+from vertexai.language_models import (
+    ChatModel,
+    InputOutputTextPair,
+    TextGenerationModel,
+)
+
 from opentelemetry.semconv_ai import SpanAttributes
-from vertexai.language_models import TextGenerationModel, ChatModel, InputOutputTextPair
 
 vertexai.init()
 
@@ -31,14 +36,17 @@ def test_vertexai_predict(exporter):
 
     vertexai_span = spans[0]
     assert (
-        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison@001"
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
+        == "text-bison@001"
     )
     assert (
         "Give me ten interview questions for the role of program manager."
         in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
     )
     assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    )
     assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40
     assert (
         vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
@@ -74,14 +82,17 @@ async def async_predict_text() -> str:
 
     vertexai_span = spans[0]
     assert (
-        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison@001"
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
+        == "text-bison@001"
     )
     assert (
         "Give me ten interview questions for the role of program manager."
         in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
     )
     assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    )
     assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40
     assert (
         vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
@@ -111,13 +122,18 @@ def test_vertexai_stream(exporter):
     ]
 
     vertexai_span = spans[0]
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison"
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
+        == "text-bison"
+    )
     assert (
         "Give me ten interview questions for the role of program manager."
         in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
     )
     assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    )
     assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40
     assert vertexai_span.attributes[
         f"{SpanAttributes.LLM_COMPLETIONS}.0.content"
@@ -129,7 +145,9 @@ def test_vertexai_stream_async(exporter):
     async def async_streaming_prediction() -> list:
         """Streaming Text Example with a Large Language Model"""
 
-        text_generation_model = TextGenerationModel.from_pretrained("text-bison")
+        text_generation_model = TextGenerationModel.from_pretrained(
+            "text-bison"
+        )
         parameters = {
             "max_output_tokens": 256,
             "top_p": 0.8,
@@ -151,13 +169,18 @@ async def async_streaming_prediction() -> list:
     ]
 
     vertexai_span = spans[0]
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "text-bison"
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
+        == "text-bison"
+    )
     assert (
         "Give me ten interview questions for the role of program manager."
         in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
     )
     assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.8
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    )
     assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40
     assert vertexai_span.attributes[
         f"{SpanAttributes.LLM_COMPLETIONS}.0.content"
@@ -197,14 +220,17 @@ def test_vertexai_chat(exporter):
 
     vertexai_span = spans[0]
     assert (
-        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "chat-bison@001"
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
+        == "chat-bison@001"
     )
     assert (
         "How many planets are there in the solar system?"
         in vertexai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"]
     )
     assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    )
     assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40
     assert (
         vertexai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
@@ -247,11 +273,16 @@ def test_vertexai_chat_stream(exporter):
 
     vertexai_span = spans[0]
     assert (
-        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "chat-bison@001"
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL]
+        == "chat-bison@001"
     )
     assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TOP_P] == 0.95
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] == 0.8
-    assert vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] == 0.8
+    )
+    assert (
+        vertexai_span.attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] == 256
+    )
     assert vertexai_span.attributes[SpanAttributes.LLM_TOP_K] == 40
     assert vertexai_span.attributes[
         f"{SpanAttributes.LLM_COMPLETIONS}.0.content"

instrumentation-genai/opentelemetry-instrumentation-vertexai-v2/tests/disabled_test_gemini.py

Lines changed: 2 additions & 1 deletion
@@ -1,8 +1,9 @@
 import pytest
 import vertexai
-from opentelemetry.semconv_ai import SpanAttributes
 from vertexai.preview.generative_models import GenerativeModel, Part
 
+from opentelemetry.semconv_ai import SpanAttributes
+
 vertexai.init()
 
 