Commit 302c2a4

Use standard OTel tracing error handling
1 parent 5dfc282 commit 302c2a4
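
The change relies on the default behavior of Tracer.start_as_current_span: when an exception escapes the with block, the active span records an "exception" event, has its status set to ERROR, and is ended, so the instrumentation no longer needs its own try/except or the handle_span_exception helper. A minimal sketch of that default behavior (illustrative span name and error; assumes an SDK tracer provider is configured):

from opentelemetry import trace

tracer = trace.get_tracer("sketch")

try:
    # record_exception=True and set_status_on_exception=True are the defaults,
    # so the escaping exception becomes an "exception" event and the span
    # status becomes ERROR before the span is ended.
    with tracer.start_as_current_span("chat gemini-1.5-flash-002"):
        raise RuntimeError("simulated Vertex AI failure")
except RuntimeError:
    pass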

File tree

5 files changed, +116 -36 lines changed


instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/patch.py

Lines changed: 13 additions & 21 deletions
@@ -21,7 +21,6 @@
     GenerateContentParams,
     get_genai_request_attributes,
     get_span_name,
-    handle_span_exception,
 )
 from opentelemetry.trace import SpanKind, Tracer

@@ -85,33 +84,26 @@ def extract_params(
             name=span_name,
             kind=SpanKind.CLIENT,
             attributes=span_attributes,
-            end_on_exit=False,
-        ) as span:
+        ) as _span:
             # TODO: emit request events
             # if span.is_recording():
             #     for message in kwargs.get("messages", []):
             #         event_logger.emit(
             #             message_to_event(message, capture_content)
             #         )

-            try:
-                result = wrapped(*args, **kwargs)
-                # TODO: handle streaming
-                # if is_streaming(kwargs):
-                #     return StreamWrapper(
-                #         result, span, event_logger, capture_content
-                #     )
+            result = wrapped(*args, **kwargs)
+            # TODO: handle streaming
+            # if is_streaming(kwargs):
+            #     return StreamWrapper(
+            #         result, span, event_logger, capture_content
+            #     )

-                # TODO: add response attributes and events
-                # if span.is_recording():
-                #     _set_response_attributes(
-                #         span, result, event_logger, capture_content
-                #     )
-                span.end()
-                return result
-
-            except Exception as error:
-                handle_span_exception(span, error)
-                raise
+            # TODO: add response attributes and events
+            # if span.is_recording():
+            #     _set_response_attributes(
+            #         span, result, event_logger, capture_content
+            #     )
+            return result

     return traced_method

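Dropping end_on_exit=False also reverts to the default end_on_exit=True, which is why the explicit span.end() call disappears: the context manager ends the span when the block exits, whether normally or through an exception. A small self-contained sketch of that lifecycle (assumed setup and illustrative names, not code from this repository):

from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
    InMemorySpanExporter,
)

provider = TracerProvider()
exporter = InMemorySpanExporter()
provider.add_span_processor(SimpleSpanProcessor(exporter))
tracer = provider.get_tracer("sketch")

# With the default end_on_exit=True there is no need to call span.end();
# the span finishes as soon as the with block exits.
with tracer.start_as_current_span("chat demo-model"):
    pass

assert len(exporter.get_finished_spans()) == 1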
instrumentation-genai/opentelemetry-instrumentation-vertexai/src/opentelemetry/instrumentation/vertexai/utils.py

Lines changed: 0 additions & 14 deletions
@@ -29,11 +29,6 @@
 from opentelemetry.semconv._incubating.attributes import (
     gen_ai_attributes as GenAIAttributes,
 )
-from opentelemetry.semconv.attributes import (
-    error_attributes as ErrorAttributes,
-)
-from opentelemetry.trace import Span
-from opentelemetry.trace.status import Status, StatusCode
 from opentelemetry.util.types import AttributeValue

 if TYPE_CHECKING:
@@ -146,12 +141,3 @@ def get_span_name(span_attributes: Mapping[str, AttributeValue]):
     name = span_attributes.get(GenAIAttributes.GEN_AI_OPERATION_NAME, "")
     model = span_attributes.get(GenAIAttributes.GEN_AI_REQUEST_MODEL, "")
     return f"{name} {model}"
-
-
-def handle_span_exception(span: Span, error: Exception):
-    span.set_status(Status(StatusCode.ERROR, str(error)))
-    if span.is_recording():
-        span.set_attribute(
-            ErrorAttributes.ERROR_TYPE, type(error).__qualname__
-        )
-    span.end()

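For comparison, the removed helper did by hand roughly what the span now does on its own when an exception escapes start_as_current_span. A simplified sketch of that equivalent (illustrative function, not the SDK source); note that the default path records the error type on the "exception" event rather than as an error.type span attribute, which is also what the new test asserts:

from opentelemetry.trace import Span, Status, StatusCode


def manual_error_handling(span: Span, error: Exception) -> None:
    # Roughly what the default context-manager behavior already provides:
    # an "exception" event, an ERROR status, and an ended span.
    span.record_exception(error)
    span.set_status(Status(StatusCode.ERROR, str(error)))
    span.end()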
instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/cassettes/test_vertexai_generate_content.yaml

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ interactions:
             ]
           },
           "finishReason": 1,
-          "avgLogprobs": -0.005634686664531105
+          "avgLogprobs": -0.0054909539850134595
         }
       ],
       "usageMetadata": {

Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
+interactions:
+- request:
+    body: |-
+      {
+        "contents": [
+          {
+            "role": "user",
+            "parts": [
+              {
+                "text": "Say this is a test"
+              }
+            ]
+          }
+        ],
+        "generationConfig": {
+          "temperature": 1000.0
+        }
+      }
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '196'
+      Content-Type:
+      - application/json
+      User-Agent:
+      - python-requests/2.32.3
+    method: POST
+    uri: https://us-central1-aiplatform.googleapis.com/v1/projects/fake-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002:generateContent?%24alt=json%3Benum-encoding%3Dint
+  response:
+    body:
+      string: |-
+        {
+          "error": {
+            "code": 400,
+            "message": "Unable to submit request because it has a temperature value of 1000 but the supported range is from 0 (inclusive) to 2.0001 (exclusive). Update the value and try again.",
+            "status": "INVALID_ARGUMENT",
+            "details": [
+              {
+                "@type": "type.googleapis.com/google.rpc.DebugInfo",
+                "detail": "[ORIGINAL ERROR] generic::invalid_argument: Unable to submit request because it has a temperature value of 1000 but the supported range is from 0 (inclusive) to 2.0001 (exclusive). Update the value and try again. [google.rpc.error_details_ext] { message: \"Unable to submit request because it has a temperature value of 1000 but the supported range is from 0 (inclusive) to 2.0001 (exclusive). Update the value and try again.\" }"
+              }
+            ]
+          }
+        }
+    headers:
+      Content-Type:
+      - application/json; charset=UTF-8
+      Transfer-Encoding:
+      - chunked
+      Vary:
+      - Origin
+      - X-Origin
+      - Referer
+      content-length:
+      - '809'
+    status:
+      code: 400
+      message: Bad Request
+version: 1

instrumentation-genai/opentelemetry-instrumentation-vertexai/tests/test_chat_completions.py

Lines changed: 38 additions & 0 deletions
@@ -1,4 +1,5 @@
 import pytest
+from google.api_core.exceptions import BadRequest
 from vertexai.generative_models import (
     Content,
     GenerationConfig,
@@ -10,6 +11,7 @@
 from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
     InMemorySpanExporter,
 )
+from opentelemetry.trace import StatusCode


 @pytest.mark.vcr
@@ -34,6 +36,42 @@ def test_vertexai_generate_content(
     }


+@pytest.mark.vcr
+def test_vertexai_generate_content_error(
+    span_exporter: InMemorySpanExporter,
+    instrument_with_content: VertexAIInstrumentor,
+):
+    model = GenerativeModel("gemini-1.5-flash-002")
+    try:
+        # Temperature out of range causes error
+        model.generate_content(
+            [
+                Content(
+                    role="user", parts=[Part.from_text("Say this is a test")]
+                )
+            ],
+            generation_config=GenerationConfig(temperature=1000),
+        )
+    except BadRequest:
+        pass
+
+    spans = span_exporter.get_finished_spans()
+    assert len(spans) == 1
+    assert spans[0].name == "chat gemini-1.5-flash-002"
+    assert dict(spans[0].attributes) == {
+        "gen_ai.operation.name": "chat",
+        "gen_ai.request.model": "gemini-1.5-flash-002",
+        "gen_ai.request.temperature": 1000.0,
+        "gen_ai.system": "vertex_ai",
+    }
+    # Sets error status
+    assert spans[0].status.status_code == StatusCode.ERROR
+
+    # Records exception event
+    assert len(spans[0].events) == 1
+    assert spans[0].events[0].name == "exception"
+
+
 @pytest.mark.vcr()
 def test_chat_completion_extra_client_level_params(
     span_exporter, instrument_no_content

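If a later test needs to look inside the recorded exception event, the event carries the standard exception.* attributes from the OpenTelemetry semantic conventions. A hedged sketch of extra assertions that could sit at the end of the test above (hypothetical additions, not part of this commit):

# Hypothetical follow-up assertions inside test_vertexai_generate_content_error;
# attribute keys come from the OpenTelemetry exception semantic conventions.
event = spans[0].events[0]
assert event.attributes["exception.type"].endswith("BadRequest")
assert "temperature" in event.attributes["exception.message"]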