Skip to content

Commit c250004

Browse files
committed
Add test for error case and rework things a bit
1 parent 1a3fb9c commit c250004

File tree

4 files changed

+236
-95
lines changed

4 files changed

+236
-95
lines changed

instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py

Lines changed: 19 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,10 @@
2424
from opentelemetry.instrumentation.botocore.extensions.types import (
2525
_AttributeMapT,
2626
_AwsSdkExtension,
27+
_BotoClientErrorT,
28+
)
29+
from opentelemetry.semconv._incubating.attributes.error_attributes import (
30+
ERROR_TYPE,
2731
)
2832
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
2933
GEN_AI_OPERATION_NAME,
@@ -40,6 +44,7 @@
4044
GenAiSystemValues,
4145
)
4246
from opentelemetry.trace.span import Span
47+
from opentelemetry.trace.status import Status, StatusCode
4348

4449
_logger = logging.getLogger(__name__)
4550

@@ -56,25 +61,17 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
5661
_HANDLED_OPERATIONS = {"Converse"}
5762

5863
def extract_attributes(self, attributes: _AttributeMapT):
59-
attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value
60-
6164
if self._call_context.operation not in self._HANDLED_OPERATIONS:
6265
return
6366

67+
attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value
68+
6469
model_id = self._call_context.params.get(_MODEL_ID_KEY)
6570
if model_id:
6671
attributes[GEN_AI_REQUEST_MODEL] = model_id
67-
68-
# FIXME: add other model patterns
69-
text_model_patterns = [
70-
"amazon.titan-text",
71-
"anthropic.claude",
72-
"meta.llama",
73-
]
74-
if any(pattern in model_id for pattern in text_model_patterns):
75-
attributes[GEN_AI_OPERATION_NAME] = (
76-
GenAiOperationNameValues.CHAT.value
77-
)
72+
attributes[GEN_AI_OPERATION_NAME] = (
73+
GenAiOperationNameValues.CHAT.value
74+
)
7875

7976
if inference_config := self._call_context.params.get(
8077
"inferenceConfig"
@@ -122,9 +119,7 @@ def on_success(self, span: Span, result: dict[str, Any]):
122119
if self._call_context.operation not in self._HANDLED_OPERATIONS:
123120
return
124121

125-
model_id = self._call_context.params.get(_MODEL_ID_KEY)
126-
127-
if not model_id:
122+
if not span.is_recording():
128123
return
129124

130125
if usage := result.get("usage"):
@@ -144,3 +139,11 @@ def on_success(self, span: Span, result: dict[str, Any]):
144139
GEN_AI_RESPONSE_FINISH_REASONS,
145140
[stop_reason],
146141
)
142+
143+
def on_error(self, span: Span, exception: _BotoClientErrorT):
    """Record a failed Bedrock call on *span*.

    Only operations in ``_HANDLED_OPERATIONS`` (currently ``Converse``) are
    instrumented; anything else is ignored. The span status is always set to
    ERROR with the exception text, while the ``error.type`` attribute is only
    written when the span is actually recording.
    """
    if self._call_context.operation not in self._HANDLED_OPERATIONS:
        return

    span.set_status(Status(StatusCode.ERROR, str(exception)))
    if not span.is_recording():
        return
    # Use the exception's qualified class name (e.g. "ValidationException")
    # as the semconv error.type value.
    span.set_attribute(ERROR_TYPE, type(exception).__qualname__)
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
# Copyright The OpenTelemetry Authors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
from __future__ import annotations
16+
17+
from typing import Any
18+
19+
from opentelemetry.sdk.trace import ReadableSpan
20+
from opentelemetry.semconv._incubating.attributes import (
21+
gen_ai_attributes as GenAIAttributes,
22+
)
23+
24+
25+
def assert_completion_attributes(
    span: ReadableSpan,
    request_model: str,
    response: dict[str, Any] | None,
    operation_name: str = "chat",
    request_top_p: int | None = None,
    request_temperature: int | None = None,
    request_max_tokens: int | None = None,
    request_stop_sequences: list[str] | None = None,
):
    """Assert the GenAI span attributes for a Converse completion.

    *response* is the raw Converse API result, or None for a failed call
    (in which case token usage and finish reason must be absent).
    """
    input_tokens = None
    output_tokens = None
    usage = response.get("usage") if response else None
    if usage:
        input_tokens = usage["inputTokens"]
        output_tokens = usage["outputTokens"]

    # The semconv attribute is a sequence, so wrap the single stop reason.
    finish_reason = (response["stopReason"],) if response else None

    stop_sequences = (
        tuple(request_stop_sequences)
        if request_stop_sequences is not None
        else request_stop_sequences
    )

    return assert_all_attributes(
        span,
        request_model,
        input_tokens,
        output_tokens,
        finish_reason,
        operation_name,
        request_top_p,
        request_temperature,
        request_max_tokens,
        stop_sequences,
    )
60+
61+
62+
def assert_equal_or_not_present(value, attribute_name, span):
    """Assert *span* has ``attribute_name`` equal to *value*, or that the
    attribute is absent when *value* is None.

    Uses an explicit ``is not None`` check rather than truthiness: valid
    falsy values (0 input/output tokens, temperature 0, top_p 0) must still
    be compared against the recorded attribute instead of being treated as
    "expected absent".
    """
    if value is not None:
        assert value == span.attributes[attribute_name]
    else:
        assert attribute_name not in span.attributes
67+
68+
69+
def assert_all_attributes(
    span: ReadableSpan,
    request_model: str,
    input_tokens: int | None = None,
    output_tokens: int | None = None,
    finish_reason: tuple[str] | None = None,
    operation_name: str = "chat",
    request_top_p: int | None = None,
    request_temperature: int | None = None,
    request_max_tokens: int | None = None,
    request_stop_sequences: tuple[str] | None = None,
):
    """Assert the full set of GenAI semconv attributes on *span*.

    The span name, operation, system, and request model are always required;
    the remaining attributes are checked with assert_equal_or_not_present,
    i.e. they must match when an expected value is given and be absent
    otherwise.
    """
    assert span.name == f"{operation_name} {request_model}"
    assert (
        operation_name
        == span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]
    )
    assert (
        GenAIAttributes.GenAiSystemValues.AWS_BEDROCK.value
        == span.attributes[GenAIAttributes.GEN_AI_SYSTEM]
    )
    assert (
        request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
    )

    # Optional attributes: (expected value, semconv attribute key).
    optional_checks = (
        (input_tokens, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS),
        (output_tokens, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS),
        (finish_reason, GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS),
        (request_top_p, GenAIAttributes.GEN_AI_REQUEST_TOP_P),
        (request_temperature, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE),
        (request_max_tokens, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS),
        (request_stop_sequences, GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES),
    )
    for expected, attribute_name in optional_checks:
        assert_equal_or_not_present(expected, attribute_name, span)
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
interactions:
2+
- request:
3+
body: |-
4+
{
5+
"messages": [
6+
{
7+
"role": "user",
8+
"content": [
9+
{
10+
"text": "Say this is a test"
11+
}
12+
]
13+
}
14+
]
15+
}
16+
headers:
17+
Content-Length:
18+
- '77'
19+
Content-Type:
20+
- !!binary |
21+
YXBwbGljYXRpb24vanNvbg==
22+
User-Agent:
23+
- !!binary |
24+
Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x
25+
MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0
26+
aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2
27+
X-Amz-Date:
28+
- !!binary |
29+
MjAyNTAxMTVUMTEwMTQ3Wg==
30+
X-Amz-Security-Token:
31+
- test_aws_security_token
32+
X-Amzn-Trace-Id:
33+
- !!binary |
34+
Um9vdD0xLWIzM2JhNTkxLTdkYmQ0ZDZmYTBmZTdmYzc2MTExOThmNztQYXJlbnQ9NzRmNmQ1NTEz
35+
MzkzMzUxNTtTYW1wbGVkPTE=
36+
amz-sdk-invocation-id:
37+
- !!binary |
38+
NTQ5MmQ0NTktNzhkNi00ZWY4LTlmMDMtZTA5ODhkZGRiZDI5
39+
amz-sdk-request:
40+
- !!binary |
41+
YXR0ZW1wdD0x
42+
authorization:
43+
- Bearer test_aws_authorization
44+
method: POST
45+
uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/does-not-exist/converse
46+
response:
47+
body:
48+
string: |-
49+
{
50+
"message": "The provided model identifier is invalid."
51+
}
52+
headers:
53+
Connection:
54+
- keep-alive
55+
Content-Length:
56+
- '55'
57+
Content-Type:
58+
- application/json
59+
Date:
60+
- Wed, 15 Jan 2025 11:01:47 GMT
61+
Set-Cookie: test_set_cookie
62+
x-amzn-ErrorType:
63+
- ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/
64+
x-amzn-RequestId:
65+
- d425bf99-8a4e-4d83-8d77-a48410dd82b2
66+
status:
67+
code: 400
68+
message: Bad Request
69+
version: 1

instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py

Lines changed: 32 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -14,15 +14,15 @@
1414

1515
from __future__ import annotations
1616

17-
from typing import Any
18-
1917
import boto3
2018
import pytest
2119

22-
from opentelemetry.sdk.trace import ReadableSpan
23-
from opentelemetry.semconv._incubating.attributes import (
24-
gen_ai_attributes as GenAIAttributes,
20+
from opentelemetry.semconv._incubating.attributes.error_attributes import (
21+
ERROR_TYPE,
2522
)
23+
from opentelemetry.trace.status import StatusCode
24+
25+
from .bedrock_utils import assert_completion_attributes
2626

2727
BOTO3_VERSION = tuple(int(x) for x in boto3.__version__.split("."))
2828

@@ -68,82 +68,35 @@ def test_converse_with_content(
6868
assert len(logs) == 0
6969

7070

71-
def assert_completion_attributes(
72-
span: ReadableSpan,
73-
request_model: str,
74-
response: dict[str, Any],
75-
operation_name: str = "chat",
76-
request_top_p: int | None = None,
77-
request_temperature: int | None = None,
78-
request_max_tokens: int | None = None,
79-
request_stop_sequences: list[str] | None = None,
71+
@pytest.mark.skipif(
    BOTO3_VERSION < (1, 35, 56), reason="Converse API not available"
)
@pytest.mark.vcr()
def test_converse_with_invalid_model(
    span_exporter,
    log_exporter,
    bedrock_runtime_client,
    instrument_with_content,
):
    """Converse with a bogus model id: the client raises ValidationException
    and the span records ERROR status plus the error.type attribute, with no
    token usage or finish reason and no emitted logs.
    """
    llm_model_value = "does-not-exist"
    messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}]

    with pytest.raises(bedrock_runtime_client.exceptions.ValidationException):
        bedrock_runtime_client.converse(
            messages=messages,
            modelId=llm_model_value,
        )

    # Exactly one span, carrying only the request-side GenAI attributes.
    (span,) = span_exporter.get_finished_spans()
    assert_completion_attributes(
        span,
        llm_model_value,
        None,
        "chat",
    )

    assert span.status.status_code == StatusCode.ERROR
    assert span.attributes[ERROR_TYPE] == "ValidationException"

    logs = log_exporter.get_finished_logs()
    assert len(logs) == 0

0 commit comments

Comments
 (0)