Commit fa8a699

botocore: don't crash when sending content as string with InvokeModel (#3342)
As langchain apparently does with Anthropic Claude.
1 parent 23cadea commit fa8a699
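
For context only (not part of the commit): a short illustrative sketch of the two shapes a message's "content" field can take in an InvokeModel request body for Anthropic Claude. The list-of-blocks form is what the extractor expects; the bare-string form is what langchain apparently sends and what the new guard below tolerates. The variable names here are made up for illustration.

import json

# Content as a list of blocks: the shape the instrumentation already handled.
list_content_body = json.dumps(
    {
        "messages": [
            {
                "role": "user",
                "content": [{"type": "text", "text": "say this is a test"}],
            }
        ],
        "max_tokens": 10,
        "anthropic_version": "bedrock-2023-05-31",
    }
)

# Content as a bare string: the shape langchain apparently sends with
# InvokeModel and Claude; the commit adds an early return for this case.
string_content_body = json.dumps(
    {
        "messages": [{"role": "user", "content": "say this is a test"}],
        "max_tokens": 10,
        "anthropic_version": "bedrock-2023-05-31",
    }
)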

3 files changed: +125 -0 lines changed

instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py

Lines changed: 4 additions & 0 deletions

@@ -386,6 +386,10 @@ def extract_tool_results(
     if not content:
         return

+    # langchain sends content as string with InvokeModel and Anthropic Claude
+    if isinstance(content, str):
+        return
+
     # Converse format
     tool_results = [
         item["toolResult"] for item in content if "toolResult" in item
Lines changed: 74 additions & 0 deletions

@@ -0,0 +1,74 @@
interactions:
- request:
    body: |-
      {
        "messages": [
          {
            "role": "user",
            "content": "say this is a test"
          }
        ],
        "max_tokens": 10,
        "anthropic_version": "bedrock-2023-05-31"
      }
    headers:
      Content-Length:
      - '126'
      User-Agent:
      - Boto3/1.35.56 md/Botocore#1.35.56 ua/2.0 os/linux#6.1.0-1034-oem md/arch#x86_64
        lang/python#3.10.12 md/pyimpl#CPython cfg/retry-mode#legacy Botocore/1.35.56
      X-Amz-Date:
      - 20250306T091535Z
      X-Amz-Security-Token:
      - test_aws_security_token
      X-Amzn-Trace-Id:
      - Root=1-fcd0825a-03f328cca8cde3e741cd83b4;Parent=5a8eccf8d7b031e7;Sampled=1
      amz-sdk-invocation-id:
      - e8a6c9c5-8a45-4ad7-881b-0761d121abc7
      amz-sdk-request:
      - attempt=1
      authorization:
      - Bearer test_aws_authorization
    method: POST
    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.anthropic.claude-3-5-sonnet-20240620-v1%3A0/invoke
  response:
    body:
      string: |-
        {
          "id": "msg_bdrk_01SjMAZgb8kNMweUzf3moSbU",
          "type": "message",
          "role": "assistant",
          "model": "claude-3-5-sonnet-20240620",
          "content": [
            {
              "type": "text",
              "text": "This is a test."
            }
          ],
          "stop_reason": "end_turn",
          "stop_sequence": null,
          "usage": {
            "input_tokens": 12,
            "output_tokens": 8
          }
        }
    headers:
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 06 Mar 2025 09:15:36 GMT
      Set-Cookie: test_set_cookie
      X-Amzn-Bedrock-Input-Token-Count:
      - '12'
      X-Amzn-Bedrock-Invocation-Latency:
      - '544'
      X-Amzn-Bedrock-Output-Token-Count:
      - '8'
      x-amzn-RequestId:
      - e97cb4f9-61fe-4a62-a29a-a582ddd17414
    status:
      code: 200
      message: OK
version: 1
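
For reference, a hedged sketch of the boto3 call that produces an exchange like the one recorded above, and of how the returned StreamingBody is decoded. It assumes valid AWS credentials or the cassette above being replayed; the values in the comments come from the recorded response.

import json

import boto3

client = boto3.client("bedrock-runtime", region_name="us-east-1")
response = client.invoke_model(
    modelId="us.anthropic.claude-3-5-sonnet-20240620-v1:0",
    body=json.dumps(
        {
            "messages": [{"role": "user", "content": "say this is a test"}],
            "max_tokens": 10,
            "anthropic_version": "bedrock-2023-05-31",
        }
    ),
)

# invoke_model returns the payload under "body" as a botocore StreamingBody;
# read() yields the JSON shown in the recorded response above.
payload = json.loads(response["body"].read())
print(payload["content"][0]["text"])  # "This is a test."
print(payload["usage"])               # {"input_tokens": 12, "output_tokens": 8}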

instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py

Lines changed: 47 additions & 0 deletions

@@ -1256,6 +1256,53 @@ def test_invoke_model_with_content(
     assert_message_in_logs(logs[1], "gen_ai.choice", choice_body, span)


+@pytest.mark.vcr()
+def test_invoke_model_with_content_user_content_as_string(
+    span_exporter,
+    log_exporter,
+    bedrock_runtime_client,
+    instrument_with_content,
+):
+    llm_model_value = "us.anthropic.claude-3-5-sonnet-20240620-v1:0"
+    max_tokens = 10
+    body = json.dumps(
+        {
+            "messages": [{"role": "user", "content": "say this is a test"}],
+            "max_tokens": max_tokens,
+            "anthropic_version": "bedrock-2023-05-31",
+        }
+    )
+    response = bedrock_runtime_client.invoke_model(
+        body=body,
+        modelId=llm_model_value,
+    )
+
+    (span,) = span_exporter.get_finished_spans()
+    assert_completion_attributes_from_streaming_body(
+        span,
+        llm_model_value,
+        response,
+        "chat",
+        request_max_tokens=max_tokens,
+    )
+
+    logs = log_exporter.get_finished_logs()
+    assert len(logs) == 2
+    user_content = {"content": "say this is a test"}
+    assert_message_in_logs(logs[0], "gen_ai.user.message", user_content, span)
+
+    message = {
+        "role": "assistant",
+        "content": [{"type": "text", "text": "This is a test."}],
+    }
+    choice_body = {
+        "index": 0,
+        "finish_reason": "end_turn",
+        "message": message,
+    }
+    assert_message_in_logs(logs[1], "gen_ai.choice", choice_body, span)
+
+
 @pytest.mark.parametrize(
     "model_family",
     ["amazon.nova", "anthropic.claude"],
