Commit 71703bc

Merge branch 'main' into fix-notice-error-bug

2 parents 46f6aac + 2dd463d
File tree

7 files changed: +375 −5 lines changed


.github/workflows/trivy.yml

Lines changed: 1 addition & 1 deletion

@@ -61,6 +61,6 @@ jobs:
 
       - name: Upload Trivy scan results to GitHub Security tab
        if: ${{ github.event_name == 'schedule' }}
-       uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # 4.31.0
+       uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # 4.31.2
        with:
          sarif_file: "trivy-results.sarif"

newrelic/hooks/external_botocore.py

Lines changed: 17 additions & 3 deletions

@@ -394,7 +394,7 @@ def extract_bedrock_claude_model_request(request_body, bedrock_attrs):
         ]
     else:
         input_message_list = [{"role": "user", "content": request_body.get("prompt")}]
-    bedrock_attrs["request.max_tokens"] = request_body.get("max_tokens_to_sample")
+    bedrock_attrs["request.max_tokens"] = request_body.get("max_tokens_to_sample") or request_body.get("max_tokens")
     bedrock_attrs["request.temperature"] = request_body.get("temperature")
     bedrock_attrs["input_message_list"] = input_message_list
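
Reviewer note: the new fallback covers both Anthropic request schemas — the legacy Text Completions body carries max_tokens_to_sample, while the Claude 3 Messages API body carries max_tokens. A minimal sketch (example bodies assumed, mirroring the payload templates in the tests below):

    # Sketch: the two Claude request-body shapes the fallback reconciles (assumed examples).
    legacy_body = {"prompt": "Human: Hi Assistant:", "temperature": 0.7, "max_tokens_to_sample": 100}
    messages_body = {
        "anthropic_version": "bedrock-2023-05-31",
        "messages": [{"role": "user", "content": "Hi"}],
        "temperature": 0.7,
        "max_tokens": 100,
    }

    for request_body in (legacy_body, messages_body):
        # Prefer the legacy key, then fall back to the Messages API key, as the patched line does.
        assert (request_body.get("max_tokens_to_sample") or request_body.get("max_tokens")) == 100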

@@ -406,7 +406,13 @@ def extract_bedrock_claude_model_response(response_body, bedrock_attrs):
         response_body = json.loads(response_body)
         role = response_body.get("role", "assistant")
         content = response_body.get("content") or response_body.get("completion")
-        output_message_list = [{"role": role, "content": content}]
+
+        # For Claude Sonnet 3+ models, the content key holds a list with the type and text of the output
+        if isinstance(content, list):
+            output_message_list = [{"role": "assistant", "content": result.get("text")} for result in content]
+        else:
+            output_message_list = [{"role": role, "content": content}]
+
         bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason")
         bedrock_attrs["output_message_list"] = output_message_list
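
Reviewer note: a standalone sketch of the branching above (extract_claude_output is an illustrative name, not the hook's actual entry point), runnable on its own, showing the two response shapes the extractor now accepts:

    import json

    def extract_claude_output(response_body):
        """Sketch of the new branching, isolated for illustration."""
        response_body = json.loads(response_body)
        role = response_body.get("role", "assistant")
        content = response_body.get("content") or response_body.get("completion")
        if isinstance(content, list):
            # Claude 3+ Messages API: content is a list of {"type": "text", "text": ...} blocks.
            return [{"role": "assistant", "content": block.get("text")} for block in content]
        # Legacy Text Completions API: content is the plain completion string.
        return [{"role": role, "content": content}]

    # Legacy shape
    assert extract_claude_output('{"completion": "Hello!"}') == [{"role": "assistant", "content": "Hello!"}]
    # Claude 3 Messages shape
    assert extract_claude_output('{"role": "assistant", "content": [{"type": "text", "text": "Hello!"}]}') == [
        {"role": "assistant", "content": "Hello!"}
    ]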

@@ -420,6 +426,7 @@ def extract_bedrock_claude_model_streaming_response(response_body, bedrock_attrs):
         bedrock_attrs["output_message_list"] = [{"role": "assistant", "content": ""}]
         bedrock_attrs["output_message_list"][0]["content"] += content
     bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason")
+
     return bedrock_attrs
 

@@ -639,7 +646,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs):
 
     # Determine extractor by model type
     for extractor_name, request_extractor, response_extractor, stream_extractor in MODEL_EXTRACTORS:  # noqa: B007
-        if model.startswith(extractor_name):
+        if extractor_name in model:
             break
     else:
         # Model was not found in extractor list
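
Reviewer note: switching from startswith to a substring test lets the extractor key match anywhere in the model ID. The diff doesn't state the motive; one plausible case (an assumption, not confirmed here) is a model ID that carries a prefix ahead of the vendor name. A sketch with a stand-in table:

    # Sketch only: EXTRACTORS stands in for the agent's MODEL_EXTRACTORS tuples.
    EXTRACTORS = ["amazon.titan", "anthropic.claude", "cohere.command", "meta.llama2", "mistral"]

    def find_extractor(model):
        for extractor_name in EXTRACTORS:
            if extractor_name in model:  # substring match, as in the new code
                return extractor_name
        return None

    assert find_extractor("anthropic.claude-3-sonnet-20240229-v1:0") == "anthropic.claude"
    # A prefixed ID (hypothetical) still matches under "in" but would fail under startswith:
    assert find_extractor("us.anthropic.claude-3-sonnet-20240229-v1:0") == "anthropic.claude"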
@@ -1057,6 +1064,13 @@ def handle_chat_completion_event(transaction, bedrock_attrs):
 
     input_message_list = bedrock_attrs.get("input_message_list", [])
     output_message_list = bedrock_attrs.get("output_message_list", [])
+
+    no_output_content = len(output_message_list) == 1 and not output_message_list[0].get("content", "")
+
+    # This check handles Sonnet 3+ models, which report an additional empty input and empty output in streaming cases after the main content has been generated
+    if not input_message_list and no_output_content:
+        return
+
     number_of_messages = (
         len(input_message_list) + len(output_message_list)
     ) or None  # If 0, attribute will be set to None and removed
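
Reviewer note: the guard above suppresses the trailing empty chat-completion event; isolated as a sketch (should_drop_event is an illustrative name, not the agent's):

    def should_drop_event(bedrock_attrs):
        """Sketch of the new guard, isolated for illustration."""
        input_message_list = bedrock_attrs.get("input_message_list", [])
        output_message_list = bedrock_attrs.get("output_message_list", [])
        no_output_content = len(output_message_list) == 1 and not output_message_list[0].get("content", "")
        return not input_message_list and no_output_content

    # The extra streaming event Sonnet 3+ emits: no inputs, one empty output -> dropped.
    assert should_drop_event({"output_message_list": [{"role": "assistant", "content": ""}]})
    # A real completion keeps its event.
    assert not should_drop_event({
        "input_message_list": [{"role": "user", "content": "Hi"}],
        "output_message_list": [{"role": "assistant", "content": "Hello!"}],
    })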

tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py

Lines changed: 1 addition & 0 deletions

@@ -73,6 +73,7 @@ def request_streaming(request):
         "amazon.titan-text-express-v1",
         "ai21.j2-mid-v1",
         "anthropic.claude-instant-v1",
+        "anthropic.claude-3-sonnet-20240229-v1:0",
         "meta.llama2-13b-chat-v1",
         "mistral.mistral-7b-instruct-v0:2",
     ],

tests/external_botocore/_mock_external_bedrock_server_invoke_model.py

Lines changed: 83 additions & 0 deletions
Large diffs are not rendered by default.

tests/external_botocore/_test_bedrock_chat_completion.py

Lines changed: 271 additions & 0 deletions

@@ -19,6 +19,7 @@
     "amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}',
     "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}',
     "anthropic.claude-instant-v1": '{"prompt": "Human: %s Assistant:", "temperature": %f, "max_tokens_to_sample": %d}',
+    "anthropic.claude-3-sonnet-20240229-v1:0": '{"anthropic_version": "bedrock-2023-05-31", "messages": [{"role": "user", "content": "%s"}], "temperature": %f, "max_tokens": %d}',
     "cohere.command-text-v14": '{"prompt": "%s", "temperature": %f, "max_tokens": %d}',
     "meta.llama2-13b-chat-v1": '{"prompt": "%s", "temperature": %f, "max_gen_len": %d}',
     "mistral.mistral-7b-instruct-v0:2": '{"prompt": "<s>[INST] %s [/INST]", "temperature": %f, "max_tokens": %d}',
@@ -262,6 +263,65 @@
             },
         ),
     ],
+    "anthropic.claude-3-sonnet-20240229-v1:0": [
+        (
+            {"type": "LlmChatCompletionSummary"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "request_id": "ab38295d-df9c-4141-8173-38221651bf46",
+                "duration": None,  # Response time varies each test run
+                "request.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "request.temperature": 0.7,
+                "request.max_tokens": 100,
+                "response.choices.finish_reason": "end_turn",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "response.number_of_messages": 2,
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "ab38295d-df9c-4141-8173-38221651bf46",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "What is 212 degrees Fahrenheit converted to Celsius?",
+                "role": "user",
+                "completion_id": None,
+                "sequence": 0,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "ab38295d-df9c-4141-8173-38221651bf46",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "212 degrees Fahrenheit equals 100 degrees Celsius. This is the boiling point of water at standard atmospheric pressure.",
+                "role": "assistant",
+                "completion_id": None,
+                "sequence": 1,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "is_response": True,
+            },
+        ),
+    ],
     "cohere.command-text-v14": [
         (
             {"type": "LlmChatCompletionSummary"},
@@ -555,6 +615,62 @@
             },
         ),
     ],
+    "anthropic.claude-3-sonnet-20240229-v1:0": [
+        (
+            {"type": "LlmChatCompletionSummary"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff",
+                "duration": None,  # Response time varies each test run
+                "request.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "response.number_of_messages": 2,
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:",
+                "role": "user",
+                "completion_id": None,
+                "sequence": 0,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "I'm ready for a friendly conversation! I'll share specific details when I can, and if I don't know something, I'll be straightforward about that. What would you like to talk about today?",
+                "role": "assistant",
+                "completion_id": None,
+                "sequence": 1,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "is_response": True,
+            },
+        ),
+    ],
     "meta.llama2-13b-chat-v1": [
         (
             {"type": "LlmChatCompletionSummary"},
@@ -787,6 +903,63 @@
             },
         ),
     ],
+    "anthropic.claude-3-sonnet-20240229-v1:0": [
+        (
+            {"type": "LlmChatCompletionSummary"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e",
+                "duration": None,  # Response time varies each test run
+                "request.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "response.choices.finish_reason": "end_turn",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "response.number_of_messages": 2,
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:",
+                "role": "user",
+                "completion_id": None,
+                "sequence": 0,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "I'm ready for a friendly conversation! I'll share specific details when I can, and if I don't know something, I'll be straightforward about that. What would you like to talk about today?",
+                "role": "assistant",
+                "completion_id": None,
+                "sequence": 1,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "is_response": True,
+            },
+        ),
+    ],
     "meta.llama2-13b-chat-v1": [
         (
             {"type": "LlmChatCompletionSummary"},
@@ -1024,6 +1197,64 @@
             },
         ),
     ],
+    "anthropic.claude-3-sonnet-20240229-v1:0": [
+        (
+            {"type": "LlmChatCompletionSummary"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f",
+                "duration": None,  # Response time varies each test run
+                "request.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "request.temperature": 0.7,
+                "request.max_tokens": 100,
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "response.number_of_messages": 2,
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "What is 212 degrees Fahrenheit converted to Celsius?",
+                "role": "user",
+                "completion_id": None,
+                "sequence": 0,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "212 degrees Fahrenheit is equivalent to 100 degrees Celsius.\n\nThis is the boiling point of water at standard atmospheric pressure.",
+                "role": "assistant",
+                "completion_id": None,
+                "sequence": 1,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "is_response": True,
+            },
+        ),
+    ],
     "cohere.command-text-v14": [
         (
             {"type": "LlmChatCompletionSummary"},
@@ -1326,6 +1557,46 @@
             },
         ),
     ],
+    "anthropic.claude-3-sonnet-20240229-v1:0": [
+        (
+            {"type": "LlmChatCompletionSummary"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "request_id": "282ba076-576f-46aa-a2e6-680392132e87",
+                "duration": None,  # Response time varies each test run
+                "request.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "request.temperature": 0.7,
+                "request.max_tokens": 100,
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+                "response.number_of_messages": 1,
+                "error": True,
+            },
+        ),
+        (
+            {"type": "LlmChatCompletionMessage"},
+            {
+                "id": None,  # UUID that varies with each run
+                "llm.conversation_id": "my-awesome-id",
+                "llm.foo": "bar",
+                "request_id": "282ba076-576f-46aa-a2e6-680392132e87",
+                "span_id": None,
+                "trace_id": "trace-id",
+                "content": "Invalid Token",
+                "role": "user",
+                "completion_id": None,
+                "sequence": 0,
+                "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "vendor": "bedrock",
+                "ingest_source": "Python",
+            },
+        ),
+    ],
     "cohere.command-text-v14": [
         (
             {"type": "LlmChatCompletionSummary"},

tests/external_botocore/test_bedrock_chat_completion_invoke_model.py

Lines changed: 1 addition & 1 deletion

@@ -73,6 +73,7 @@ def request_streaming(request):
         "amazon.titan-text-express-v1",
         "ai21.j2-mid-v1",
         "anthropic.claude-instant-v1",
+        "anthropic.claude-3-sonnet-20240229-v1:0",
         "cohere.command-text-v14",
         "meta.llama2-13b-chat-v1",
         "mistral.mistral-7b-instruct-v0:2",

@@ -107,7 +108,6 @@ def _exercise_streaming_model(prompt, temperature=0.7, max_tokens=100):
     body = (payload_template % (prompt, temperature, max_tokens)).encode("utf-8")
     if request_streaming:
         body = BytesIO(body)
-
     response = bedrock_server.invoke_model_with_response_stream(
         body=body, modelId=model_id, accept="application/json", contentType="application/json"
     )
