diff --git a/newrelic/hooks/external_aiobotocore.py b/newrelic/hooks/external_aiobotocore.py
index ddb9d4d056..15daa7bd6d 100644
--- a/newrelic/hooks/external_aiobotocore.py
+++ b/newrelic/hooks/external_aiobotocore.py
@@ -149,6 +149,17 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs):
             bedrock_attrs = extract_bedrock_converse_attrs(
                 args[1], response, response_headers, model, span_id, trace_id
             )
+
+            if response_streaming:
+                # Wrap EventStream object here to intercept __iter__ method instead of instrumenting class.
+                # This class is used in numerous other services in botocore, and would cause conflicts.
+                response["stream"] = stream = AsyncEventStreamWrapper(response["stream"])
+                stream._nr_ft = ft or None
+                stream._nr_bedrock_attrs = bedrock_attrs or {}
+                stream._nr_model_extractor = stream_extractor or None
+                stream._nr_is_converse = True
+                return response
+
         else:
             bedrock_attrs = {
                 "request_id": response_headers.get("x-amzn-requestid"),
diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py
index 39317ea752..2fac1988bc 100644
--- a/newrelic/hooks/external_botocore.py
+++ b/newrelic/hooks/external_botocore.py
@@ -826,6 +826,16 @@ def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs):
     bedrock_attrs = extract_bedrock_converse_attrs(kwargs, response, response_headers, model, span_id, trace_id)
 
     try:
+        if response_streaming:
+            # Wrap EventStream object here to intercept __iter__ method instead of instrumenting class.
+            # This class is used in numerous other services in botocore, and would cause conflicts.
+            response["stream"] = stream = EventStreamWrapper(response["stream"])
+            stream._nr_ft = ft
+            stream._nr_bedrock_attrs = bedrock_attrs
+            stream._nr_model_extractor = stream_extractor
+            stream._nr_is_converse = True
+            return response
+
         ft.__exit__(None, None, None)
         bedrock_attrs["duration"] = ft.duration * 1000
         run_bedrock_response_extractor(response_extractor, {}, bedrock_attrs, False, transaction)
@@ -840,6 +850,7 @@ def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs):
 
 def extract_bedrock_converse_attrs(kwargs, response, response_headers, model, span_id, trace_id):
     input_message_list = []
+    output_message_list = None
     # If a system message is supplied, it is under its own key in kwargs rather than with the other input messages
     if "system" in kwargs.keys():
         input_message_list.extend({"role": "system", "content": result["text"]} for result in kwargs.get("system", []))
@@ -850,10 +861,11 @@ def extract_bedrock_converse_attrs(kwargs, response, response_headers, model, sp
             [{"role": "user", "content": result["text"]} for result in kwargs["messages"][-1].get("content", [])]
         )
 
-    output_message_list = [
-        {"role": "assistant", "content": result["text"]}
-        for result in response.get("output").get("message").get("content", [])
-    ]
+    if "output" in response:
+        output_message_list = [
+            {"role": "assistant", "content": result["text"]}
+            for result in response.get("output").get("message").get("content", [])
+        ]
 
     bedrock_attrs = {
         "request_id": response_headers.get("x-amzn-requestid"),
@@ -861,24 +873,117 @@ def extract_bedrock_converse_attrs(kwargs, response, response_headers, model, sp
         "span_id": span_id,
         "trace_id": trace_id,
         "response.choices.finish_reason": response.get("stopReason"),
-        "output_message_list": output_message_list,
         "request.max_tokens": kwargs.get("inferenceConfig", {}).get("maxTokens", None),
         "request.temperature": kwargs.get("inferenceConfig", {}).get("temperature", None),
         "input_message_list": input_message_list,
     }
+
+    if output_message_list is not None:
+        bedrock_attrs["output_message_list"] = output_message_list
+
     return bedrock_attrs
 
 
+class BedrockRecordEventMixin:
+    def record_events_on_stop_iteration(self, transaction):
+        if hasattr(self, "_nr_ft"):
+            bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {})
+            self._nr_ft.__exit__(None, None, None)
+
+            # If there are no bedrock attrs exit early as there's no data to record.
+            if not bedrock_attrs:
+                return
+
+            try:
+                bedrock_attrs["duration"] = self._nr_ft.duration * 1000
+                handle_chat_completion_event(transaction, bedrock_attrs)
+            except Exception:
+                _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True)
+
+            # Clear cached data as this can be very large.
+            self._nr_bedrock_attrs.clear()
+
+    def record_error(self, transaction, exc):
+        if hasattr(self, "_nr_ft"):
+            try:
+                ft = self._nr_ft
+                error_attributes = getattr(self, "_nr_bedrock_attrs", {})
+
+                # If there are no bedrock attrs exit early as there's no data to record.
+                if not error_attributes:
+                    return
+
+                error_attributes = bedrock_error_attributes(exc, error_attributes)
+                notice_error_attributes = {
+                    "http.statusCode": error_attributes.get("http.statusCode"),
+                    "error.message": error_attributes.get("error.message"),
+                    "error.code": error_attributes.get("error.code"),
+                }
+                notice_error_attributes.update({"completion_id": str(uuid.uuid4())})
+
+                ft.notice_error(attributes=notice_error_attributes)
+
+                ft.__exit__(*sys.exc_info())
+                error_attributes["duration"] = ft.duration * 1000
+
+                handle_chat_completion_event(transaction, error_attributes)
+
+                # Clear cached data as this can be very large.
+                error_attributes.clear()
+            except Exception:
+                _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, exc_info=True)
+
+    def record_stream_chunk(self, event, transaction):
+        if event:
+            try:
+                if getattr(self, "_nr_is_converse", False):
+                    return self.converse_record_stream_chunk(event, transaction)
+                else:
+                    return self.invoke_record_stream_chunk(event, transaction)
+            except Exception:
+                _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True)
+
+    def invoke_record_stream_chunk(self, event, transaction):
+        bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {})
+        chunk = json.loads(event["chunk"]["bytes"].decode("utf-8"))
+        self._nr_model_extractor(chunk, bedrock_attrs)
+        # In Langchain, the bedrock iterator exits early if type is "content_block_stop".
+        # So we need to call the record events here since stop iteration will not be raised.
+        _type = chunk.get("type")
+        if _type == "content_block_stop":
+            self.record_events_on_stop_iteration(transaction)
+
+    def converse_record_stream_chunk(self, event, transaction):
+        bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {})
+        if "contentBlockDelta" in event:
+            if not bedrock_attrs:
+                return
+
+            content = ((event.get("contentBlockDelta") or {}).get("delta") or {}).get("text", "")
+            if "output_message_list" not in bedrock_attrs:
+                bedrock_attrs["output_message_list"] = [{"role": "assistant", "content": ""}]
+            bedrock_attrs["output_message_list"][0]["content"] += content
+
+        if "messageStop" in event:
+            bedrock_attrs["response.choices.finish_reason"] = (event.get("messageStop") or {}).get("stopReason", "")
+
+        # TODO: Is this also subject to the content_block_stop behavior from Langchain?
+        # If so, that would preclude us from ever capturing the messageStop event with the stopReason.
+        # if "contentBlockStop" in event:
+        #     self.record_events_on_stop_iteration(transaction)
+
+
 class EventStreamWrapper(ObjectProxy):
     def __iter__(self):
         g = GeneratorProxy(self.__wrapped__.__iter__())
         g._nr_ft = getattr(self, "_nr_ft", None)
         g._nr_bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {})
         g._nr_model_extractor = getattr(self, "_nr_model_extractor", NULL_EXTRACTOR)
+        g._nr_is_converse = getattr(self, "_nr_is_converse", False)
         return g
 
 
-class GeneratorProxy(ObjectProxy):
+class GeneratorProxy(BedrockRecordEventMixin, ObjectProxy):
     def __init__(self, wrapped):
         super().__init__(wrapped)
 
@@ -893,12 +998,12 @@ def __next__(self):
         return_val = None
         try:
             return_val = self.__wrapped__.__next__()
-            record_stream_chunk(self, return_val, transaction)
+            self.record_stream_chunk(return_val, transaction)
         except StopIteration:
-            record_events_on_stop_iteration(self, transaction)
+            self.record_events_on_stop_iteration(transaction)
             raise
         except Exception as exc:
-            record_error(self, transaction, exc)
+            self.record_error(transaction, exc)
             raise
 
         return return_val
@@ -912,13 +1017,11 @@ def __aiter__(self):
         g._nr_ft = getattr(self, "_nr_ft", None)
         g._nr_bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {})
         g._nr_model_extractor = getattr(self, "_nr_model_extractor", NULL_EXTRACTOR)
+        g._nr_is_converse = getattr(self, "_nr_is_converse", False)
         return g
 
 
-class AsyncGeneratorProxy(ObjectProxy):
-    def __init__(self, wrapped):
-        super().__init__(wrapped)
-
+class AsyncGeneratorProxy(BedrockRecordEventMixin, ObjectProxy):
     def __aiter__(self):
         return self
 
@@ -929,12 +1032,12 @@ async def __anext__(self):
         return_val = None
         try:
             return_val = await self.__wrapped__.__anext__()
-            record_stream_chunk(self, return_val, transaction)
+            self.record_stream_chunk(return_val, transaction)
         except StopAsyncIteration:
-            record_events_on_stop_iteration(self, transaction)
+            self.record_events_on_stop_iteration(transaction)
             raise
         except Exception as exc:
-            record_error(self, transaction, exc)
+            self.record_error(transaction, exc)
             raise
 
         return return_val
@@ -942,70 +1045,6 @@ async def aclose(self):
         return await super().aclose()
 
 
-def record_stream_chunk(self, return_val, transaction):
-    if return_val:
-        try:
-            chunk = json.loads(return_val["chunk"]["bytes"].decode("utf-8"))
-            self._nr_model_extractor(chunk, self._nr_bedrock_attrs)
-            # In Langchain, the bedrock iterator exits early if type is "content_block_stop".
-            # So we need to call the record events here since stop iteration will not be raised.
-            _type = chunk.get("type")
-            if _type == "content_block_stop":
-                record_events_on_stop_iteration(self, transaction)
-        except Exception:
-            _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True)
-
-
-def record_events_on_stop_iteration(self, transaction):
-    if hasattr(self, "_nr_ft"):
-        bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {})
-        self._nr_ft.__exit__(None, None, None)
-
-        # If there are no bedrock attrs exit early as there's no data to record.
-        if not bedrock_attrs:
-            return
-
-        try:
-            bedrock_attrs["duration"] = self._nr_ft.duration * 1000
-            handle_chat_completion_event(transaction, bedrock_attrs)
-        except Exception:
-            _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True)
-
-        # Clear cached data as this can be very large.
-        self._nr_bedrock_attrs.clear()
-
-
-def record_error(self, transaction, exc):
-    if hasattr(self, "_nr_ft"):
-        try:
-            ft = self._nr_ft
-            error_attributes = getattr(self, "_nr_bedrock_attrs", {})
-
-            # If there are no bedrock attrs exit early as there's no data to record.
-            if not error_attributes:
-                return
-
-            error_attributes = bedrock_error_attributes(exc, error_attributes)
-            notice_error_attributes = {
-                "http.statusCode": error_attributes.get("http.statusCode"),
-                "error.message": error_attributes.get("error.message"),
-                "error.code": error_attributes.get("error.code"),
-            }
-            notice_error_attributes.update({"completion_id": str(uuid.uuid4())})
-
-            ft.notice_error(attributes=notice_error_attributes)
-
-            ft.__exit__(*sys.exc_info())
-            error_attributes["duration"] = ft.duration * 1000
-
-            handle_chat_completion_event(transaction, error_attributes)
-
-            # Clear cached data as this can be very large.
-            error_attributes.clear()
-        except Exception:
-            _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, exc_info=True)
-
-
 def handle_embedding_event(transaction, bedrock_attrs):
     embedding_id = str(uuid.uuid4())
 
@@ -1551,6 +1590,7 @@ def wrap_serialize_to_request(wrapped, instance, args, kwargs):
         response_streaming=True
     ),
     ("bedrock-runtime", "converse"): wrap_bedrock_runtime_converse(response_streaming=False),
+    ("bedrock-runtime", "converse_stream"): wrap_bedrock_runtime_converse(response_streaming=True),
 }
diff --git a/tests/external_aiobotocore/test_bedrock_chat_completion_converse.py b/tests/external_aiobotocore/test_bedrock_chat_completion_converse.py
index da9c5818e7..55843b832c 100644
--- a/tests/external_aiobotocore/test_bedrock_chat_completion_converse.py
+++ b/tests/external_aiobotocore/test_bedrock_chat_completion_converse.py
@@ -15,6 +15,12 @@
 import botocore.exceptions
 import pytest
 from conftest import BOTOCORE_VERSION
+from external_botocore._test_bedrock_chat_completion_converse import (
+    chat_completion_expected_events,
+    chat_completion_expected_streaming_events,
+    chat_completion_invalid_access_key_error_events,
+    chat_completion_invalid_model_error_events,
+)
 from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes
 from testing_support.ml_testing_utils import (
     add_token_count_to_events,
@@ -36,113 +42,65 @@ from newrelic.api.transaction import add_custom_attribute
 from newrelic.common.object_names import callable_name
 
-chat_completion_expected_events = [
-    (
-        {"type": "LlmChatCompletionSummary"},
-        {
-            "id": None,  # UUID that varies with each run
-            "llm.conversation_id": "my-awesome-id",
-            "llm.foo": "bar",
-            "span_id": None,
-            "trace_id": "trace-id",
-            "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf",
-            "duration": None,  # Response time varies each test run
-            "request.model": "anthropic.claude-3-sonnet-20240229-v1:0",
-            "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
-            "request.temperature": 0.7,
-            "request.max_tokens": 100,
-            "response.choices.finish_reason": "max_tokens",
-            "vendor": "bedrock",
-            "ingest_source": "Python",
-            "response.number_of_messages": 3,
-        },
-    ),
-    (
-        {"type": "LlmChatCompletionMessage"},
-        {
-            "id": None,  # UUID that varies with each run
-            "llm.conversation_id": "my-awesome-id",
-            "llm.foo": "bar",
-            "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf",
-            "span_id": None,
-            "trace_id": "trace-id",
-            "content": "You are a scientist.",
-            "role": "system",
-            "completion_id": None,
-            "sequence": 0,
-            "response.model": "anthropic.claude-3-sonnet-20240229-v1:0",
-
"vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", - "span_id": None, - "trace_id": "trace-id", - "content": "What is 212 degrees Fahrenheit converted to Celsius?", - "role": "user", - "completion_id": None, - "sequence": 1, - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", - "span_id": None, - "trace_id": "trace-id", - "content": "To convert 212°F to Celsius, we can use the formula:\n\nC = (F - 32) × 5/9\n\nWhere:\nC is the temperature in Celsius\nF is the temperature in Fahrenheit\n\nPlugging in 212°F, we get:\n\nC = (212 - 32) × 5/9\nC = 180 × 5/9\nC = 100\n\nTherefore, 212°", # noqa: RUF001 - "role": "assistant", - "completion_id": None, - "sequence": 2, - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "vendor": "bedrock", - "ingest_source": "Python", - "is_response": True, - }, - ), -] + +@pytest.fixture(scope="session", params=[False, True], ids=["ResponseStandard", "ResponseStreaming"]) +def response_streaming(request): + return request.param + + +@pytest.fixture(scope="session") +def expected_metric(response_streaming): + return ("Llm/completion/Bedrock/converse" + ("_stream" if response_streaming else ""), 1) + + +@pytest.fixture(scope="session") +def expected_events(response_streaming): + return chat_completion_expected_streaming_events if response_streaming else chat_completion_expected_events @pytest.fixture(scope="module") -def exercise_model(loop, bedrock_converse_server): +def exercise_model(loop, bedrock_converse_server, response_streaming): def _exercise_model(message): async def coro(): inference_config = {"temperature": 0.7, "maxTokens": 100} - response = await bedrock_converse_server.converse( + _response = await bedrock_converse_server.converse( modelId="anthropic.claude-3-sonnet-20240229-v1:0", messages=message, system=[{"text": "You are a scientist."}], inferenceConfig=inference_config, ) - assert response return loop.run_until_complete(coro()) - return _exercise_model + def _exercise_model_streaming(message): + async def coro(): + inference_config = {"temperature": 0.7, "maxTokens": 100} + + response = await bedrock_converse_server.converse_stream( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + system=[{"text": "You are a scientist."}], + inferenceConfig=inference_config, + ) + _responses = [r async for r in response["stream"]] # Consume the response stream + + return loop.run_until_complete(coro()) + + return _exercise_model_streaming if response_streaming else _exercise_model @reset_core_stats_engine() -def test_bedrock_chat_completion_in_txn_with_llm_metadata(set_trace_info, exercise_model): - @validate_custom_events(events_with_context_attrs(chat_completion_expected_events)) - # One summary event, one user message, and one response message from the assistant +def test_bedrock_chat_completion_in_txn_with_llm_metadata( + set_trace_info, exercise_model, expected_metric, expected_events +): + @validate_custom_events(events_with_context_attrs(expected_events)) + # One summary event, one system message, one user message, and 
one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_in_txn_with_llm_metadata", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -162,14 +120,14 @@ def _test(): @disabled_ai_monitoring_record_content_settings @reset_core_stats_engine() -def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model): - @validate_custom_events(events_sans_content(chat_completion_expected_events)) +def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model, expected_metric, expected_events): + @validate_custom_events(events_sans_content(expected_events)) # One summary event, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_no_content", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -188,14 +146,14 @@ def _test(): @reset_core_stats_engine() @override_llm_token_callback_settings(llm_token_count_callback) -def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model): - @validate_custom_events(add_token_count_to_events(chat_completion_expected_events)) +def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model, expected_metric, expected_events): + @validate_custom_events(add_token_count_to_events(expected_events)) # One summary event, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_with_token_count", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -213,13 +171,13 @@ def _test(): @reset_core_stats_engine() -def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model): - @validate_custom_events(events_sans_llm_metadata(chat_completion_expected_events)) +def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model, expected_metric, expected_events): + @validate_custom_events(events_sans_llm_metadata(expected_events)) @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_in_txn_no_llm_metadata", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -250,54 +208,37 @@ def test_bedrock_chat_completion_disabled_ai_monitoring_settings(set_trace_info, exercise_model(message) -chat_completion_invalid_access_key_error_events = [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", 
- "span_id": None, - "trace_id": "trace-id", - "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", - "duration": None, # Response time varies each test run - "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "request.temperature": 0.7, - "request.max_tokens": 100, - "vendor": "bedrock", - "ingest_source": "Python", - "response.number_of_messages": 1, - "error": True, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", - "span_id": None, - "trace_id": "trace-id", - "content": "Invalid Token", - "role": "user", - "completion_id": None, - "sequence": 0, - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), -] - _client_error = botocore.exceptions.ClientError _client_error_name = callable_name(_client_error) +@pytest.fixture +def exercise_converse_incorrect_access_key(loop, bedrock_converse_server, response_streaming, monkeypatch): + def _exercise_converse_incorrect_access_key(): + async def _coro(): + monkeypatch.setattr( + bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY" + ) + + message = [{"role": "user", "content": [{"text": "Invalid Token"}]}] + request = ( + bedrock_converse_server.converse_stream if response_streaming else bedrock_converse_server.converse + ) + with pytest.raises(_client_error): + await request( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + inferenceConfig={"temperature": 0.7, "maxTokens": 100}, + ) + + loop.run_until_complete(_coro()) + + return _exercise_converse_incorrect_access_key + + @reset_core_stats_engine() def test_bedrock_chat_completion_error_incorrect_access_key( - loop, monkeypatch, bedrock_converse_server, exercise_model, set_trace_info + exercise_converse_incorrect_access_key, set_trace_info, expected_metric ): """ A request is made to the server with invalid credentials. 
botocore will reach out to the server and receive an @@ -320,8 +261,8 @@ def test_bedrock_chat_completion_error_incorrect_access_key( ) @validate_transaction_metrics( name="test_bedrock_chat_completion", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -332,121 +273,79 @@ def _test(): add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - converse_incorrect_access_key(loop, bedrock_converse_server, monkeypatch) + exercise_converse_incorrect_access_key() _test() -def converse_incorrect_access_key(loop, bedrock_converse_server, monkeypatch): - async def _coro(): - monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") - - with pytest.raises(_client_error): - message = [{"role": "user", "content": [{"text": "Invalid Token"}]}] - response = await bedrock_converse_server.converse( - modelId="anthropic.claude-3-sonnet-20240229-v1:0", - messages=message, - inferenceConfig={"temperature": 0.7, "maxTokens": 100}, - ) - assert response - - loop.run_until_complete(_coro()) - - -chat_completion_invalid_model_error_events = [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", - "span_id": None, - "trace_id": "trace-id", - "duration": None, # Response time varies each test run - "request.model": "does-not-exist", - "response.model": "does-not-exist", - "request.temperature": 0.7, - "request.max_tokens": 100, - "response.number_of_messages": 1, - "vendor": "bedrock", - "ingest_source": "Python", - "error": True, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "span_id": None, - "trace_id": "trace-id", - "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", - "content": "Model does not exist.", - "role": "user", - "completion_id": None, - "response.model": "does-not-exist", - "sequence": 0, - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), -] - - @reset_core_stats_engine() -def test_bedrock_chat_completion_error_invalid_model(loop, bedrock_converse_server, set_trace_info): - @validate_custom_events(chat_completion_invalid_model_error_events) +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_error_incorrect_access_key_with_token_count( + exercise_converse_incorrect_access_key, set_trace_info, expected_metric +): + """ + A request is made to the server with invalid credentials. botocore will reach out to the server and receive an + UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer + events. The error response can also be parsed, and will be included as attributes on the recorded exception. 
+ """ + + @validate_custom_events(add_token_count_to_events(chat_completion_invalid_access_key_error_events)) @validate_error_trace_attributes( - "botocore.errorfactory:ValidationException", + _client_error_name, exact_attrs={ "agent": {}, "intrinsic": {}, "user": { - "http.statusCode": 400, - "error.message": "The provided model identifier is invalid.", - "error.code": "ValidationException", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", }, }, ) @validate_transaction_metrics( - name="test_bedrock_chat_completion_error_invalid_model", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + name="test_bedrock_chat_completion_incorrect_access_key_with_token_count", + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) - @background_task(name="test_bedrock_chat_completion_error_invalid_model") + @background_task(name="test_bedrock_chat_completion_incorrect_access_key_with_token_count") def _test(): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - converse_invalid_model(loop, bedrock_converse_server) + exercise_converse_incorrect_access_key() _test() -def converse_invalid_model(loop, bedrock_converse_server): - async def _coro(): - with pytest.raises(_client_error): - message = [{"role": "user", "content": [{"text": "Model does not exist."}]}] +@pytest.fixture +def exercise_converse_invalid_model(loop, bedrock_converse_server, response_streaming, monkeypatch): + def _exercise_converse_invalid_model(): + async def _coro(): + monkeypatch.setattr( + bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY" + ) - response = await bedrock_converse_server.converse( - modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100} + message = [{"role": "user", "content": [{"text": "Model does not exist."}]}] + request = ( + bedrock_converse_server.converse_stream if response_streaming else bedrock_converse_server.converse ) + with pytest.raises(_client_error): + await request( + modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100} + ) - assert response + loop.run_until_complete(_coro()) - loop.run_until_complete(_coro()) + return _exercise_converse_invalid_model @reset_core_stats_engine() -@disabled_ai_monitoring_record_content_settings -def test_bedrock_chat_completion_error_invalid_model_no_content(loop, bedrock_converse_server, set_trace_info): - @validate_custom_events(events_sans_content(chat_completion_invalid_model_error_events)) +def test_bedrock_chat_completion_error_invalid_model(exercise_converse_invalid_model, set_trace_info, expected_metric): + @validate_custom_events(events_with_context_attrs(chat_completion_invalid_model_error_events)) @validate_error_trace_attributes( "botocore.errorfactory:ValidationException", exact_attrs={ @@ -460,62 +359,57 @@ def test_bedrock_chat_completion_error_invalid_model_no_content(loop, bedrock_co }, ) @validate_transaction_metrics( - name="test_bedrock_chat_completion_error_invalid_model_no_content", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + 
name="test_bedrock_chat_completion_error_invalid_model", + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) - @background_task(name="test_bedrock_chat_completion_error_invalid_model_no_content") + @background_task(name="test_bedrock_chat_completion_error_invalid_model") def _test(): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - converse_invalid_model(loop, bedrock_converse_server) + with WithLlmCustomAttributes({"context": "attr"}): + exercise_converse_invalid_model() _test() @reset_core_stats_engine() -@override_llm_token_callback_settings(llm_token_count_callback) -def test_bedrock_chat_completion_error_incorrect_access_key_with_token_count( - monkeypatch, bedrock_converse_server, loop, set_trace_info +@disabled_ai_monitoring_record_content_settings +def test_bedrock_chat_completion_error_invalid_model_no_content( + exercise_converse_invalid_model, set_trace_info, expected_metric ): - """ - A request is made to the server with invalid credentials. botocore will reach out to the server and receive an - UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer - events. The error response can also be parsed, and will be included as attributes on the recorded exception. - """ - - @validate_custom_events(add_token_count_to_events(chat_completion_invalid_access_key_error_events)) + @validate_custom_events(events_sans_content(chat_completion_invalid_model_error_events)) @validate_error_trace_attributes( - _client_error_name, + "botocore.errorfactory:ValidationException", exact_attrs={ "agent": {}, "intrinsic": {}, "user": { - "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", + "http.statusCode": 400, + "error.message": "The provided model identifier is invalid.", + "error.code": "ValidationException", }, }, ) @validate_transaction_metrics( - name="test_bedrock_chat_completion_incorrect_access_key_with_token_count", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + name="test_bedrock_chat_completion_error_invalid_model_no_content", + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) - @background_task(name="test_bedrock_chat_completion_incorrect_access_key_with_token_count") + @background_task(name="test_bedrock_chat_completion_error_invalid_model_no_content") def _test(): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - converse_incorrect_access_key(loop, bedrock_converse_server, monkeypatch) + exercise_converse_invalid_model() _test() diff --git a/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py b/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py index e02cc5b543..207db7e31e 100644 --- a/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py +++ b/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py @@ -14,13 +14,13 @@ import json import os from io import BytesIO +from pprint import pformat -import 
botocore.errorfactory import botocore.eventstream import botocore.exceptions import pytest from conftest import BOTOCORE_VERSION -from external_botocore._test_bedrock_chat_completion import ( +from external_botocore._test_bedrock_chat_completion_invoke_model import ( chat_completion_expected_events, chat_completion_expected_malformed_request_body_events, chat_completion_expected_malformed_response_body_events, @@ -858,7 +858,12 @@ def test_bedrock_chat_completion_functions_marked_as_wrapped_for_sdk_compatibili def test_chat_models_instrumented(loop): import aiobotocore - SUPPORTED_MODELS = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" not in model] + def _is_supported_model(model): + supported_models = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" not in model] + for supported_model in supported_models: + if supported_model in model: + return True + return False _id = os.environ.get("AWS_ACCESS_KEY_ID") key = os.environ.get("AWS_SECRET_ACCESS_KEY") @@ -871,12 +876,8 @@ def test_chat_models_instrumented(loop): try: response = loop.run_until_complete(client.list_foundation_models(byOutputModality="TEXT")) models = [model["modelId"] for model in response["modelSummaries"]] - not_supported = [] - for model in models: - is_supported = any(model.startswith(supported_model) for supported_model in SUPPORTED_MODELS) - if not is_supported: - not_supported.append(model) + not_supported = [model for model in models if not _is_supported_model(model)] - assert not not_supported, f"The following unsupported models were found: {not_supported}" + assert not not_supported, f"The following unsupported models were found: {pformat(not_supported)}" finally: loop.run_until_complete(client.__aexit__(None, None, None)) diff --git a/tests/external_aiobotocore/test_bedrock_embeddings.py b/tests/external_aiobotocore/test_bedrock_embeddings.py index 96b930feb5..b964122294 100644 --- a/tests/external_aiobotocore/test_bedrock_embeddings.py +++ b/tests/external_aiobotocore/test_bedrock_embeddings.py @@ -14,6 +14,7 @@ import json import os from io import BytesIO +from pprint import pformat import botocore.exceptions import pytest @@ -414,7 +415,12 @@ async def _test(): def test_embedding_models_instrumented(loop): import aiobotocore - SUPPORTED_MODELS = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" in model] + def _is_supported_model(model): + supported_models = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" in model] + for supported_model in supported_models: + if supported_model in model: + return True + return False _id = os.environ.get("AWS_ACCESS_KEY_ID") key = os.environ.get("AWS_SECRET_ACCESS_KEY") @@ -427,12 +433,8 @@ def test_embedding_models_instrumented(loop): try: response = client.list_foundation_models(byOutputModality="EMBEDDING") models = [model["modelId"] for model in response["modelSummaries"]] - not_supported = [] - for model in models: - is_supported = any(model.startswith(supported_model) for supported_model in SUPPORTED_MODELS) - if not is_supported: - not_supported.append(model) + not_supported = [model for model in models if not _is_supported_model(model)] - assert not not_supported, f"The following unsupported models were found: {not_supported}" + assert not not_supported, f"The following unsupported models were found: {pformat(not_supported)}" finally: loop.run_until_complete(client.__aexit__(None, None, None)) diff --git a/tests/external_botocore/_mock_external_bedrock_server_converse.py 
b/tests/external_botocore/_mock_external_bedrock_server_converse.py index aef6d52856..bc93c8b773 100644 --- a/tests/external_botocore/_mock_external_bedrock_server_converse.py +++ b/tests/external_botocore/_mock_external_bedrock_server_converse.py @@ -16,6 +16,105 @@ from testing_support.mock_external_http_server import MockExternalHTTPServer +STREAMED_RESPONSES = { + "What is 212 degrees Fahrenheit converted to Celsius?": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "f070b880-e0fb-4537-8093-796671c39239", + }, + 200, + [ + "000000b2000000528a40b4c50b3a6576656e742d7479706507000c6d65737361676553746172740d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a30222c22726f6c65223a22617373697374616e74227d40ff8268000000ae000000575f3a3ac90b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22546f227d2c2270223a226162636465666768696a6b6c6d6e6f70717273227d57b47eb0", + "000000b800000057b09a58eb0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220636f6e76657274227d2c2270223a226162636465666768696a6b6c6d6e6f7071727374757677227d7f921878", + "000000c600000057f67806450b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222046616872656e227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c227d725b3c0b", + "000000a800000057d07acf690b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2268656974227d2c2270223a226162636465666768696a6b227d926527fe", + "000000b400000057756ab5ea0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220746f227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778227d47f66bd8", + "000000a400000057158a22680b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222043656c73697573227d2c2270223a22616263227dc03a975f", + "000000c8000000574948b8240b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222c227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f5051525354227db2e3dafb", + 
"000000ad00000057189a40190b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220796f75227d2c2270223a226162636465666768696a6b6c6d6e6f70227d76c0e56b", + "000000c500000057b1d87c950b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220757365227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e227de3731476", + "000000cb000000570ee8c2f40b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220746865227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f5051525354227dd4810232", + "000000d3000000575e781eb70b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220666f726d756c61227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758227df6672f41", + "000000d00000005719d864670b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a223a227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031227dbd8afb45", + "000000b6000000570faae68a0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e5c6e43227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778227d088d049f", + "000000a700000057522a58b80b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22203d227d2c2270223a226162636465666768696a6b6c227d88e54236", + "000000b70000005732cacf3a0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222028227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142227de6ec1ebe", + "000000b400000057756ab5ea0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2246227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a227d02007761", + 
"000000c900000057742891940b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22202d227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f5051525354227d3b3f080c", + "000000ab0000005797dab5b90b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f7071227d5638cc83", + "0000009d00000057b9bbf89f0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a223332227d2c2270223a226162227dc02cb212", + "000000bc00000057451afe2b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2229227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748227da0e9aee9", + "000000c700000057cb182ff50b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22202a227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152227d0e3821bb", + "000000b70000005732cacf3a0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a414243227d1daf3cc5", + "000000b400000057756ab5ea0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2235227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a227dada5d973", + "000000d10000005724b84dd70b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222f227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a303132227db97b8201", + "000000bc00000057451afe2b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2239227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748227d99250da7", + 
"000000ad00000057189a40190b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e5c6e5768657265227d2c2270223a226162636465666768696a6b227d5f2ed4ef", + "0000009f00000057c37babff0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a223a227d2c2270223a226162636465227d85a07294", + "000000a900000057ed1ae6d90b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e43227d2c2270223a226162636465666768696a6b6c6d227d50fa22de", + "000000ce00000057c6084d840b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22206973227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758227dfe3dc5ac", + "000000c8000000574948b8240b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220746865227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f5051227d3f77fbbc", + "000000c1000000574458da550b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222074656d7065726174757265227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142227d402a7229", + "000000d200000057631837070b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220696e227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031227df5f66d94", + "000000d90000005714c806160b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222043656c73697573227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a30313233227d3daccf94", + "000000b500000057480a9c5a0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e46227d2c2270223a226162636465666768696a6b6c6d6e6f70717273747576777879227d5042c3ff", + 
"000000cf00000057fb6864340b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22206973227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f50515253545556575859227da79da7ad", + "000000bd00000057787ad79b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220746865227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a414243444546227dbd3a0aec", + "000000b70000005732cacf3a0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222074656d7065726174757265227d2c2270223a226162636465666768696a6b6c6d6e6f707172227d1560b810", + "000000bf0000005702ba84fb0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220696e227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a414243444546474849227d40f78c16", + "000000ce00000057c6084d840b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222046616872656e227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f5051525354227d47b98626", + "000000a2000000579acad7c80b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2268656974227d2c2270223a226162636465227d54cc33be", + "000000da0000005753687cc60b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e5c6e506c7567227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031323334227d9eb4ac9a", + "000000bc00000057451afe2b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2267696e67227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445227d3a11d9ac000000c500000057b1d87c950b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220696e227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f227d391bdff3", + 
"0000009e00000057fe1b824f0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a2261626364227da292de09", + "000000b70000005732cacf3a0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22323132227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a41227dbfd117db", + "000000c20000005703f8a0850b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22c2b0227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d227d1166f202", + "000000a100000057dd6aad180b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2246227d2c2270223a2261626364656667227dcba24fa6", + "000000b300000057c74a69fa0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220666f72227d2c2270223a226162636465666768696a6b6c6d6e6f70717273747576227dd306dee6", + "000000c700000057cb182ff50b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222046227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152227d3bdbedf1", + "000000c600000057f67806450b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a223a227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152227d71d79c49", + "000000ae000000575f3a3ac90b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e5c6e43227d2c2270223a226162636465666768696a6b6c6d6e6f70227d2d8a1cce", + "000000bf0000005702ba84fb0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22203d227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a227de81a06eb", + 
"000000b6000000570faae68a0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222028227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a41227dea662b27", + "000000d500000057d138eb170b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22323132227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031323334227da7888b21", + "000000d700000057abf8b8770b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a303132333435363738227d63107603", + "000000c0000000577938f3e50b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222d227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c227d9e32b6f5", + "000000c600000057f67806450b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152227db3145f6b", + "0000009f00000057c37babff0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a223332227d2c2270223a2261626364227d277c3f97", + "000000a300000057a7aafe780b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2229227d2c2270223a22616263646566676869227dd05f85ca", + "000000bc00000057451afe2b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22202a227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a41424344454647227db0dfade1", + "000000aa00000057aaba9c090b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f70227da476449e", + 
"000000ac0000005725fa69a90b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2235227d2c2270223a226162636465666768696a6b6c6d6e6f707172227deedc54f0", + "000000ca000000573388eb440b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222f227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f50515253545556227d7abef087", + "000000d00000005719d864670b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2239227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031227de7c50a2e", + "0000009f00000057c37babff0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e43227d2c2270223a22616263227df88e9dc2", + "000000ac0000005725fa69a90b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22203d227d2c2270223a226162636465666768696a6b6c6d6e6f7071227d6f5c7d17", + "000000bd00000057787ad79b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a414243444546474849227d1c650877", + "000000a400000057158a22680b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22313830227d2c2270223a226162636465666768227dba33e936", + "000000bb00000057f73a223b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a41424344454647227df14100ef", + "000000a400000057158a22680b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222a227d2c2270223a226162636465666768696a227da79b0693", + 
"000000c700000057cb182ff50b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f50515253227de52ff51e", + "000000aa00000057aaba9c090b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2235227d2c2270223a226162636465666768696a6b6c6d6e6f70227df5cf9fcf", + "000000b9000000578dfa715b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222f227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445227dc22fcb78", + "0000009d00000057b9bbf89f0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2239227d2c2270223a22616263227db33d112d", + "000000b9000000578dfa715b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e43227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a414243227d6e135792", + "000000c20000005703f8a0850b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22203d227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d227d242e22f6", + "000000a000000057e00a84a80b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a22616263646566227d64c7e90b", + "000000a800000057d07acf690b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22313030227d2c2270223a226162636465666768696a6b6c227dee65d4c5", + "000000e200000057c2398f810b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a225c6e5c6e5468657265666f7265227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031323334353637227d43ae3a9e", + 
"000000c600000057f67806450b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222c227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152227df0760dea", + "000000a50000005728ea0bd80b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b227db714fc15", + "000000ab0000005797dab5b90b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a22323132227d2c2270223a226162636465666768696a6b6c6d6e6f227de9fc19df", + "000000be000000573fdaad4b0b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2220227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a227dd7107790", + "000000c600000057f67806450b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a2264656772656573227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c227d15374080", + "000000dd00000057e148a0d60b3a6576656e742d74797065070011636f6e74656e74426c6f636b44656c74610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2264656c7461223a7b2274657874223a222046616872656e227d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a303132333435363738227d8993e5c9", + "000000a800000056a77dffff0b3a6576656e742d74797065070010636f6e74656e74426c6f636b53746f700d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b22636f6e74656e74426c6f636b496e646578223a302c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a227d1c361897", + "000000bd00000051911972ae0b3a6576656e742d7479706507000b6d65737361676553746f700d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a303132333435222c2273746f70526561736f6e223a226d61785f746f6b656e73227d2963d7e1", + "000000f00000004ebc72e3a30b3a6576656e742d747970650700086d657461646174610d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226d657472696373223a7b226c6174656e63794d73223a323134397d2c2270223a226162636465666768696a6b6c6d6e6f707172737475767778222c227573616765223a7b22696e707574546f6b656e73223a32362c226f7574707574546f6b656e73223a3130302c22736572766572546f6f6c5573616765223a7b7d2c22746f74616c546f6b656e73223a3132367d7dd415e186", + ], + ] +} + 
RESPONSES = { "What is 212 degrees Fahrenheit converted to Celsius?": [ {"Content-Type": "application/json", "x-amzn-RequestId": "c20d345e-6878-4778-b674-6b187bae8ecf"}, @@ -65,6 +164,7 @@ def simple_get(self): except Exception: content = body + stream = self.path.endswith("converse-stream") prompt = extract_shortened_prompt_converse(content) if not prompt: self.send_response(500) @@ -73,11 +173,23 @@ def simple_get(self): return headers, status_code, response = ({}, 0, "") - - for k, v in RESPONSES.items(): - if prompt.startswith(k): - headers, status_code, response = v - break + if stream: + for k, v in STREAMED_RESPONSES.items(): + if prompt.startswith(k): + headers, status_code, response = v + break + if not response: + for k, v in RESPONSES.items(): + # Only look for error responses returned immediately instead of in a stream + if prompt.startswith(k) and v[1] >= 400: + headers, status_code, response = v + stream = False # Response will not be streamed + break + else: + for k, v in RESPONSES.items(): + if prompt.startswith(k): + headers, status_code, response = v + break if not response: # If no matches found @@ -94,10 +206,19 @@ def simple_get(self): self.send_header(k, v) self.end_headers() - # Send response body - response_body = json.dumps(response).encode("utf-8") + if stream: + # Send response body + for resp in response: + self.wfile.write(bytes.fromhex(resp)) + else: + # Send response body + response_body = json.dumps(response).encode("utf-8") + + if "Malformed Body" in prompt: + # Remove end of response to make invalid JSON + response_body = response_body[:-4] - self.wfile.write(response_body) + self.wfile.write(response_body) return diff --git a/tests/external_botocore/_test_bedrock_chat_completion_converse.py b/tests/external_botocore/_test_bedrock_chat_completion_converse.py new file mode 100644 index 0000000000..cdec652292 --- /dev/null +++ b/tests/external_botocore/_test_bedrock_chat_completion_converse.py @@ -0,0 +1,253 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Ignore unicode characters in this file from LLM responses +# ruff: noqa: RUF001 + +chat_completion_expected_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "max_tokens", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "To convert 212°F to Celsius, we can use the formula:\n\nC = (F - 32) × 5/9\n\nWhere:\nC is the temperature in Celsius\nF is the temperature in Fahrenheit\n\nPlugging in 212°F, we get:\n\nC = (212 - 32) × 5/9\nC = 180 × 5/9\nC = 100\n\nTherefore, 212°", + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), +] + +chat_completion_expected_streaming_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "f070b880-e0fb-4537-8093-796671c39239", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "max_tokens", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f070b880-e0fb-4537-8093-796671c39239", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + 
"sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f070b880-e0fb-4537-8093-796671c39239", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f070b880-e0fb-4537-8093-796671c39239", + "span_id": None, + "trace_id": "trace-id", + "content": "To convert Fahrenheit to Celsius, you use the formula:\n\nC = (F - 32) * 5/9\n\nWhere:\nC is the temperature in Celsius\nF is the temperature in Fahrenheit\n\nPlugging in 212°F for F:\n\nC = (212 - 32) * 5/9\nC = 180 * 5/9\nC = 100\n\nTherefore, 212 degrees Fahren", + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), +] + +chat_completion_invalid_access_key_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] +chat_completion_invalid_model_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "response.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", + "content": "Model does not exist.", + "role": "user", + "completion_id": None, + "response.model": "does-not-exist", + "sequence": 0, + "vendor": "bedrock", + 
"ingest_source": "Python", + }, + ), +] diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion_invoke_model.py similarity index 100% rename from tests/external_botocore/_test_bedrock_chat_completion.py rename to tests/external_botocore/_test_bedrock_chat_completion_invoke_model.py diff --git a/tests/external_botocore/test_chat_completion_converse.py b/tests/external_botocore/test_bedrock_chat_completion_converse.py similarity index 54% rename from tests/external_botocore/test_chat_completion_converse.py rename to tests/external_botocore/test_bedrock_chat_completion_converse.py index 96ead41dd7..e365b5163b 100644 --- a/tests/external_botocore/test_chat_completion_converse.py +++ b/tests/external_botocore/test_bedrock_chat_completion_converse.py @@ -14,6 +14,12 @@ import botocore.exceptions import pytest +from _test_bedrock_chat_completion_converse import ( + chat_completion_expected_events, + chat_completion_expected_streaming_events, + chat_completion_invalid_access_key_error_events, + chat_completion_invalid_model_error_events, +) from conftest import BOTOCORE_VERSION from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( @@ -36,109 +42,59 @@ from newrelic.api.transaction import add_custom_attribute from newrelic.common.object_names import callable_name -chat_completion_expected_events = [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "span_id": None, - "trace_id": "trace-id", - "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", - "duration": None, # Response time varies each test run - "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "request.temperature": 0.7, - "request.max_tokens": 100, - "response.choices.finish_reason": "max_tokens", - "vendor": "bedrock", - "ingest_source": "Python", - "response.number_of_messages": 3, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", - "span_id": None, - "trace_id": "trace-id", - "content": "You are a scientist.", - "role": "system", - "completion_id": None, - "sequence": 0, - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", - "span_id": None, - "trace_id": "trace-id", - "content": "What is 212 degrees Fahrenheit converted to Celsius?", - "role": "user", - "completion_id": None, - "sequence": 1, - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", - "span_id": None, - "trace_id": "trace-id", - "content": "To convert 212°F to Celsius, we can use the formula:\n\nC = (F - 32) × 5/9\n\nWhere:\nC is the temperature in Celsius\nF is the temperature in 
Fahrenheit\n\nPlugging in 212°F, we get:\n\nC = (212 - 32) × 5/9\nC = 180 × 5/9\nC = 100\n\nTherefore, 212°", # noqa: RUF001 - "role": "assistant", - "completion_id": None, - "sequence": 2, - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "vendor": "bedrock", - "ingest_source": "Python", - "is_response": True, - }, - ), -] + +@pytest.fixture(scope="session", params=[False, True], ids=["ResponseStandard", "ResponseStreaming"]) +def response_streaming(request): + return request.param + + +@pytest.fixture(scope="session") +def expected_metric(response_streaming): + return ("Llm/completion/Bedrock/converse" + ("_stream" if response_streaming else ""), 1) + + +@pytest.fixture(scope="session") +def expected_events(response_streaming): + return chat_completion_expected_streaming_events if response_streaming else chat_completion_expected_events @pytest.fixture(scope="module") -def exercise_model(bedrock_converse_server): +def exercise_model(bedrock_converse_server, response_streaming): def _exercise_model(message): inference_config = {"temperature": 0.7, "maxTokens": 100} - response = bedrock_converse_server.converse( + _response = bedrock_converse_server.converse( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + system=[{"text": "You are a scientist."}], + inferenceConfig=inference_config, + ) + + def _exercise_model_streaming(message): + inference_config = {"temperature": 0.7, "maxTokens": 100} + + response = bedrock_converse_server.converse_stream( modelId="anthropic.claude-3-sonnet-20240229-v1:0", messages=message, system=[{"text": "You are a scientist."}], inferenceConfig=inference_config, ) + _responses = list(response["stream"]) # Consume the response stream - return _exercise_model + return _exercise_model_streaming if response_streaming else _exercise_model @reset_core_stats_engine() -def test_bedrock_chat_completion_in_txn_with_llm_metadata(set_trace_info, exercise_model): - @validate_custom_events(events_with_context_attrs(chat_completion_expected_events)) - # One summary event, one user message, and one response message from the assistant +def test_bedrock_chat_completion_in_txn_with_llm_metadata( + set_trace_info, exercise_model, expected_metric, expected_events +): + @validate_custom_events(events_with_context_attrs(expected_events)) + # One summary event, one system message, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_in_txn_with_llm_metadata", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -158,14 +114,14 @@ def _test(): @disabled_ai_monitoring_record_content_settings @reset_core_stats_engine() -def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model): - @validate_custom_events(events_sans_content(chat_completion_expected_events)) +def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model, expected_metric, expected_events): + @validate_custom_events(events_sans_content(expected_events)) # One summary event, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_no_content", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - 
rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -184,14 +140,14 @@ def _test(): @reset_core_stats_engine() @override_llm_token_callback_settings(llm_token_count_callback) -def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model): - @validate_custom_events(add_token_count_to_events(chat_completion_expected_events)) +def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model, expected_metric, expected_events): + @validate_custom_events(add_token_count_to_events(expected_events)) # One summary event, one user message, and one response message from the assistant @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_with_token_count", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -209,13 +165,13 @@ def _test(): @reset_core_stats_engine() -def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model): - @validate_custom_events(events_sans_llm_metadata(chat_completion_expected_events)) +def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model, expected_metric, expected_events): + @validate_custom_events(events_sans_llm_metadata(expected_events)) @validate_custom_event_count(count=4) @validate_transaction_metrics( name="test_bedrock_chat_completion_in_txn_no_llm_metadata", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @@ -246,54 +202,30 @@ def test_bedrock_chat_completion_disabled_ai_monitoring_settings(set_trace_info, exercise_model(message) -chat_completion_invalid_access_key_error_events = [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "span_id": None, - "trace_id": "trace-id", - "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", - "duration": None, # Response time varies each test run - "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "request.temperature": 0.7, - "request.max_tokens": 100, - "vendor": "bedrock", - "ingest_source": "Python", - "response.number_of_messages": 1, - "error": True, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", - "span_id": None, - "trace_id": "trace-id", - "content": "Invalid Token", - "role": "user", - "completion_id": None, - "sequence": 0, - "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), -] - _client_error = botocore.exceptions.ClientError _client_error_name = callable_name(_client_error) +@pytest.fixture +def exercise_converse_incorrect_access_key(bedrock_converse_server, response_streaming, monkeypatch): + def 
_exercise_converse_incorrect_access_key(): + monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + message = [{"role": "user", "content": [{"text": "Invalid Token"}]}] + request = bedrock_converse_server.converse_stream if response_streaming else bedrock_converse_server.converse + with pytest.raises(_client_error): + request( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + inferenceConfig={"temperature": 0.7, "maxTokens": 100}, + ) + + return _exercise_converse_incorrect_access_key + + @reset_core_stats_engine() def test_bedrock_chat_completion_error_incorrect_access_key( - monkeypatch, bedrock_converse_server, exercise_model, set_trace_info + exercise_converse_incorrect_access_key, set_trace_info, expected_metric ): """ A request is made to the server with invalid credentials. botocore will reach out to the server and receive an @@ -316,122 +248,82 @@ def test_bedrock_chat_completion_error_incorrect_access_key( ) @validate_transaction_metrics( name="test_bedrock_chat_completion", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) @background_task(name="test_bedrock_chat_completion") def _test(): - monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") - - with pytest.raises(_client_error): - set_trace_info() - add_custom_attribute("llm.conversation_id", "my-awesome-id") - add_custom_attribute("llm.foo", "bar") - add_custom_attribute("non_llm_attr", "python-agent") - - message = [{"role": "user", "content": [{"text": "Invalid Token"}]}] - - response = bedrock_converse_server.converse( - modelId="anthropic.claude-3-sonnet-20240229-v1:0", - messages=message, - inferenceConfig={"temperature": 0.7, "maxTokens": 100}, - ) + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") - assert response + exercise_converse_incorrect_access_key() _test() -chat_completion_invalid_model_error_events = [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", - "span_id": None, - "trace_id": "trace-id", - "duration": None, # Response time varies each test run - "request.model": "does-not-exist", - "response.model": "does-not-exist", - "request.temperature": 0.7, - "request.max_tokens": 100, - "response.number_of_messages": 1, - "vendor": "bedrock", - "ingest_source": "Python", - "error": True, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, - "llm.conversation_id": "my-awesome-id", - "llm.foo": "bar", - "span_id": None, - "trace_id": "trace-id", - "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", - "content": "Model does not exist.", - "role": "user", - "completion_id": None, - "response.model": "does-not-exist", - "sequence": 0, - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), -] - - @reset_core_stats_engine() -def test_bedrock_chat_completion_error_invalid_model(bedrock_converse_server, set_trace_info): - @validate_custom_events(events_with_context_attrs(chat_completion_invalid_model_error_events)) 
+@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_error_incorrect_access_key_with_token_count( + exercise_converse_incorrect_access_key, set_trace_info, expected_metric +): + """ + A request is made to the server with invalid credentials. botocore will reach out to the server and receive an + UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer + events. The error response can also be parsed, and will be included as attributes on the recorded exception. + """ + + @validate_custom_events(add_token_count_to_events(chat_completion_invalid_access_key_error_events)) @validate_error_trace_attributes( - "botocore.errorfactory:ValidationException", + _client_error_name, exact_attrs={ "agent": {}, "intrinsic": {}, "user": { - "http.statusCode": 400, - "error.message": "The provided model identifier is invalid.", - "error.code": "ValidationException", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", }, }, ) @validate_transaction_metrics( - name="test_bedrock_chat_completion_error_invalid_model", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + name="test_bedrock_chat_completion_incorrect_access_key_with_token_count", + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) - @background_task(name="test_bedrock_chat_completion_error_invalid_model") + @background_task(name="test_bedrock_chat_completion_incorrect_access_key_with_token_count") def _test(): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - with pytest.raises(_client_error): - with WithLlmCustomAttributes({"context": "attr"}): - message = [{"role": "user", "content": [{"text": "Model does not exist."}]}] + exercise_converse_incorrect_access_key() - response = bedrock_converse_server.converse( - modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100} - ) + _test() - assert response - _test() +@pytest.fixture +def exercise_converse_invalid_model(bedrock_converse_server, response_streaming, monkeypatch): + def _exercise_converse_invalid_model(): + monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + message = [{"role": "user", "content": [{"text": "Model does not exist."}]}] + request = bedrock_converse_server.converse_stream if response_streaming else bedrock_converse_server.converse + with pytest.raises(_client_error): + request(modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100}) + + return _exercise_converse_invalid_model @reset_core_stats_engine() -@disabled_ai_monitoring_record_content_settings -def test_bedrock_chat_completion_error_invalid_model_no_content(bedrock_converse_server, set_trace_info): - @validate_custom_events(events_sans_content(chat_completion_invalid_model_error_events)) +def test_bedrock_chat_completion_error_invalid_model(exercise_converse_invalid_model, set_trace_info, expected_metric): + @validate_custom_events(events_with_context_attrs(chat_completion_invalid_model_error_events)) @validate_error_trace_attributes( 
"botocore.errorfactory:ValidationException", exact_attrs={ @@ -445,80 +337,57 @@ def test_bedrock_chat_completion_error_invalid_model_no_content(bedrock_converse }, ) @validate_transaction_metrics( - name="test_bedrock_chat_completion_error_invalid_model_no_content", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + name="test_bedrock_chat_completion_error_invalid_model", + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) - @background_task(name="test_bedrock_chat_completion_error_invalid_model_no_content") + @background_task(name="test_bedrock_chat_completion_error_invalid_model") def _test(): set_trace_info() add_custom_attribute("llm.conversation_id", "my-awesome-id") add_custom_attribute("llm.foo", "bar") add_custom_attribute("non_llm_attr", "python-agent") - with pytest.raises(_client_error): - message = [{"role": "user", "content": [{"text": "Model does not exist."}]}] - - response = bedrock_converse_server.converse( - modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100} - ) - - assert response + with WithLlmCustomAttributes({"context": "attr"}): + exercise_converse_invalid_model() _test() @reset_core_stats_engine() -@override_llm_token_callback_settings(llm_token_count_callback) -def test_bedrock_chat_completion_error_incorrect_access_key_with_token_count( - monkeypatch, bedrock_converse_server, exercise_model, set_trace_info +@disabled_ai_monitoring_record_content_settings +def test_bedrock_chat_completion_error_invalid_model_no_content( + exercise_converse_invalid_model, set_trace_info, expected_metric ): - """ - A request is made to the server with invalid credentials. botocore will reach out to the server and receive an - UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer - events. The error response can also be parsed, and will be included as attributes on the recorded exception. 
- """ - - @validate_custom_events(add_token_count_to_events(chat_completion_invalid_access_key_error_events)) + @validate_custom_events(events_sans_content(chat_completion_invalid_model_error_events)) @validate_error_trace_attributes( - _client_error_name, + "botocore.errorfactory:ValidationException", exact_attrs={ "agent": {}, "intrinsic": {}, "user": { - "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", + "http.statusCode": 400, + "error.message": "The provided model identifier is invalid.", + "error.code": "ValidationException", }, }, ) @validate_transaction_metrics( - name="test_bedrock_chat_completion_incorrect_access_key_with_token_count", - scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], - rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + name="test_bedrock_chat_completion_error_invalid_model_no_content", + scoped_metrics=[expected_metric], + rollup_metrics=[expected_metric], custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], background_task=True, ) - @background_task(name="test_bedrock_chat_completion_incorrect_access_key_with_token_count") + @background_task(name="test_bedrock_chat_completion_error_invalid_model_no_content") def _test(): - monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") - - with pytest.raises(_client_error): - set_trace_info() - add_custom_attribute("llm.conversation_id", "my-awesome-id") - add_custom_attribute("llm.foo", "bar") - add_custom_attribute("non_llm_attr", "python-agent") - - message = [{"role": "user", "content": [{"text": "Invalid Token"}]}] - - response = bedrock_converse_server.converse( - modelId="anthropic.claude-3-sonnet-20240229-v1:0", - messages=message, - inferenceConfig={"temperature": 0.7, "maxTokens": 100}, - ) + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") - assert response + exercise_converse_invalid_model() _test() diff --git a/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py b/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py index 4422685b9f..9acb0e8ed2 100644 --- a/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py +++ b/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py @@ -14,13 +14,13 @@ import json import os from io import BytesIO +from pprint import pformat import boto3 -import botocore.errorfactory import botocore.eventstream import botocore.exceptions import pytest -from _test_bedrock_chat_completion import ( +from _test_bedrock_chat_completion_invoke_model import ( chat_completion_expected_events, chat_completion_expected_malformed_request_body_events, chat_completion_expected_malformed_response_body_events, @@ -816,7 +816,12 @@ def test_bedrock_chat_completion_functions_marked_as_wrapped_for_sdk_compatibili def test_chat_models_instrumented(): - SUPPORTED_MODELS = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" not in model] + def _is_supported_model(model): + supported_models = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" not in model] + for supported_model in supported_models: + if supported_model in model: + return True + return False _id = os.environ.get("AWS_ACCESS_KEY_ID") key = os.environ.get("AWS_SECRET_ACCESS_KEY") @@ -826,10 +831,6 @@ def test_chat_models_instrumented(): client = 
boto3.client("bedrock", "us-east-1") response = client.list_foundation_models(byOutputModality="TEXT") models = [model["modelId"] for model in response["modelSummaries"]] - not_supported = [] - for model in models: - is_supported = any(model.startswith(supported_model) for supported_model in SUPPORTED_MODELS) - if not is_supported: - not_supported.append(model) + not_supported = [model for model in models if not _is_supported_model(model)] - assert not not_supported, f"The following unsupported models were found: {not_supported}" + assert not not_supported, f"The following unsupported models were found: {pformat(not_supported)}" diff --git a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py index 82537cd10a..b25516cd5b 100644 --- a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py +++ b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py @@ -13,7 +13,7 @@ # limitations under the License. import pytest -from _test_bedrock_chat_completion import ( +from _test_bedrock_chat_completion_invoke_model import ( chat_completion_langchain_expected_events, chat_completion_langchain_expected_streaming_events, ) diff --git a/tests/external_botocore/test_bedrock_embeddings.py b/tests/external_botocore/test_bedrock_embeddings.py index 417e24b2d9..36a5db6619 100644 --- a/tests/external_botocore/test_bedrock_embeddings.py +++ b/tests/external_botocore/test_bedrock_embeddings.py @@ -14,6 +14,7 @@ import json import os from io import BytesIO +from pprint import pformat import boto3 import botocore.exceptions @@ -409,7 +410,12 @@ def _test(): def test_embedding_models_instrumented(): - SUPPORTED_MODELS = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" in model] + def _is_supported_model(model): + supported_models = [model for model, _, _, _ in MODEL_EXTRACTORS if "embed" in model] + for supported_model in supported_models: + if supported_model in model: + return True + return False _id = os.environ.get("AWS_ACCESS_KEY_ID") key = os.environ.get("AWS_SECRET_ACCESS_KEY") @@ -419,10 +425,6 @@ def test_embedding_models_instrumented(): client = boto3.client("bedrock", "us-east-1") response = client.list_foundation_models(byOutputModality="EMBEDDING") models = [model["modelId"] for model in response["modelSummaries"]] - not_supported = [] - for model in models: - is_supported = any(model.startswith(supported_model) for supported_model in SUPPORTED_MODELS) - if not is_supported: - not_supported.append(model) + not_supported = [model for model in models if not _is_supported_model(model)] - assert not not_supported, f"The following unsupported models were found: {not_supported}" + assert not not_supported, f"The following unsupported models were found: {pformat(not_supported)}" diff --git a/tests/testing_support/validators/validate_custom_event.py b/tests/testing_support/validators/validate_custom_event.py index deeef7fb25..5e3eb65b74 100644 --- a/tests/testing_support/validators/validate_custom_event.py +++ b/tests/testing_support/validators/validate_custom_event.py @@ -13,6 +13,7 @@ # limitations under the License. 
import time +from pprint import pformat from newrelic.common.object_wrapper import function_wrapper from testing_support.fixtures import core_application_stats_engine @@ -61,7 +62,9 @@ def _validate_custom_event_count(wrapped, instance, args, kwargs): raise else: stats = core_application_stats_engine(None) - assert stats.custom_events.num_samples == count + assert stats.custom_events.num_samples == count, ( + f"Expected: {count}, Got: {stats.custom_events.num_samples}\nEvents: {pformat(list(stats.custom_events))}" + ) return result diff --git a/tests/testing_support/validators/validate_custom_events.py b/tests/testing_support/validators/validate_custom_events.py index 8a1bad4342..e3f1c1a15a 100644 --- a/tests/testing_support/validators/validate_custom_events.py +++ b/tests/testing_support/validators/validate_custom_events.py @@ -14,6 +14,7 @@ import copy import time +from pprint import pformat from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper from testing_support.fixtures import catch_background_exceptions @@ -100,8 +101,8 @@ def _check_event_attributes(expected, captured, mismatches): def _event_details(matching_custom_events, captured, mismatches): details = [ f"matching_custom_events={matching_custom_events}", - f"mismatches={mismatches}", - f"captured_events={captured}", + f"mismatches={pformat(mismatches)}", + f"captured_events={pformat(captured)}", ] return "\n".join(details)
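The `_is_supported_model` helpers added to `test_chat_models_instrumented` and `test_embedding_models_instrumented` above replace the previous `startswith` check with substring matching, presumably so identifiers that embed a supported model name behind an extra prefix are not flagged as unsupported. A minimal sketch of that behavior (not part of the diff; the model identifiers below are illustrative only):

# Hypothetical subset of supported model prefixes, for illustration.
SUPPORTED_MODELS = ["anthropic.claude", "amazon.titan-text"]


def _is_supported_model(model):
    # Substring matching: the supported name may appear anywhere in the identifier.
    return any(supported_model in model for supported_model in SUPPORTED_MODELS)


assert _is_supported_model("anthropic.claude-3-sonnet-20240229-v1:0")
# An identifier carrying a leading prefix would fail a startswith() check but still matches here.
assert _is_supported_model("us.anthropic.claude-3-sonnet-20240229-v1:0")
assert not _is_supported_model("cohere.command-text-v14")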