@@ -657,21 +657,109 @@ def test_guardrail_information_in_metadata(mock_env_vars):
     assert guardrail_info["guardrail_response"]["score"] == 0.1


+def create_standard_logging_payload_with_spend_metrics() -> StandardLoggingPayload:
+    """Create a StandardLoggingPayload object with spend metrics for testing"""
+    from datetime import datetime, timedelta, timezone
+
+    # Create a budget reset time 24 hours from now
+    budget_reset_at = datetime.now(timezone.utc) + timedelta(hours=24)
+
+    return {
+        "id": "test-request-id-spend",
+        "trace_id": "test-trace-id-spend",
+        "call_type": "completion",
+        "stream": None,
+        "response_cost": 0.15,
+        "response_cost_failure_debug_info": None,
+        "status": "success",
+        "custom_llm_provider": "openai",
+        "total_tokens": 30,
+        "prompt_tokens": 10,
+        "completion_tokens": 20,
+        "startTime": 1234567890.0,
+        "endTime": 1234567891.0,
+        "completionStartTime": 1234567890.5,
+        "response_time": 1.0,
+        "model_map_information": {
+            "model_map_key": "gpt-4",
+            "model_map_value": None
+        },
+        "model": "gpt-4",
+        "model_id": "model-123",
+        "model_group": "openai-gpt",
+        "api_base": "https://api.openai.com",
+        "metadata": {
+            "user_api_key_hash": "test_hash",
+            "user_api_key_org_id": None,
+            "user_api_key_alias": "test_alias",
+            "user_api_key_team_id": "test_team",
+            "user_api_key_user_id": "test_user",
+            "user_api_key_team_alias": "test_team_alias",
+            "user_api_key_user_email": None,
+            "user_api_key_end_user_id": None,
+            "user_api_key_request_route": None,
+            "user_api_key_max_budget": 10.0,  # $10 max budget
+            "user_api_key_budget_reset_at": budget_reset_at.isoformat(),
+            "spend_logs_metadata": None,
+            "requester_ip_address": "127.0.0.1",
+            "requester_metadata": None,
+            "requester_custom_headers": None,
+            "prompt_management_metadata": None,
+            "mcp_tool_call_metadata": None,
+            "vector_store_request_metadata": None,
+            "applied_guardrails": None,
+            "usage_object": None,
+            "cold_storage_object_key": None,
+        },
+        "cache_hit": False,
+        "cache_key": None,
+        "saved_cache_cost": 0.0,
+        "request_tags": [],
+        "end_user": None,
+        "requester_ip_address": "127.0.0.1",
+        "messages": [{"role": "user", "content": "Hello, world!"}],
+        "response": {"choices": [{"message": {"content": "Hi there!"}}]},
+        "error_str": None,
+        "error_information": None,
+        "model_parameters": {"stream": False},
+        "hidden_params": {
+            "model_id": "model-123",
+            "cache_key": None,
+            "api_base": "https://api.openai.com",
+            "response_cost": "0.15",
+            "litellm_overhead_time_ms": None,
+            "additional_headers": None,
+            "batch_models": None,
+            "litellm_model_name": None,
+            "usage_object": None,
+        },
+        "guardrail_information": None,
+        "standard_built_in_tools_params": None,
+    }  # type: ignore
+
+
 def create_standard_logging_payload_with_tool_calls() -> StandardLoggingPayload:
     """Create a StandardLoggingPayload object with tool calls for testing"""
     return {
         "id": "test-request-id-tool-calls",
+        "trace_id": "test-trace-id-tool-calls",
         "call_type": "completion",
+        "stream": None,
         "response_cost": 0.05,
         "response_cost_failure_debug_info": None,
         "status": "success",
+        "custom_llm_provider": "openai",
         "total_tokens": 50,
         "prompt_tokens": 20,
         "completion_tokens": 30,
         "startTime": 1234567890.0,
         "endTime": 1234567891.0,
         "completionStartTime": 1234567890.5,
-        "model_map_information": {"model_map_key": "gpt-4", "model_map_value": None},
+        "response_time": 1.0,
+        "model_map_information": {
+            "model_map_key": "gpt-4",
+            "model_map_value": None
+        },
         "model": "gpt-4",
         "model_id": "model-123",
         "model_group": "openai-gpt",
@@ -746,6 +834,7 @@ def create_standard_logging_payload_with_tool_calls() -> StandardLoggingPayload:
             ]
         },
         "error_str": None,
+        "error_information": None,
         "model_parameters": {"temperature": 0.7},
         "hidden_params": {
             "model_id": "model-123",
@@ -758,14 +847,9 @@ def create_standard_logging_payload_with_tool_calls() -> StandardLoggingPayload:
             "litellm_model_name": None,
             "usage_object": None,
         },
-        "stream": None,
-        "response_time": 1.0,
-        "error_information": None,
         "guardrail_information": None,
         "standard_built_in_tools_params": None,
-        "trace_id": "test-trace-id-tool-calls",
-        "custom_llm_provider": "openai",
-    }
+    }  # type: ignore


 class TestDataDogLLMObsLoggerToolCalls:
@@ -897,3 +981,51 @@ def test_tool_call_response_handling(self, mock_env_vars):
         assert len(output_tool_calls) == 1
         output_function_info = output_tool_calls[0].get("function", {})
         assert output_function_info.get("name") == "format_response"
+
+
+def test_spend_metrics_in_datadog_payload(mock_env_vars):
+    """Test that spend metrics are correctly included in DataDog LLM Observability payloads"""
+    with patch(
+        "litellm.integrations.datadog.datadog_llm_obs.get_async_httpx_client"
+    ), patch("asyncio.create_task"):
+        logger = DataDogLLMObsLogger()
+
+        standard_payload = create_standard_logging_payload_with_spend_metrics()
+
+        kwargs = {
+            "standard_logging_object": standard_payload,
+            "litellm_params": {"metadata": {}},
+        }
+
+        start_time = datetime.now()
+        end_time = datetime.now()
+
+        payload = logger.create_llm_obs_payload(kwargs, start_time, end_time)
+
+        # Verify basic payload structure
+        assert payload.get("name") == "litellm_llm_call"
+        assert payload.get("status") == "ok"
+
+        # Verify spend metrics are included in metadata
+        meta = payload.get("meta", {})
+        assert meta is not None, "Meta section should exist in payload"
+
+        metadata = meta.get("metadata", {})
+        assert metadata is not None, "Metadata section should exist in meta"
+
+        spend_metrics = metadata.get("spend_metrics", {})
+        assert spend_metrics, "Spend metrics should exist in metadata"
+
+        # Check that all three spend metrics are present
+        assert "litellm_spend_metric" in spend_metrics
+        assert "litellm_api_key_max_budget_metric" in spend_metrics
+        assert "litellm_api_key_budget_remaining_hours_metric" in spend_metrics
+
+        # Verify the values are correct
+        assert spend_metrics["litellm_spend_metric"] == 0.15  # response_cost
+        assert spend_metrics["litellm_api_key_max_budget_metric"] == 10.0  # max budget
+
+        # Verify remaining hours is a reasonable value (should be close to 24 since we set it to 24 hours from now)
+        remaining_hours = spend_metrics["litellm_api_key_budget_remaining_hours_metric"]
+        assert isinstance(remaining_hours, (int, float))
+        assert 20 <= remaining_hours <= 25  # Should be close to 24 hours
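
Note (not part of the diff): the test above expects litellm_api_key_budget_remaining_hours_metric to be roughly 24 because the fixture sets user_api_key_budget_reset_at 24 hours in the future. A minimal sketch of how such a value can be derived from that ISO-8601 timestamp, assuming the metric is simply the time remaining until the budget reset; the helper name below is hypothetical, not the logger's actual method:

from datetime import datetime, timezone

def remaining_budget_hours(budget_reset_at_iso: str) -> float:
    # Hypothetical helper: hours from now until the key's budget resets.
    # For the fixture above (reset set 24h ahead) this evaluates to ~24.0.
    reset_at = datetime.fromisoformat(budget_reset_at_iso)
    return (reset_at - datetime.now(timezone.utc)).total_seconds() / 3600.0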