@@ -109,12 +109,16 @@ async def test_proxy_failure_metrics():
         expected_metric_pattern = 'litellm_proxy_failed_requests_metric_total{api_key_alias="None",end_user="None",exception_class="Openai.RateLimitError",exception_status="429",hashed_api_key="88dc28d0f030c55ed4ab77ed8faf098196cb1c05df778539800c9f1243fe6b4b",requested_model="fake-azure-endpoint",route="/chat/completions",team="None",team_alias="None",user="default_user_id",user_email="None"}'
 
         # Check if the pattern is in metrics (this metric doesn't include user_email field)
-        assert any(expected_metric_pattern in line for line in metrics.split('\n')), f"Expected failure metric pattern not found in /metrics. Pattern: {expected_metric_pattern}"
-
+        assert any(
+            expected_metric_pattern in line for line in metrics.split("\n")
+        ), f"Expected failure metric pattern not found in /metrics. Pattern: {expected_metric_pattern}"
+
         # Check total requests metric which includes user_email
         total_requests_pattern = 'litellm_proxy_total_requests_metric_total{api_key_alias="None",end_user="None",hashed_api_key="88dc28d0f030c55ed4ab77ed8faf098196cb1c05df778539800c9f1243fe6b4b",requested_model="fake-azure-endpoint",route="/chat/completions",status_code="429",team="None",team_alias="None",user="default_user_id",user_email="None"}'
 
-        assert any(total_requests_pattern in line for line in metrics.split('\n')), f"Expected total requests metric pattern not found in /metrics. Pattern: {total_requests_pattern}"
+        assert any(
+            total_requests_pattern in line for line in metrics.split("\n")
+        ), f"Expected total requests metric pattern not found in /metrics. Pattern: {total_requests_pattern}"
 
 
 @pytest.mark.asyncio
@@ -378,7 +382,9 @@ async def test_team_budget_metrics():
         assert first_budget["total"] == 10.0, "Total budget metric is incorrect"
         print("first_budget['remaining_hours']", first_budget["remaining_hours"])
         # Budget should have positive remaining hours, up to 7 days
-        assert 0 < first_budget["remaining_hours"] <= 168, "Budget should have positive remaining hours, up to 7 days"
+        assert (
+            0 < first_budget["remaining_hours"] <= 168
+        ), "Budget should have positive remaining hours, up to 7 days"
 
         # Get team info and verify spend matches prometheus metrics
         team_info = await get_team_info(session, team_id)
@@ -510,7 +516,9 @@ async def test_key_budget_metrics():
         print("first_budget['remaining_hours']", first_budget["remaining_hours"])
         # The budget reset time is now standardized - for "7d" it resets on Monday at midnight
         # So we'll check if it's within a reasonable range (0-7 days depending on current day of week)
-        assert 0 <= first_budget["remaining_hours"] <= 168, "Budget remaining hours should be within a reasonable range (0-7 days depending on day of week)"
+        assert (
+            0 <= first_budget["remaining_hours"] <= 168
+        ), "Budget remaining hours should be within a reasonable range (0-7 days depending on day of week)"
 
         # Get key info and verify spend matches prometheus metrics
         key_info = await get_key_info(session, key)
@@ -570,6 +578,7 @@ async def test_user_email_metrics():
             user_email in metrics_after_first
         ), "user_email should be tracked correctly"
 
+
 @pytest.mark.asyncio
 async def test_user_email_in_all_required_metrics():
     """
@@ -579,7 +588,7 @@ async def test_user_email_in_all_required_metrics():
     - litellm_input_tokens_metric_total
     - litellm_output_tokens_metric_total
     - litellm_requests_metric_total
-    - litellm_spend_metric
+    - litellm_spend_metric_total
     """
     async with aiohttp.ClientSession() as session:
         # Create a user with user_email
@@ -611,16 +620,21 @@ async def test_user_email_in_all_required_metrics():
             "litellm_input_tokens_metric_total",
             "litellm_output_tokens_metric_total",
             "litellm_requests_metric_total",
-            "litellm_spend_metric"
+            "litellm_spend_metric_total",
         ]
 
         import re
+
         for metric_name in required_metrics_with_user_email:
             # Check that the metric exists and contains user_email label
             # Look for the metric with user_email in its labels
-            pattern = rf'{metric_name}{{[^}}]*user_email="{re.escape(user_email)}"[^}}]*}}'
+            pattern = (
+                rf'{metric_name}{{[^}}]*user_email="{re.escape(user_email)}"[^}}]*}}'
+            )
             matches = re.findall(pattern, metrics_text)
-            assert len(matches) > 0, f"Metric {metric_name} should contain user_email={user_email} but was not found in metrics"
+            assert (
+                len(matches) > 0
+            ), f"Metric {metric_name} should contain user_email={user_email} but was not found in metrics"
 
         # Also test failure metric by making a bad request
         try:
@@ -639,4 +653,6 @@ async def test_user_email_in_all_required_metrics():
         # Check that failure metric also contains user_email
         failure_pattern = rf'litellm_proxy_failed_requests_metric_total{{[^}}]*user_email="{re.escape(user_email)}"[^}}]*}}'
         failure_matches = re.findall(failure_pattern, metrics_text)
-        assert len(failure_matches) > 0, f"litellm_proxy_failed_requests_metric_total should contain user_email={user_email}"
+        assert (
+            len(failure_matches) > 0
+        ), f"litellm_proxy_failed_requests_metric_total should contain user_email={user_email}"