
Commit f8f4207

[Security Fix] fix: don't log JWT SSO token on .info() log (#15145)
* fix: get_redirect_response_from_openid
* fix: info log check
* fix: forward_upstream_to_client
1 parent 8991657 commit f8f4207

File tree

4 files changed (+75, -9 lines)


litellm/model_prices_and_context_window_backup.json

Lines changed: 58 additions & 0 deletions
@@ -3308,6 +3308,64 @@
         "supports_tool_choice": true,
         "supports_web_search": true
     },
+    "azure_ai/grok-4": {
+        "input_cost_per_token": 5.5e-06,
+        "litellm_provider": "azure_ai",
+        "max_input_tokens": 131072,
+        "max_output_tokens": 131072,
+        "max_tokens": 131072,
+        "mode": "chat",
+        "output_cost_per_token": 2.75e-05,
+        "source": "https://azure.microsoft.com/en-us/blog/grok-4-is-now-available-in-azure-ai-foundry-unlock-frontier-intelligence-and-business-ready-capabilities/",
+        "supports_function_calling": true,
+        "supports_reasoning": true,
+        "supports_response_schema": true,
+        "supports_tool_choice": true,
+        "supports_web_search": true
+    },
+    "azure_ai/grok-4-fast-non-reasoning": {
+        "input_cost_per_token": 5e-06,
+        "litellm_provider": "azure_ai",
+        "max_input_tokens": 131072,
+        "max_output_tokens": 131072,
+        "max_tokens": 131072,
+        "mode": "chat",
+        "output_cost_per_token": 2.5e-03,
+        "source": "https://azure.microsoft.com/en-us/blog/grok-4-is-now-available-in-azure-ai-foundry-unlock-frontier-intelligence-and-business-ready-capabilities/",
+        "supports_function_calling": true,
+        "supports_response_schema": true,
+        "supports_tool_choice": true,
+        "supports_web_search": true
+    },
+    "azure_ai/grok-4-fast-reasoning": {
+        "input_cost_per_token": 5.8e-06,
+        "litellm_provider": "azure_ai",
+        "max_input_tokens": 131072,
+        "max_output_tokens": 131072,
+        "max_tokens": 131072,
+        "mode": "chat",
+        "output_cost_per_token": 2.9e-03,
+        "source": "https://azure.microsoft.com/en-us/blog/grok-4-is-now-available-in-azure-ai-foundry-unlock-frontier-intelligence-and-business-ready-capabilities/",
+        "supports_function_calling": true,
+        "supports_reasoning": true,
+        "supports_response_schema": true,
+        "supports_tool_choice": true,
+        "supports_web_search": true
+    },
+    "azure_ai/grok-code-fast-1": {
+        "input_cost_per_token": 3.5e-06,
+        "litellm_provider": "azure_ai",
+        "max_input_tokens": 131072,
+        "max_output_tokens": 131072,
+        "max_tokens": 131072,
+        "mode": "chat",
+        "output_cost_per_token": 1.75e-05,
+        "source": "https://azure.microsoft.com/en-us/blog/grok-4-is-now-available-in-azure-ai-foundry-unlock-frontier-intelligence-and-business-ready-capabilities/",
+        "supports_function_calling": true,
+        "supports_response_schema": true,
+        "supports_tool_choice": true,
+        "supports_web_search": true
+    },
     "azure_ai/jais-30b-chat": {
         "input_cost_per_token": 0.0032,
         "litellm_provider": "azure_ai",

litellm/proxy/management_endpoints/ui_sso.py

Lines changed: 0 additions & 1 deletion
@@ -1594,7 +1594,6 @@ async def get_redirect_response_from_openid( # noqa: PLR0915
             master_key or "",
             algorithm="HS256",
         )
-        verbose_proxy_logger.info(f"user_id: {user_id}; jwt_token: {jwt_token}")
         if user_id is not None and isinstance(user_id, str):
             litellm_dashboard_ui += "?login=success"
             verbose_proxy_logger.info(f"Redirecting to {litellm_dashboard_ui}")

litellm/proxy/pass_through_endpoints/pass_through_endpoints.py

Lines changed: 1 addition & 1 deletion
@@ -1254,7 +1254,7 @@ async def forward_upstream_to_client() -> None:
                     logging_obj.model_call_details[
                         "custom_llm_provider"
                     ] = "vertex_ai_language_models"
-                    verbose_proxy_logger.info(
+                    verbose_proxy_logger.debug(
                         f"WebSocket passthrough ({endpoint}): Successfully extracted model '{extracted_model}' and set provider to 'vertex_ai' from server setup response"
                     )
                 else:
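Moving this message from info to debug keeps it available for troubleshooting while hiding the extracted request detail at the proxy's default verbosity. A standard-library sketch of why the level change matters (not the proxy's actual logger configuration):

# Debug-level records are suppressed until the logger is explicitly set to DEBUG.
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("passthrough")

log.info("WebSocket passthrough established")       # emitted at default verbosity
log.debug("extracted model and provider details")   # dropped: effective level is INFO

log.setLevel(logging.DEBUG)
log.debug("now visible for troubleshooting")        # emitted after opting in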

tests/code_coverage_tests/info_log_check.py

Lines changed: 16 additions & 7 deletions
@@ -126,8 +126,13 @@ def _is_format_string_with_sensitive_data(self, arg) -> bool:
         for value in arg.values:
             if isinstance(value, ast.FormattedValue):
                 value_str = self._get_arg_string(value.value).lower()
-                if any(pattern in value_str for pattern in
-                       ['request', 'response', 'data', 'body', 'content', 'messages']):
+                # Check for any sensitive data patterns in f-string interpolations
+                sensitive_f_string_patterns = [
+                    'request', 'response', 'data', 'body', 'content', 'messages',
+                    'token', 'jwt', 'auth', 'api_key', 'apikey', 'credential',
+                    'secret', 'password', 'passwd'
+                ]
+                if any(pattern in value_str for pattern in sensitive_f_string_patterns):
                     return True

         # Check for .format() calls
@@ -137,10 +142,14 @@ def _is_format_string_with_sensitive_data(self, arg) -> bool:
             base_str = self._get_arg_string(arg.func.value).lower()
             if "{}" in base_str or "{" in base_str:
                 # Check format arguments for sensitive data
+                sensitive_format_patterns = [
+                    'request', 'response', 'data', 'body', 'content',
+                    'token', 'jwt', 'auth', 'api_key', 'apikey', 'credential',
+                    'secret', 'password', 'passwd'
+                ]
                 for format_arg in arg.args:
                     format_str = self._get_arg_string(format_arg).lower()
-                    if any(pattern in format_str for pattern in
-                           ['request', 'response', 'data', 'body', 'content']):
+                    if any(pattern in format_str for pattern in sensitive_format_patterns):
                         return True

         return False
@@ -171,16 +180,16 @@ def _get_violation_reason(self, arg) -> str:
         """Get a human-readable reason for the violation"""
         arg_str = self._get_arg_string(arg).lower()

-        if 'request' in arg_str:
+        if any(pattern in arg_str for pattern in ['jwt', 'token', 'api_key', 'apikey', 'auth', 'credential', 'secret', 'password', 'passwd']):
+            return "Potentially logging authentication/secret data (JWT, token, API key, etc.)"
+        elif 'request' in arg_str:
             return "Potentially logging request data"
         elif 'response' in arg_str:
             return "Potentially logging response data"
         elif any(pattern in arg_str for pattern in ['data', 'body', 'payload', 'content']):
             return "Potentially logging sensitive data/body/content"
         elif any(pattern in arg_str for pattern in ['messages', 'input', 'output']):
             return "Potentially logging message/input/output data"
-        elif any(pattern in arg_str for pattern in ['api_key', 'token', 'auth', 'credentials']):
-            return "Potentially logging authentication data"
         else:
             return "Potentially logging sensitive data"
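This guard is part of a CI check that walks the proxy code's AST and flags `.info()` calls whose arguments mention secret-like names. Below is a hedged, heavily reduced sketch of the same idea; the class name, pattern list, and structure are illustrative and much simpler than the repo's actual `info_log_check.py`.

# Simplified sketch: flag logger.info(...) calls whose f-string interpolations
# reference secret-like identifiers (requires Python 3.9+ for ast.unparse).
import ast

SENSITIVE = ("token", "jwt", "auth", "api_key", "apikey", "secret", "password")

class InfoLogVisitor(ast.NodeVisitor):
    def __init__(self) -> None:
        self.violations: list[tuple[int, str]] = []

    def visit_Call(self, node: ast.Call) -> None:
        is_info = isinstance(node.func, ast.Attribute) and node.func.attr == "info"
        if is_info:
            for arg in node.args:
                if isinstance(arg, ast.JoinedStr):  # f-string argument
                    for value in arg.values:
                        if isinstance(value, ast.FormattedValue):
                            expr = ast.unparse(value.value).lower()
                            if any(p in expr for p in SENSITIVE):
                                self.violations.append((node.lineno, expr))
        self.generic_visit(node)

source = 'logger.info(f"user_id: {user_id}; jwt_token: {jwt_token}")'
visitor = InfoLogVisitor()
visitor.visit(ast.parse(source))
print(visitor.violations)  # [(1, 'jwt_token')]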
