@@ -253,9 +253,7 @@ def generate_feedback_box():
 from litellm.proxy.management_endpoints.internal_user_endpoints import (
     router as internal_user_router,
 )
-from litellm.proxy.management_endpoints.internal_user_endpoints import (
-    user_update,
-)
+from litellm.proxy.management_endpoints.internal_user_endpoints import user_update
 from litellm.proxy.management_endpoints.key_management_endpoints import (
     delete_verification_tokens,
     duration_in_seconds,
@@ -302,9 +300,7 @@ def generate_feedback_box():
 from litellm.proxy.openai_files_endpoints.files_endpoints import (
     router as openai_files_router,
 )
-from litellm.proxy.openai_files_endpoints.files_endpoints import (
-    set_files_config,
-)
+from litellm.proxy.openai_files_endpoints.files_endpoints import set_files_config
 from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import (
     passthrough_endpoint_router,
 )
@@ -467,9 +463,9 @@ def generate_feedback_box():
 server_root_path = os.getenv("SERVER_ROOT_PATH", "")
 _license_check = LicenseCheck()
 premium_user: bool = _license_check.is_premium()
-premium_user_data: Optional[
-    "EnterpriseLicenseData"
-] = _license_check.airgapped_license_data
+premium_user_data: Optional["EnterpriseLicenseData"] = (
+    _license_check.airgapped_license_data
+)
 global_max_parallel_request_retries_env: Optional[str] = os.getenv(
     "LITELLM_GLOBAL_MAX_PARALLEL_REQUEST_RETRIES"
 )
@@ -966,9 +962,9 @@ def swagger_monkey_patch(*args, **kwargs):
     dual_cache=user_api_key_cache
 )
 litellm.logging_callback_manager.add_litellm_callback(model_max_budget_limiter)
-redis_usage_cache: Optional[
-    RedisCache
-] = None  # redis cache used for tracking spend, tpm/rpm limits
+redis_usage_cache: Optional[RedisCache] = (
+    None  # redis cache used for tracking spend, tpm/rpm limits
+)
 user_custom_auth = None
 user_custom_key_generate = None
 user_custom_sso = None
@@ -1299,9 +1295,9 @@ async def _update_team_cache():
         _id = "team_id:{}".format(team_id)
         try:
             # Fetch the existing cost for the given user
-            existing_spend_obj: Optional[
-                LiteLLM_TeamTable
-            ] = await user_api_key_cache.async_get_cache(key=_id)
+            existing_spend_obj: Optional[LiteLLM_TeamTable] = (
+                await user_api_key_cache.async_get_cache(key=_id)
+            )
             if existing_spend_obj is None:
                 # do nothing if team not in api key cache
                 return
@@ -1878,9 +1874,7 @@ async def load_config(  # noqa: PLR0915
                 f"{blue_color_code}Set Global BitBucket Config on LiteLLM Proxy{reset_color_code}"
             )
         elif key == "global_gitlab_config":
-            from litellm.integrations.gitlab import (
-                set_global_gitlab_config,
-            )
+            from litellm.integrations.gitlab import set_global_gitlab_config

             set_global_gitlab_config(value)
             verbose_proxy_logger.info(
@@ -2541,10 +2535,14 @@ def decrypt_model_list_from_db(self, new_models: list) -> list:
         _model_list: list = []
         for m in new_models:
             _litellm_params = m.litellm_params
+            if isinstance(_litellm_params, BaseModel):
+                _litellm_params = _litellm_params.model_dump()
             if isinstance(_litellm_params, dict):
                 # decrypt values
                 for k, v in _litellm_params.items():
-                    decrypted_value = decrypt_value_helper(value=v, key=k)
+                    decrypted_value = decrypt_value_helper(
+                        value=v, key=k, return_original_value=True
+                    )
                     _litellm_params[k] = decrypted_value
                 _litellm_params = LiteLLM_Params(**_litellm_params)
             else:
@@ -2628,7 +2626,7 @@ def _add_callback_from_db_to_in_memory_litellm_callbacks(
     ) -> None:
         """
         Helper method to add a single callback to litellm for specified event types.
-
+
         Args:
             callback: The callback name to add
             event_types: List of event types (e.g., ["success"], ["failure"], or ["success", "failure"])
@@ -3153,10 +3151,10 @@ async def _init_guardrails_in_db(self, prisma_client: PrismaClient):
         )

         try:
-            guardrails_in_db: List[
-                Guardrail
-            ] = await GuardrailRegistry.get_all_guardrails_from_db(
-                prisma_client=prisma_client
+            guardrails_in_db: List[Guardrail] = (
+                await GuardrailRegistry.get_all_guardrails_from_db(
+                    prisma_client=prisma_client
+                )
             )
             verbose_proxy_logger.debug(
                 "guardrails from the DB %s", str(guardrails_in_db)
@@ -3386,9 +3384,9 @@ async def initialize(  # noqa: PLR0915
         user_api_base = api_base
         dynamic_config[user_model]["api_base"] = api_base
     if api_version:
-        os.environ[
-            "AZURE_API_VERSION"
-        ] = api_version  # set this for azure - litellm can read this from the env
+        os.environ["AZURE_API_VERSION"] = (
+            api_version  # set this for azure - litellm can read this from the env
+        )
     if max_tokens:  # model-specific param
         dynamic_config[user_model]["max_tokens"] = max_tokens
     if temperature:  # model-specific param
@@ -3888,10 +3886,10 @@ async def _initialize_spend_tracking_background_jobs(
             LITELLM_KEY_ROTATION_CHECK_INTERVAL_SECONDS,
             LITELLM_KEY_ROTATION_ENABLED,
         )
-
+
         key_rotation_enabled: Optional[bool] = str_to_bool(LITELLM_KEY_ROTATION_ENABLED)
         verbose_proxy_logger.debug(f"key_rotation_enabled: {key_rotation_enabled}")
-
+
         if key_rotation_enabled is True:
             try:
                 from litellm.proxy.common_utils.key_rotation_manager import (
@@ -3902,19 +3900,25 @@ async def _initialize_spend_tracking_background_jobs(
                 global prisma_client
                 if prisma_client is not None:
                     key_rotation_manager = KeyRotationManager(prisma_client)
-                    verbose_proxy_logger.debug(f"Key rotation background job scheduled every {LITELLM_KEY_ROTATION_CHECK_INTERVAL_SECONDS} seconds (LITELLM_KEY_ROTATION_ENABLED=true)")
+                    verbose_proxy_logger.debug(
+                        f"Key rotation background job scheduled every {LITELLM_KEY_ROTATION_CHECK_INTERVAL_SECONDS} seconds (LITELLM_KEY_ROTATION_ENABLED=true)"
+                    )
                     scheduler.add_job(
                         key_rotation_manager.process_rotations,
                         "interval",
                         seconds=LITELLM_KEY_ROTATION_CHECK_INTERVAL_SECONDS,
-                        id="key_rotation_job"
+                        id="key_rotation_job",
                     )
                 else:
-                    verbose_proxy_logger.warning("Key rotation enabled but prisma_client not available")
+                    verbose_proxy_logger.warning(
+                        "Key rotation enabled but prisma_client not available"
+                    )
             except Exception as e:
                 verbose_proxy_logger.warning(f"Failed to setup key rotation job: {e}")
         else:
-            verbose_proxy_logger.debug("Key rotation disabled (set LITELLM_KEY_ROTATION_ENABLED=true to enable)")
+            verbose_proxy_logger.debug(
+                "Key rotation disabled (set LITELLM_KEY_ROTATION_ENABLED=true to enable)"
+            )

     @classmethod
     async def _setup_prisma_client(
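Taken together, the two key-rotation hunks read `LITELLM_KEY_ROTATION_ENABLED` and, when it is truthy, register `KeyRotationManager.process_rotations` as an interval job on the proxy scheduler. The sketch below shows the same pattern with APScheduler's `AsyncIOScheduler`: the job id, trigger type, and log messages mirror the diff, while the env-var parsing, the default interval, and the stand-in `process_rotations` body are assumptions for illustration rather than litellm's actual code.

```python
import asyncio
import os
from typing import Optional

from apscheduler.schedulers.asyncio import AsyncIOScheduler

# Assumed mapping of the interval constant to an env var; the real default may differ.
KEY_ROTATION_CHECK_INTERVAL_SECONDS = int(
    os.getenv("LITELLM_KEY_ROTATION_CHECK_INTERVAL_SECONDS", "3600")
)


def str_to_bool(value: Optional[str]) -> Optional[bool]:
    # Simplified version of the flag parsing implied by the hunk.
    if value is None:
        return None
    return value.strip().lower() in ("true", "1", "yes")


async def process_rotations() -> None:
    # Stand-in for KeyRotationManager.process_rotations: rotate any keys
    # whose rotation schedule is due.
    print("checking for keys due for rotation...")


def setup_key_rotation_job(scheduler: AsyncIOScheduler) -> None:
    key_rotation_enabled = str_to_bool(os.getenv("LITELLM_KEY_ROTATION_ENABLED"))
    if key_rotation_enabled is True:
        # Same shape as the scheduler.add_job(...) call in the hunk:
        # an interval trigger with an explicit job id.
        scheduler.add_job(
            process_rotations,
            "interval",
            seconds=KEY_ROTATION_CHECK_INTERVAL_SECONDS,
            id="key_rotation_job",
        )
    else:
        print("Key rotation disabled (set LITELLM_KEY_ROTATION_ENABLED=true to enable)")


async def main() -> None:
    scheduler = AsyncIOScheduler()
    setup_key_rotation_job(scheduler)
    scheduler.start()
    await asyncio.sleep(5)  # keep the loop alive briefly for demonstration
    scheduler.shutdown(wait=False)


if __name__ == "__main__":
    asyncio.run(main())
```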
@@ -8745,9 +8749,9 @@ async def get_config_list(
                             hasattr(sub_field_info, "description")
                             and sub_field_info.description is not None
                         ):
-                            nested_fields[
-                                idx
-                            ].field_description = sub_field_info.description
+                            nested_fields[idx].field_description = (
+                                sub_field_info.description
+                            )
                             idx += 1

     _stored_in_db = None