Commit 50d717c

Apply Black formatting and fix Ruff issues

- Format code with Black to meet style requirements
- Fix auto-fixable Ruff linting issues
- Maintain header implementation functionality

1 parent 3986b07 · commit 50d717c
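
For reference, a pass like this can be reproduced locally by running the two tools over the touched file. A minimal sketch, assuming the `black` and `ruff` executables are installed and on PATH, driven from Python so the example stays in the repository's language:

```python
import subprocess

# Reformat the file in place with Black's default (88-column) style.
subprocess.run(["black", "litellm/main.py"], check=True)

# Apply Ruff's auto-fixable lint corrections, e.g. removing unused imports.
subprocess.run(["ruff", "check", "--fix", "litellm/main.py"], check=True)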

File tree

2 files changed, +104 −81 lines changed


litellm/main.py

Lines changed: 19 additions & 27 deletions
@@ -1079,7 +1079,6 @@ def completion( # type: ignore # noqa: PLR0915
             prompt_id=prompt_id, non_default_params=non_default_params
         )
     ):
-
         (
             model,
             messages,
@@ -2034,7 +2033,6 @@ def completion( # type: ignore # noqa: PLR0915
 
     try:
         if use_base_llm_http_handler:
-
             response = base_llm_http_handler.completion(
                 model=model,
                 messages=messages,
@@ -2550,15 +2548,10 @@ def completion( # type: ignore # noqa: PLR0915
         )
     elif custom_llm_provider == "compactifai":
         api_key = (
-            api_key
-            or get_secret_str("COMPACTIFAI_API_KEY")
-            or litellm.api_key
+            api_key or get_secret_str("COMPACTIFAI_API_KEY") or litellm.api_key
         )
 
-        api_base = (
-            api_base
-            or "https://api.compactif.ai/v1"
-        )
+        api_base = api_base or "https://api.compactif.ai/v1"
 
         ## COMPLETION CALL
         response = base_llm_http_handler.completion(
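
Beyond the reflow, this hunk preserves the credential-resolution idiom: explicit argument first, then environment, then the module-level default. A self-contained sketch of that precedence, where `get_secret_str` is reduced to a plain environment lookup (an assumption; litellm's helper can also consult secret managers):

```python
import os
from typing import Optional

module_api_key: Optional[str] = None  # stands in for litellm.api_key


def get_secret_str(name: str) -> Optional[str]:
    # Simplified stand-in: only reads the environment.
    return os.environ.get(name)


def resolve_api_key(api_key: Optional[str] = None) -> Optional[str]:
    # Same precedence as the diff: explicit arg > env var > module default.
    return api_key or get_secret_str("COMPACTIFAI_API_KEY") or module_api_key
```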
@@ -3144,9 +3137,9 @@ def completion( # type: ignore # noqa: PLR0915
                 "aws_region_name" not in optional_params
                 or optional_params["aws_region_name"] is None
             ):
-                optional_params["aws_region_name"] = (
-                    aws_bedrock_client.meta.region_name
-                )
+                optional_params[
+                    "aws_region_name"
+                ] = aws_bedrock_client.meta.region_name
 
             bedrock_route = BedrockModelInfo.get_bedrock_route(model)
             if bedrock_route == "converse":
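
This hunk, like the stream_chunk_builder hunks further down, is behavior-neutral: it only moves the line break in an over-length subscript assignment, and which layout Black emits depends on the Black version in use. Both forms side by side, with stand-in names:

```python
optional_params: dict[str, str] = {}
region_name = "us-east-1"  # stand-in for aws_bedrock_client.meta.region_name

# Layout A: parenthesize the right-hand side (the form being removed here).
optional_params["aws_region_name"] = (
    region_name
)

# Layout B: split the subscript instead (the form this commit adopts).
optional_params[
    "aws_region_name"
] = region_name
```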
@@ -3488,7 +3481,6 @@ def completion( # type: ignore # noqa: PLR0915
         )
         raise e
     elif custom_llm_provider == "gradient_ai":
-
         api_base = litellm.api_base or api_base
         response = base_llm_http_handler.completion(
             model=model,
@@ -3848,7 +3840,7 @@ def embedding(
     *,
     aembedding: Literal[True],
     **kwargs,
-) -> Coroutine[Any, Any, EmbeddingResponse]:
+) -> Coroutine[Any, Any, EmbeddingResponse]:
     ...
 
 
@@ -3874,7 +3866,7 @@ def embedding(
     *,
     aembedding: Literal[False] = False,
     **kwargs,
-) -> EmbeddingResponse:
+) -> EmbeddingResponse:
     ...
 
 # fmt: on
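
The two reflowed signatures above are `@overload` stubs in which the `Literal` type of the `aembedding` flag selects the return type callers see: a coroutine for `aembedding=True`, a plain `EmbeddingResponse` otherwise. A reduced sketch of the pattern (stand-in types, not litellm's actual signatures):

```python
from typing import Any, Coroutine, Literal, Union, overload


class EmbeddingResponse:  # stand-in for litellm's response type
    pass


@overload
def embedding(
    model: str, *, aembedding: Literal[True], **kwargs: Any
) -> Coroutine[Any, Any, EmbeddingResponse]:
    ...


@overload
def embedding(
    model: str, *, aembedding: Literal[False] = False, **kwargs: Any
) -> EmbeddingResponse:
    ...


def embedding(
    model: str, *, aembedding: bool = False, **kwargs: Any
) -> Union[EmbeddingResponse, Coroutine[Any, Any, EmbeddingResponse]]:
    async def _acall() -> EmbeddingResponse:
        return EmbeddingResponse()  # the async path would await an HTTP call

    # Hand back an awaitable when aembedding=True, a response otherwise.
    return _acall() if aembedding else EmbeddingResponse()
```

With these stubs, a type checker infers `await embedding(m, aembedding=True)` as `EmbeddingResponse` while `embedding(m)` is one directly.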
@@ -5127,9 +5119,9 @@ def adapter_completion(
     new_kwargs = translation_obj.translate_completion_input_params(kwargs=kwargs)
 
     response: Union[ModelResponse, CustomStreamWrapper] = completion(**new_kwargs)  # type: ignore
-    translated_response: Optional[Union[BaseModel, AdapterCompletionStreamWrapper]] = (
-        None
-    )
+    translated_response: Optional[
+        Union[BaseModel, AdapterCompletionStreamWrapper]
+    ] = None
     if isinstance(response, ModelResponse):
         translated_response = translation_obj.translate_completion_output_params(
             response=response
@@ -6117,9 +6109,9 @@ def stream_chunk_builder( # noqa: PLR0915
         ]
 
         if len(content_chunks) > 0:
-            response["choices"][0]["message"]["content"] = (
-                processor.get_combined_content(content_chunks)
-            )
+            response["choices"][0]["message"][
+                "content"
+            ] = processor.get_combined_content(content_chunks)
 
         thinking_blocks = [
             chunk
@@ -6130,9 +6122,9 @@ def stream_chunk_builder( # noqa: PLR0915
         ]
 
         if len(thinking_blocks) > 0:
-            response["choices"][0]["message"]["thinking_blocks"] = (
-                processor.get_combined_thinking_content(thinking_blocks)
-            )
+            response["choices"][0]["message"][
+                "thinking_blocks"
+            ] = processor.get_combined_thinking_content(thinking_blocks)
 
         reasoning_chunks = [
             chunk
@@ -6143,9 +6135,9 @@ def stream_chunk_builder( # noqa: PLR0915
         ]
 
         if len(reasoning_chunks) > 0:
-            response["choices"][0]["message"]["reasoning_content"] = (
-                processor.get_combined_reasoning_content(reasoning_chunks)
-            )
+            response["choices"][0]["message"][
+                "reasoning_content"
+            ] = processor.get_combined_reasoning_content(reasoning_chunks)
 
         audio_chunks = [
             chunk
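
The three stream_chunk_builder hunks all format the same operation: filter the streamed chunks down to those whose delta carries a given field, then fold the pieces into the final message. A minimal sketch of that pattern, assuming OpenAI-style chunk dicts (the real code goes through litellm's processor object):

```python
from typing import Any


def combine_delta_field(chunks: list[dict[str, Any]], field: str) -> str:
    # Keep only the chunks whose first choice's delta carries `field`,
    # then concatenate the pieces in arrival order.
    parts = [
        chunk["choices"][0]["delta"][field]
        for chunk in chunks
        if chunk["choices"][0]["delta"].get(field) is not None
    ]
    return "".join(parts)


chunks = [
    {"choices": [{"delta": {"content": "Hel"}}]},
    {"choices": [{"delta": {"reasoning_content": "step 1"}}]},
    {"choices": [{"delta": {"content": "lo"}}]},
]
assert combine_delta_field(chunks, "content") == "Hello"
```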
