Skip to content

Commit 3b524ba

Browse files
committed
format
1 parent e05ffcf commit 3b524ba

File tree

2 files changed: +8 −6 lines changed

litellm/google_genai/adapters/handler.py

Lines changed: 2 additions & 2 deletions
@@ -38,8 +38,8 @@ def _prepare_completion_kwargs(
         completion_kwargs: Dict[str, Any] = dict(completion_request)

         # feed metadata for custom callback
-        if 'metadata' in extra_kwargs:
-            completion_kwargs['metadata'] = extra_kwargs['metadata']
+        if "metadata" in extra_kwargs:
+            completion_kwargs["metadata"] = extra_kwargs["metadata"]

         if stream:
             completion_kwargs["stream"] = stream

litellm/proxy/google_endpoints/endpoints.py

Lines changed: 6 additions & 4 deletions
@@ -181,9 +181,11 @@ async def google_count_tokens(request: Request, model_name: str):
         from litellm.proxy._types import TokenCountRequest

         # Translate contents to openai format messages using the adapter
-        messages = (GoogleGenAIAdapter()
-            .translate_generate_content_to_completion(model_name, contents)
-            .get("messages", []))
+        messages = (
+            GoogleGenAIAdapter()
+            .translate_generate_content_to_completion(model_name, contents)
+            .get("messages", [])
+        )

         token_request = TokenCountRequest(
             model=model_name,
@@ -209,7 +211,7 @@ async def google_count_tokens(request: Request, model_name: str):
             totalTokens=token_response.total_tokens or 0,
             promptTokensDetails=[],
         )
-
+
         #########################################################
         # Return the response in the well known format
         #########################################################

0 commit comments

Comments (0)