Skip to content

Commit b064ec8

Browse files
ephrimstanleyshivamrawat1
authored and committed
Managed batches - Address PR bot comments from #22464
Made-with: Cursor
1 parent 0435375 commit b064ec8

File tree

5 files changed

+380
-7
lines changed

5 files changed

+380
-7
lines changed

litellm/files/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -336,7 +336,7 @@ async def afile_retrieve(
336336
@client
337337
def file_retrieve(
338338
file_id: str,
339-
custom_llm_provider: Literal["openai", "azure", "hosted_vllm", "manus"] = "openai",
339+
custom_llm_provider: Literal["openai", "azure", "gemini", "vertex_ai", "hosted_vllm", "manus"] = "openai",
340340
extra_headers: Optional[Dict[str, str]] = None,
341341
extra_body: Optional[Dict[str, str]] = None,
342342
**kwargs,

litellm/llms/vertex_ai/batches/handler.py

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -108,11 +108,19 @@ async def _async_create_batch(
108108
client = get_async_httpx_client(
109109
llm_provider=litellm.LlmProviders.VERTEX_AI,
110110
)
111-
response = await client.post(
112-
url=api_base,
113-
headers=headers,
114-
data=json.dumps(vertex_batch_request),
115-
)
111+
try:
112+
response = await client.post(
113+
url=api_base,
114+
headers=headers,
115+
data=json.dumps(vertex_batch_request),
116+
)
117+
except httpx.HTTPStatusError as e:
118+
error_body = e.response.text
119+
litellm.verbose_logger.error(
120+
"Vertex AI batch create failed: status=%s, body=%s",
121+
e.response.status_code, error_body[:1000],
122+
)
123+
raise
116124
if response.status_code != 200:
117125
raise Exception(f"Error: {response.status_code} {response.text}")
118126

litellm/llms/vertex_ai/files/transformation.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import json
22
import os
33
import time
4+
import urllib.parse
45
from typing import Any, Dict, List, Optional, Tuple, Union
56

67
from httpx import Headers, Response
@@ -365,7 +366,14 @@ def transform_delete_file_response(
365366
logging_obj: LiteLLMLoggingObj,
366367
litellm_params: dict,
367368
) -> FileDeleted:
368-
raise NotImplementedError("VertexAIFilesConfig does not support file deletion")
369+
file_id = "deleted"
370+
if hasattr(raw_response, "request") and raw_response.request:
371+
url = str(raw_response.request.url)
372+
if "/b/" in url and "/o/" in url:
373+
bucket_part = url.split("/b/")[-1].split("/o/")[0]
374+
encoded_name = url.split("/o/")[-1].split("?")[0]
375+
file_id = f"gs://{bucket_part}/{urllib.parse.unquote(encoded_name)}"
376+
return FileDeleted(id=file_id, deleted=True, object="file")
369377

370378
def transform_list_files_request(
371379
self,
Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
1+
"""
2+
Tests for Fix 1: file_retrieve Literal type was missing 'vertex_ai' and 'gemini',
3+
causing a type mismatch when afile_retrieve delegated to the sync function.
4+
"""
5+
6+
import pytest
7+
from unittest.mock import MagicMock, patch
8+
9+
from litellm.files.main import file_retrieve
10+
11+
12+
class TestFileRetrieveProviderRouting:
13+
"""
14+
Verify that file_retrieve accepts 'vertex_ai' and 'gemini' providers and
15+
routes them through ProviderConfigManager / base_llm_http_handler.
16+
"""
17+
18+
def _make_mock_file_object(self):
19+
mock = MagicMock()
20+
mock.model_dump.return_value = {
21+
"id": "gs://my-bucket/file.jsonl",
22+
"object": "file",
23+
"bytes": 1024,
24+
"created_at": 0,
25+
"filename": "file.jsonl",
26+
"purpose": "batch",
27+
"status": "processed",
28+
}
29+
return mock
30+
31+
def test_should_route_vertex_ai_through_provider_config(self):
32+
"""
33+
Regression: file_retrieve Literal type was missing 'vertex_ai',
34+
so passing custom_llm_provider='vertex_ai' would fail type-checking
35+
and potentially cause a routing failure at runtime.
36+
"""
37+
mock_file = self._make_mock_file_object()
38+
39+
with patch(
40+
"litellm.files.main.base_llm_http_handler.retrieve_file",
41+
return_value=mock_file,
42+
) as mock_retrieve:
43+
result = file_retrieve(
44+
file_id="gs://my-bucket/file.jsonl",
45+
custom_llm_provider="vertex_ai",
46+
)
47+
48+
mock_retrieve.assert_called_once()
49+
assert result is not None
50+
51+
def test_should_route_gemini_through_provider_config(self):
52+
"""
53+
Regression: file_retrieve Literal type was also missing 'gemini'.
54+
"""
55+
mock_file = self._make_mock_file_object()
56+
57+
with patch(
58+
"litellm.files.main.base_llm_http_handler.retrieve_file",
59+
return_value=mock_file,
60+
) as mock_retrieve:
61+
result = file_retrieve(
62+
file_id="some-gemini-file-id",
63+
custom_llm_provider="gemini",
64+
)
65+
66+
mock_retrieve.assert_called_once()
67+
assert result is not None
68+
69+
def test_should_pass_file_id_to_handler_for_vertex_ai(self):
70+
"""Verify the file_id is forwarded correctly to the underlying handler."""
71+
mock_file = self._make_mock_file_object()
72+
expected_file_id = "gs://my-bucket/path/to/file.jsonl"
73+
74+
with patch(
75+
"litellm.files.main.base_llm_http_handler.retrieve_file",
76+
return_value=mock_file,
77+
) as mock_retrieve:
78+
file_retrieve(
79+
file_id=expected_file_id,
80+
custom_llm_provider="vertex_ai",
81+
)
82+
83+
call_kwargs = mock_retrieve.call_args.kwargs
84+
assert call_kwargs.get("file_id") == expected_file_id
85+
86+
def test_should_not_raise_bad_request_for_vertex_ai(self):
87+
"""
88+
Before the fix, vertex_ai fell through to the else-branch which raised
89+
BadRequestError. Verify it no longer does.
90+
"""
91+
import litellm
92+
93+
mock_file = self._make_mock_file_object()
94+
95+
with patch(
96+
"litellm.files.main.base_llm_http_handler.retrieve_file",
97+
return_value=mock_file,
98+
):
99+
try:
100+
file_retrieve(
101+
file_id="gs://my-bucket/file.jsonl",
102+
custom_llm_provider="vertex_ai",
103+
)
104+
except litellm.exceptions.BadRequestError as e:
105+
pytest.fail(
106+
f"file_retrieve raised BadRequestError for vertex_ai: {e}"
107+
)
108+
109+
def test_should_not_raise_bad_request_for_gemini(self):
110+
"""Same as above but for 'gemini'."""
111+
import litellm
112+
113+
mock_file = self._make_mock_file_object()
114+
115+
with patch(
116+
"litellm.files.main.base_llm_http_handler.retrieve_file",
117+
return_value=mock_file,
118+
):
119+
try:
120+
file_retrieve(
121+
file_id="some-file-id",
122+
custom_llm_provider="gemini",
123+
)
124+
except litellm.exceptions.BadRequestError as e:
125+
pytest.fail(
126+
f"file_retrieve raised BadRequestError for gemini: {e}"
127+
)

0 commit comments

Comments (0)