Skip to content

Commit cd4b05f

Browse files
Merge pull request #14997 from eycjur/fix_azure_batch
[Fix] Use the `extra_query` parameter for GET requests in Azure Batch
2 parents 1257033 + 900241e commit cd4b05f

File tree

3 files changed

+16
-5
lines changed

3 files changed

+16
-5
lines changed

docs/my-website/docs/providers/azure/azure.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -931,7 +931,7 @@ curl http://localhost:4000/v1/batches \
931931
```python
932932
retrieved_batch = client.batches.retrieve(
933933
batch.id,
934-
extra_body={"custom_llm_provider": "azure"}
934+
extra_query={"custom_llm_provider": "azure"}
935935
)
936936
```
937937

@@ -978,7 +978,7 @@ curl http://localhost:4000/v1/batches/batch_abc123/cancel \
978978
<TabItem value="sdk" label="OpenAI Python SDK">
979979

980980
```python
981-
client.batches.list(extra_body={"custom_llm_provider": "azure"})
981+
client.batches.list(extra_query={"custom_llm_provider": "azure"})
982982
```
983983

984984
</TabItem>

litellm/proxy/batches_endpoints/endpoints.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing
1818
from litellm.proxy.common_utils.http_parsing_utils import _read_request_body
1919
from litellm.proxy.common_utils.openai_endpoint_utils import (
20-
get_custom_llm_provider_from_request_body,
20+
get_custom_llm_provider_from_request_query,
2121
)
2222
from litellm.proxy.openai_files_endpoints.common_utils import (
2323
_is_base64_encoded_unified_file_id,
@@ -282,7 +282,7 @@ async def retrieve_batch(
282282
else:
283283
custom_llm_provider = (
284284
provider
285-
or await get_custom_llm_provider_from_request_body(request=request)
285+
or get_custom_llm_provider_from_request_query(request=request)
286286
or "openai"
287287
)
288288
response = await litellm.aretrieve_batch(
@@ -392,7 +392,7 @@ async def list_batches(
392392
else:
393393
custom_llm_provider = (
394394
provider
395-
or await get_custom_llm_provider_from_request_body(request=request)
395+
or get_custom_llm_provider_from_request_query(request=request)
396396
or "openai"
397397
)
398398
response = await litellm.alist_batches(

litellm/proxy/common_utils/openai_endpoint_utils.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,3 +38,14 @@ async def get_custom_llm_provider_from_request_body(request: Request) -> Optiona
3838
if "custom_llm_provider" in request_body:
3939
return request_body["custom_llm_provider"]
4040
return None
41+
42+
43+
def get_custom_llm_provider_from_request_query(request: Request) -> Optional[str]:
    """
    Return the ``custom_llm_provider`` value from the request's query string.

    Safely reads the request query parameters: yields the parameter's value
    when present and ``None`` otherwise (no exception is raised for a
    missing key).
    """
    # Mapping.get collapses the membership-check + subscript into one lookup.
    return request.query_params.get("custom_llm_provider")

0 commit comments

Comments
 (0)