3 files changed: +16, -5 lines changed
docs/my-website/docs/providers/azure

@@ -931,7 +931,7 @@ curl http://localhost:4000/v1/batches \
 ```python
 retrieved_batch = client.batches.retrieve(
     batch.id,
-    extra_body={"custom_llm_provider": "azure"}
+    extra_query={"custom_llm_provider": "azure"}
 )
 ```
@@ -978,7 +978,7 @@ curl http://localhost:4000/v1/batches/batch_abc123/cancel \
 <TabItem value="sdk" label="OpenAI Python SDK">

 ```python
-client.batches.list(extra_body={"custom_llm_provider": "azure"})
+client.batches.list(extra_query={"custom_llm_provider": "azure"})
 ```

 </TabItem>
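For context on the docs change: in the OpenAI Python SDK, `extra_body` merges extra fields into the JSON request body, while `extra_query` appends them to the URL query string, which is what the updated proxy endpoints below read. A minimal sketch of the updated calls against a locally running proxy (the base URL, API key, and batch id below are placeholders):

```python
from openai import OpenAI

# Placeholder base URL and key for a locally running LiteLLM proxy.
client = OpenAI(base_url="http://localhost:4000", api_key="sk-1234")

# extra_query adds ?custom_llm_provider=azure to the request URL instead of
# putting it in the JSON body, so the batch retrieve/list endpoints can read it.
retrieved_batch = client.batches.retrieve(
    "batch_abc123",
    extra_query={"custom_llm_provider": "azure"},
)
batches = client.batches.list(extra_query={"custom_llm_provider": "azure"})
```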
Second changed file (proxy batch endpoints):

@@ -17,7 +17,7 @@
 from litellm.proxy.common_request_processing import ProxyBaseLLMRequestProcessing
 from litellm.proxy.common_utils.http_parsing_utils import _read_request_body
 from litellm.proxy.common_utils.openai_endpoint_utils import (
-    get_custom_llm_provider_from_request_body,
+    get_custom_llm_provider_from_request_query,
 )
 from litellm.proxy.openai_files_endpoints.common_utils import (
     _is_base64_encoded_unified_file_id,
@@ -282,7 +282,7 @@ async def retrieve_batch(
     else:
         custom_llm_provider = (
             provider
-            or await get_custom_llm_provider_from_request_body(request=request)
+            or get_custom_llm_provider_from_request_query(request=request)
            or "openai"
         )
         response = await litellm.aretrieve_batch(
@@ -392,7 +392,7 @@ async def list_batches(
     else:
         custom_llm_provider = (
             provider
-            or await get_custom_llm_provider_from_request_body(request=request)
+            or get_custom_llm_provider_from_request_query(request=request)
            or "openai"
         )
         response = await litellm.alist_batches(
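A note on the two hunks above: reading the JSON body requires an `await`, while `request.query_params` is a plain property on a Starlette/FastAPI `Request`, so the `await` disappears along with the switch from body to query lookup. A minimal sketch of the resulting resolution order, with a hypothetical helper name standing in for the inline expression used in the diff:

```python
from typing import Optional

from fastapi import Request


def resolve_custom_llm_provider(provider: Optional[str], request: Request) -> str:
    """Hypothetical helper (not from the PR) mirroring the fallback chain:
    explicit provider -> ?custom_llm_provider=... query param -> "openai"."""
    return (
        provider
        or request.query_params.get("custom_llm_provider")
        or "openai"
    )
```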
Third changed file (the openai_endpoint_utils module imported above):

@@ -38,3 +38,14 @@ async def get_custom_llm_provider_from_request_body(request: Request) -> Optiona
     if "custom_llm_provider" in request_body:
         return request_body["custom_llm_provider"]
     return None
+
+
+def get_custom_llm_provider_from_request_query(request: Request) -> Optional[str]:
+    """
+    Get the `custom_llm_provider` from the request query parameters
+
+    Safely reads the request query parameters
+    """
+    if "custom_llm_provider" in request.query_params:
+        return request.query_params["custom_llm_provider"]
+    return None
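A quick, hypothetical check of the new helper (not part of the PR), building a bare Starlette request whose ASGI scope carries the query string; the import path follows the `openai_endpoint_utils` import shown in the endpoints diff:

```python
from starlette.requests import Request

from litellm.proxy.common_utils.openai_endpoint_utils import (
    get_custom_llm_provider_from_request_query,
)

# Minimal ASGI scope; query_params only needs "type" and the raw query string.
request = Request({"type": "http", "query_string": b"custom_llm_provider=azure", "headers": []})
assert get_custom_llm_provider_from_request_query(request=request) == "azure"

# With no query parameter present, the helper falls through to None.
empty = Request({"type": "http", "query_string": b"", "headers": []})
assert get_custom_llm_provider_from_request_query(request=empty) is None
```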