We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 10371ad · commit ec2d68e (Copy full SHA for ec2d68e)
tracecat/agent/llm_proxy/provider_openai.py
@@ -146,6 +146,11 @@ def _normalize_openai_unsupported_params(
146
def _normalize_custom_model_provider_payload(payload: dict[str, Any]) -> None:
147
if "max_completion_tokens" in payload and "max_tokens" not in payload:
148
payload["max_tokens"] = payload.pop("max_completion_tokens")
149
+ # parallel_tool_calls is not part of the OpenAI spec and causes LiteLLM to
150
+ # generate a malformed tool_choice when routing to Bedrock (missing the
151
+ # required `type` field). Drop it for custom providers since we cannot
152
+ # control how the downstream proxy translates it.
153
+ payload.pop("parallel_tool_calls", None)
154
155
156
def _normalize_openai_payload(
0 commit comments