Skip to content

Commit 0132c30

Browse files
authored
Support None reasoning for Anthropic (#164)
Support reasoning for Anthropic
1 parent 0a186a1 commit 0132c30

File tree

1 file changed

+8
-4
lines changed

1 file changed

+8
-4
lines changed

line/llm_agent/provider.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -73,17 +73,21 @@ def __init__(
7373
self._supports_reasoning_effort = "reasoning_effort" in supported
7474

7575
# Determine the right default when no explicit reasoning_effort is configured.
76-
# "none" is ideal (disables reasoning entirely) but not all providers support it.
77-
# Probe litellm's own parameter mapping to find out: if mapping "none" through the
78-
# provider's config raises, fall back to "low" (the lowest universally-supported level).
76+
# Goal: use the absolute lowest reasoning level each provider supports.
77+
# Almost all providers support "low", so start there and then check if we can do better.
7978
self._default_reasoning_effort = "low"
8079
if self._supports_reasoning_effort:
8180
try:
8281
_, provider, _, _ = get_llm_provider(model=model)
8382
get_optional_params(model=model, custom_llm_provider=provider, reasoning_effort="none")
8483
self._default_reasoning_effort = "none"
8584
except Exception:
86-
pass
85+
# HACK: Anthropic's LiteLLM mapping annoyingly doesn't support `"none"` (the string) as a
86+
# value for reasoning_effort, so None (omitting the param) is the correct way
87+
# to skip the thinking block entirely; "low" would still enable a 1024-token
88+
# thinking budget.
89+
if "anthropic" in model.lower():
90+
self._default_reasoning_effort = None
8791

8892
def chat(
8993
self,

0 commit comments

Comments
 (0)