Skip to content

Commit 7f23b5f

Browse files
author
Nahuel Defossé
committed
fix: Add provider-specific parameter validation for Claude models
- Claude doesn't support both temperature and top_p simultaneously
- Add _get_llm_params() helper to apply provider-specific constraints
- Remove top_p for Claude models, keeping only temperature
- Apply to both LITELLM and LITELLM_PROXY configurations
- Allow env vars to override constraints if explicitly set
1 parent 975c8b1 commit 7f23b5f

File tree

1 file changed

+48
-4
lines changed

1 file changed

+48
-4
lines changed

src/agentics/core/llm_connections.py

Lines changed: 48 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,32 @@ def _check_env(*var_names: str) -> bool:
5050
return all(os.getenv(var) for var in var_names)
5151

5252

53+
def _get_llm_params(model: str) -> dict:
54+
"""
55+
Get provider-specific LLM parameters based on the model name.
56+
57+
Some providers have constraints (e.g., Claude doesn't allow both temperature and top_p).
58+
59+
Args:
60+
model: The model identifier (e.g., "aws/claude-haiku-4-5", "gpt-4")
61+
62+
Returns:
63+
dict: LLM parameters with provider-specific constraints applied
64+
"""
65+
params: dict = {
66+
"temperature": 0.8,
67+
"top_p": 0.9,
68+
}
69+
70+
# Claude models don't support both temperature and top_p together
71+
if "claude" in model.lower():
72+
# For Claude, only use temperature, remove top_p
73+
params.pop("top_p", None)
74+
params["temperature"] = 0.7
75+
76+
return params
77+
78+
5379
def get_llms_env_vars() -> dict[str, list[str]]:
5480
"""
5581
Get the environment variables used for each LLM.
@@ -168,10 +194,18 @@ def get_available_llms() -> dict[str, LLM | AsyncOpenAI]:
168194
if not model_name.startswith("litellm/"):
169195
model_name = f"litellm/{model_name}"
170196

197+
# Get provider-specific parameters
198+
litellm_params = _get_llm_params(model_name)
199+
200+
# Override with env vars if present
201+
if os.getenv("LITELLM_TEMPERATURE"):
202+
litellm_params["temperature"] = float(os.getenv("LITELLM_TEMPERATURE"))
203+
if os.getenv("LITELLM_TOP_P") and "top_p" in litellm_params:
204+
litellm_params["top_p"] = float(os.getenv("LITELLM_TOP_P"))
205+
171206
litellm_llm = LLM(
172207
model=model_name,
173-
temperature=float(os.getenv("LITELLM_TEMPERATURE", "0.8")),
174-
top_p=float(os.getenv("LITELLM_TOP_P", "0.9")),
208+
**litellm_params,
175209
)
176210
llms["litellm"] = litellm_llm
177211
_llms_env_vars["litellm"] = [
@@ -191,12 +225,22 @@ def get_available_llms() -> dict[str, LLM | AsyncOpenAI]:
191225
"Please set LITELLM_PROXY_MODEL to a value like 'litellm_proxy/<name>'."
192226
)
193227
else:
228+
# Get provider-specific parameters
229+
proxy_params = _get_llm_params(proxy_model)
230+
231+
# Override with env vars if present
232+
if os.getenv("LITELLM_PROXY_TEMPERATURE"):
233+
proxy_params["temperature"] = float(
234+
os.getenv("LITELLM_PROXY_TEMPERATURE")
235+
)
236+
if os.getenv("LITELLM_PROXY_TOP_P") and "top_p" in proxy_params:
237+
proxy_params["top_p"] = float(os.getenv("LITELLM_PROXY_TOP_P"))
238+
194239
litellm_proxy_llm = LLM(
195240
model=proxy_model,
196-
temperature=float(os.getenv("LITELLM_PROXY_TEMPERATURE", "0.8")),
197-
top_p=float(os.getenv("LITELLM_PROXY_TOP_P", "0.9")),
198241
api_key=os.getenv("LITELLM_PROXY_API_KEY"),
199242
base_url=os.getenv("LITELLM_PROXY_URL"),
243+
**proxy_params,
200244
)
201245
llms["litellm_proxy_llm"] = litellm_proxy_llm
202246
llms["litellm_proxy"] = litellm_proxy_llm

0 commit comments

Comments
 (0)