Skip to content

Commit c4b0da5

Browse files
authored
Merge pull request #230 from carl-shen/xs_fix_ollama_base_url
fix: ollama provider not respecting OLLAMA_ENDPOINT env var
2 parents 6e90106 + f96d83b commit c4b0da5

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

src/utils/utils.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -106,21 +106,21 @@ def get_llm_model(provider: str, **kwargs):
106106
base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
107107
else:
108108
base_url = kwargs.get("base_url")
109-
109+
110110
if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
111111
return DeepSeekR1ChatOllama(
112112
model=kwargs.get("model_name", "deepseek-r1:14b"),
113113
temperature=kwargs.get("temperature", 0.0),
114114
num_ctx=kwargs.get("num_ctx", 32000),
115-
base_url=kwargs.get("base_url", base_url),
115+
base_url=base_url,
116116
)
117117
else:
118118
return ChatOllama(
119119
model=kwargs.get("model_name", "qwen2.5:7b"),
120120
temperature=kwargs.get("temperature", 0.0),
121121
num_ctx=kwargs.get("num_ctx", 32000),
122122
num_predict=kwargs.get("num_predict", 1024),
123-
base_url=kwargs.get("base_url", base_url),
123+
base_url=base_url,
124124
)
125125
elif provider == "azure_openai":
126126
if not kwargs.get("base_url", ""):

0 commit comments

Comments (0)