Commit da66e29

Add OpenAI o1 model support with tests (#26)
1 parent 49f2ae2 commit da66e29

3 files changed (+28 / -9 lines)

requirements.txt

Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ html5lib>=1.1
 duckduckgo-search>=4.1.1
 
 # LLM integration
-openai>=1.12.0
+openai>=1.59.8 # o1 support
 anthropic>=0.42.0
 python-dotenv>=1.0.0
 

tests/test_llm_api.py

Lines changed: 13 additions & 0 deletions

@@ -279,6 +279,19 @@ def test_query_with_custom_model(self, mock_create_client):
             temperature=0.7
         )
 
+    @unittest.skipIf(skip_llm_tests, skip_message)
+    @patch('tools.llm_api.create_llm_client')
+    def test_query_o1_model(self, mock_create_client):
+        mock_create_client.return_value = self.mock_openai_client
+        response = query_llm("Test prompt", model="o1")
+        self.assertEqual(response, "Test OpenAI response")
+        self.mock_openai_client.chat.completions.create.assert_called_once_with(
+            model="o1",
+            messages=[{"role": "user", "content": "Test prompt"}],
+            response_format={"type": "text"},
+            reasoning_effort="low"
+        )
+
     @unittest.skipIf(skip_llm_tests, skip_message)
     @patch('tools.llm_api.create_llm_client')
     def test_query_with_existing_client(self, mock_create_client):
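
For orientation, the self.mock_openai_client fixture asserted against above is defined outside this diff. A minimal sketch of what the test class's setUp presumably does (the class name and exact mock wiring are assumptions inferred from the assertions, not shown in the commit):

import unittest
from unittest.mock import MagicMock

class TestQueryLLM(unittest.TestCase):  # class name assumed; not part of this hunk
    def setUp(self):
        # Fake OpenAI-style client: chat.completions.create(...) returns an object whose
        # choices[0].message.content is the canned string the tests assert on.
        self.mock_openai_client = MagicMock()
        mock_choice = MagicMock()
        mock_choice.message.content = "Test OpenAI response"
        self.mock_openai_client.chat.completions.create.return_value.choices = [mock_choice]

With create_llm_client patched to return this mock, the new test exercises only the o1 branch of query_llm and verifies the exact request parameters without making a network call.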

tools/llm_api.py

Lines changed: 14 additions & 8 deletions

@@ -110,13 +110,19 @@ def query_llm(prompt, client=None, model=None, provider="openai"):
         model = "Qwen/Qwen2.5-32B-Instruct-AWQ"
 
     if provider in ["openai", "local", "deepseek", "azure"]:
-        response = client.chat.completions.create(
-            model=model,
-            messages=[
-                {"role": "user", "content": prompt}
-            ],
-            temperature=0.7,
-        )
+        kwargs = {
+            "model": model,
+            "messages": [{"role": "user", "content": prompt}],
+            "temperature": 0.7,
+        }
+
+        # Add o1-specific parameters
+        if model == "o1":
+            kwargs["response_format"] = {"type": "text"}
+            kwargs["reasoning_effort"] = "low"
+            del kwargs["temperature"]
+
+        response = client.chat.completions.create(**kwargs)
         return response.choices[0].message.content
     elif provider == "anthropic":
         response = client.messages.create(
@@ -162,4 +168,4 @@ def main():
         print("Failed to get response from LLM")
 
 if __name__ == "__main__":
-    main()
+    main()
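
With this branch in place, callers need no o1-specific handling; the request parameters are adjusted inside query_llm. A minimal usage sketch (the non-o1 model name below is an assumption, and client creation follows the module's existing create_llm_client path):

from tools.llm_api import query_llm

# Regular chat model: the request is sent with temperature=0.7 as before
answer = query_llm("Summarize the scraped page", provider="openai", model="gpt-4o")  # model name assumed

# o1: temperature is dropped, response_format={"type": "text"} and reasoning_effort="low" are added
reasoned = query_llm("Outline a test plan for the scraper", provider="openai", model="o1")
print(answer, reasoned, sep="\n")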
