
Commit 86403dd

Author: yuan.wang (committed)

fix make test

1 parent 42f3403 · commit 86403dd

File tree

7 files changed (+62, -38 lines)


src/memos/llms/ollama.py

Lines changed: 3 additions & 2 deletions
@@ -77,8 +77,9 @@ def generate(self, messages: MessageList, **kwargs) -> Any:
             tools=kwargs.get("tools"),
         )
         logger.info(f"Raw response from Ollama: {response.model_dump_json()}")
-        if response.message.tool_calls:
-            return self.tool_call_parser(response.message.tool_calls)
+        tool_calls = getattr(response.message, "tool_calls", None)
+        if isinstance(tool_calls, list) and len(tool_calls) > 0:
+            return self.tool_call_parser(tool_calls)
 
         str_thinking = (
             f"<think>{response.message.thinking}</think>"

src/memos/llms/openai.py

Lines changed: 8 additions & 8 deletions
@@ -41,18 +41,18 @@ def generate(self, messages: MessageList, **kwargs) -> str:
             tools=kwargs.get("tools", NOT_GIVEN),
         )
         logger.info(f"Response from OpenAI: {response.model_dump_json()}")
-        if response.choices[0].message.tool_calls:
-            return self.tool_call_parser(response.choices[0].message.tool_calls)
-        reasoning_content = (
-            f"<think>{response.choices[0].message.reasoning_content}</think>"
-            if hasattr(response.choices[0].message, "reasoning_content")
-            else ""
-        )
+        tool_calls = getattr(response.choices[0].message, "tool_calls", None)
+        if isinstance(tool_calls, list) and len(tool_calls) > 0:
+            return self.tool_call_parser(tool_calls)
         response_content = response.choices[0].message.content
+        reasoning_content = getattr(response.choices[0].message, "reasoning_content", None)
+        if isinstance(reasoning_content, str) and reasoning_content:
+            reasoning_content = f"<think>{reasoning_content}</think>"
         if self.config.remove_think_prefix:
             return remove_thinking_tags(response_content)
-        else:
+        if reasoning_content:
             return reasoning_content + response_content
+        return response_content
 
     @timed(log=True, log_prefix="OpenAI LLM")
     def generate_stream(self, messages: MessageList, **kwargs) -> Generator[str, None, None]:
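
For reference, a minimal sketch of the non-streaming flow after this change: tool calls take priority, reasoning_content is wrapped in <think> tags only when it is a non-empty string, and remove_think_prefix strips tags from the content. build_output and this regex-based remove_thinking_tags are hypothetical stand-ins, not the repository's helpers.

import re
from types import SimpleNamespace


def remove_thinking_tags(text: str) -> str:
    # Hypothetical stand-in for the repository's remove_thinking_tags helper.
    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)


def build_output(message, remove_think_prefix: bool) -> str:
    response_content = message.content
    reasoning = getattr(message, "reasoning_content", None)
    if isinstance(reasoning, str) and reasoning:
        reasoning = f"<think>{reasoning}</think>"
    if remove_think_prefix:
        return remove_thinking_tags(response_content)
    if reasoning:
        return reasoning + response_content
    return response_content


msg = SimpleNamespace(content="Hello!", reasoning_content="Thinking...")
print(build_output(msg, remove_think_prefix=False))  # <think>Thinking...</think>Hello!
print(build_output(msg, remove_think_prefix=True))   # Hello!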

tests/configs/test_llm.py

Lines changed: 12 additions & 1 deletion
@@ -19,7 +19,14 @@ def test_base_llm_config():
         required_fields=[
             "model_name_or_path",
         ],
-        optional_fields=["temperature", "max_tokens", "top_p", "top_k", "remove_think_prefix"],
+        optional_fields=[
+            "temperature",
+            "max_tokens",
+            "top_p",
+            "top_k",
+            "remove_think_prefix",
+            "default_headers",
+        ],
     )
 
     check_config_instantiation_valid(
@@ -48,6 +55,7 @@ def test_openai_llm_config():
             "api_base",
             "remove_think_prefix",
             "extra_body",
+            "default_headers",
         ],
     )
 
@@ -79,6 +87,8 @@ def test_ollama_llm_config():
             "top_k",
             "remove_think_prefix",
             "api_base",
+            "default_headers",
+            "enable_thinking",
         ],
     )
 
@@ -111,6 +121,7 @@ def test_hf_llm_config():
             "do_sample",
             "remove_think_prefix",
             "add_generation_prompt",
+            "default_headers",
         ],
     )
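
For illustration only, a hypothetical config payload exercising the newly listed optional fields; the field names come from the tests above, the values are made up.

# Hypothetical payload; only the field names are taken from the
# required_fields / optional_fields lists above, the values are illustrative.
base_llm_config = {
    "model_name_or_path": "qwen3:0.6b",
    "temperature": 0.7,
    "max_tokens": 1024,
    "remove_think_prefix": False,
    # Newly allowed optional field; presumably extra headers for the backend client.
    "default_headers": {"X-Request-Source": "memos-tests"},
}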

tests/llms/test_deepseek.py

Lines changed: 6 additions & 4 deletions
@@ -12,12 +12,14 @@ def test_deepseek_llm_generate_with_and_without_think_prefix(self):
         """Test DeepSeekLLM generate method with and without <think> tag removal."""
 
         # Simulated full content including <think> tag
-        full_content = "<think>Thinking in progress...</think>Hello from DeepSeek!"
+        full_content = "Hello from DeepSeek!"
+        reasoning_content = "Thinking in progress..."
 
         # Mock response object
         mock_response = MagicMock()
         mock_response.model_dump_json.return_value = '{"mock": "true"}'
         mock_response.choices[0].message.content = full_content
+        mock_response.choices[0].message.reasoning_content = reasoning_content
 
         # Config with think prefix preserved
         config_with_think = DeepSeekLLMConfig.model_validate(
@@ -35,15 +37,15 @@ def test_deepseek_llm_generate_with_and_without_think_prefix(self):
         llm_with_think.client.chat.completions.create = MagicMock(return_value=mock_response)
 
         output_with_think = llm_with_think.generate([{"role": "user", "content": "Hello"}])
-        self.assertEqual(output_with_think, full_content)
+        self.assertEqual(output_with_think, f"<think>{reasoning_content}</think>{full_content}")
 
         # Config with think tag removed
         config_without_think = config_with_think.model_copy(update={"remove_think_prefix": True})
         llm_without_think = DeepSeekLLM(config_without_think)
         llm_without_think.client.chat.completions.create = MagicMock(return_value=mock_response)
 
         output_without_think = llm_without_think.generate([{"role": "user", "content": "Hello"}])
-        self.assertEqual(output_without_think, "Hello from DeepSeek!")
+        self.assertEqual(output_without_think, full_content)
 
     def test_deepseek_llm_generate_stream(self):
         """Test DeepSeekLLM generate_stream with reasoning_content and content chunks."""
@@ -84,5 +86,5 @@ def make_chunk(delta_dict):
 
         self.assertIn("Analyzing...", full_output)
         self.assertIn("Hello, DeepSeek!", full_output)
-        self.assertTrue(full_output.startswith("Analyzing..."))
+        self.assertTrue(full_output.startswith("<think>"))
         self.assertTrue(full_output.endswith("DeepSeek!"))
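
A small aside on why the mock setup above works: attribute and index access on a MagicMock auto-create child mocks, and indexing always returns the same cached child, so a deep assignment like choices[0].message.reasoning_content is read back unchanged by generate. A tiny demonstration:

from unittest.mock import MagicMock

mock_response = MagicMock()
# Each attribute / [] access creates (and caches) a child mock, so the
# assignments below configure exactly the path the code under test reads.
mock_response.choices[0].message.content = "Hello from DeepSeek!"
mock_response.choices[0].message.reasoning_content = "Thinking in progress..."

assert mock_response.choices[0].message.content == "Hello from DeepSeek!"
assert mock_response.choices[0].message.reasoning_content == "Thinking in progress..."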

tests/llms/test_ollama.py

Lines changed: 27 additions & 20 deletions
@@ -1,5 +1,6 @@
 import unittest
 
+from types import SimpleNamespace
 from unittest.mock import MagicMock
 
 from memos.configs.llm import LLMConfigFactory, OllamaLLMConfig
@@ -12,15 +13,15 @@ def test_llm_factory_with_mocked_ollama_backend(self):
         """Test LLMFactory with mocked Ollama backend."""
         mock_chat = MagicMock()
         mock_response = MagicMock()
-        mock_response.model_dump_json.return_value = '{"model":"qwen3:0.6b","created_at":"2025-05-13T18:07:04.508998134Z","done":true,"done_reason":"stop","total_duration":348924420,"load_duration":14321072,"prompt_eval_count":16,"prompt_eval_duration":16770943,"eval_count":21,"eval_duration":317395459,"message":{"role":"assistant","content":"Hello! How are you? I\'m here to help and smile!","images":null,"tool_calls":null}}'
-        mock_response.__getitem__.side_effect = lambda key: {
-            "message": {
-                "role": "assistant",
-                "content": "Hello! How are you? I'm here to help and smile!",
-                "images": None,
-                "tool_calls": None,
-            }
-        }[key]
+        mock_response.model_dump_json.return_value = '{"model":"qwen3:0.6b","created_at":"2025-05-13T18:07:04.508998134Z","done":true,"done_reason":"stop","total_duration":348924420,"load_duration":14321072,"prompt_eval_count":16,"prompt_eval_duration":16770943,"eval_count":21,"eval_duration":317395459,"message":{"role":"assistant","content":"Hello! How are you? I\'m here to help and smile!", "thinking":"Analyzing your request...","images":null,"tool_calls":null}}'
+
+        mock_response.message = SimpleNamespace(
+            role="assistant",
+            content="Hello! How are you? I'm here to help and smile!",
+            thinking="Analyzing your request...",
+            images=None,
+            tool_calls=None,
+        )
         mock_chat.return_value = mock_response
 
         config = LLMConfigFactory.model_validate(
@@ -32,6 +33,7 @@ def test_llm_factory_with_mocked_ollama_backend(self):
                     "max_tokens": 1024,
                     "top_p": 0.9,
                     "top_k": 50,
+                    "enable_thinking": True,
                 },
             }
         )
@@ -42,21 +44,23 @@ def test_llm_factory_with_mocked_ollama_backend(self):
         ]
         response = llm.generate(messages)
 
-        self.assertEqual(response, "Hello! How are you? I'm here to help and smile!")
+        self.assertEqual(
+            response,
+            "<think>Analyzing your request...</think>Hello! How are you? I'm here to help and smile!",
+        )
 
     def test_ollama_llm_with_mocked_backend(self):
         """Test OllamaLLM with mocked backend."""
         mock_chat = MagicMock()
         mock_response = MagicMock()
-        mock_response.model_dump_json.return_value = '{"model":"qwen3:0.6b","created_at":"2025-05-13T18:07:04.508998134Z","done":true,"done_reason":"stop","total_duration":348924420,"load_duration":14321072,"prompt_eval_count":16,"prompt_eval_duration":16770943,"eval_count":21,"eval_duration":317395459,"message":{"role":"assistant","content":"Hello! How are you? I\'m here to help and smile!","images":null,"tool_calls":null}}'
-        mock_response.__getitem__.side_effect = lambda key: {
-            "message": {
-                "role": "assistant",
-                "content": "Hello! How are you? I'm here to help and smile!",
-                "images": None,
-                "tool_calls": None,
-            }
-        }[key]
+        mock_response.model_dump_json.return_value = '{"model":"qwen3:0.6b","created_at":"2025-05-13T18:07:04.508998134Z","done":true,"done_reason":"stop","total_duration":348924420,"load_duration":14321072,"prompt_eval_count":16,"prompt_eval_duration":16770943,"eval_count":21,"eval_duration":317395459,"message":{"role":"assistant","content":"Hello! How are you? I\'m here to help and smile!","thinking":"Analyzing your request...","images":null,"tool_calls":null}}'
+        mock_response.message = SimpleNamespace(
+            role="assistant",
+            content="Hello! How are you? I'm here to help and smile!",
+            thinking="Analyzing your request...",
+            images=None,
+            tool_calls=None,
+        )
         mock_chat.return_value = mock_response
 
         config = OllamaLLMConfig(
@@ -73,4 +77,7 @@ def test_ollama_llm_with_mocked_backend(self):
         ]
         response = ollama.generate(messages)
 
-        self.assertEqual(response, "Hello! How are you? I'm here to help and smile!")
+        self.assertEqual(
+            response,
+            "<think>Analyzing your request...</think>Hello! How are you? I'm here to help and smile!",
+        )
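
One reading of the mock change above (my interpretation, not stated in the commit): response.message is now a SimpleNamespace rather than a dict-backed MagicMock, so the getattr/isinstance checks in the updated generate see real attribute values. A quick comparison:

from types import SimpleNamespace
from unittest.mock import MagicMock

# On a MagicMock, undeclared attributes spring into existence as child mocks,
# so getattr never falls back to its default and isinstance checks fail.
mocked = MagicMock()
print(getattr(mocked, "tool_calls", None) is None)         # False: a MagicMock, not None
print(isinstance(getattr(mocked, "thinking", None), str))   # False

# A SimpleNamespace holds only the attributes it was given, with real values.
message = SimpleNamespace(
    content="Hello!",
    thinking="Analyzing your request...",
    tool_calls=None,
)
print(getattr(message, "tool_calls", None) is None)         # True
print(isinstance(getattr(message, "thinking", None), str))  # True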

tests/llms/test_openai.py

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ def test_llm_factory_with_mocked_openai_backend(self):
         mock_response = MagicMock()
         mock_response.model_dump_json.return_value = '{"id":"chatcmpl-BWoqIrvOeWdnFVZQUFzCcdVEpJ166","choices":[{"finish_reason":"stop","index":0,"message":{"content":"Hello! I\'m an AI language model created by OpenAI. I\'m here to help answer questions, provide information, and assist with a wide range of topics. How can I assist you today?","role":"assistant"}}],"created":1747161634,"model":"gpt-4o-2024-08-06","object":"chat.completion"}'
         mock_response.choices[0].message.content = "Hello! I'm an AI language model created by OpenAI. I'm here to help answer questions, provide information, and assist with a wide range of topics. How can I assist you today?"  # fmt: skip
+        mock_response.choices[0].message.reasoning_content = None
         mock_chat_completions_create.return_value = mock_response
 
         config = LLMConfigFactory.model_validate(

tests/llms/test_qwen.py

Lines changed: 5 additions & 3 deletions
@@ -12,12 +12,14 @@ def test_qwen_llm_generate_with_and_without_think_prefix(self):
         """Test QwenLLM non-streaming response generation with and without <think> prefix removal."""
 
         # Simulated full response content with <think> tag
-        full_content = "<think>Analyzing your request...</think>Hello, world!"
+        full_content = "Hello from DeepSeek!"
+        reasoning_content = "Thinking in progress..."
 
         # Prepare the mock response object with expected structure
         mock_response = MagicMock()
         mock_response.model_dump_json.return_value = '{"mocked": "true"}'
         mock_response.choices[0].message.content = full_content
+        mock_response.choices[0].message.reasoning_content = reasoning_content
 
         # Create config with remove_think_prefix = False
         config_with_think = QwenLLMConfig.model_validate(
@@ -37,7 +39,7 @@ def test_qwen_llm_generate_with_and_without_think_prefix(self):
         llm_with_think.client.chat.completions.create = MagicMock(return_value=mock_response)
 
         response_with_think = llm_with_think.generate([{"role": "user", "content": "Hi"}])
-        self.assertEqual(response_with_think, full_content)
+        self.assertEqual(response_with_think, f"<think>{reasoning_content}</think>{full_content}")
 
         # Create config with remove_think_prefix = True
         config_without_think = config_with_think.model_copy(update={"remove_think_prefix": True})
@@ -47,7 +49,7 @@ def test_qwen_llm_generate_with_and_without_think_prefix(self):
         llm_without_think.client.chat.completions.create = MagicMock(return_value=mock_response)
 
         response_without_think = llm_without_think.generate([{"role": "user", "content": "Hi"}])
-        self.assertEqual(response_without_think, "Hello, world!")
+        self.assertEqual(response_without_think, full_content)
         self.assertNotIn("<think>", response_without_think)
 
     def test_qwen_llm_generate_stream(self):
