
Commit c913ff7

Fix tests
1 parent 9fa62d0 commit c913ff7

4 files changed, +140 −88 lines changed

src/mcp_agent/workflows/llm/augmented_llm_azure.py
Lines changed: 1 addition & 1 deletion

@@ -365,7 +365,7 @@ async def generate_structured(
 
     @classmethod
     def convert_message_to_message_param(
-        cls, message: ResponseMessage, **kwargs
+        cls, message: ResponseMessage
     ) -> AssistantMessage:
         """Convert a response object to an input parameter object to allow LLM calls to be chained."""
         assistant_message = AssistantMessage(
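The only change here is dropping the unused **kwargs from the converter's signature. For context, the docstring says this converter exists so an assistant reply can be fed back into the next request; a minimal chaining sketch under that reading (the llm object and its generate/history parameters are illustrative assumptions, not part of this diff):

    # Hypothetical sketch; only convert_message_to_message_param and the
    # ResponseMessage -> AssistantMessage conversion appear in this diff.
    async def chained_turns(llm, first_prompt: str, follow_up: str):
        response = await llm.generate(first_prompt)  # assumed to return a ResponseMessage
        # Convert the reply into an input-parameter object (new signature:
        # no **kwargs).
        assistant_param = llm.convert_message_to_message_param(response)
        # Feed the converted message back in as history for the next call.
        return await llm.generate(follow_up, history=[assistant_param])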

tests/workflows/llm/test_augmented_llm_anthropic.py
Lines changed: 83 additions & 24 deletions

@@ -210,31 +210,56 @@ async def test_generate_str(self, mock_llm, default_usage):
     @pytest.mark.asyncio
     async def test_generate_structured(self, mock_llm, default_usage):
         """
-        Tests structured output generation using Instructor.
+        Tests structured output generation using native Anthropic API.
         """
+        from unittest.mock import patch
+        import json
 
         # Define a simple response model
         class TestResponseModel(BaseModel):
             name: str
             value: int
 
-        # Mock the generate_str method to return a string that will be parsed by the instructor mock
-        mock_llm.generate_str = AsyncMock(return_value="name: Test, value: 42")
-
-        # Patch executor.execute to return the expected TestResponseModel instance
-        mock_llm.executor.execute = AsyncMock(
-            return_value=TestResponseModel(name="Test", value=42)
+        # Create a mock Message with tool_use block containing the structured data
+        tool_use_block = ToolUseBlock(
+            type="tool_use",
+            id="tool_123",
+            name="return_structured_output",
+            input={"name": "Test", "value": 42},
         )
 
-        # Call the method
-        result = await AnthropicAugmentedLLM.generate_structured(
-            mock_llm, "Test query", TestResponseModel
+        mock_message = Message(
+            type="message",
+            id="msg_123",
+            role="assistant",
+            content=[tool_use_block],
+            model="claude-3-7-sonnet-latest",
+            stop_reason="tool_use",
+            usage=default_usage,
         )
 
-        # Assertions
-        assert isinstance(result, TestResponseModel)
-        assert result.name == "Test"
-        assert result.value == 42
+        # Mock the AsyncAnthropic client and streaming
+        with patch(
+            "mcp_agent.workflows.llm.augmented_llm_anthropic.AsyncAnthropic"
+        ) as MockAsyncAnthropic:
+            mock_client = MockAsyncAnthropic.return_value
+            mock_stream = AsyncMock()
+            mock_stream.get_final_message = AsyncMock(return_value=mock_message)
+            mock_stream.__aenter__ = AsyncMock(return_value=mock_stream)
+            mock_stream.__aexit__ = AsyncMock(return_value=None)
+            mock_client.messages.stream = MagicMock(return_value=mock_stream)
+            mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+            mock_client.__aexit__ = AsyncMock(return_value=None)
+
+            # Call the method
+            result = await AnthropicAugmentedLLM.generate_structured(
+                mock_llm, "Test query", TestResponseModel
+            )
+
+            # Assertions
+            assert isinstance(result, TestResponseModel)
+            assert result.name == "Test"
+            assert result.value == 42
 
     # Test 4: With History
     @pytest.mark.asyncio

@@ -779,6 +804,8 @@ async def test_generate_structured_with_mixed_message_types(self, mock_llm):
         """
         Tests generate_structured() method with mixed message types.
         """
+        from unittest.mock import patch
+        import json
 
         # Define a simple response model
         class TestResponseModel(BaseModel):

@@ -795,19 +822,51 @@ class TestResponseModel(BaseModel):
             ),
         ]
 
-        mock_llm.generate_str = AsyncMock(return_value="name: MixedTypes, value: 123")
-        # Patch executor.execute to return the expected TestResponseModel instance
-        mock_llm.executor.execute = AsyncMock(
-            return_value=TestResponseModel(name="MixedTypes", value=123)
+        # Create a mock Message with tool_use block containing the structured data
+        tool_use_block = ToolUseBlock(
+            type="tool_use",
+            id="tool_456",
+            name="return_structured_output",
+            input={"name": "MixedTypes", "value": 123},
         )
 
-        # Call generate_structured with mixed message types
-        result = await mock_llm.generate_structured(messages, TestResponseModel)
+        mock_message = Message(
+            type="message",
+            id="msg_456",
+            role="assistant",
+            content=[tool_use_block],
+            model="claude-3-7-sonnet-latest",
+            stop_reason="tool_use",
+            usage=Usage(
+                cache_creation_input_tokens=0,
+                cache_read_input_tokens=0,
+                input_tokens=100,
+                output_tokens=50,
+                server_tool_use=None,
+                service_tier=None,
+            ),
+        )
 
-        # Assertions
-        assert isinstance(result, TestResponseModel)
-        assert result.name == "MixedTypes"
-        assert result.value == 123
+        # Mock the AsyncAnthropic client and streaming
+        with patch(
+            "mcp_agent.workflows.llm.augmented_llm_anthropic.AsyncAnthropic"
+        ) as MockAsyncAnthropic:
+            mock_client = MockAsyncAnthropic.return_value
+            mock_stream = AsyncMock()
+            mock_stream.get_final_message = AsyncMock(return_value=mock_message)
+            mock_stream.__aenter__ = AsyncMock(return_value=mock_stream)
+            mock_stream.__aexit__ = AsyncMock(return_value=None)
+            mock_client.messages.stream = MagicMock(return_value=mock_stream)
+            mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+            mock_client.__aexit__ = AsyncMock(return_value=None)
+
+            # Call generate_structured with mixed message types
+            result = await mock_llm.generate_structured(messages, TestResponseModel)
+
+            # Assertions
+            assert isinstance(result, TestResponseModel)
+            assert result.name == "MixedTypes"
+            assert result.value == 123
 
     # Test 25: System Prompt Not None in API Call
     @pytest.mark.asyncio
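The mocks above mirror the async streaming surface of the Anthropic SDK: the client is entered as an async context manager, messages.stream(...) returns a stream manager (a plain method, hence MagicMock rather than AsyncMock) that is itself entered with async with, and get_final_message() returns the complete Message. A rough sketch of the production-side call these mocks stand in for (the model id, max_tokens, and prompt are placeholders, not taken from this diff):

    from anthropic import AsyncAnthropic

    async def final_message(prompt: str):
        # Each attribute faked above (__aenter__/__aexit__ on the client and
        # the stream, messages.stream, get_final_message) is exercised here.
        async with AsyncAnthropic() as client:
            async with client.messages.stream(
                model="claude-3-7-sonnet-latest",  # placeholder model id
                max_tokens=1024,
                messages=[{"role": "user", "content": prompt}],
            ) as stream:
                return await stream.get_final_message()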

tests/workflows/llm/test_augmented_llm_google.py
Lines changed: 26 additions & 38 deletions

@@ -183,29 +183,23 @@ class TestResponseModel(BaseModel):
             name: str
             value: int
 
-        # Mock the generate_str method
-        mock_llm.generate_str = AsyncMock(return_value="name: Test, value: 42")
-
-        # Mock instructor from_genai
-        with patch("instructor.from_genai") as mock_instructor:
-            mock_client = MagicMock()
-            mock_client.chat.completions.create.return_value = TestResponseModel(
-                name="Test", value=42
-            )
-            mock_instructor.return_value = mock_client
+        # Create a proper GenerateContentResponse with JSON content
+        import json
+        json_content = json.dumps({"name": "Test", "value": 42})
+        response = self.create_text_response(json_content)
 
-            # Patch executor.execute to be an async mock returning the expected value
-            mock_llm.executor.execute = AsyncMock(
-                return_value=TestResponseModel(name="Test", value=42)
-            )
+        # Patch executor.execute to return the GenerateContentResponse with JSON
+        mock_llm.executor.execute = AsyncMock(
+            return_value=response
+        )
 
-            # Call the method
-            result = await mock_llm.generate_structured("Test query", TestResponseModel)
+        # Call the method
+        result = await mock_llm.generate_structured("Test query", TestResponseModel)
 
-            # Assertions
-            assert isinstance(result, TestResponseModel)
-            assert result.name == "Test"
-            assert result.value == 42
+        # Assertions
+        assert isinstance(result, TestResponseModel)
+        assert result.name == "Test"
+        assert result.value == 42
 
     # Test 4: With History
     @pytest.mark.asyncio

@@ -773,26 +767,20 @@ class TestResponseModel(BaseModel):
             ),
         ]
 
-        # Mock the generate_str method
-        mock_llm.generate_str = AsyncMock(return_value="name: MixedTypes, value: 123")
+        # Create a proper GenerateContentResponse with JSON content
+        import json
+        json_content = json.dumps({"name": "MixedTypes", "value": 123})
+        response = self.create_text_response(json_content)
 
-        # Patch instructor.from_genai to return the expected model
-        with patch("instructor.from_genai") as mock_instructor:
-            mock_client = MagicMock()
-            mock_client.chat.completions.create.return_value = TestResponseModel(
-                name="MixedTypes", value=123
-            )
-            mock_instructor.return_value = mock_client
-
-            # Patch executor.execute to be an async mock returning the expected value
-            mock_llm.executor.execute = AsyncMock(
-                return_value=TestResponseModel(name="MixedTypes", value=123)
-            )
+        # Patch executor.execute to return the GenerateContentResponse with JSON
+        mock_llm.executor.execute = AsyncMock(
+            return_value=response
+        )
 
-            result = await mock_llm.generate_structured(messages, TestResponseModel)
-            assert isinstance(result, TestResponseModel)
-            assert result.name == "MixedTypes"
-            assert result.value == 123
+        result = await mock_llm.generate_structured(messages, TestResponseModel)
+        assert isinstance(result, TestResponseModel)
+        assert result.name == "MixedTypes"
+        assert result.value == 123
 
     @pytest.mark.asyncio
     async def test_parallel_tool_calls(self, mock_llm: GoogleAugmentedLLM):
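Both Google tests now hand generate_structured a plain text response whose body is a JSON object, so the assertion is effectively that the implementation parses that JSON into the requested Pydantic model. A minimal sketch of that parsing step under the JSON-in-text assumption (this is not the library's actual code):

    from pydantic import BaseModel

    class TestResponseModel(BaseModel):
        name: str
        value: int

    def parse_structured(response_text: str) -> TestResponseModel:
        # Validate the JSON payload against the requested model, which is
        # what the tests above expect generate_structured to do.
        return TestResponseModel.model_validate_json(response_text)

    assert parse_structured('{"name": "Test", "value": 42}').value == 42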

tests/workflows/llm/test_augmented_llm_openai.py
Lines changed: 30 additions & 25 deletions

@@ -174,38 +174,33 @@ async def test_generate_str(self, mock_llm, default_usage):
     @pytest.mark.asyncio
     async def test_generate_structured(self, mock_llm, default_usage):
         """
-        Tests structured output generation using Instructor.
+        Tests structured output generation using native OpenAI API.
         """
+        import json
 
         # Define a simple response model
         class TestResponseModel(BaseModel):
             name: str
             value: int
 
-        # Set up mocks for the two-stage process
-        # First for the text generation
-        mock_llm.generate_str = AsyncMock(return_value="name: Test, value: 42")
-
-        # Then for Instructor's structured data extraction
-        with patch("instructor.from_openai") as mock_instructor:
-            mock_client = MagicMock()
-            mock_client.chat.completions.create.return_value = TestResponseModel(
-                name="Test", value=42
-            )
-            mock_instructor.return_value = mock_client
+        # Create a proper ChatCompletion response with JSON content
+        json_content = json.dumps({"name": "Test", "value": 42})
+        completion_response = self.create_text_response(
+            json_content, usage=default_usage
+        )
 
-            # Patch executor.execute to be an async mock returning the expected value
-            mock_llm.executor.execute = AsyncMock(
-                return_value=TestResponseModel(name="Test", value=42)
-            )
+        # Patch executor.execute to return the ChatCompletion with JSON
+        mock_llm.executor.execute = AsyncMock(
+            return_value=completion_response
+        )
 
-            # Call the method
-            result = await mock_llm.generate_structured("Test query", TestResponseModel)
+        # Call the method
+        result = await mock_llm.generate_structured("Test query", TestResponseModel)
 
-            # Assertions
-            assert isinstance(result, TestResponseModel)
-            assert result.name == "Test"
-            assert result.value == 42
+        # Assertions
+        assert isinstance(result, TestResponseModel)
+        assert result.name == "Test"
+        assert result.value == 42
 
     # Test 4: With History
     @pytest.mark.asyncio

@@ -612,6 +607,7 @@ async def test_generate_structured_with_mixed_message_types(self, mock_llm):
         """
         Tests generate_structured() method with mixed message types.
         """
+        import json
 
         # Define a simple response model
         class TestResponseModel(BaseModel):

@@ -628,10 +624,19 @@ class TestResponseModel(BaseModel):
             ),
         ]
 
-        mock_llm.generate_str = AsyncMock(return_value="name: MixedTypes, value: 123")
-        # Patch executor.execute to return the expected TestResponseModel instance
+        # Create a proper ChatCompletion response with JSON content
+        json_content = json.dumps({"name": "MixedTypes", "value": 123})
+        completion_response = self.create_text_response(
+            json_content, usage=CompletionUsage(
+                completion_tokens=100,
+                prompt_tokens=150,
+                total_tokens=250
+            )
+        )
+
+        # Patch executor.execute to return the ChatCompletion with JSON
         mock_llm.executor.execute = AsyncMock(
-            return_value=TestResponseModel(name="MixedTypes", value=123)
+            return_value=completion_response
         )
 
         # Call generate_structured with mixed message types
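The create_text_response helper is defined elsewhere in this test module; a rough sketch of the ChatCompletion shape it presumably builds, using the public openai types (the helper's real body is not part of this diff, and the id/model values are placeholders):

    from openai.types.chat import ChatCompletion, ChatCompletionMessage
    from openai.types.chat.chat_completion import Choice
    from openai.types.completion_usage import CompletionUsage

    def make_chat_completion(content: str, usage: CompletionUsage) -> ChatCompletion:
        # Assemble a minimal but schema-valid ChatCompletion whose single
        # choice carries the JSON string in its message content.
        return ChatCompletion(
            id="chatcmpl-test",  # placeholder id
            object="chat.completion",
            created=0,
            model="gpt-4o",  # placeholder model id
            choices=[
                Choice(
                    index=0,
                    finish_reason="stop",
                    message=ChatCompletionMessage(role="assistant", content=content),
                )
            ],
            usage=usage,
        )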
