Commit e624781

Harmanpreet Kaur committed
2 parents bceb635 + a94e78b

10 files changed: +1301 -793 lines changed

.github/workflows/test.yml

Lines changed: 1 addition & 3 deletions
@@ -37,7 +37,6 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         pip install -r src/backend/requirements.txt
-        pip install pytest-cov
 
     - name: Check if test files exist
       id: check_tests
@@ -49,7 +48,6 @@ jobs:
             echo "Test files found, running tests."
             echo "skip_tests=false" >> $GITHUB_ENV
           fi
-
     - name: Run tests with coverage
       if: env.skip_tests == 'false'
       run: |
@@ -58,4 +56,4 @@ jobs:
     - name: Skip coverage report if no tests
       if: env.skip_tests == 'true'
       run: |
-        echo "Skipping coverage report because no tests were found."
+        echo "Skipping coverage report because no tests were found."

src/backend/requirements.txt

Lines changed: 5 additions & 0 deletions
@@ -14,3 +14,8 @@ opentelemetry-instrumentation-fastapi
 opentelemetry-instrumentation-openai
 opentelemetry-exporter-otlp-proto-http
 opentelemetry-exporter-otlp-proto-grpc
+
+# Testing tools
+pytest>=8.2,<9  # Compatible version for pytest-asyncio
+pytest-asyncio==0.24.0
+pytest-cov==5.0.0
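
With these pins, the workflow's single `pip install -r` step provides the whole test toolchain: `pytest-asyncio` drives the `async def` tests added in this commit, and `pytest-cov` produces the coverage report. As a minimal, self-contained illustration of what the pinned combination enables (the file and test names here are hypothetical, not part of the commit):

# test_async_smoke.py -- hypothetical example, not part of this commit.
# Requires the pins above: pytest>=8.2, pytest-asyncio==0.24.0, pytest-cov==5.0.0.
import asyncio

import pytest


@pytest.mark.asyncio
async def test_event_loop_runs():
    """pytest-asyncio runs this coroutine on an event loop it manages."""
    await asyncio.sleep(0)  # yield control once to exercise the loop
    assert True

Running `pytest --cov` would collect coverage for such a test via pytest-cov; the exact flags used by the workflow's "Run tests with coverage" step are not shown in the hunk above.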
Lines changed: 97 additions & 0 deletions (new file)
@@ -0,0 +1,97 @@
"""
Test cases for HumanAgent class in the backend agents module.
"""

# Standard library imports
import os
import sys
from unittest.mock import AsyncMock, MagicMock, patch
import pytest

# Set environment variables before project imports to avoid runtime errors
os.environ["COSMOSDB_ENDPOINT"] = "https://mock-endpoint"
os.environ["COSMOSDB_KEY"] = "mock-key"
os.environ["COSMOSDB_DATABASE"] = "mock-database"
os.environ["COSMOSDB_CONTAINER"] = "mock-container"
os.environ["APPLICATIONINSIGHTS_INSTRUMENTATION_KEY"] = "mock-instrumentation-key"
os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] = "mock-deployment-name"
os.environ["AZURE_OPENAI_API_VERSION"] = "2023-01-01"
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://mock-openai-endpoint"

# Mock Azure modules that are unavailable in the test environment
sys.modules["azure.monitor.events.extension"] = MagicMock()

# Project-specific imports (after the mocks above are in place)
from autogen_core.base import AgentInstantiationContext, AgentRuntime
from src.backend.agents.human import HumanAgent
from src.backend.models.messages import HumanFeedback, Step, StepStatus, BAgentType


@pytest.fixture
def setup_agent():
    """
    Fixture to set up a HumanAgent and its dependencies.
    """
    memory = AsyncMock()
    user_id = "test_user"
    group_chat_manager_id = "group_chat_manager"

    # Mock runtime and agent ID
    mock_runtime = MagicMock(spec=AgentRuntime)
    mock_agent_id = "test_agent_id"

    # Set up the instantiation context so HumanAgent can be constructed
    with patch.object(AgentInstantiationContext, "current_runtime", return_value=mock_runtime):
        with patch.object(AgentInstantiationContext, "current_agent_id", return_value=mock_agent_id):
            agent = HumanAgent(memory, user_id, group_chat_manager_id)

    session_id = "session123"
    step_id = "step123"
    plan_id = "plan123"

    # Mock HumanFeedback message
    feedback_message = HumanFeedback(
        session_id=session_id,
        step_id=step_id,
        plan_id=plan_id,
        approved=True,
        human_feedback="Great job!",
    )

    # Mock Step with all required fields
    step = Step(
        plan_id=plan_id,
        action="Test Action",
        agent=BAgentType.human_agent,
        status=StepStatus.planned,
        session_id=session_id,
        user_id=user_id,
        human_feedback=None,
    )

    return agent, memory, feedback_message, step, session_id, step_id, plan_id


@patch("src.backend.agents.human.logging.info")
@patch("src.backend.agents.human.track_event")
@pytest.mark.asyncio
async def test_handle_step_feedback_step_not_found(mock_track_event, mock_logging, setup_agent):
    """
    Test scenario where the step is not found in memory.
    """
    agent, memory, feedback_message, _, _, step_id, _ = setup_agent

    # Mock no step found
    memory.get_step.return_value = None

    # Run the method
    await agent.handle_step_feedback(feedback_message, MagicMock())

    # Check that the miss was logged and nothing was updated or tracked
    mock_logging.assert_called_with(f"No step found with id: {step_id}")
    memory.update_step.assert_not_called()
    mock_track_event.assert_not_called()


if __name__ == "__main__":
    pytest.main()
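
The negative assertions above (`update_step` and `track_event` both untouched) imply that the found-step path does update the step and emit a telemetry event. A hypothetical companion test for that path, reusing the same fixture; this sketch is not part of the commit, and the exact update payload depends on `HumanAgent` internals not shown here:

@patch("src.backend.agents.human.logging.info")
@patch("src.backend.agents.human.track_event")
@pytest.mark.asyncio
async def test_handle_step_feedback_step_found(mock_track_event, mock_logging, setup_agent):
    """Hypothetical sketch: when the step exists, it should be updated."""
    agent, memory, feedback_message, step, _, _, _ = setup_agent

    # Return the mocked Step so the agent proceeds past the lookup
    memory.get_step.return_value = step

    await agent.handle_step_feedback(feedback_message, MagicMock())

    # Assumed behavior, inferred from the not-found test's negative assertions
    memory.update_step.assert_called()
    mock_track_event.assert_called()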
Lines changed: 199 additions & 0 deletions (new file)
@@ -0,0 +1,199 @@
import os
import sys
from unittest.mock import AsyncMock, MagicMock, patch
import pytest

# Mock azure.monitor.events.extension globally before project imports
sys.modules["azure.monitor.events.extension"] = MagicMock()

# Mock environment variables
os.environ["COSMOSDB_ENDPOINT"] = "https://mock-endpoint"
os.environ["COSMOSDB_KEY"] = "mock-key"
os.environ["COSMOSDB_DATABASE"] = "mock-database"
os.environ["COSMOSDB_CONTAINER"] = "mock-container"
os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] = "mock-deployment-name"
os.environ["AZURE_OPENAI_API_VERSION"] = "2023-01-01"
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://mock-openai-endpoint"

# Project-specific imports (after the mocks above are in place)
from src.backend.agents.planner import PlannerAgent
from src.backend.models.messages import InputTask, HumanClarification, Plan, PlanStatus
from src.backend.context.cosmos_memory import CosmosBufferedChatCompletionContext


@pytest.fixture
def mock_context():
    """Mock the CosmosBufferedChatCompletionContext."""
    return MagicMock(spec=CosmosBufferedChatCompletionContext)


@pytest.fixture
def mock_model_client():
    """Mock the Azure OpenAI model client."""
    return MagicMock()


@pytest.fixture
def mock_runtime_context():
    """Mock the runtime context for AgentInstantiationContext."""
    with patch(
        "autogen_core.base._agent_instantiation.AgentInstantiationContext.AGENT_INSTANTIATION_CONTEXT_VAR",
        new=MagicMock(),
    ) as mock_context_var:
        yield mock_context_var


@pytest.fixture
def planner_agent(mock_model_client, mock_context, mock_runtime_context):
    """Return an instance of PlannerAgent with mocked dependencies."""
    # Mock the context variable to ensure runtime context is properly simulated
    mock_runtime_context.get.return_value = (MagicMock(), "mock-agent-id")
    return PlannerAgent(
        model_client=mock_model_client,
        session_id="test-session",
        user_id="test-user",
        memory=mock_context,
        available_agents=["HumanAgent", "MarketingAgent", "TechSupportAgent"],
        agent_tools_list=["tool1", "tool2"],
    )


@pytest.mark.asyncio
async def test_handle_plan_clarification(planner_agent, mock_context):
    """Test the handle_plan_clarification method."""
    # Prepare mock clarification and context
    mock_clarification = HumanClarification(
        session_id="test-session",
        plan_id="plan-1",
        human_clarification="Test clarification",
    )

    mock_context.get_plan_by_session = AsyncMock(return_value=Plan(
        id="plan-1",
        session_id="test-session",
        user_id="test-user",
        initial_goal="Test Goal",
        overall_status="in_progress",
        source="PlannerAgent",
        summary="Mock Summary",
        human_clarification_request=None,
    ))
    mock_context.update_plan = AsyncMock()
    mock_context.add_item = AsyncMock()

    # Execute the method
    await planner_agent.handle_plan_clarification(mock_clarification, None)

    # Assertions
    mock_context.get_plan_by_session.assert_called_with(session_id="test-session")
    mock_context.update_plan.assert_called()
    mock_context.add_item.assert_called()


@pytest.mark.asyncio
async def test_generate_instruction_with_special_characters(planner_agent):
    """Test _generate_instruction with special characters in the objective."""
    special_objective = "Solve this task: @$%^&*()"
    instruction = planner_agent._generate_instruction(special_objective)

    # Assertions
    assert "Solve this task: @$%^&*()" in instruction
    assert "HumanAgent" in instruction
    assert "tool1" in instruction


@pytest.mark.asyncio
async def test_handle_plan_clarification_updates_plan_correctly(planner_agent, mock_context):
    """Test handle_plan_clarification ensures correct plan updates."""
    mock_clarification = HumanClarification(
        session_id="test-session",
        plan_id="plan-1",
        human_clarification="Updated clarification text",
    )

    mock_plan = Plan(
        id="plan-1",
        session_id="test-session",
        user_id="test-user",
        initial_goal="Test Goal",
        overall_status="in_progress",
        source="PlannerAgent",
        summary="Mock Summary",
        human_clarification_request="Previous clarification needed",
    )

    # Mock get_plan_by_session and update_plan
    mock_context.get_plan_by_session = AsyncMock(return_value=mock_plan)
    mock_context.update_plan = AsyncMock()

    # Execute the method
    await planner_agent.handle_plan_clarification(mock_clarification, None)

    # Assertions
    assert mock_plan.human_clarification_response == "Updated clarification text"
    mock_context.update_plan.assert_called_with(mock_plan)


@pytest.mark.asyncio
async def test_handle_input_task_with_exception(planner_agent, mock_context):
    """Test handle_input_task gracefully handles exceptions."""
    # Mock InputTask
    input_task = InputTask(description="Test task causing exception", session_id="test-session")

    # Mock _create_structured_plan to raise an exception
    planner_agent._create_structured_plan = AsyncMock(side_effect=Exception("Mocked exception"))

    # Execute the method
    with pytest.raises(Exception, match="Mocked exception"):
        await planner_agent.handle_input_task(input_task, None)

    # Assertions
    planner_agent._create_structured_plan.assert_called()
    mock_context.add_item.assert_not_called()
    mock_context.add_plan.assert_not_called()
    mock_context.add_step.assert_not_called()


@pytest.mark.asyncio
async def test_handle_plan_clarification_handles_memory_error(planner_agent, mock_context):
    """Test handle_plan_clarification gracefully handles memory errors."""
    mock_clarification = HumanClarification(
        session_id="test-session",
        plan_id="plan-1",
        human_clarification="Test clarification",
    )

    # Mock get_plan_by_session to raise an exception
    mock_context.get_plan_by_session = AsyncMock(side_effect=Exception("Memory error"))

    # Execute the method
    with pytest.raises(Exception, match="Memory error"):
        await planner_agent.handle_plan_clarification(mock_clarification, None)

    # Ensure no updates or messages are added after failure
    mock_context.update_plan.assert_not_called()
    mock_context.add_item.assert_not_called()


@pytest.mark.asyncio
async def test_generate_instruction_with_missing_objective(planner_agent):
    """Test _generate_instruction with a missing or empty objective."""
    instruction = planner_agent._generate_instruction("")
    assert "Your objective is:" in instruction
    assert "The agents you have access to are:" in instruction
    assert "These agents have access to the following functions:" in instruction


@pytest.mark.asyncio
async def test_create_structured_plan_with_error(planner_agent, mock_context):
    """Test _create_structured_plan when an error occurs during plan creation."""
    planner_agent._model_client.create = AsyncMock(side_effect=Exception("Mocked error"))

    messages = [{"content": "Test message", "source": "PlannerAgent"}]
    plan, steps = await planner_agent._create_structured_plan(messages)

    # Assertions
    assert plan.initial_goal == "Error generating plan"
    assert plan.overall_status == PlanStatus.failed
    assert len(steps) == 0
    mock_context.add_plan.assert_not_called()
    mock_context.add_step.assert_not_called()
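
Taken together, the error-path tests pin down a fallback contract: `handle_input_task` and `handle_plan_clarification` let exceptions propagate without writing to memory, while `_create_structured_plan` swallows model errors and returns a failed plan with no steps. A minimal sketch of that last behavior, with hypothetical internals (the commit does not include the production `PlannerAgent` code):

# Hypothetical sketch of the fallback that test_create_structured_plan_with_error
# asserts; not the actual PlannerAgent implementation.
async def _create_structured_plan(self, messages):
    try:
        response = await self._model_client.create(messages)
        return self._parse_plan_response(response)  # hypothetical helper -> (Plan, [Step])
    except Exception:
        # Sentinel failed plan with an empty step list, matching the assertions
        plan = Plan(
            id="",
            session_id=self._session_id,  # attribute names assumed
            user_id=self._user_id,
            initial_goal="Error generating plan",
            overall_status=PlanStatus.failed,
            source="PlannerAgent",
            summary="",
            human_clarification_request=None,
        )
        return plan, []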
