@@ -121,3 +121,187 @@ def workflow_config():
121121 "enable_xml_prompts" : False ,
122122 "skip_expensive_stages" : True ,
123123 }


# =============================================================================
# Integration Test Fixtures - Mock LLM Responses
# =============================================================================


@pytest.fixture
def mock_llm_responses():
    """
    Configurable mock LLM responses for integration testing.

    Usage:
        def test_something(mock_llm_responses):
            mock_llm_responses["security_scan"] = "No vulnerabilities found"
            # Run workflow...
    """
    return {
        # Code review responses
        "classify": "feature: Adding new functionality",
        "security_scan": '{"vulnerabilities": [], "score": 100}',
        "code_analysis": '{"quality_score": 85, "issues": []}',
        "architect_review": "Architecture looks good. No concerns.",
        # Security audit responses
        "identify_vulnerabilities": '{"vulnerabilities": [], "risk_level": "low"}',
        "analyze_risk": '{"overall_risk": "low", "recommendations": []}',
        "generate_report": "# Security Audit Report\n\nNo issues found.",
        # Document generation responses
        "analyze_structure": '{"modules": [], "functions": []}',
        "generate_documentation": "# API Documentation\n\nGenerated docs here.",
        # Health check responses
        "diagnose": '{"health_score": 95, "issues": []}',
        "fix": '{"fixes_applied": 0, "fixes": []}',
        # Generic fallback
        "default": "Task completed successfully.",
    }
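

# A sketch of the override pattern from the docstring above, shown inline for
# reference. Pytest does not collect tests from conftest.py, so a real version
# of this would live in a test module; `mock_anthropic_client` is defined below.
def test_override_security_scan(mock_llm_responses, mock_anthropic_client):
    # Mutating the dict before the call works because the mock client looks
    # the response up at call time, not at fixture creation.
    mock_llm_responses["security_scan"] = "No vulnerabilities found"
    response = mock_anthropic_client.messages.create(
        messages=[{"role": "user", "content": "Run a security_scan"}],
    )
    assert response.content[0].text == "No vulnerabilities found"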


@pytest.fixture
def mock_anthropic_client(mock_llm_responses):
    """
    Mock Anthropic client for integration tests.

    Returns deterministic responses based on the prompt content.
    """
    from unittest.mock import MagicMock

    def create_mock_response(prompt: str) -> MagicMock:
        """Create a mock response based on prompt content."""
        response = MagicMock()

        # Determine the response by keyword: the first fixture key found in
        # the prompt wins, with "default" as the fallback.
        content = mock_llm_responses["default"]
        for key, value in mock_llm_responses.items():
            if key.lower() in prompt.lower():
                content = value
                break

        response.content = [MagicMock(text=content)]
        response.usage = MagicMock(input_tokens=100, output_tokens=50)
        return response

    def extract_prompt(kwargs: dict) -> str:
        """Pull the last message's content out of the create() kwargs."""
        messages = kwargs.get("messages") or [{}]
        last = messages[-1]
        return last.get("content", "") if isinstance(last, dict) else str(messages)

    client = MagicMock()
    client.messages.create = MagicMock(
        side_effect=lambda **kwargs: create_mock_response(extract_prompt(kwargs))
    )

    return client
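

# Sketch of the keyword routing and the fallback (illustrative only; again, a
# real test would live in a test module rather than conftest.py). The mock
# ignores model/max_tokens, so only `messages` is passed here.
def test_keyword_routing_and_fallback(mock_anthropic_client):
    hit = mock_anthropic_client.messages.create(
        messages=[{"role": "user", "content": "Please diagnose this repository"}],
    )
    assert hit.content[0].text == '{"health_score": 95, "issues": []}'

    # No fixture key appears in this prompt, so the generic fallback is used.
    miss = mock_anthropic_client.messages.create(
        messages=[{"role": "user", "content": "An unrelated request"}],
    )
    assert miss.content[0].text == "Task completed successfully."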


@pytest.fixture
def patch_llm_client(mock_anthropic_client):
    """
    Patch the Anthropic client globally for integration tests.

    Usage:
        async def test_workflow(patch_llm_client):
            # LLM calls will return mock responses
            result = await workflow.execute(...)
    """
    from unittest.mock import patch

    with patch("anthropic.Anthropic", return_value=mock_anthropic_client):
        yield mock_anthropic_client
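

# A minimal check that the patch is in effect. This sketch assumes the code
# under test constructs its client via `anthropic.Anthropic()` at call time;
# a module that binds the class at import time would need a more targeted
# patch target (e.g. a hypothetical "yourpackage.module.Anthropic").
def test_patch_routes_to_mock(patch_llm_client):
    import anthropic

    client = anthropic.Anthropic()  # returns the MagicMock, not a real client
    response = client.messages.create(
        messages=[{"role": "user", "content": "classify this change"}],
    )
    assert response.content[0].text == "feature: Adding new functionality"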


@pytest.fixture
def sample_python_file(temp_dir):
    """Create a sample Python file for testing file-based workflows."""
    code = '''"""Sample module for testing."""

def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b

def divide(a: int, b: int) -> float:
    """Divide a by b."""
    if b == 0:
        raise ValueError("Cannot divide by zero")
    return a / b

class Calculator:
    """Simple calculator class."""

    def __init__(self):
        self.history = []

    def calculate(self, op: str, a: int, b: int) -> float:
        if op == "add":
            result = add(a, b)
        elif op == "divide":
            result = divide(a, b)
        else:
            raise ValueError(f"Unknown operation: {op}")
        self.history.append((op, a, b, result))
        return result
'''
    file_path = temp_dir / "sample.py"
    file_path.write_text(code)
    return file_path
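

# Sketch of a file-based test pairing the sample file with the mock client.
# The prompt wording is arbitrary; it only needs to contain the key
# "code_analysis" for the keyword routing to pick the canned response.
def test_code_analysis_of_sample_file(sample_python_file, mock_anthropic_client):
    prompt = f"Run code_analysis on:\n{sample_python_file.read_text()}"
    response = mock_anthropic_client.messages.create(
        messages=[{"role": "user", "content": prompt}],
    )
    assert response.content[0].text == '{"quality_score": 85, "issues": []}'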


@pytest.fixture
def sample_project_dir(temp_dir):
    """Create a sample project directory structure for testing."""
    # Create src directory
    src_dir = temp_dir / "src"
    src_dir.mkdir()

    # Create main module
    (src_dir / "__init__.py").write_text("")
    (src_dir / "main.py").write_text(
        '''
"""Main application module."""

def main():
    print("Hello, World!")

if __name__ == "__main__":
    main()
'''
    )

    # Create utils module
    (src_dir / "utils.py").write_text(
        '''
"""Utility functions."""

def format_name(first: str, last: str) -> str:
    """Format a full name."""
    return f"{first} {last}"
'''
    )

    # Create tests directory
    tests_dir = temp_dir / "tests"
    tests_dir.mkdir()
    (tests_dir / "__init__.py").write_text("")
    (tests_dir / "test_main.py").write_text(
        '''
"""Tests for main module."""

def test_placeholder():
    assert True
'''
    )

    # Create pyproject.toml
    (temp_dir / "pyproject.toml").write_text(
        """
[project]
name = "sample-project"
version = "0.1.0"

[tool.pytest.ini_options]
testpaths = ["tests"]
"""
    )

    return temp_dir
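

# Sketch verifying the generated layout; every path below matches what the
# fixture writes, so nothing here is assumed beyond the fixture itself.
def test_project_layout(sample_project_dir):
    assert (sample_project_dir / "src" / "main.py").exists()
    assert (sample_project_dir / "tests" / "test_main.py").exists()
    assert "sample-project" in (sample_project_dir / "pyproject.toml").read_text()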