
Commit 9ecc770

Add mock backend and integration tests
Co-authored-by: me <[email protected]>
1 parent 560e392 commit 9ecc770

6 files changed (+409, -4 lines)


agents/.env.test

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
# Test configuration for backend integration
# Copy this file to .env for testing

# Bot Framework Configuration (dummy values for testing)
MICROSOFT_APP_ID=test-app-id
MICROSOFT_APP_PASSWORD=test-app-password

# Microsoft 365 Configuration (dummy values for testing)
AZURE_TENANT_ID=test-tenant-id
AZURE_CLIENT_ID=test-client-id
AZURE_CLIENT_SECRET=test-client-secret

# Backend API Configuration
BACKEND_URL=http://localhost:50505

# Agent Settings
AGENT_NAME=RAG Assistant
AGENT_DESCRIPTION=AI-powered document search and chat assistant
MAX_CONVERSATION_TURNS=20
ENABLE_TYPING_INDICATOR=true

# Channel Settings
ENABLE_TEAMS=true
ENABLE_COPILOT=true
ENABLE_WEB_CHAT=true

# Server Configuration
HOST=0.0.0.0
PORT=8000
LOG_LEVEL=INFO

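Note: how these values reach the agent's configuration is not shown in this commit (the test script calls AgentConfig.from_environment()). One plausible way to load the file locally, assuming the python-dotenv package is available (an assumption, not a dependency declared here), is the short sketch below.

# Hypothetical loader sketch, not part of this commit: copy the test
# key=value pairs into the process environment before AgentConfig reads them.
# Assumes python-dotenv is installed.
import os
from dotenv import load_dotenv

load_dotenv("agents/.env.test")   # populate os.environ from the test file
print(os.environ["BACKEND_URL"])  # -> http://localhost:50505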
agents/mock_backend.py

Lines changed: 202 additions & 0 deletions
@@ -0,0 +1,202 @@
"""
Mock backend for testing agent integration.
This simulates the RAG backend responses without requiring Azure services.
"""

import asyncio
import json
import logging
from quart import Quart, request, jsonify

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Quart(__name__)

# Mock responses for testing
MOCK_RESPONSES = {
    "hello": {
        "answer": "Hello! I'm your AI assistant. I can help you search through documents and answer questions. How can I assist you today?",
        "data_points": {
            "text": [],
            "citations": []
        },
        "thoughts": [
            {
                "title": "Greeting Response",
                "description": "Generated a friendly greeting response"
            }
        ],
        "token_usage": {
            "prompt_tokens": 10,
            "completion_tokens": 25,
            "total_tokens": 35
        },
        "model_info": {
            "model": "gpt-4",
            "temperature": "0.3"
        }
    },
    "benefits": {
        "answer": "Based on the policy document, the main benefits include comprehensive health coverage, flexible work arrangements, professional development opportunities, and competitive compensation packages. These benefits are designed to support employee well-being and career growth.",
        "data_points": {
            "text": [
                "Policy Document: The company offers comprehensive health coverage including medical, dental, and vision insurance.",
                "Policy Document: Flexible work arrangements are available including remote work options and flexible hours.",
                "Policy Document: Professional development opportunities include training programs, conference attendance, and tuition reimbursement."
            ],
            "citations": [
                "Policy Document (Page 1)",
                "Policy Document (Page 2)",
                "Policy Document (Page 3)"
            ]
        },
        "thoughts": [
            {
                "title": "Document Search",
                "description": "Searched through policy documents for benefit information"
            },
            {
                "title": "Response Generation",
                "description": "Generated comprehensive response about benefits"
            }
        ],
        "token_usage": {
            "prompt_tokens": 150,
            "completion_tokens": 75,
            "total_tokens": 225
        },
        "model_info": {
            "model": "gpt-4",
            "temperature": "0.3"
        }
    }
}

@app.route("/", methods=["GET"])
async def health():
    """Health check endpoint."""
    return jsonify({
        "status": "healthy",
        "service": "Mock RAG Backend",
        "version": "1.0.0"
    })

@app.route("/chat", methods=["POST"])
async def chat():
    """Chat endpoint that simulates RAG responses."""
    try:
        data = await request.get_json()
        messages = data.get("messages", [])

        if not messages:
            return jsonify({"error": "No messages provided"}), 400

        # Get the last user message
        last_message = None
        for msg in reversed(messages):
            if msg.get("role") == "user":
                last_message = msg.get("content", "").lower()
                break

        if not last_message:
            return jsonify({"error": "No user message found"}), 400

        # Determine response based on message content
        if "hello" in last_message or "hi" in last_message:
            response = MOCK_RESPONSES["hello"]
        elif "benefit" in last_message or "policy" in last_message:
            response = MOCK_RESPONSES["benefits"]
        else:
            # Default response
            response = {
                "answer": f"I understand you're asking about '{last_message}'. I'm a mock backend for testing purposes. In a real implementation, I would search through your documents and provide detailed answers with citations.",
                "data_points": {
                    "text": [],
                    "citations": []
                },
                "thoughts": [
                    {
                        "title": "Mock Response",
                        "description": "Generated mock response for testing"
                    }
                ],
                "token_usage": {
                    "prompt_tokens": 50,
                    "completion_tokens": 30,
                    "total_tokens": 80
                },
                "model_info": {
                    "model": "gpt-4",
                    "temperature": "0.3"
                }
            }

        logger.info(f"Mock backend responding to: {last_message}")
        return jsonify(response)

    except Exception as e:
        logger.error(f"Error in mock chat endpoint: {e}")
        return jsonify({"error": "Internal server error"}), 500

@app.route("/chat/stream", methods=["POST"])
async def chat_stream():
    """Streaming chat endpoint that simulates streaming RAG responses."""
    try:
        data = await request.get_json()
        messages = data.get("messages", [])

        if not messages:
            return jsonify({"error": "No messages provided"}), 400

        # Get the last user message
        last_message = None
        for msg in reversed(messages):
            if msg.get("role") == "user":
                last_message = msg.get("content", "").lower()
                break

        if not last_message:
            return jsonify({"error": "No user message found"}), 400

        # Determine response based on message content
        if "hello" in last_message or "hi" in last_message:
            response_text = MOCK_RESPONSES["hello"]["answer"]
        elif "benefit" in last_message or "policy" in last_message:
            response_text = MOCK_RESPONSES["benefits"]["answer"]
        else:
            response_text = f"I understand you're asking about '{last_message}'. I'm a mock backend for testing purposes."

        # Simulate streaming by sending the response in chunks
        async def generate_stream():
            words = response_text.split()
            for i, word in enumerate(words):
                chunk = {
                    "type": "content",
                    "content": word + " " if i < len(words) - 1 else word
                }
                yield json.dumps(chunk) + "\n"
                await asyncio.sleep(0.1)  # Simulate delay

            # Send final chunk
            final_chunk = {
                "type": "done",
                "content": ""
            }
            yield json.dumps(final_chunk) + "\n"

        from quart import Response
        return Response(
            generate_stream(),
            mimetype="application/x-ndjson",
            headers={"Content-Type": "application/x-ndjson"}
        )

    except Exception as e:
        logger.error(f"Error in mock streaming chat endpoint: {e}")
        return jsonify({"error": "Internal server error"}), 500

if __name__ == "__main__":
    logger.info("Starting mock backend on http://localhost:50505")
    app.run(host="0.0.0.0", port=50505, debug=True)

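As a quick smoke test, the mock /chat endpoint can be exercised directly, independently of the agent code. The sketch below is illustrative rather than part of the commit; it assumes mock_backend.py is already running on localhost:50505 and uses aiohttp, which the test script in this commit also relies on.

# Minimal sketch: POST a message to the running mock backend and print the
# fields the agent side consumes (answer, data_points.citations).
import asyncio
import aiohttp

async def smoke_test():
    payload = {"messages": [{"role": "user", "content": "What benefits are in the policy?"}]}
    async with aiohttp.ClientSession() as session:
        async with session.post("http://localhost:50505/chat", json=payload) as resp:
            body = await resp.json()
            print(body["answer"])
            print(body["data_points"]["citations"])  # benefits response includes mock citations

asyncio.run(smoke_test())

The /chat/stream endpoint follows the same request shape but returns newline-delimited JSON: a series of {"type": "content", "content": "<word> "} chunks followed by a final {"type": "done", "content": ""}.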
agents/requirements.txt

Lines changed: 1 addition & 4 deletions
@@ -1,9 +1,6 @@
-# Microsoft 365 Agents SDK and Bot Framework dependencies
-microsoft-365-agents-sdk
+# Bot Framework dependencies
 botbuilder-core>=4.15.0
 botbuilder-schema>=4.15.0
-botbuilder-adapter-teams>=4.15.0
-botbuilder-adapter-azure>=4.15.0
 
 # Web framework
 quart>=0.18.0

agents/test_backend_integration.py

Lines changed: 132 additions & 0 deletions
@@ -0,0 +1,132 @@
"""
Test script for backend integration.
This script tests the agent's ability to call the existing backend API.
"""

import asyncio
import logging
import os
from typing import Dict, Any

from config.agent_config import AgentConfig
from services.rag_service import RAGService, RAGRequest


# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def test_backend_integration():
    """Test the backend integration."""
    try:
        # Load configuration
        config = AgentConfig.from_environment()

        # Set dummy values for testing
        config.app_id = "test-app-id"
        config.app_password = "test-app-password"
        config.tenant_id = "test-tenant-id"
        config.client_id = "test-client-id"
        config.client_secret = "test-client-secret"

        config.validate()

        logger.info(f"Testing backend integration with: {config.backend_url}")

        # Initialize RAG service
        rag_service = RAGService(config)
        await rag_service.initialize()

        # Test 1: Simple chat request
        logger.info("Test 1: Simple chat request")
        request = RAGRequest(
            message="What are the main benefits mentioned in the policy document?",
            conversation_history=[],
            user_id="test-user-123",
            channel_id="test-channel"
        )

        response = await rag_service.process_query(request)
        logger.info(f"Response: {response.answer}")
        logger.info(f"Sources: {len(response.sources)}")
        logger.info(f"Citations: {len(response.citations)}")
        logger.info(f"Thoughts: {len(response.thoughts)}")

        # Test 2: Chat with conversation history
        logger.info("\nTest 2: Chat with conversation history")
        request_with_history = RAGRequest(
            message="Can you provide more details about the first benefit?",
            conversation_history=[
                {"role": "user", "content": "What are the main benefits mentioned in the policy document?"},
                {"role": "assistant", "content": response.answer}
            ],
            user_id="test-user-123",
            channel_id="test-channel"
        )

        response_with_history = await rag_service.process_query(request_with_history)
        logger.info(f"Response with history: {response_with_history.answer}")

        # Test 3: Streaming request
        logger.info("\nTest 3: Streaming request")
        async for chunk in rag_service.process_query_stream(request):
            logger.info(f"Stream chunk: {chunk}")
            if chunk.get("type") == "error":
                break

        logger.info("✅ All tests completed successfully!")

    except Exception as e:
        logger.error(f"❌ Test failed: {e}")
        raise
    finally:
        # Clean up
        if 'rag_service' in locals():
            await rag_service.close()


async def test_backend_health():
    """Test if the backend is healthy."""
    import aiohttp

    try:
        config = AgentConfig.from_environment()

        async with aiohttp.ClientSession() as session:
            # Test health endpoint
            async with session.get(f"{config.backend_url}/") as response:
                if response.status == 200:
                    logger.info("✅ Backend health check passed")
                    return True
                else:
                    logger.error(f"❌ Backend health check failed: {response.status}")
                    return False

    except Exception as e:
        logger.error(f"❌ Backend health check error: {e}")
        return False


async def main():
    """Main test function."""
    logger.info("Starting backend integration tests...")

    # Test 1: Backend health
    logger.info("Step 1: Testing backend health...")
    backend_healthy = await test_backend_health()

    if not backend_healthy:
        logger.error("Backend is not healthy. Please start the backend first.")
        logger.info("To start the backend, run: cd /workspace/app/backend && python main.py")
        return

    # Test 2: Backend integration
    logger.info("Step 2: Testing backend integration...")
    await test_backend_integration()

    logger.info("🎉 All tests completed!")


if __name__ == "__main__":
    asyncio.run(main())

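For a fully local run without Azure services, one workflow (implied by this commit but not spelled out in it) is: copy agents/.env.test to agents/.env, start the mock backend with python mock_backend.py, then run python test_backend_integration.py from the agents directory. Because BACKEND_URL points at http://localhost:50505, test_backend_health() will hit the mock backend's / endpoint rather than the real backend; the script's hint about /workspace/app/backend applies only when testing against the real service.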