from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
import autogen
import os
from dotenv import load_dotenv
import asyncio
from typing import List, Dict, Any

# Load environment variables
load_dotenv()

app = FastAPI(title="AI Debate Club", version="1.0.0")

# Serve React static files from "./static"
app.mount("/static", StaticFiles(directory="static"), name="static")

# NOTE: the catch-all SPA route is registered after the API routes (see the
# bottom of this file) so that it does not shadow /debate and /health.

# Debate request and response models
class DebateRequest(BaseModel):
    topic: str

class DebateResponse(BaseModel):
    messages: List[Dict[str, Any]]
    topic: str

# Mistral API configuration
def get_llm_config():
    api_key = os.getenv("MISTRAL_API_KEY")
    if not api_key:
        raise HTTPException(status_code=500, detail="MISTRAL_API_KEY not configured. Please set MISTRAL_API_KEY in your .env file")

    return {
        "config_list": [
            {
                "model": "mistral-small",
                "api_type": "mistral",
                "base_url": "https://api.mistral.ai/v1",
                "api_key": api_key,
            }
        ]
    }

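# Example .env contents this module expects (an assumed layout; only
# MISTRAL_API_KEY is actually read above):
#
#   MISTRAL_API_KEY=your-mistral-api-key
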
def create_debate_agents():
    """Create the debate agents with specific roles"""
    llm_config = get_llm_config()

    # Pro agent (argues in favor)
    pro_agent = autogen.ConversableAgent(
        name="ProAgent",
        system_message="You are a skilled debater arguing in FAVOR of the given topic. Present compelling arguments, evidence, and reasoning to support the topic. Be persuasive, logical, and engaging. Keep responses concise but impactful. Always stay in character as the 'pro' side of the debate.",
        llm_config=llm_config,
    )

    # Con agent (argues against)
    con_agent = autogen.ConversableAgent(
        name="ConAgent",
        system_message="You are a skilled debater arguing AGAINST the given topic. Present compelling arguments, evidence, and reasoning to oppose the topic. Be persuasive, logical, and engaging. Keep responses concise but impactful. Always stay in character as the 'con' side of the debate.",
        llm_config=llm_config,
    )

    return pro_agent, con_agent

def init_autogen_chat(manager, agent, debate_prompt):
    """Run the blocking AutoGen chat; invoked from the async endpoint via asyncio.to_thread."""
    manager.initiate_chat(agent, message=debate_prompt)

@app.post("/debate", response_model=DebateResponse)
async def start_debate(request: DebateRequest):
    """Start a debate between Pro and Con agents on the given topic"""
    try:
        # Create agents
        pro_agent, con_agent = create_debate_agents()

        # Set up the group chat between the two agents
        groupchat = autogen.GroupChat(
            agents=[pro_agent, con_agent],
            messages=[],
            max_round=4,  # Cap the debate at 4 rounds
            speaker_selection_method="round_robin",  # Alternate turns between Pro and Con
            allow_repeat_speaker=False,  # Prevent the same agent from speaking twice in a row
        )

        manager = autogen.GroupChatManager(
            groupchat=groupchat,
            llm_config=get_llm_config(),
        )

        # Prompt that frames the debate for both agents
        debate_prompt = f"Debate topic: {request.topic}. ProAgent argues FOR, ConAgent argues AGAINST. Keep it respectful and engaging."

        # Run the debate
        try:
            # Try AutoGen first
            await asyncio.to_thread(
                init_autogen_chat,
                manager,
                pro_agent,
                debate_prompt
            )
        except Exception as chat_error:
            # Return a mock response if AutoGen fails
            print(f"AutoGen chat failed: {chat_error}")

            mock_messages = [
                {"role": "ProAgent", "content": f"I will argue in favor of: {request.topic}. This is an important topic that deserves careful consideration."},
                {"role": "ConAgent", "content": f"I will argue against: {request.topic}. There are valid concerns that need to be addressed."}
            ]
            return DebateResponse(topic=request.topic, messages=mock_messages)

        # Extract messages from the chat result
        messages = []
        for msg in groupchat.messages:
            if msg.get("content") and msg.get("name"):
                # Keep only the speaker name and message content for the response
                clean_msg = {
                    "role": msg["name"],
                    "content": msg["content"]
                }
                messages.append(clean_msg)

        return DebateResponse(
            topic=request.topic,
            messages=messages
        )

    except HTTPException:
        # Let explicit HTTP errors (e.g., missing API key) propagate unchanged
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Debate failed: {str(e)}")

@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return {"status": "healthy", "message": "AI Debate Club is running"}

# Catch-all route for the React SPA, registered after the API routes so it
# does not shadow /debate and /health.
@app.get("/{full_path:path}")
async def serve_react_app(full_path: str, request: Request):
    # Serve built static files and frontend src files (for SPA routing and development),
    # trying files from /static first
    static_file_path = os.path.join("static", full_path)
    frontend_src_path = os.path.join("frontend", "src", full_path)

    if os.path.isfile(static_file_path):
        return FileResponse(static_file_path)
    elif os.path.isfile(frontend_src_path):
        return FileResponse(frontend_src_path)
    else:
        # Fallback to React index.html for SPA routing
        index_path = os.path.join("static", "index.html")
        if os.path.exists(index_path):
            return FileResponse(index_path)
        raise HTTPException(status_code=404, detail="Not Found")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
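
# Quick manual test (a sketch, not part of the app; assumes the server is running
# locally on port 8000 with MISTRAL_API_KEY set, and the topic is only an example):
#
#   import requests
#   print(requests.get("http://localhost:8000/health").json())
#   resp = requests.post("http://localhost:8000/debate",
#                        json={"topic": "Remote work is better than office work"})
#   for msg in resp.json()["messages"]:
#       print(f"{msg['role']}: {msg['content']}")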