|
import os
import time
import uuid
from datetime import datetime, timezone
from flask import Flask, request, jsonify, Response, stream_with_context
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # Enable CORS for Next.js
# Debug mode is opt-in via the FLASK_DEBUG env var ("true", case-insensitive).
app.config["DEBUG"] = os.getenv("FLASK_DEBUG", "False").lower() == "true"
# Listen port; overridable via the PORT env var (defaults to 8000).
PORT = int(os.getenv("PORT", 8000))

# --- In-Memory Database ---
# Structure mimics: backend/src/database/models.py
# Maps conversation UUID (str) -> dict with keys: uuid, title, created_at,
# updated_at, and messages (a list of message dicts).
# NOTE(review): process-local and unbounded — acceptable only because this is a mock.
conversations_db = {}
| 16 | + |
| 17 | + |
| 18 | +# --- Helpers --- |
def get_utc_now():
    """Return the current UTC time as a timezone-aware ISO-8601 string."""
    now = datetime.now(timezone.utc)
    return now.isoformat()
| 21 | + |
| 22 | + |
def generate_fake_context():
    """Build a canned RAG context payload with OpenROAD-flavoured sources.

    Mimics the shape of the real retriever output: a dict with a single
    "sources" key holding {source, context} entries.
    """
    sources = [
        {
            "source": "https://openroad.readthedocs.io/en/latest/main/README.html",
            "context": "OpenROAD is an automated physical design tool...",
        },
        {
            "source": "manpages/man1/global_placement.md",
            "context": "Global placement (gpl) distributes cells across the core...",
        },
    ]
    return {"sources": sources}
| 37 | + |
| 38 | + |
| 39 | +# Validation helpers and error handling |
| 40 | + |
| 41 | + |
class ValidationError(ValueError):
    """Signals that an incoming request body failed validation (maps to HTTP 400)."""
| 44 | + |
| 45 | + |
def parse_json_body(required_fields=None, allow_empty=False):
    """Parse and validate the current request's JSON body.

    When allow_empty is True, an absent body is treated as an empty dict.
    Raises ValidationError if the body is not valid JSON or if any of
    required_fields is missing from it.
    """
    body_present = request.content_length not in (None, 0)
    if not body_present and allow_empty:
        payload = {}
    else:
        payload = request.get_json(silent=True)
        if payload is None:
            raise ValidationError("Request body must be JSON.")

    if required_fields:
        missing = [name for name in required_fields if name not in payload]
        if missing:
            raise ValidationError(f"Missing required field(s): {', '.join(missing)}.")

    return payload
| 65 | + |
| 66 | + |
@app.errorhandler(ValidationError)
def handle_validation_error(error):
    """Translate a ValidationError into a 400 JSON response."""
    payload = {"error": str(error)}
    return jsonify(payload), 400
| 70 | + |
| 71 | + |
@app.errorhandler(404)
def not_found(error):
    """Return a JSON body for unknown routes/resources."""
    message = "Resource not found"
    return jsonify({"error": message}), 404
| 75 | + |
| 76 | + |
@app.errorhandler(500)
def internal_error(error):
    """Log the unexpected failure (with traceback) and return a generic 500 body."""
    app.logger.exception("Unhandled exception: %s", error)
    payload = {"error": "Internal server error"}
    return jsonify(payload), 500
| 81 | + |
| 82 | + |
| 83 | +# --- Routes matching backend/src/api/routers --- |
| 84 | + |
| 85 | + |
@app.route("/healthcheck", methods=["GET"])
def healthcheck():
    """Liveness probe; always reports the service as up."""
    return jsonify(status="ok")
| 89 | + |
| 90 | + |
| 91 | +# --- Helpers Router (backend/src/api/routers/helpers.py) --- |
@app.route("/helpers/suggestedQuestions", methods=["POST"])
def suggested_questions():
    """Return a fixed list of follow-up questions (mocks the OpenAI/Gemini call)."""
    # Body is optional, but whatever arrives must still be valid JSON.
    parse_json_body(allow_empty=True)
    questions = [
        "How do I install OpenROAD flow scripts?",
        "What is the difference between Global and Detailed Routing?",
        "How to fix LVS errors in Sky130?",
        "Explain the CTS (Clock Tree Synthesis) stage.",
    ]
    return jsonify({"suggested_questions": questions})
| 108 | + |
| 109 | + |
| 110 | +# --- Conversations Router (backend/src/api/routers/conversations.py) --- |
| 111 | + |
| 112 | + |
@app.route("/conversations", methods=["POST"])
def create_conversation():
    """Create a new conversation.

    Accepts an optional JSON body with a "title" field (defaults to
    "New Conversation"). Returns the full conversation record with 201.

    Raises:
        ValidationError: if "title" is present but not a non-empty string.
    """
    data = parse_json_body(allow_empty=True)
    title = data.get("title", "New Conversation")
    if "title" in data and (not isinstance(title, str) or not title.strip()):
        raise ValidationError("title must be a non-empty string when provided.")

    new_id = str(uuid.uuid4())
    # Fix: take a single timestamp so created_at == updated_at at creation
    # (two get_utc_now() calls produced slightly different values).
    now = get_utc_now()
    conversations_db[new_id] = {
        "uuid": new_id,
        "title": title,
        "created_at": now,
        "updated_at": now,
        "messages": [],
    }
    return jsonify(conversations_db[new_id]), 201
| 129 | + |
| 130 | + |
@app.route("/conversations", methods=["GET"])
def list_conversations():
    """List all conversations, most recently updated first, without messages."""
    # ISO-8601 timestamps sort lexicographically, so a string sort is a time sort
    # (mimicking crud.get_all_conversations).
    ordered = sorted(
        conversations_db.values(), key=lambda c: c["updated_at"], reverse=True
    )
    # The list response in backend/src/api/models/response_model.py excludes messages.
    summaries = [
        {
            "uuid": c["uuid"],
            "title": c["title"],
            "created_at": c["created_at"],
            "updated_at": c["updated_at"],
        }
        for c in ordered
    ]
    return jsonify(summaries)
| 149 | + |
| 150 | + |
@app.route("/conversations/<uuid_str>", methods=["GET"])
def get_conversation(uuid_str):
    """Fetch a single conversation (including its messages) by UUID."""
    conversation = conversations_db.get(uuid_str)
    if conversation is None:
        return jsonify({"detail": "Conversation not found"}), 404
    return jsonify(conversation)
| 156 | + |
| 157 | + |
@app.route("/conversations/<uuid_str>", methods=["DELETE"])
def delete_conversation(uuid_str):
    """Remove a conversation; 204 on success, 404 if the UUID is unknown."""
    removed = conversations_db.pop(uuid_str, None)
    if removed is None:
        return jsonify({"detail": "Conversation not found"}), 404
    return Response(status=204)
| 164 | + |
| 165 | + |
@app.route("/conversations/agent-retriever", methods=["POST"])
def agent_retriever():
    """Simulate the non-streaming RAG agent.

    Validates the payload, persists the user and assistant turns in the
    in-memory store, and returns a canned ChatResponse payload.
    """
    body = parse_json_body(required_fields=["query"])
    query = body["query"]
    if not isinstance(query, str) or not query.strip():
        raise ValidationError("query must be a non-empty string.")

    conversation_id = body.get("conversation_uuid")
    if conversation_id and not isinstance(conversation_id, str):
        raise ValidationError("conversation_uuid must be a string.")

    # Lazily create a conversation when none (or an unknown one) was supplied,
    # titling it with the first 100 characters of the query.
    if not conversation_id or conversation_id not in conversations_db:
        conversation_id = str(uuid.uuid4())
        conversations_db[conversation_id] = {
            "uuid": conversation_id,
            "title": query[:100],
            "created_at": get_utc_now(),
            "updated_at": get_utc_now(),
            "messages": [],
        }

    conversation = conversations_db[conversation_id]

    # Record the incoming user turn.
    conversation["messages"].append(
        {
            "uuid": str(uuid.uuid4()),
            "conversation_uuid": conversation_id,
            "role": "user",
            "content": query,
            "created_at": get_utc_now(),
        }
    )

    time.sleep(1)  # pretend the LLM takes a moment

    answer = f"This is a **mock backend** response to: '{query}'.\n\nI am simulating the `RetrieverGraph`. Here is some information about OpenROAD:\n\n- It is an open-source flow.\n- It uses yosys, openSTA, etc."
    context = generate_fake_context()
    tools = ["retrieve_general"]

    # Record the assistant turn alongside its sources and tools.
    conversation["messages"].append(
        {
            "uuid": str(uuid.uuid4()),
            "conversation_uuid": conversation_id,
            "role": "assistant",
            "content": answer,
            "context_sources": context,
            "tools": tools,
            "created_at": get_utc_now(),
        }
    )
    conversation["updated_at"] = get_utc_now()

    # Mirror the ChatResponse model: flatten sources to {source, context} pairs.
    source_payload = [
        {"source": entry["source"], "context": entry["context"]}
        for entry in context["sources"]
    ]
    return jsonify(
        {
            "response": answer,
            "context_sources": source_payload,
            "tools": tools,
        }
    )
| 237 | + |
| 238 | + |
@app.route("/conversations/agent-retriever/stream", methods=["POST"])
def agent_retriever_stream():
    """
    Simulates the Streaming Endpoint.
    Matches backend logic: Yields "Sources: ..." then text chunks.

    Validation and conversation bookkeeping run BEFORE streaming starts, so
    errors still surface as normal 400 JSON responses; once generate() begins,
    the 200 status line has already been committed to the client.
    """
    data = parse_json_body(required_fields=["query"])
    user_query = data["query"]
    if not isinstance(user_query, str) or not user_query.strip():
        raise ValidationError("query must be a non-empty string.")

    conv_id = data.get("conversation_uuid")
    if conv_id and not isinstance(conv_id, str):
        raise ValidationError("conversation_uuid must be a string.")

    # Handle logic to find/create conversation (same as above)
    # Absent/unknown IDs silently get a fresh conversation titled from the query.
    if not conv_id or conv_id not in conversations_db:
        conv_id = str(uuid.uuid4())
        title = user_query[:100]
        conversations_db[conv_id] = {
            "uuid": conv_id,
            "title": title,
            "created_at": get_utc_now(),
            "updated_at": get_utc_now(),
            "messages": [],
        }

    # Save User Message (persisted even if the client drops the stream mid-way)
    conversations_db[conv_id]["messages"].append(
        {
            "uuid": str(uuid.uuid4()),
            "conversation_uuid": conv_id,
            "role": "user",
            "content": user_query,
            "created_at": get_utc_now(),
        }
    )

    def generate():
        # 1. Simulate "Thinking"
        time.sleep(0.5)

        # 2. Send Sources first (Mimicking backend behavior)
        sources = ["https://openroad.readthedocs.io/en/latest/", "manpages/ant.md"]
        yield f"Sources: {', '.join(sources)}\n\n"

        time.sleep(0.5)

        # 3. Stream Text Chunks
        # The intro line is yielded whole; the rest is streamed word by word
        # and accumulated in `buffer` so the full text can be persisted below.
        full_response = f"I am streaming a response regarding **{user_query}**.\n\n"
        yield full_response

        words = "OpenROAD is a fast, autonomous, open-source tool flow for digital layout generation. It covers synthesis to GDSII.".split()

        buffer = ""
        for word in words:
            chunk = word + " "
            buffer += chunk
            yield chunk
            time.sleep(0.1)  # Simulate token generation speed

        # 4. Save the completed message to DB (for history)
        # NOTE: this runs only after the final yield, i.e. once the client has
        # consumed the whole stream — an aborted stream leaves no assistant message.
        conversations_db[conv_id]["messages"].append(
            {
                "uuid": str(uuid.uuid4()),
                "conversation_uuid": conv_id,
                "role": "assistant",
                "content": full_response + buffer,
                "context_sources": {
                    "sources": [{"source": s, "context": ""} for s in sources]
                },
                "tools": ["retrieve_general"],
                "created_at": get_utc_now(),
            }
        )
        conversations_db[conv_id]["updated_at"] = get_utc_now()

    # stream_with_context keeps the request context alive while the generator runs.
    # NOTE(review): chunks are plain text, not SSE "data:" frames, despite the
    # text/event-stream content type — confirm the frontend reads raw chunks.
    return Response(stream_with_context(generate()), content_type="text/event-stream")
| 317 | + |
| 318 | + |
if __name__ == "__main__":
    # Dev entry point: bind on all interfaces so a containerized frontend can reach us.
    print(f"🚀 Fake OpenROAD Backend running on http://localhost:{PORT}")
    print("   - Simulating Postgres, VectorDB, and LLM")
    app.run(host="0.0.0.0", port=PORT, debug=app.config["DEBUG"])