import os
import time
import uuid
import json
import random
from datetime import datetime, timezone
from flask import Flask, request, jsonify, Response, stream_with_context
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # Enable CORS for Next.js
app.config['DEBUG'] = os.getenv('FLASK_DEBUG', 'False').lower() == 'true'
PORT = int(os.getenv('PORT', 8000))

# --- In-Memory Database ---
# Structure mimics: backend/src/database/models.py
conversations_db = {}

# --- Helpers ---
def get_utc_now():
    return datetime.now(timezone.utc).isoformat()

def generate_fake_context():
    """Mimics the RAG output with OpenROAD specific sources"""
    return {
        "sources": [
            {
                "source": "https://openroad.readthedocs.io/en/latest/main/README.html",
                "context": "OpenROAD is an automated physical design tool..."
            },
            {
                "source": "manpages/man1/global_placement.md",
                "context": "Global placement (gpl) distributes cells across the core..."
            }
        ]
    }

# --- Validation helpers and error handling ---

class ValidationError(ValueError):
    """Raised when incoming request bodies are invalid."""


def parse_json_body(required_fields=None, allow_empty=False):
    """
    Parse the JSON body, optionally allowing empty payloads (treated as {}).
    Raises ValidationError if JSON is missing or required fields are absent.
    """
    has_body = request.content_length not in (None, 0)
    if allow_empty and not has_body:
        data = {}
    else:
        data = request.get_json(silent=True)
        if data is None:
            raise ValidationError("Request body must be JSON.")

    if required_fields:
        missing = [field for field in required_fields if field not in data]
        if missing:
            raise ValidationError(f"Missing required field(s): {', '.join(missing)}.")

    return data
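
# Note: parse_json_body only checks that the required keys are present; the
# route handlers below add the per-field checks (non-empty strings, types, etc.).
# For example, a POST body of {"query": ""} passes this helper but is rejected
# by the agent-retriever routes.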


@app.errorhandler(ValidationError)
def handle_validation_error(error):
    return jsonify({"error": str(error)}), 400


@app.errorhandler(404)
def not_found(error):
    return jsonify({"error": "Resource not found"}), 404


@app.errorhandler(500)
def internal_error(error):
    app.logger.exception("Unhandled exception: %s", error)
    return jsonify({"error": "Internal server error"}), 500
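
# The handlers above all return a JSON body of the form {"error": "..."} with the
# matching HTTP status, e.g. a malformed request body yields
# 400 {"error": "Request body must be JSON."}.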


# --- Routes matching backend/src/api/routers ---

@app.route('/healthcheck', methods=['GET'])
def healthcheck():
    return jsonify({"status": "ok"})

# --- Helpers Router (backend/src/api/routers/helpers.py) ---
@app.route('/helpers/suggestedQuestions', methods=['POST'])
def suggested_questions():
    """
    Mimics the OpenAI/Gemini call to generate next questions.
    """
    parse_json_body(allow_empty=True)
    return jsonify({
        "suggested_questions": [
            "How do I install OpenROAD flow scripts?",
            "What is the difference between Global and Detailed Routing?",
            "How to fix LVS errors in Sky130?",
            "Explain the CTS (Clock Tree Synthesis) stage."
        ]
    })
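
# Example request (assuming the default PORT of 8000):
#   curl -X POST http://localhost:8000/helpers/suggestedQuestions
# An empty body is accepted (allow_empty=True) and the static question list is returned.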

# --- Conversations Router (backend/src/api/routers/conversations.py) ---

@app.route('/conversations', methods=['POST'])
def create_conversation():
    data = parse_json_body(allow_empty=True)
    title = data.get('title', "New Conversation")
    if 'title' in data and (not isinstance(title, str) or not title.strip()):
        raise ValidationError("title must be a non-empty string when provided.")

    new_id = str(uuid.uuid4())
    conversations_db[new_id] = {
        "uuid": new_id,
        "title": title,
        "created_at": get_utc_now(),
        "updated_at": get_utc_now(),
        "messages": []
    }
    return jsonify(conversations_db[new_id]), 201
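
# Example request (assuming the default PORT of 8000):
#   curl -X POST http://localhost:8000/conversations \
#        -H "Content-Type: application/json" -d '{"title": "Placement questions"}'
# Responds 201 with the stored record, including an empty "messages" list.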

@app.route('/conversations', methods=['GET'])
def list_conversations():
    # Sort by updated_at desc (mimicking crud.get_all_conversations)
    conv_list = list(conversations_db.values())
    conv_list.sort(key=lambda x: x['updated_at'], reverse=True)

    # The list response in backend/src/api/models/response_model.py excludes messages
    response_list = []
    for c in conv_list:
        response_list.append({
            "uuid": c['uuid'],
            "title": c['title'],
            "created_at": c['created_at'],
            "updated_at": c['updated_at']
        })
    return jsonify(response_list)

@app.route('/conversations/<uuid_str>', methods=['GET'])
def get_conversation(uuid_str):
    if uuid_str not in conversations_db:
        return jsonify({"detail": "Conversation not found"}), 404
    return jsonify(conversations_db[uuid_str])

@app.route('/conversations/<uuid_str>', methods=['DELETE'])
def delete_conversation(uuid_str):
    if uuid_str in conversations_db:
        del conversations_db[uuid_str]
        return Response(status=204)
    return jsonify({"detail": "Conversation not found"}), 404
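
# Example requests (assuming the default PORT of 8000; <uuid> is a conversation
# uuid returned by the POST above):
#   curl http://localhost:8000/conversations                  # list, newest first, no messages
#   curl http://localhost:8000/conversations/<uuid>           # single conversation with messages
#   curl -X DELETE http://localhost:8000/conversations/<uuid> # 204 on success, 404 otherwise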

@app.route('/conversations/agent-retriever', methods=['POST'])
def agent_retriever():
    """
    Simulates the non-streaming RAG agent.
    """
    data = parse_json_body(required_fields=['query'])
    user_query = data['query']
    if not isinstance(user_query, str) or not user_query.strip():
        raise ValidationError("query must be a non-empty string.")

    conv_id = data.get('conversation_uuid')
    if conv_id and not isinstance(conv_id, str):
        raise ValidationError("conversation_uuid must be a string.")

    # 1. Create the conversation if the ID is missing or unknown
    #    (mirrors the streaming endpoint and avoids a KeyError below)
    if not conv_id or conv_id not in conversations_db:
        conv_id = str(uuid.uuid4())
        title = user_query[:100] if user_query else "New Conversation"
        conversations_db[conv_id] = {
            "uuid": conv_id,
            "title": title,
            "created_at": get_utc_now(),
            "updated_at": get_utc_now(),
            "messages": []
        }

    # 2. Save User Message
    conversations_db[conv_id]['messages'].append({
        "uuid": str(uuid.uuid4()),
        "conversation_uuid": conv_id,
        "role": "user",
        "content": user_query,
        "created_at": get_utc_now()
    })

    # Simulate Latency
    time.sleep(1)

    # 3. Generate Fake Answer
    fake_answer = (
        f"This is a **mock backend** response to: '{user_query}'.\n\n"
        "I am simulating the `RetrieverGraph`. Here is some information about OpenROAD:\n\n"
        "- It is an open-source flow.\n"
        "- It uses yosys, openSTA, etc."
    )
    context_sources = generate_fake_context()
    tools_used = ["retrieve_general"]

    # 4. Save Assistant Message
    conversations_db[conv_id]['messages'].append({
        "uuid": str(uuid.uuid4()),
        "conversation_uuid": conv_id,
        "role": "assistant",
        "content": fake_answer,
        "context_sources": context_sources,
        "tools": tools_used,
        "created_at": get_utc_now()
    })

    conversations_db[conv_id]['updated_at'] = get_utc_now()

    # 5. Return ChatResponse model
    return jsonify({
        "response": fake_answer,
        "context_sources": [
            {"source": s["source"], "context": s["context"]}
            for s in context_sources["sources"]
        ],
        "tools": tools_used
    })
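
# Example request (assuming the default PORT of 8000):
#   curl -X POST http://localhost:8000/conversations/agent-retriever \
#        -H "Content-Type: application/json" \
#        -d '{"query": "What does global placement do?"}'
# Omitting "conversation_uuid" (or passing an unknown one) creates a new conversation;
# the JSON response carries "response", "context_sources", and "tools".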

@app.route('/conversations/agent-retriever/stream', methods=['POST'])
def agent_retriever_stream():
    """
    Simulates the Streaming Endpoint.
    Matches backend logic: Yields "Sources: ..." then text chunks.
    """
    data = parse_json_body(required_fields=['query'])
    user_query = data['query']
    if not isinstance(user_query, str) or not user_query.strip():
        raise ValidationError("query must be a non-empty string.")

    conv_id = data.get('conversation_uuid')
    if conv_id and not isinstance(conv_id, str):
        raise ValidationError("conversation_uuid must be a string.")

    # Handle logic to find/create conversation (same as above)
    if not conv_id or conv_id not in conversations_db:
        conv_id = str(uuid.uuid4())
        title = user_query[:100] if user_query else "New Conversation"
        conversations_db[conv_id] = {
            "uuid": conv_id,
            "title": title,
            "created_at": get_utc_now(),
            "updated_at": get_utc_now(),
            "messages": []
        }

    # Save User Message
    conversations_db[conv_id]['messages'].append({
        "uuid": str(uuid.uuid4()),
        "conversation_uuid": conv_id,
        "role": "user",
        "content": user_query,
        "created_at": get_utc_now()
    })

    def generate():
        # 1. Simulate "Thinking"
        time.sleep(0.5)

        # 2. Send Sources first (Mimicking backend behavior)
        sources = ["https://openroad.readthedocs.io/en/latest/", "manpages/ant.md"]
        yield f"Sources: {', '.join(sources)}\n\n"

        time.sleep(0.5)

        # 3. Stream Text Chunks
        full_response = f"I am streaming a response regarding **{user_query}**.\n\n"
        yield full_response

        words = "OpenROAD is a fast, autonomous, open-source tool flow for digital layout generation. It covers synthesis to GDSII.".split()

        buffer = ""
        for word in words:
            chunk = word + " "
            buffer += chunk
            yield chunk
            time.sleep(0.1)  # Simulate token generation speed

        # 4. Save the completed message to DB (for history)
        conversations_db[conv_id]['messages'].append({
            "uuid": str(uuid.uuid4()),
            "conversation_uuid": conv_id,
            "role": "assistant",
            "content": full_response + buffer,
            "context_sources": {"sources": [{"source": s, "context": ""} for s in sources]},
            "tools": ["retrieve_general"],
            "created_at": get_utc_now()
        })
        conversations_db[conv_id]['updated_at'] = get_utc_now()

    return Response(stream_with_context(generate()), content_type='text/event-stream')
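
# Example request (assuming the default PORT of 8000); curl's -N flag disables
# buffering so the chunks are printed as they are yielded:
#   curl -N -X POST http://localhost:8000/conversations/agent-retriever/stream \
#        -H "Content-Type: application/json" \
#        -d '{"query": "Explain clock tree synthesis"}'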

if __name__ == '__main__':
    print(f"🚀 Fake OpenROAD Backend running on http://localhost:{PORT}")
    print("   - Simulating Postgres, VectorDB, and LLM")
    app.run(host='0.0.0.0', port=PORT, debug=app.config['DEBUG'])
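
# To try it locally (the filename is assumed; adjust to wherever this script lives):
#   FLASK_DEBUG=true PORT=8000 python fake_backend.py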