Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
f85c73c
feat: Nextjs frontend
error9098x Jan 13, 2025
d6e8f71
remove large added files pre-commit
luarss Jan 15, 2025
0f36372
fix yarn/nodejs versions in readme
luarss Jan 16, 2025
0e3102d
added formatter (prettier) + lint
error9098x Feb 10, 2025
3cc6e47
fix postcss/tailwindcss bug
luarss Feb 23, 2025
373e9f8
feat: added requirements.txt file for apiproxy.py
Kannav02 Mar 9, 2025
b010d67
fix: README.md made consistent
Kannav02 Mar 9, 2025
69f7172
fix: suggested questions disabled for now
Kannav02 Mar 10, 2025
0a9ab05
fix: README.md updated
Kannav02 Mar 10, 2025
1b9fa80
fix: removed setTheme from dependency array, thus removing dependenc…
Kannav02 Mar 11, 2025
3b262e0
fix: added proxy api for suggested questions
Kannav02 Mar 20, 2025
c181814
fix: frontend for suggestedQuestions fixed
Kannav02 Mar 20, 2025
485f502
feat: included response model for suggestions api
Kannav02 Apr 7, 2025
c14dc71
feat: suggested questions router added for backend
Kannav02 Apr 7, 2025
e29eda4
feat: adjusted prompt templates, router and schema for router paths
Kannav02 Apr 8, 2025
653c3b7
feat: added the helper path for the routers
Kannav02 Apr 8, 2025
e08a93d
fix: fixed the helpers route and removed response type
Kannav02 Apr 11, 2025
25e3fec
fix: added the functionality to make request to the backend using the…
Kannav02 Apr 11, 2025
14c0939
fix: removed hardcoded url link, added variables in .env.example
Kannav02 Jun 4, 2025
ac7d3fd
fix: linting issues
Kannav02 Jun 4, 2025
1cac0ec
feat: added middleware and new router for ui
Kannav02 Jun 5, 2025
a121c8f
feat: added the new ui router that replaces apiproxy.py
Kannav02 Jun 5, 2025
ec9ee25
fix: right url added in SuggestedQuestions based on the ui router
Kannav02 Jun 5, 2025
887ed0c
fix: changed the port
Kannav02 Jun 5, 2025
dec56fc
update dockerfile
luarss Jun 5, 2025
d0ab7f2
update backend .env
luarss Jun 5, 2025
727fd4d
update backend routes
luarss Jun 5, 2025
ade3705
cleanup nextjs-frontend
luarss Jun 5, 2025
2036dc6
fix lint
luarss Jun 5, 2025
00a1a68
fix: source hydration fixed for nextjs
Kannav02 Jun 9, 2025
7b711a4
fix: lint fix
Kannav02 Jun 9, 2025
8fec607
fix: template added and requirements.txt from nextjs removed
Kannav02 Jun 9, 2025
f74389f
chore: readme and .env.example fixed
Kannav02 Jun 9, 2025
e23349f
fix: fixed the environment variable issue
Kannav02 Jun 11, 2025
b323e2c
frontend changes
luarss Jul 6, 2025
86c5905
backend fixes
luarss Jul 6, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,7 @@ temp_test_run_data.json

# backend
faiss_db

# frontend
node_modules
.next
4 changes: 0 additions & 4 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,3 @@ repos:
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
- id: detect-private-key
- id: check-added-large-files
args:
- --maxkb=120
- --enforce-all
1 change: 1 addition & 0 deletions backend/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,4 @@ pypdf==5.6.0
unstructured==0.15.1
nltk==3.9.1
huggingface_hub[cli]==0.24.6
openai==1.93.0
18 changes: 17 additions & 1 deletion backend/src/api/main.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,23 @@
from fastapi import FastAPI
from .routers import graphs, healthcheck
from .routers import graphs, healthcheck, helpers, ui
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=[
"http://localhost:3000",
"http://127.0.0.1:3000",
"http://localhost:8001", # mock endpoint
"http://127.0.0.1:8001",
],
allow_credentials=True,
allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
allow_headers=["*"],
)
app.include_router(healthcheck.router)
app.include_router(graphs.router)
app.include_router(helpers.router)
app.include_router(ui.router)
9 changes: 9 additions & 0 deletions backend/src/api/models/response_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,15 @@ class ContextSource(BaseModel):
context: str = ""


class SuggestedQuestions(BaseModel):
    """Structured model output: a list of suggested follow-up questions."""

    # Questions proposed to the user as next steps in the conversation.
    suggested_questions: list[str]


class SuggestedQuestionInput(BaseModel):
    """Request payload for the suggested-questions helper endpoint."""

    # The user's most recent question.
    latest_question: str
    # The assistant's answer to that question.
    assistant_answer: str


class ChatResponse(BaseModel):
response: str
context_sources: list[ContextSource] = []
Expand Down
4 changes: 4 additions & 0 deletions backend/src/api/routers/graphs.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,10 @@
embeddings_model_name = str(os.getenv("HF_EMBEDDINGS"))
elif embeddings_type == "GOOGLE_GENAI" or embeddings_type == "GOOGLE_VERTEXAI":
embeddings_model_name = str(os.getenv("GOOGLE_EMBEDDINGS"))
else:
raise ValueError(
"EMBEDDINGS_TYPE environment variable must be set to 'HF', 'GOOGLE_GENAI', or 'GOOGLE_VERTEXAI'."
)

embeddings_config = {"type": embeddings_type, "name": embeddings_model_name}

Expand Down
52 changes: 52 additions & 0 deletions backend/src/api/routers/helpers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import os
from fastapi import APIRouter, HTTPException
from dotenv import load_dotenv
from ..models.response_model import SuggestedQuestionInput, SuggestedQuestions
from ...prompts.prompt_templates import suggested_questions_prompt_template
from openai import OpenAI

# Load environment variables from a local .env file before reading the key.
load_dotenv()

# Fail fast at import time when the Gemini API key is missing, rather than
# failing on the first request.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise RuntimeError("GOOGLE_API_KEY is not set")

# Gemini accessed through Google's OpenAI-compatible endpoint, so the
# standard OpenAI client can be used.
model = "gemini-2.0-flash"
client = OpenAI(
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
    api_key=GOOGLE_API_KEY,
)

router = APIRouter(prefix="/helpers", tags=["helpers"])


# Main router endpoint: ask Gemini for follow-up question suggestions based
# on the latest user/assistant exchange.
@router.post("/suggestedQuestions")
async def get_suggested_questions(
    suggested_question_input: SuggestedQuestionInput,
) -> SuggestedQuestions:
    """Return suggested follow-up questions for the latest exchange.

    Args:
        suggested_question_input: The user's latest question and the
            assistant's answer, interpolated into the prompt template.

    Returns:
        SuggestedQuestions: The structured suggestions parsed from the
        model response.

    Raises:
        HTTPException: 500 when the model call fails or returns no
            parsable payload.
    """
    full_prompt = suggested_questions_prompt_template.format(
        latest_question=suggested_question_input.latest_question,
        assistant_answer=suggested_question_input.assistant_answer,
    )

    try:
        response = client.beta.chat.completions.parse(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": full_prompt},
            ],
            response_format=SuggestedQuestions,
        )
        # `parse` already returns a validated SuggestedQuestions instance,
        # or None when the model produced no parsable content. The original
        # `not SuggestedQuestions.model_validate(response_data)` check was
        # dead code: model_validate raises on invalid input and otherwise
        # returns a (truthy) model, so only the None case needs handling.
        response_data = response.choices[0].message.parsed
        if response_data is None:
            raise ValueError(
                f"Invalid response format from the model, got {response_data}"
            )
        return response_data

    except Exception as e:
        # Chain the original exception so the root cause survives in logs.
        raise HTTPException(
            status_code=500, detail="Failed to get suggested questions: " + str(e)
        ) from e
36 changes: 36 additions & 0 deletions backend/src/api/routers/ui.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
from fastapi import APIRouter, Request, Response
import httpx
import os

router = APIRouter(prefix="/ui", tags=["ui"])

# Base URL of the backend that this UI-facing router proxies to; defaults to
# the local dev server when BACKEND_ENDPOINT is not set.
BACKEND_ENDPOINT = os.getenv("BACKEND_ENDPOINT", "http://localhost:8000")
# Shared async HTTP client reused across all proxied requests.
# NOTE(review): created at import time and never explicitly closed —
# presumably acceptable for an app-lifetime singleton; confirm shutdown
# handling (e.g. FastAPI lifespan hook) if connections leak.
client = httpx.AsyncClient(base_url=BACKEND_ENDPOINT)


@router.post("/chat")
async def proxy_chat(request: Request) -> Response:
    """Forward a chat request body to the backend agent-retriever route.

    The backend's body, status code, and content type are passed through
    to the caller unchanged.
    """
    payload = await request.json()
    # TODO: set this route dynamically
    backend_response = await client.post("/graphs/agent-retriever", json=payload)
    content_type = backend_response.headers.get("content-type")
    return Response(
        content=backend_response.content,
        status_code=backend_response.status_code,
        media_type=content_type,
    )


@router.post("/suggestedQuestions")
async def suggested_questions(request: Request) -> Response:
    """Proxy suggested-question requests to the helpers route.

    Converts the frontend's camelCase field names to the snake_case names
    the backend expects, then passes the backend response through verbatim.
    """
    payload = await request.json()
    # Transform camelCase to snake_case for the backend
    backend_payload = {
        "latest_question": payload.get("latestQuestion", ""),
        "assistant_answer": payload.get("assistantAnswer", ""),
    }
    backend_response = await client.post(
        "/helpers/suggestedQuestions", json=backend_payload
    )
    return Response(
        content=backend_response.content,
        status_code=backend_response.status_code,
        media_type=backend_response.headers.get("content-type"),
    )
106 changes: 77 additions & 29 deletions backend/src/prompts/prompt_templates.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,22 @@
summarise_prompt_template = """
You are an expert programmer and problem-solver, tasked with answering any question about the OpenROAD (OR) project \
You are an expert programmer and problem-solver, tasked with answering any question about the OpenROAD (OR) project
and the OpenROAD-Flow-Scripts (ORFS).

Generate a comprehensive and informative answer for the given question based solely on the provided context.\
Use an unbiased and journalistic tone. \
You may use bullet points to explain the answer in a step-by-step, detailed manner.\
Generate a comprehensive and informative answer for the given question based solely on the provided context.
Use an unbiased and journalistic tone.
You may use bullet points to explain the answer in a step-by-step, detailed manner.
You may provide code snippets and terminal commands as part of the answer.

The user does not have access to the context.\
You must not ask the user to refer to the context in any part of your answer.\
The user does not have access to the context.
You must not ask the user to refer to the context in any part of your answer.
You must not ask the user to refer to a link that is not a part of your answer.

If there is nothing in the context relevant to the question, simply say "Sorry its not avaiable in my knowledge base." \
Do not try to make up an answer.\
If there is nothing in the context relevant to the question, simply say "Sorry its not avaiable in my knowledge base."
Do not try to make up an answer.
Anything between the following `context` html blocks is retrieved from a knowledge bank, not part of the conversation with the user.

For casual greetings respond politely with a simple, relevant answer.\
Introduce yourself when asked.\
For casual greetings respond politely with a simple, relevant answer.
Introduce yourself when asked.

------------------------------------------------------------------------------------
Use the following context:
Expand All @@ -33,15 +33,16 @@
"""

gh_discussion_prompt_template = """
The following is a GitHub Discussions conversation between two programmers discussing the OpenROAD (OR) project\
and the OpenROAD-Flow-Scripts (ORFS).\
The following is a GitHub Discussions conversation between two programmers discussing the OpenROAD (OR) project
and the OpenROAD-Flow-Scripts (ORFS).

You may infer information from the conversation to answer the question.

"""

tool_rephrase_prompt_template = """You are an assistant tasked with answering any question about the OpenROAD (OR) project \
and the OpenROAD-Flow-Scripts (ORFS). You have access to the following set of tools.\
tool_rephrase_prompt_template = """
You are an assistant tasked with answering any question about the OpenROAD (OR) project
and the OpenROAD-Flow-Scripts (ORFS). You have access to the following set of tools.

Here are the names and descriptions for each tool:

Expand All @@ -53,29 +54,76 @@
This is the user's follow-up question:
{question}

Given the chat history, rephrase the follow-up question to be a standalone question.\
The rephrased question should include only relevant information inferred from the chat history.\
If the question is already standalone, return the same question.\
Return your response as a json blob with 'rephrased_question'.\
Given the chat history, rephrase the follow-up question to be a standalone question.
The rephrased question should include only relevant information inferred from the chat history.
If the question is already standalone, return the same question.
Return your response as a json blob with 'rephrased_question'.

Choose the most appropriate tools from the list of tools to answer the rephrased question.\
Use the tool descriptions to pick the appropriate tools.\
Return your response as a JSON blob with 'tool_names'.\
Choose the most appropriate tools from the list of tools to answer the rephrased question.
Use the tool descriptions to pick the appropriate tools.
Return your response as a JSON blob with 'tool_names'.

"""

rephrase_prompt_template = """

This is the chat history between you and the user:\
This is the chat history between you and the user:
{chat_history}

This is the user's follow-up question:\
This is the user's follow-up question:
{question}

Given the chat history, rephrase the follow-up question to be a standalone question.\
The rephrased question should include only relevant information inferred from the chat history.\
If the question is already standalone, return the same question.\
Choose the most appropriate tools from the list of tools to answer the rephrased question.\
Return your response as a json blob with 'rephrased_question'.\
Given the chat history, rephrase the follow-up question to be a standalone question.
The rephrased question should include only relevant information inferred from the chat history.
If the question is already standalone, return the same question.
Choose the most appropriate tools from the list of tools to answer the rephrased question.
Return your response as a json blob with 'rephrased_question'.

"""

# Prompt for generating the next three suggested questions. When the
# assistant answer lacks sufficient knowledge, the model is told to fall back
# to the fixed list of OpenROAD documentation topics below.
# Placeholders: {latest_question}, {assistant_answer}.
suggested_questions_prompt_template = """
If the assistant answer has sufficient knowledge, use it to predict the next 3 suggested questions.
Otherwise, strictly restrict to these topics. Make sure it is in the form of a question.
Getting Started with OpenROAD
Building OpenROAD
Getting Started with the OpenROAD Flow - OpenROAD-flow-scripts
Tutorials
Git Quickstart
Man pages
OpenROAD User Guide
Database
GUI
Partition Management
Restructure
Floorplan Initialization
Pin Placement
Chip-level Connections
Macro Placement
Hierarchical Macro Placement
Tapcell Insertion
PDN Generation
Global Placement
Gate Resizing
Detailed Placement
Clock Tree Synthesis
Global Routing
Antenna Checker
Detailed Routing
Metal Fill
Parasitics Extraction
Messages Glossary
Getting Involved
Developer's Guide
Coding Practices
Logger
CI
README Format
Tcl Format
Man pages Test Framework
Code of Conduct
FAQs

User Question: {latest_question}

Assistant Answer: {assistant_answer}

"""
Loading