 import logging
 from werkzeug.utils import secure_filename
 
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain_core.messages import HumanMessage
-
 from genai.rag.ingestion_pipeline import IngestionPipeline
 from genai.vector_database.qdrant_vdb import QdrantVDB
 from genai.rag.llm.chat_model import ChatModel
+from genai.service.rag_service import retrieve_similar_docs, prepare_prompt
 
 
 # Set Logging
@@ -80,13 +78,25 @@ def upload_file():
 
 @generate_bp.route('/genai/generate', methods=['POST'])
 def generate():
+    """API Endpoint for generating recipe responses based on document retrieval.
+
+    This endpoint processes user queries against a vector database of recipes and returns
+    AI-generated responses using retrieved context.
+
+    Request Body:
+        query (str): The user's recipe-related query
+        conversation_id (str): Unique identifier for the conversation thread
+
+    Returns:
+        JSON response containing the generated recipe response or error message
+    """
     data = request.get_json()
 
     if not data or "query" not in data or "conversation_id" not in data:
         return jsonify({"error": "Missing 'query' or 'conversation_id'"}), 400
 
     query = data["query"]
-    conversation_id = data["conversation_id"]  # will be used
+    # conversation_id = data["conversation_id"]  # will be used in the future
 
     try:
         collection_name = "recipes"
@@ -96,29 +106,15 @@ def generate():
         vector_store = qdrant.create_and_get_vector_storage(
             collection_name
         )
+        # todo: retrieve messages from chat history as BaseMessage
+        messages = []
+        retrieved_docs = retrieve_similar_docs(vector_store, query)
+        prompt = prepare_prompt(query, retrieved_docs, messages)
 
-        # Retrieve 5 similar documents
-        retriever = vector_store.as_retriever(search_kwargs={"k": 5})
-        retrieved_docs = retriever.invoke(query)
-        docs_content = "\n\n".join(doc.page_content for doc in retrieved_docs)
-
-        # Prepare prompt
-        prompt_template = ChatPromptTemplate([
-            ("system", "You are a helpful assistant for recipe generation based on the given ingredients and the following context:\n\n{context}"),
-            MessagesPlaceholder("msgs")
-        ])
-
-        prompt = prompt_template.invoke({
-            "context": docs_content,
-            "msgs": HumanMessage(content=query)
-        })
-
         response = llm.invoke(prompt)
         return jsonify({
             "response": response.content,
         }), 200
 
     except Exception as e:
         return jsonify({"error": str(e)}), 500
-
-
0 commit comments