 from genai.rag.ingestion_pipeline import IngestionPipeline
 from genai.vector_database.qdrant_vdb import QdrantVDB
 from genai.rag.llm.chat_model import ChatModel
-from genai.service.rag_service import retrieve_similar_docs, prepare_prompt
+from genai.service.rag_service import retrieve_similar_docs, prepare_prompt, process_raw_messages


 # Set Logging
@@ -78,25 +78,33 @@ def upload_file():

 @generate_bp.route('/genai/generate', methods=['POST'])
 def generate():
-    """API Endpoint for generating recipe response based on document retrieval
+    """
+    API Endpoint for generating recipe responses using retrieved context.

-    This endpoint processes user queries against a vector database of recipes
-    and returns AI-generated responses using retrieved context.
+    This endpoint processes a user query against a vector database of recipes
+    and returns an AI-generated response using both retrieved context and
+    the full conversation history provided in the request.

     Request Body:
         query (str): The user's recipe-related query
-        conversation_id (str): Unique identifier for the conversation thread
+        messages (List[Dict]): Full conversation history, each with 'role' and 'content'
+            Example:
+                [
+                    {"role": "user", "content": "I have eggs and tomatoes."},
+                    {"role": "assistant", "content": "You could make shakshuka."}
+                ]

     Returns:
-        JSON response containing the generated recipe response or error message
+        JSON response containing:
+            - 'response': The generated assistant reply
     """
     data = request.get_json()

-    if not data or "query" not in data or "conversation_id" not in data:
-        return jsonify({"error": "Missing 'query' or 'conversation_id'"}), 400
+    if not data or "query" not in data or "messages" not in data:
+        return jsonify({"error": "Missing 'query' or 'messages'"}), 400

     query = data["query"]
-    # conversation_id = data["conversation_id"]  # will be used in the future
+    messages_raw = data["messages"]

     try:
         collection_name = "recipes"
@@ -106,8 +114,8 @@ def generate():
         vector_store = qdrant.create_and_get_vector_storage(
             collection_name
         )
-        # todo: retrieve messages from chat history as BaseMessage
-        messages = []
+        # turn raw messages into BaseMessage type
+        messages = process_raw_messages(messages_raw)
         retrieved_docs = retrieve_similar_docs(vector_store, query)
         prompt = prepare_prompt(
             llm.get_system_prompt(),
0 commit comments