|
| 1 | +--- |
| 2 | +title: Deploy a RAG-based LLM backend server |
| 3 | +weight: 3 |
| 4 | + |
| 5 | +layout: learningpathall |
| 6 | +--- |
| 7 | + |
| 8 | +## Backend Script for RAG-based LLM Server |
| 9 | +Once the virtual environment is activated, create a `backend.py` script using the following content. This script integrates the LLM with the FAISS vector database for RAG: |
| 10 | + |
| 11 | +```python |
| 12 | +import os |
| 13 | +import time |
| 14 | +import logging |
| 15 | +from flask import Flask, request, jsonify |
| 16 | +from flask_cors import CORS |
| 17 | +from langchain_community.vectorstores import FAISS |
| 18 | +from langchain_community.embeddings import HuggingFaceEmbeddings |
| 19 | +from langchain_community.llms import LlamaCpp |
| 20 | +from langchain_core.callbacks import StreamingStdOutCallbackHandler |
| 21 | +from langchain_core.prompts import PromptTemplate |
| 22 | +from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader |
| 23 | +from langchain_text_splitters import HTMLHeaderTextSplitter, CharacterTextSplitter |
| 24 | +from langchain.schema.runnable import RunnablePassthrough |
| 25 | +from langchain_core.output_parsers import StrOutputParser |
| 26 | +from langchain_core.runnables import ConfigurableField |
| 27 | + |
# ---- Logging ----
# Silence watchdog's noisy file-change events; keep a module-level logger.
logging.getLogger('watchdog').setLevel(logging.ERROR)
logger = logging.getLogger(__name__)

# ---- Flask application ----
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend

# ---- Filesystem layout ----
BASE_PATH = "/home/ubuntu"
TEMP_DIR = os.path.join(BASE_PATH, "temp")      # uploaded documents are staged here
VECTOR_DIR = os.path.join(BASE_PATH, "vector")  # persisted FAISS indexes live here
MODEL_PATH = os.path.join(BASE_PATH, "models/llama3.1-8b-instruct.Q4_0_arm.gguf")

# Create the working directories up front so request handlers never have to.
for _workdir in (TEMP_DIR, VECTOR_DIR):
    os.makedirs(_workdir, exist_ok=True)
| 45 | + |
# Token Streaming
class StreamingCallback(StreamingStdOutCallbackHandler):
    """Stream generated tokens to stdout and report total generation time.

    Tokens are also accumulated in ``self.tokens`` so callers can inspect
    the full generation after the fact.
    """

    def __init__(self):
        super().__init__()
        self.tokens = []        # tokens emitted during the current generation
        self.start_time = None  # wall-clock time when generation began

    def on_llm_start(self, *args, **kwargs):
        """Reset per-generation state and record the start time."""
        self.start_time = time.time()
        self.tokens = []
        print("\nLLM Started generating response...", flush=True)

    def on_llm_new_token(self, token: str, **kwargs):
        """Accumulate and echo each token as it is produced."""
        self.tokens.append(token)
        print(token, end="", flush=True)

    def on_llm_end(self, *args, **kwargs):
        """Report elapsed generation time.

        Guards against ``start_time`` being ``None`` (i.e. ``on_llm_start``
        never fired), which previously raised a TypeError here.
        """
        if self.start_time is None:
            print("\nLLM finished generating response", flush=True)
            return
        duration = time.time() - self.start_time
        print(f"\nLLM finished generating response in {duration:.2f} seconds", flush=True)
def format_docs(docs):
    """Join retrieved documents into a single context string.

    Concatenates each document's ``page_content`` with blank-line separators,
    strips any literal "Context:" markers, and trims surrounding whitespace.
    """
    contents = [doc.page_content for doc in docs]
    joined = "\n\n".join(contents)
    return joined.replace("Context:", "").strip()
| 69 | + |
# Vectordb creating API
@app.route('/create_vectordb', methods=['POST'])
def create_vectordb():
    """Build and persist a FAISS vector store from a PDF or a remote HTML page.

    Expects JSON with ``vector_name``, ``chunk_size`` and ``doc_type``
    ("PDF" or "HTML"; HTML additionally requires ``url``). Returns the path
    the index was saved under, 400 on client errors, 500 on server errors.
    """
    try:
        data = request.get_json(silent=True)
        if not data:
            return jsonify({"error": "Request body must be JSON"}), 400

        # Validate required fields up front so client mistakes yield 400, not 500.
        try:
            vector_name = data['vector_name']
            chunk_size = int(data['chunk_size'])
            doc_type = data['doc_type']
        except (KeyError, TypeError, ValueError) as e:
            return jsonify({"error": f"Invalid or missing field: {e}"}), 400

        # vector_name comes from the client: keep only the final path
        # component to prevent traversal outside VECTOR_DIR (e.g. "../../x").
        safe_name = os.path.basename(vector_name)
        if not safe_name:
            return jsonify({"error": "Invalid vector_name"}), 400
        vector_path = os.path.join(VECTOR_DIR, safe_name)

        # Load the source document(s).
        chunk_overlap = 30
        if doc_type == "PDF":
            loader = DirectoryLoader(TEMP_DIR, glob='*.pdf', loader_cls=PyPDFLoader)
            docs = loader.load()
        elif doc_type == "HTML":
            if 'url' not in data:
                return jsonify({"error": "HTML doc_type requires 'url'"}), 400
            splitter = HTMLHeaderTextSplitter([
                ("h1", "Header 1"), ("h2", "Header 2"),
                ("h3", "Header 3"), ("h4", "Header 4")
            ])
            docs = splitter.split_text_from_url(data['url'])
        else:
            return jsonify({"error": "Unsupported document type"}), 400

        # Chunk, embed and persist the index.
        text_splitter = CharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap
        )
        split_docs = text_splitter.split_documents(docs)
        embedding = HuggingFaceEmbeddings(model_name="thenlper/gte-base")
        vectorstore = FAISS.from_documents(documents=split_docs, embedding=embedding)
        vectorstore.save_local(vector_path)

        return jsonify({"status": "success", "path": vector_path})
    except Exception as e:
        logger.exception("Error creating vector database")
        return jsonify({"error": str(e)}), 500
| 109 | + |
# Query API
@app.route('/query', methods=['POST'])
def query():
    """Answer a question, optionally grounded in a saved FAISS vector store.

    Expects JSON with ``question`` and, for RAG mode, ``use_vectordb: true``
    plus a ``vector_path`` produced by /create_vectordb. Returns the model's
    answer, 400 on client errors, 500 on server errors.
    """
    try:
        data = request.get_json(silent=True)
        if not data or 'question' not in data:
            return jsonify({"error": "JSON body with 'question' is required"}), 400
        question = data['question']
        vector_path = data.get('vector_path')
        use_vectordb = data.get('use_vectordb', False)

        # Initialize the local llama.cpp model with streaming callbacks.
        callbacks = [StreamingCallback()]
        model = LlamaCpp(
            model_path=MODEL_PATH,
            temperature=0.1,
            max_tokens=1024,
            n_batch=2048,
            callbacks=callbacks,
            n_ctx=10000,
            n_threads=64,
            n_threads_batch=64
        )

        # Build the chain: retrieval-augmented when a vector store is selected,
        # plain LLM otherwise.
        if use_vectordb and vector_path:
            embedding = HuggingFaceEmbeddings(model_name="thenlper/gte-base")
            # allow_dangerous_deserialization: the index is produced by this
            # server itself, never supplied by untrusted clients.
            vectorstore = FAISS.load_local(vector_path, embedding, allow_dangerous_deserialization=True)
            retriever = vectorstore.as_retriever().configurable_fields(
                search_kwargs=ConfigurableField(id="search_kwargs")
            ).with_config({"search_kwargs": {"k": 5}})

            template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>
            You are a helpful assistant. Use the following context to answer the question.
            Context: {context}
            Question: {question}
            Answer: <|eot_id|>"""

            prompt = PromptTemplate(template=template, input_variables=["context", "question"])
            chain = (
                {"context": retriever | format_docs, "question": RunnablePassthrough()}
                | prompt
                | model
                | StrOutputParser()
            )
        else:
            template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>
            Question: {question}
            Answer: <|eot_id|>"""

            prompt = PromptTemplate(template=template, input_variables=["question"])
            # BUGFIX: the previous RunnablePassthrough().assign(...) form
            # requires a dict input and fails on the plain string passed to
            # chain.invoke(question) below. Map the string into the dict the
            # prompt expects, mirroring the RAG branch above.
            chain = (
                {"question": RunnablePassthrough()}
                | prompt
                | model
                | StrOutputParser()
            )

        # Run the chain on the raw question string.
        response = chain.invoke(question)
        return jsonify({"answer": response})
    except Exception as e:
        logger.exception("Error processing query")
        return jsonify({"error": str(e)}), 500
| 167 | + |
# File Upload API
@app.route('/upload_file', methods=['POST'])
def upload_file():
    """Accept a PDF upload and stage it in TEMP_DIR for later indexing.

    Returns the saved path on success; 400 when the file part is missing or
    is not a PDF; 500 on unexpected server errors.
    """
    try:
        # .get() avoids a KeyError (-> 500) when the 'file' part is absent;
        # a missing part is a client error and should be a 400.
        file = request.files.get('file')
        # Case-insensitive extension check so ".PDF" uploads are accepted too.
        if file is None or not file.filename.lower().endswith('.pdf'):
            return jsonify({"error": "Invalid file"}), 400
        # Fixed target name: create_vectordb globs TEMP_DIR/*.pdf, so each
        # upload simply replaces the previous one.
        filename = os.path.join(TEMP_DIR, "uploaded.pdf")
        file.save(filename)
        return jsonify({"status": "success", "path": filename})
    except Exception as e:
        logger.exception("Error uploading file")
        return jsonify({"error": str(e)}), 500
| 181 | + |
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger and
    # auto-reloader; combined with host='0.0.0.0' this exposes a remote code
    # execution vector. Keep it only for local development — set debug=False
    # (or serve via a WSGI server) before exposing this port.
    app.run(host='0.0.0.0', port=5000, debug=True)
| 184 | +``` |
| 185 | + |
| 186 | +## Run the Backend Server |
| 187 | + |
| 188 | +You are now ready to run the backend server for the RAG Chatbot. |
| 189 | +Use the following command in a terminal to start the backend server: |
| 190 | + |
| 191 | +```bash
| 192 | +python3 backend.py |
| 193 | +``` |
| 194 | + |
| 195 | +You should see output similar to the image below when the backend server starts successfully: |
| 196 | + |
0 commit comments