|
| 1 | +from langchain_community.vectorstores.neo4j_vector import Neo4jVector |
| 2 | +from langchain.chains import GraphCypherQAChain |
| 3 | +from langchain.graphs import Neo4jGraph |
| 4 | +import os |
| 5 | +from dotenv import load_dotenv |
| 6 | +from langchain.chains import RetrievalQA |
| 7 | +from langchain_openai import ChatOpenAI |
| 8 | +from langchain_openai import OpenAIEmbeddings |
| 9 | +import logging |
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# NOTE(review): not referenced in this chunk — the langchain-openai clients
# read OPENAI_API_KEY from the environment themselves; confirm whether this
# binding is used elsewhere before removing.
openai_api_key = os.environ.get('OPENAI_API_KEY')
| 13 | + |
def vector_embed_results(qa, question):
    """Run the retrieval QA chain for *question* and collect answer plus sources.

    Args:
        qa: A RetrievalQA-style callable accepting ``{"query": ...}`` and
            returning a dict with ``"result"`` and ``"source_documents"``.
        question: The user question to answer.

    Returns:
        dict with:
            "result": the answer text produced by the chain.
            "source": list of each source document's ``metadata['source']``.

    Raises:
        Exception: on any failure in the chain; the original exception is
            chained as the cause so the traceback is preserved.
    """
    vector_res = {}
    try:
        result = qa({"query": question})
        vector_res['result'] = result["result"]
        # Record where every retrieved chunk came from.
        vector_res['source'] = [doc.metadata['source'] for doc in result["source_documents"]]
    except Exception as e:
        error_message = str(e)
        logging.exception(f'Exception in vector embedding in QA component:{error_message}')
        # Chain the cause instead of discarding the original traceback.
        raise Exception(error_message) from e

    return vector_res
| 30 | + |
def cypher_results(graph, question, model_version):
    """Answer *question* by generating and executing Cypher against *graph*.

    Args:
        graph: A ``Neo4jGraph`` instance connected to the target database.
        question: The user question to answer.
        model_version: Concrete OpenAI model name used for both Cypher
            generation and answer synthesis.

    Returns:
        The dict produced by ``GraphCypherQAChain.invoke`` (empty dict only
        if the chain were to return one).

    Raises:
        Exception: on any failure; the original exception is chained as the
            cause so the traceback is preserved.
    """
    cypher_res = {}
    try:
        # Refresh so the chain generates Cypher against the current schema.
        graph.refresh_schema()
        cypher_chain = GraphCypherQAChain.from_llm(
            graph=graph,
            cypher_llm=ChatOpenAI(temperature=0, model=model_version),
            qa_llm=ChatOpenAI(temperature=0, model=model_version),
            validate_cypher=True,  # Validate relationship directions
            verbose=True,
            top_k=2,
        )

        cypher_res = cypher_chain.invoke({"query": question})

    except Exception as e:
        error_message = str(e)
        logging.exception(f'Exception in CypherQAChain in QA component:{error_message}')
        # Chain the cause instead of discarding the original traceback.
        raise Exception(error_message) from e

    return cypher_res
| 53 | + |
| 54 | + |
| 55 | + |
def QA_RAG(uri, userName, password, model_version, question):
    """Hybrid RAG question answering over Neo4j.

    Combines two retrieval paths — vector similarity search over an existing
    Neo4j vector index (unstructured) and LLM-generated Cypher over the graph
    (structured) — then asks the LLM to synthesize a single answer from both.

    Args:
        uri: Neo4j connection URI.
        userName: Neo4j username.
        password: Neo4j password.
        model_version: Either a UI label ('OpenAI GPT 3.5' / 'OpenAI GPT 4')
            or a concrete OpenAI model name.
        question: The user question to answer.

    Returns:
        dict: ``{"message": <answer text>, "user": "chatbot"}``.

    Raises:
        Exception: on any failure; the original exception is chained as the
            cause so the traceback is preserved.
    """
    try:
        # Map UI-facing model labels to concrete OpenAI model names.
        if model_version == 'OpenAI GPT 3.5':
            model_version = 'gpt-3.5-turbo'
        elif model_version == 'OpenAI GPT 4':
            model_version = 'gpt-4-0125-preview'

        # Shapes each vector hit into (text, score, metadata) where metadata
        # carries the parent Document's url (or fileName when url is absent
        # or the literal string "None").
        retrieval_query = """
        MATCH (node)-[:PART_OF]->(d:Document)
        WITH d, apoc.text.join(collect(node.text),"\n----\n") as text, avg(score) as score
        RETURN text, score, {source: COALESCE(CASE WHEN d.url CONTAINS "None" THEN d.fileName ELSE d.url END, d.fileName)} as metadata
        """

        neo_db = Neo4jVector.from_existing_index(
            embedding=OpenAIEmbeddings(),
            url=uri,
            username=userName,
            password=password,
            database="neo4j",
            index_name="vector",
            retrieval_query=retrieval_query,
        )
        llm = ChatOpenAI(model=model_version, temperature=0)

        qa = RetrievalQA.from_chain_type(
            llm=llm,
            chain_type="stuff",
            retriever=neo_db.as_retriever(search_kwargs={"score_threshold": 0.5}),
            return_source_documents=True,
        )

        graph = Neo4jGraph(
            url=uri,
            username=userName,
            password=password,
        )

        vector_res = vector_embed_results(qa, question)
        # Lazy %-formatting: only rendered when the log level emits the record.
        logging.info('vector result: %s', vector_res)
        cypher_res = cypher_results(graph, question, model_version)
        logging.info('cypher result: %s', cypher_res)

        final_prompt = f"""You are a helpful question-answering agent. Your task is to analyze
        and synthesize information from two sources: the top result from a similarity search
        (unstructured information) and relevant data from a graph database (structured information).
        Given the user's query: {question}, provide a meaningful and efficient answer based
        on the insights derived from the following data:
        Structured information: {cypher_res.get('result','')}.
        Unstructured information: {vector_res.get('result','')}.

        If structured information fails to find an answer then use the answer from unstructured information and vice versa. I only want a straightforward answer without mentioning from which source you got the answer.
        """
        logging.info('final prompt: %s', final_prompt)
        response = llm.predict(final_prompt)
        res = {"message": response, "user": "chatbot"}
        return res
    except Exception as e:
        error_message = str(e)
        # Fixed doubled word in the original log message ("in in").
        logging.exception(f'Exception in QA component:{error_message}')
        # Chain the cause instead of discarding the original traceback.
        raise Exception(error_message) from e
0 commit comments