 from langchain_openai import ChatOpenAI
 from langchain_openai import OpenAIEmbeddings
 import logging
+from langchain_community.chat_message_histories import Neo4jChatMessageHistory
+import asyncio
 load_dotenv()
 
 openai_api_key = os.environ.get('OPENAI_API_KEY')
+model_version = 'gpt-4-0125-preview'
 
 def vector_embed_results(qa, question):
     vector_res = {}
     try:
-        # question = "What do you know about machine learning"
         result = qa({"query": question})
-        vector_res['result'] = result["result"]
+        vector_res['result'] = result.get("result")
         list_source_docs = []
         for i in result["source_documents"]:
             list_source_docs.append(i.metadata['source'])
         vector_res['source'] = list_source_docs
     except Exception as e:
         error_message = str(e)
         logging.exception(f'Exception in vector embedding in QA component:{error_message}')
-        raise Exception(error_message)
+        # raise Exception(error_message)
 
     return vector_res
 
-def cypher_results(graph, question, model_version):
+def cypher_results(graph, question):
     cypher_res = {}
     try:
         graph.refresh_schema()
         cypher_chain = GraphCypherQAChain.from_llm(
             graph=graph,
-            # cypher_llm=ChatOpenAI(temperature=0, model="gpt-4"),
             cypher_llm=ChatOpenAI(temperature=0, model=model_version),
             qa_llm=ChatOpenAI(temperature=0, model=model_version),
             validate_cypher=True,  # Validate relationship directions
             verbose=True,
             top_k=2
         )
-
-        cypher_res = cypher_chain.invoke({"query": question})
+        try:
+            cypher_res = cypher_chain.invoke({"query": question})
+        except Exception:
+            cypher_res = {}
 
     except Exception as e:
         error_message = str(e)
         logging.exception(f'Exception in CypherQAChain in QA component:{error_message}')
-        raise Exception(error_message)
+        # raise Exception(error_message)
 
     return cypher_res
 
+def save_chat_history(uri, userName, password, session_id, user_message, ai_message):
+    try:
+        history = Neo4jChatMessageHistory(
+            url=uri,
+            username=userName,
+            password=password,
+            session_id=session_id
+        )
+        history.add_user_message(user_message)
+        history.add_ai_message(ai_message)
+        logging.info('Successfully saved chat history')
+    except Exception as e:
+        error_message = str(e)
+        logging.exception(f'Exception in saving chat history:{error_message}')
+        # raise Exception(error_message)
+
 
+def get_chat_history(llm, uri, userName, password, session_id):
+    try:
+        history = Neo4jChatMessageHistory(
+            url=uri,
+            username=userName,
+            password=password,
+            session_id=session_id
+        )
+        chat_history = history.messages
 
-def QA_RAG(uri, userName, password, model_version, question):
+        if len(chat_history) == 0:
+            return ""
+        condense_template = f"""Given the following earlier conversation, summarise the chat history. Make sure to include all the relevant information.
+        Chat History:
+        {chat_history}"""
+        chat_summary = llm.predict(condense_template)
+        return chat_summary
+    except Exception as e:
+        error_message = str(e)
+        logging.exception(f'Exception in retrieving chat history:{error_message}')
+        # raise Exception(error_message)
+        return ''
+
+def QA_RAG(uri, userName, password, question, session_id):
     try:
-        if model_version == 'OpenAI GPT 3.5':
-            model_version = 'gpt-3.5-turbo'
-        elif model_version == 'OpenAI GPT 4':
-            model_version = 'gpt-4-0125-preview'
         retrieval_query = """
         MATCH (node)-[:PART_OF]->(d:Document)
         WITH d, apoc.text.join(collect(node.text),"\n----\n") as text, avg(score) as score
@@ -77,7 +114,7 @@ def QA_RAG(uri,userName,password,model_version,question):
         llm = ChatOpenAI(model=model_version, temperature=0)
 
         qa = RetrievalQA.from_chain_type(
-            llm=llm, chain_type="stuff", retriever=neo_db.as_retriever(search_kwargs={"score_threshold": 0.5}), return_source_documents=True
+            llm=llm, chain_type="stuff", retriever=neo_db.as_retriever(search_kwargs={'k': 3, "score_threshold": 0.5}), return_source_documents=True
         )
 
         graph = Neo4jGraph(
@@ -86,24 +123,39 @@ def QA_RAG(uri,userName,password,model_version,question):
             password=password
         )
         vector_res = vector_embed_results(qa, question)
+        print('Response from Vector embeddings')
         print(vector_res)
-        cypher_res = cypher_results(graph, question, model_version)
+        cypher_res = cypher_results(graph, question)
+        print('Response from CypherQAChain')
         print(cypher_res)
+
+        chat_summary = get_chat_history(llm, uri, userName, password, session_id)
+
         final_prompt = f"""You are a helpful question-answering agent. Your task is to analyze
         and synthesize information from two sources: the top result from a similarity search
-        (unstructured information) and relevant data from a graph database (structured information).
+        (unstructured information) and relevant data from a graph database (structured information).
+        If structured information fails to find an answer, use the answer from unstructured information,
+        and vice versa. Give only a straightforward answer, without mentioning which source it came from. You are also receiving
+        a chat history of the earlier conversation; use it to understand the context when answering the question.
         Given the user's query: {question}, provide a meaningful and efficient answer based
         on the insights derived from the following data:
+        chat_summary: {chat_summary}
         Structured information: {cypher_res.get('result','')}.
         Unstructured information: {vector_res.get('result','')}.
 
-        If structured information fails to find an answer, use the answer from unstructured information and vice versa. Give only a straightforward answer, without mentioning which source it came from.
         """
         print(final_prompt)
         response = llm.predict(final_prompt)
-        res = {"message": response, "user": "chatbot"}
+        ai_message = response
+        user_message = question
+        save_chat_history(uri, userName, password, session_id, user_message, ai_message)
+
+        res = {"session_id": session_id, "message": response, "user": "chatbot"}
         return res
     except Exception as e:
         error_message = str(e)
         logging.exception(f'Exception in QA component:{error_message}')
-        raise Exception(error_message)
+        # raise Exception(error_message)
+        return {"session_id": session_id, "message": "Something went wrong", "user": "chatbot"}
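
For context, a minimal usage sketch of the updated entry point (the connection values and session id below are hypothetical placeholders, not part of this commit):

    # Hypothetical caller: model selection now comes from the module-level
    # model_version constant, and passing a session_id lets QA_RAG persist the
    # exchange via save_chat_history and recall it via get_chat_history.
    result = QA_RAG(
        uri="neo4j+s://demo.databases.neo4j.io",  # placeholder connection URI
        userName="neo4j",                         # placeholder username
        password="password",                      # placeholder password
        question="What topics does the uploaded document cover?",
        session_id="session-123",                 # any stable per-conversation id
    )
    print(result["message"])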