@@ -3,7 +3,11 @@
 
 from elasticsearch import Elasticsearch
 from flask import current_app, render_template, stream_with_context
-from langchain_elasticsearch import ElasticsearchChatMessageHistory, ElasticsearchStore, SparseVectorStrategy
+from langchain_elasticsearch import (
+    ElasticsearchChatMessageHistory,
+    ElasticsearchStore,
+    SparseVectorStrategy,
+)
 from langchain_openai import ChatOpenAI
 from llm_integrations import get_llm
 from elasticsearch_client import (
@@ -12,7 +16,9 @@
 )
 
 INDEX = os.getenv("ES_INDEX", "workplace-app-docs")
-INDEX_CHAT_HISTORY = os.getenv("ES_INDEX_CHAT_HISTORY", "workplace-app-docs-chat-history")
+INDEX_CHAT_HISTORY = os.getenv(
+    "ES_INDEX_CHAT_HISTORY", "workplace-app-docs-chat-history"
+)
 ELSER_MODEL = os.getenv("ELSER_MODEL", ".elser_model_2")
 SESSION_ID_TAG = "[SESSION_ID]"
 SOURCE_TAG = "[SOURCE]"
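The diff skips the untouched `store` construction between these hunks. For orientation only, a minimal sketch of how a `SparseVectorStrategy`-backed `ElasticsearchStore` is typically wired to the constants above — the exact keyword values in this app are assumptions, since those lines fall outside the hunks:

import os

from langchain_elasticsearch import ElasticsearchStore, SparseVectorStrategy

# Assumption: elasticsearch_client exposes a connected Elasticsearch client,
# as implied by the imports in the first hunk.
from elasticsearch_client import elasticsearch_client

INDEX = os.getenv("ES_INDEX", "workplace-app-docs")
ELSER_MODEL = os.getenv("ELSER_MODEL", ".elser_model_2")

# SparseVectorStrategy tells the store to index and query with sparse
# vectors produced by the named ELSER model inside Elasticsearch.
store = ElasticsearchStore(
    es_connection=elasticsearch_client,
    index_name=INDEX,
    strategy=SparseVectorStrategy(model_id=ELSER_MODEL),
)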
@@ -26,12 +32,15 @@
 
 llm = get_llm()
 
+
 @stream_with_context
 def ask_question(question, session_id):
     yield f"data: {SESSION_ID_TAG} {session_id}\n\n"
     current_app.logger.debug("Chat session ID: %s", session_id)
 
-    chat_history = get_elasticsearch_chat_message_history(INDEX_CHAT_HISTORY, session_id)
+    chat_history = get_elasticsearch_chat_message_history(
+        INDEX_CHAT_HISTORY, session_id
+    )
 
     if len(chat_history.messages) > 0:
         # create a condensed question
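The rewrapped call above uses a helper imported in the first hunk. Its body is not part of this diff; a plausible sketch, assuming it simply binds the shared client and index to LangChain's `ElasticsearchChatMessageHistory`:

from langchain_elasticsearch import ElasticsearchChatMessageHistory

from elasticsearch_client import elasticsearch_client  # assumed shared client


def get_elasticsearch_chat_message_history(index, session_id):
    # One message history per chat session, keyed by session_id.
    return ElasticsearchChatMessageHistory(
        es_connection=elasticsearch_client, index=index, session_id=session_id
    )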
@@ -50,7 +59,9 @@ def ask_question(question, session_id):
     docs = store.as_retriever().invoke(condensed_question)
     for doc in docs:
         doc_source = {**doc.metadata, "page_content": doc.page_content}
-        current_app.logger.debug("Retrieved document passage from: %s", doc.metadata["name"])
+        current_app.logger.debug(
+            "Retrieved document passage from: %s", doc.metadata["name"]
+        )
         yield f"data: {SOURCE_TAG} {json.dumps(doc_source)}\n\n"
 
     qa_prompt = render_template(
@@ -62,7 +73,9 @@ def ask_question(question, session_id):
 
     answer = ""
     for chunk in llm.stream(qa_prompt):
-        content = chunk.content.replace("\n", " ")  # the stream can get messed up with newlines
+        content = chunk.content.replace(
+            "\n", " "
+        )  # the stream can get messed up with newlines
         yield f"data: {content}\n\n"
         answer += chunk.content
 
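This commit only rewraps long lines; behavior is unchanged. For readers tracing the stream protocol visible in the hunks — `data:` events carrying a `[SESSION_ID]` tag, then `[SOURCE]` JSON documents, then raw answer tokens — here is a small consumer sketch. The endpoint URL and request payload are assumptions, since the Flask route wrapping ask_question() is not part of this diff:

import json

import requests

# Hypothetical endpoint and payload shape.
response = requests.post(
    "http://localhost:4000/api/chat",
    json={"question": "What is the vacation policy?"},
    stream=True,
)

session_id = None
sources = []
for line in response.iter_lines(decode_unicode=True):
    if not line or not line.startswith("data: "):
        continue  # skip the blank separators between SSE events
    payload = line[len("data: "):]
    if payload.startswith("[SESSION_ID]"):
        session_id = payload[len("[SESSION_ID]"):].strip()
    elif payload.startswith("[SOURCE]"):
        sources.append(json.loads(payload[len("[SOURCE]"):]))
    else:
        print(payload, end="")  # answer tokens; the server replaced newlines with spaces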