|
5 | 5 | from langchain.chains import RetrievalQAWithSourcesChain, LLMChain |
6 | 6 | import os |
7 | 7 | from langchain.memory import ConversationBufferWindowMemory |
8 | | -from langchain_openai import OpenAI |
| 8 | +from langchain_openai import ChatOpenAI |
9 | 9 | from langchain.memory import ConversationBufferWindowMemory |
10 | 10 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder |
11 | 11 | import logging |
@@ -43,7 +43,7 @@ def load_global_retriever(): |
43 | 43 |
|
44 | 44 |
|
def get_llm():
    """Build and return the LLM used by the QA chain.

    Returns:
        ChatOpenAI: a chat-completions model wrapper with mild creativity
        (temperature 0.6).
    """
    # NOTE(review): the migration from `OpenAI` to `ChatOpenAI` also requires
    # switching the model: "gpt-3.5-turbo-instruct" is a legacy *completions*
    # model and is rejected by the chat-completions endpoint that ChatOpenAI
    # calls. "gpt-3.5-turbo" is the chat-model equivalent.
    llm = ChatOpenAI(temperature=0.6, model_name="gpt-3.5-turbo")
    return llm
48 | 48 |
|
49 | 49 |
|
@@ -80,12 +80,13 @@ def get_qa_chain(llm, retriever, session): |
80 | 80 | Previous conversation: |
81 | 81 | """ |
82 | 82 | chat_prompt_template = "{chat_history}" |
83 | | - human_prompt_template = "{question}." |
| 83 | + human_prompt_template = "{question}" |
84 | 84 | chatbot_prompt_template = "CrapBot:" |
85 | 85 | messages = [ |
86 | 86 | ("system", system_prompt_template), |
87 | 87 | ("placeholder", chat_prompt_template), |
88 | 88 | ("human", human_prompt_template), |
| 89 | + ("system", chatbot_prompt_template), |
89 | 90 | ] |
90 | 91 |
|
91 | 92 | PROMPT = ChatPromptTemplate.from_messages( |
|
0 commit comments