Update app.py

app.py CHANGED
@@ -16,7 +16,6 @@ from langchain_community.llms import HuggingFaceHub
 from langchain_openai import ChatOpenAI
 from langchain_openai import OpenAIEmbeddings
 from langchain.memory import ConversationBufferMemory
-from langchain.prompts import PromptTemplate
 from langchain.chains import ConversationalRetrievalChain
 
 
@@ -88,33 +87,13 @@ def get_conversation_chain(vectorstore):
     Returns:
         - ConversationalRetrievalChain: An initialized conversational chain object.
     """
-
-    # Define a strict prompt template that makes the model answer only based on the document
-    prompt_template = """
-    You are a helpful assistant. Use the following document context to answer the question.
-    If the answer is not in the document, simply respond with "I cannot provide an answer based on the document."
-
-    Document: {context}
-
-    Question: {question}
-    """
-
-    prompt = PromptTemplate(
-        input_variables=["context", "question"], template=prompt_template
-    )
-
     try:
         llm = ChatOpenAI(model_name="gpt-4o")
         memory = ConversationBufferMemory(
             memory_key="chat_history", return_messages=True
         )
         conversation_chain = ConversationalRetrievalChain.from_llm(
-            llm=llm,
-            retriever=vectorstore.as_retriever(),
-            memory=memory,
-            return_source_documents=True,
-            combine_docs_chain_kwargs={"prompt": prompt},
-            document_variable_name="context",  # Specify the variable name for the document context
+            llm=llm, retriever=vectorstore.as_retriever(), memory=memory
         )
         return conversation_chain
     except Exception as e:
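Note: the trimmed call above leaves the chain on LangChain's stock question-answering prompt, and dropping return_source_documents=True means the chain emits a single output key, which sidesteps the usual need to point the memory at a specific output_key. A minimal sketch of the simplified construction under those assumptions (build_chain is an illustrative name, not from this repo):

from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

def build_chain(vectorstore):
    # gpt-4o as the chat model, mirroring the diff above
    llm = ChatOpenAI(model_name="gpt-4o")
    # Memory keyed to "chat_history" so the chain can rephrase follow-up questions
    memory = ConversationBufferMemory(
        memory_key="chat_history", return_messages=True
    )
    # No combine_docs_chain_kwargs: LangChain's default QA prompt is used
    return ConversationalRetrievalChain.from_llm(
        llm=llm, retriever=vectorstore.as_retriever(), memory=memory
    )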
@@ -122,15 +101,11 @@ def get_conversation_chain(vectorstore):
 
 
 def handle_userinput(user_question):
-
-
-
-
-
-
-    # Retrieve the response from the conversation chain
-    response = st.session_state.conversation({"question": user_question})
-
+    response = st.session_state.conversation(
+        {
+            "question": f"Based on the memory and the provided document, answer the following user question: {user_question}. If the question is unrelated to memory or the document, just mention that you cannot provide an answer."
+        }
+    )
     st.session_state.chat_history = response["chat_history"]
 
     for i, message in reversed(list(enumerate(st.session_state.chat_history))):
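Note: the grounding that previously lived in the strict PromptTemplate now rides inside the question string itself. A minimal sketch of that query-time pattern, where ask() and GROUNDING are illustrative names and conversation stands in for the chain kept in st.session_state:

# Wrap the user's question in the grounding instruction at call time
GROUNDING = (
    "Based on the memory and the provided document, answer the following "
    "user question: {q}. If the question is unrelated to memory or the "
    "document, just mention that you cannot provide an answer."
)

def ask(conversation, user_question):
    # ConversationalRetrievalChain returns a dict: "answer" holds the reply,
    # "chat_history" the accumulated messages
    response = conversation({"question": GROUNDING.format(q=user_question)})
    return response["answer"]

One consequence of this pattern is that the instruction text is saved to memory with each question, so it will also appear in the rendered chat history on every turn.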
@@ -144,10 +119,6 @@ def handle_userinput(user_question):
             bot_template.replace("{{MSG}}", message.content), unsafe_allow_html=True
         )
 
-    # Ensure the bot only uses the document and replies accordingly
-    if not response["chat_history"][-1].content:
-        st.write("I cannot provide an answer based on the document.")
-
 
 def get_user_chat_count(user_id):
     """
@@ -247,7 +218,6 @@ def chat(slug, user_id):
     Restricts chat based on user group and chat count.
     """
 
-    # Show the user instruction at the top of the chat interface
     st.write(
         "**Please note:** Due to processing limitations, the chat may not fully comprehend the whole document."
     )