Spaces:
Running
Running
Update frontend/chat_interface.py
Browse files- frontend/chat_interface.py +145 -147
frontend/chat_interface.py
CHANGED
@@ -1,148 +1,146 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
from backend.analysis import llm
|
3 |
-
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
|
4 |
-
from langchain.chains.combine_documents import create_stuff_documents_chain
|
5 |
-
from langchain_community.chat_message_histories import ChatMessageHistory
|
6 |
-
from langchain_core.chat_history import BaseChatMessageHistory
|
7 |
-
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
8 |
-
from langchain_core.runnables.history import RunnableWithMessageHistory
|
9 |
-
|
10 |
-
# Chat interface section of the application - displayed at the right
|
11 |
-
def render_chat_interface():
|
12 |
-
st.header("Chat with the Resume") # Header for the chat interface
|
13 |
-
|
14 |
-
# Add CSS for fixing chat input position at the bottom
|
15 |
-
st.markdown("""
|
16 |
-
<style>
|
17 |
-
.stChatInput {
|
18 |
-
position: fixed;
|
19 |
-
bottom: 0;
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
"
|
49 |
-
"
|
50 |
-
"
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
("
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
"
|
70 |
-
"
|
71 |
-
"
|
72 |
-
"
|
73 |
-
"
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
("
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
)
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
else:
|
148 |
st.info("Please upload a resume and analyze it to start chatting.")
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from backend.analysis import llm
|
3 |
+
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
|
4 |
+
from langchain.chains.combine_documents import create_stuff_documents_chain
|
5 |
+
from langchain_community.chat_message_histories import ChatMessageHistory
|
6 |
+
from langchain_core.chat_history import BaseChatMessageHistory
|
7 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
8 |
+
from langchain_core.runnables.history import RunnableWithMessageHistory
|
9 |
+
|
10 |
+
# Chat interface section of the application - displayed at the right
def render_chat_interface():
    """Render the resume Q&A chat panel.

    Expects ``st.session_state.vector_store`` to hold a vector store built
    from the uploaded resume (created elsewhere in the app); if it is absent,
    only an informational prompt is shown. Maintains the visible transcript in
    ``st.session_state.messages`` and the LLM-side chat history in
    ``st.session_state.chat_history_store`` so both survive Streamlit reruns.

    Returns:
        None. All output is rendered through Streamlit side effects.
    """
    st.header("Chat with the Resume")  # Header for the chat interface

    # Add CSS for fixing chat input position at the bottom of the viewport.
    st.markdown("""
        <style>
        .stChatInput {
            position: fixed;
            bottom: 0;
            padding: 1rem;
            background-color: white;
            z-index: 1000;
        }
        .stChatFloatingInputContainer {
            margin-bottom: 20px;
        }
        </style>
    """, unsafe_allow_html=True)  # Injecting custom CSS for styling

    # Initialize the visible transcript: a list of {"role", "content"} dicts.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Guard clause: without a vector store there is nothing to chat against.
    if "vector_store" not in st.session_state:
        st.info("Please upload a resume and analyze it to start chatting.")
        return

    # Setting up the vector store as retriever.
    retriever = st.session_state.vector_store.as_retriever(
        search_type="mmr",  # Uses Maximum Marginal Relevance for search
        search_kwargs={
            "k": 3,  # Fetch top 3 chunks
        },
    )

    # Prompt that rewrites a follow-up question into a standalone one, so the
    # retriever can work without seeing the chat history.
    contextualize_q_system_prompt = (
        "Given a chat history and the latest user question "
        "which might reference context in the chat history, "
        "formulate a standalone question which can be understood "
        "without the chat history. Do NOT answer the question, "
        "just reformulate it if needed and otherwise return it as is."
    )
    contextualize_q_prompt = ChatPromptTemplate.from_messages([
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ])

    # Retriever that first reformulates the question using the history.
    history_aware_retriever = create_history_aware_retriever(
        llm, retriever, contextualize_q_prompt
    )

    # System prompt for answering questions from the retrieved context.
    system_prompt = (
        "You are an assistant for question-answering tasks. "
        "Use the following pieces of retrieved context to answer "
        "the question. If you don't know the answer, say that you "
        "don't know. Use three sentences maximum and keep the "
        "answer concise."
        "\n\n"
        "{context}"
    )
    qa_prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ])

    # Retrieval + answer-generation pipeline.
    question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
    retrieval_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)

    # BUG FIX: the history store must persist across Streamlit reruns. A plain
    # local `store = {}` was rebuilt on every rerun, silently discarding the
    # ChatMessageHistory each turn, so the chain never saw prior messages.
    if "chat_history_store" not in st.session_state:
        st.session_state.chat_history_store = {}
    store = st.session_state.chat_history_store

    def get_session_history(session_id: str) -> BaseChatMessageHistory:
        """Return (creating if needed) the message history for a session id."""
        if session_id not in store:
            store[session_id] = ChatMessageHistory()
        return store[session_id]

    # Wrap the chain so chat history is injected/recorded automatically.
    conversational_retrieval_chain = RunnableWithMessageHistory(
        retrieval_chain,
        get_session_history,
        input_messages_key="input",
        history_messages_key="chat_history",
        output_messages_key="answer",
    )

    # Container for messages; spacer keeps them clear of the fixed input box.
    chat_container = st.container()
    st.markdown("<div style='height: 100px;'></div>", unsafe_allow_html=True)

    # Input box - pinned to the bottom by the CSS injected above.
    prompt = st.chat_input("Ask about the resume")

    # Replay the stored transcript.
    with chat_container:
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

    if prompt:  # A new user question was submitted this rerun.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with chat_container:
            with st.chat_message("user"):
                st.markdown(prompt)

            with st.chat_message("assistant"):
                # BUG FIX: do NOT pass "chat_history" in the input —
                # RunnableWithMessageHistory injects it from
                # get_session_history; the session_state entries are plain
                # dicts, not BaseMessage objects, and would conflict with the
                # managed history.
                response = conversational_retrieval_chain.invoke(
                    {"input": prompt},
                    config={
                        "configurable": {"session_id": "abc123"}  # Setting session ID
                    },
                )
                answer_text = response['answer']  # Extract the assistant's response
                st.markdown(answer_text)  # Display the response

        st.session_state.messages.append({"role": "assistant", "content": answer_text})

        # Force a rerun to update the chat immediately.
        st.rerun()