HEHEBOIOG committed on
Commit
187e418
·
verified ·
1 Parent(s): d047c3e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -33
app.py CHANGED
@@ -20,6 +20,7 @@ class AdvancedRAGChatbot:
20
  llm_model: str = "llama-3.3-70b-versatile",
21
  temperature: float = 0.7,
22
  retrieval_k: int = 5):
 
23
  self.embeddings = self._configure_embeddings(embedding_model)
24
  self.semantic_model = SentenceTransformer('all-MiniLM-L6-v2')
25
  self.sentiment_analyzer = pipeline("sentiment-analysis")
@@ -31,10 +32,12 @@ class AdvancedRAGChatbot:
31
  self.qa_chain = self._create_conversational_retrieval_chain()
32
 
33
  def _configure_embeddings(self, model_name: str):
 
34
  encode_kwargs = {'normalize_embeddings': True, 'show_progress_bar': True}
35
  return HuggingFaceBgeEmbeddings(model_name=model_name, encode_kwargs=encode_kwargs)
36
 
37
  def _configure_llm(self, model_name: str, temperature: float):
 
38
  return ChatGroq(
39
  model_name=model_name,
40
  temperature=temperature,
@@ -43,14 +46,24 @@ class AdvancedRAGChatbot:
43
  )
44
 
45
  def _initialize_vector_database(self, persist_directory: str = 'vector_db'):
 
46
  return Chroma(persist_directory=persist_directory, embedding_function=self.embeddings)
47
 
48
  def _configure_retriever(self, retrieval_k: int):
49
- return self.vector_db.as_retriever(k=retrieval_k, search_type="mmr", fetch_k=20)
 
 
 
 
 
 
 
50
 
51
  def _create_conversational_retrieval_chain(self):
 
52
  template = """
53
- You are a helpful AI assistant. Use the following context and chat history to provide a precise answer.
 
54
 
55
  Context: {context}
56
  Chat History: {chat_history}
@@ -68,64 +81,111 @@ class AdvancedRAGChatbot:
68
  )
69
 
70
  def process_query(self, query: str) -> Dict[str, Any]:
 
 
71
  semantic_score = self.semantic_model.encode([query])[0]
72
  sentiment_result = self.sentiment_analyzer(query)[0]
73
  entities = self.ner_pipeline(query)
 
 
74
  result = self.qa_chain({"question": query})
75
 
76
- response_data = {
77
  "response": result['answer'],
78
  "source_documents": result.get('source_documents', []),
79
  "semantic_similarity": semantic_score.tolist(),
80
  "sentiment": sentiment_result,
81
- "named_entities": entities,
82
- "contextual_information": result.get("source_documents", [])
83
  }
84
- return response_data
85
 
86
  def main():
87
- st.set_page_config(page_title="Advanced NLP RAG Chatbot", layout="wide", initial_sidebar_state="expanded")
88
- st.title("🧠 Advanced NLP RAG Chatbot")
 
 
 
 
 
89
 
 
90
  with st.sidebar:
91
- st.header("Configuration")
 
 
 
92
  embedding_model = st.selectbox(
93
  "Embedding Model",
94
  ["BAAI/bge-large-en-v1.5", "sentence-transformers/all-MiniLM-L6-v2"]
95
  )
96
- temperature = st.slider("Model Temperature", 0.0, 1.0, 0.7)
97
- retrieval_k = st.slider("Documents to Retrieve (k)", 1, 10, 5)
 
 
 
 
98
 
 
99
  chatbot = AdvancedRAGChatbot(
100
  embedding_model=embedding_model,
101
  temperature=temperature,
102
  retrieval_k=retrieval_k
103
  )
104
 
105
- st.markdown("### Chat with the AI Assistant")
106
- query_col, response_col = st.columns(2)
107
 
108
- with query_col:
109
- user_input = st.text_area("Ask your question:", placeholder="Type your question here...", height=150)
110
 
111
- if user_input:
112
- with st.spinner("Processing your query..."):
113
- response = chatbot.process_query(user_input)
 
 
 
 
 
114
 
115
- with response_col:
116
- st.markdown("### Bot Response")
117
- st.write(response['response'])
118
-
119
- st.markdown("### Sentiment Analysis")
120
- st.write(f"Sentiment: {response['sentiment']['label']} ({response['sentiment']['score']:.2%})")
121
-
122
- st.markdown("### Named Entities")
123
- for entity in response['named_entities']:
124
- st.write(f"- {entity['word']} ({entity['entity']})")
125
-
126
- st.markdown("### Source Documents")
127
- for doc in response['source_documents']:
128
- st.text_area("Source Document", doc.page_content, height=100)
129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  if __name__ == "__main__":
131
- main()
 
20
  llm_model: str = "llama-3.3-70b-versatile",
21
  temperature: float = 0.7,
22
  retrieval_k: int = 5):
23
+ """Initialize the Advanced RAG Chatbot with configurable parameters"""
24
  self.embeddings = self._configure_embeddings(embedding_model)
25
  self.semantic_model = SentenceTransformer('all-MiniLM-L6-v2')
26
  self.sentiment_analyzer = pipeline("sentiment-analysis")
 
32
  self.qa_chain = self._create_conversational_retrieval_chain()
33
 
34
  def _configure_embeddings(self, model_name: str):
35
+ """Configure embeddings with normalization"""
36
  encode_kwargs = {'normalize_embeddings': True, 'show_progress_bar': True}
37
  return HuggingFaceBgeEmbeddings(model_name=model_name, encode_kwargs=encode_kwargs)
38
 
39
  def _configure_llm(self, model_name: str, temperature: float):
40
+ """Configure the Language Model with Groq"""
41
  return ChatGroq(
42
  model_name=model_name,
43
  temperature=temperature,
 
46
  )
47
 
48
  def _initialize_vector_database(self, persist_directory: str = 'vector_db'):
49
+ """Initialize the vector database"""
50
  return Chroma(persist_directory=persist_directory, embedding_function=self.embeddings)
51
 
52
  def _configure_retriever(self, retrieval_k: int):
53
+ """Configure the document retriever"""
54
+ return self.vector_db.as_retriever(
55
+ search_kwargs={
56
+ "k": retrieval_k,
57
+ "search_type": "mmr",
58
+ "fetch_k": 20
59
+ }
60
+ )
61
 
62
  def _create_conversational_retrieval_chain(self):
63
+ """Create the conversational retrieval chain"""
64
  template = """
65
+ You are a helpful AI assistant. Provide a precise and comprehensive answer
66
+ based on the context and chat history.
67
 
68
  Context: {context}
69
  Chat History: {chat_history}
 
81
  )
82
 
83
  def process_query(self, query: str) -> Dict[str, Any]:
84
+ """Process the user query with multiple NLP techniques"""
85
+ # Advanced NLP Analysis
86
  semantic_score = self.semantic_model.encode([query])[0]
87
  sentiment_result = self.sentiment_analyzer(query)[0]
88
  entities = self.ner_pipeline(query)
89
+
90
+ # RAG Query Processing
91
  result = self.qa_chain({"question": query})
92
 
93
+ return {
94
  "response": result['answer'],
95
  "source_documents": result.get('source_documents', []),
96
  "semantic_similarity": semantic_score.tolist(),
97
  "sentiment": sentiment_result,
98
+ "named_entities": entities
 
99
  }
 
100
 
101
def main():
    """Streamlit entry point: sidebar settings plus a two-column chat UI."""
    # Page Configuration
    st.set_page_config(
        page_title="Advanced RAG Chatbot",
        page_icon="🧠",
        layout="wide",
        initial_sidebar_state="expanded"
    )

    # Sidebar Configuration
    # NOTE(review): several emoji strings below look mojibake-encoded
    # ("πŸ”§" is UTF-8 "🔧" mis-decoded) while page_icon renders fine —
    # confirm the file's encoding before shipping.
    with st.sidebar:
        st.header("πŸ”§ Chatbot Settings")
        st.markdown("Customize your AI assistant's behavior")

        # Model Configuration
        embedding_model = st.selectbox(
            "Embedding Model",
            ["BAAI/bge-large-en-v1.5", "sentence-transformers/all-MiniLM-L6-v2"]
        )
        temperature = st.slider("Creativity Level", 0.0, 1.0, 0.7, help="Higher values make responses more creative")
        retrieval_k = st.slider("Context Depth", 1, 10, 5, help="Number of reference documents to retrieve")

        # Additional Controls
        st.divider()
        # NOTE(review): reset_chat is never read afterwards — the button has
        # no effect; either wire it to clear the chain's memory or drop it.
        reset_chat = st.button("πŸ”„ Reset Conversation")

    # Initialize Chatbot
    # NOTE(review): this is rebuilt on every Streamlit rerun, reloading the
    # embedding/sentiment/NER models each interaction — consider caching the
    # instance with st.cache_resource.
    chatbot = AdvancedRAGChatbot(
        embedding_model=embedding_model,
        temperature=temperature,
        retrieval_k=retrieval_k
    )

    # Main Chat Interface
    st.title("πŸ€– Advanced RAG Chatbot")

    # Two-column layout: input on the left, analysis/response on the right.
    col1, col2 = st.columns(2)

    with col1:
        st.header("Input")
        # Chat input with placeholder
        user_input = st.text_area(
            "Ask your question",
            placeholder="Enter your query here...",
            height=250
        )

        # Submit button
        submit_button = st.button("Send Query", type="primary")

    with col2:
        st.header("Response")
        # Response container: only runs the pipeline when both the button was
        # pressed and the text area is non-empty.
        if submit_button and user_input:
            with st.spinner("Processing your query..."):
                try:
                    response = chatbot.process_query(user_input)

                    # Bot Response
                    st.markdown("#### Bot's Answer")
                    st.write(response['response'])

                    # Sentiment Analysis
                    st.markdown("#### Sentiment Analysis")
                    sentiment = response['sentiment']
                    st.metric(
                        label="Sentiment",
                        value=sentiment['label'],
                        delta=f"{sentiment['score']:.2%}"
                    )

                    # Named Entities
                    st.markdown("#### Detected Entities")
                    for entity in response['named_entities']:
                        st.text(f"{entity['word']} ({entity['entity']})")

                    # Source Documents
                    if response['source_documents']:
                        st.markdown("#### Reference Documents")
                        for i, doc in enumerate(response['source_documents'], 1):
                            with st.expander(f"Document {i}"):
                                st.write(doc.page_content)

                # Broad catch is acceptable at this UI boundary: surface the
                # error to the user instead of crashing the app.
                except Exception as e:
                    st.error(f"An error occurred: {e}")
        else:
            st.info("Submit a query to see the AI's response")
189
+
190
# Run the app when executed as a script (e.g. `streamlit run app.py`).
if __name__ == "__main__":
    main()