Spaces:
Tushar Malik
committed on
Update app.py
app.py
CHANGED
@@ -1,63 +1,552 @@
(removed: the previous 63-line app.py — a simpler Gradio demo with a `client = …` setup and `demo.launch()` under `if __name__ == "__main__":`; the rest of the removed code is not recoverable)
# Cell 2: Import necessary libraries
import time
import fitz  # PyMuPDF
import numpy as np
import pickle
import os
import dill
import logging
import asyncio
import networkx as nx
from mistralai import Mistral
from annoy import AnnoyIndex
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.preprocessing import normalize
from rank_bm25 import BM25Okapi
from gensim.models import Word2Vec
from typing import List, Optional, Tuple

# Cell 3: Set up logging and Mistral client
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Read the API key from the environment rather than hard-coding it in the source.
api_key = os.environ.get("MISTRAL_API_KEY")
client = Mistral(api_key=api_key)

# Cell 4: Function to get embeddings with rate limiting
def get_text_embedding_with_rate_limit(text_list, initial_delay=2, max_retries=10):
    embeddings = []
    for text in text_list:
        retries = 0
        delay = initial_delay
        while retries < max_retries:
            try:
                # Whitespace tokens are a rough proxy for the model's token limit.
                token_count = len(text.split())
                if token_count > 16384:
                    print("Warning: Text chunk exceeds the token limit. Truncating the text.")
                    text = " ".join(text.split()[:16384])
                response = client.embeddings.create(model="mistral-embed", inputs=[text])
                embeddings.extend([embedding.embedding for embedding in response.data])
                time.sleep(delay)  # throttle between successful requests
                break
            except Exception as e:
                retries += 1
                print(f"Embedding request failed ({e}), retrying in {delay} seconds... (Attempt {retries}/{max_retries})")
                time.sleep(delay)
                delay *= 2  # exponential backoff
            if retries == max_retries:
                print("Max retries reached. Skipping this chunk.")
                break
    return embeddings

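With the defaults above (initial_delay=2, doubling after each failure), the wait schedule before a chunk is given up on is easy to check:

delays, d = [], 2
for _ in range(10):
    delays.append(d)
    d *= 2
print(delays)  # [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] -- about 34 minutes in total
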
# Cell 5: Function to store embeddings in a vector database
def store_embeddings_in_vector_db(
    pdf_path: str,
    vector_db_path: str,
    annoy_index_path: str,
    chunk_size: int = 2048,
    overlap: int = 200,
    num_trees: int = 10
):
    doc = fitz.open(pdf_path)
    all_embeddings = []
    all_texts = []
    total_pages = doc.page_count
    logging.info(f"Processing PDF: {pdf_path} with {total_pages} pages.")

    for page_num in range(total_pages):
        page = doc.load_page(page_num)
        text = page.get_text()
        if text.strip():
            chunks = split_text_into_chunks(text, chunk_size, overlap)
            embeddings = get_text_embedding_with_rate_limit(chunks)
            all_embeddings.extend(embeddings)
            all_texts.extend(chunks)
            logging.info(f"Processed page {page_num + 1}/{total_pages}, extracted {len(chunks)} chunks.")
        else:
            logging.warning(f"No text found on page {page_num + 1}.")

    if not all_embeddings:
        raise ValueError(f"No text could be embedded from {pdf_path}; nothing to index.")

    embeddings_np = np.array(all_embeddings).astype('float32')
    with open(vector_db_path, "wb") as f:
        dill.dump({'embeddings': embeddings_np, 'texts': all_texts}, f)
    logging.info(f"Stored embeddings and texts to {vector_db_path}.")

    if os.path.exists(annoy_index_path):
        os.remove(annoy_index_path)
        logging.info(f"Existing Annoy index at {annoy_index_path} removed.")

    embedding_dim = embeddings_np.shape[1]
    annoy_index = AnnoyIndex(embedding_dim, 'angular')
    for i, embedding in enumerate(embeddings_np):
        annoy_index.add_item(i, embedding)
    annoy_index.build(num_trees)
    annoy_index.save(annoy_index_path)
    logging.info(f"Annoy index built with {len(all_embeddings)} items and saved to {annoy_index_path}.")

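A quick way to sanity-check the two artifacts this function writes (a sketch; the paths match the defaults used further down in this file):

import dill
from annoy import AnnoyIndex

with open("vector_db.pkl", "rb") as f:
    db = dill.load(f)
index = AnnoyIndex(db['embeddings'].shape[1], 'angular')
index.load("vector_index.ann")
# The three stored chunks nearest to the first embedding (item 0 itself should come back first).
print(index.get_nns_by_vector(db['embeddings'][0], 3))
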
# Cell 6: Helper functions for text processing
def split_text_into_chunks(text: str, chunk_size: int = 2048, overlap: int = 200) -> List[str]:
    # Sliding window over whitespace tokens; requires overlap < chunk_size,
    # otherwise the window never advances.
    tokens = text.split()
    chunks = []
    start = 0
    while start < len(tokens):
        end = start + chunk_size
        chunk = " ".join(tokens[start:end])
        chunks.append(chunk)
        start += chunk_size - overlap
    return chunks

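The window advances by chunk_size − overlap tokens, so consecutive chunks share their boundary tokens; a quick check:

print(split_text_into_chunks("a b c d e f g h", chunk_size=5, overlap=2))
# -> ['a b c d e', 'd e f g h', 'g h']
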
class MistralRAGChatbot:
    def __init__(self, vector_db_path: str, annoy_index_path: str):
        self.embeddings, self.texts = self.load_vector_db(vector_db_path)
        self.annoy_index = self.load_annoy_index(annoy_index_path, self.embeddings.shape[1])
        self.tfidf_matrix, self.tfidf_vectorizer = self.calculate_tfidf(self.texts)
        self.bm25 = BM25Okapi([text.split() for text in self.texts])
        self.word2vec_model = self.train_word2vec(self.texts)
        self.reranking_methods = {
            'reciprocal_rank_fusion': self.reciprocal_rank_fusion,
            'weighted_score_fusion': self.weighted_score_fusion,
            'semantic_similarity': self.semantic_similarity_reranking,
            'advanced_fusion': self.advanced_fusion_retrieval
        }
        logging.info("MistralRAGChatbot initialized successfully.")

    def load_vector_db(self, vector_db_path: str) -> Tuple[np.ndarray, List[str]]:
        with open(vector_db_path, "rb") as f:
            data = dill.load(f)
        embeddings = np.array(data['embeddings'], dtype='float32')
        texts = data['texts']
        logging.info(f"Loaded vector database from {vector_db_path} with {len(texts)} entries.")
        return embeddings, texts

    def load_annoy_index(self, annoy_index_path: str, embedding_dim: int) -> AnnoyIndex:
        annoy_index = AnnoyIndex(embedding_dim, 'angular')
        annoy_index.load(annoy_index_path)
        logging.info(f"Loaded Annoy index from {annoy_index_path}.")
        return annoy_index

    def calculate_tfidf(self, texts: List[str]) -> Tuple[np.ndarray, TfidfVectorizer]:
        vectorizer = TfidfVectorizer(stop_words='english')
        tfidf_matrix = vectorizer.fit_transform(texts)
        logging.info("TF-IDF matrix calculated.")
        return tfidf_matrix, vectorizer

    def train_word2vec(self, texts: List[str]) -> Word2Vec:
        tokenized_texts = [text.split() for text in texts]
        model = Word2Vec(sentences=tokenized_texts, vector_size=100, window=5, min_count=1, workers=4)
        logging.info("Word2Vec model trained.")
        return model

    async def get_text_embedding(self, text: str, model: str = "mistral-embed") -> np.ndarray:
        try:
            response = await client.embeddings.create_async(model=model, inputs=[text])
            return np.array(response.data[0].embedding)
        except Exception as e:
            logging.error(f"Error fetching embedding: {e}")
            return np.zeros((1024,))  # mistral-embed vectors are 1024-dimensional

    def advanced_fusion_retrieval(self, user_query: str, docs: List[dict]) -> List[dict]:
        query_embedding = self.create_embeddings([user_query])[0]

        vector_scores = {doc['index']: doc['score'] for doc in docs if doc['method'] == 'annoy'}
        bm25_scores = {doc['index']: doc['score'] for doc in docs if doc['method'] == 'bm25'}

        # Build a similarity graph over all chunks and run PageRank on it.
        sim_graph = nx.Graph()
        sim_matrix = cosine_similarity(self.embeddings)
        for i in range(len(self.embeddings)):
            for j in range(i + 1, len(self.embeddings)):
                if sim_matrix[i, j] > 0.5:
                    sim_graph.add_edge(i, j, weight=sim_matrix[i, j])

        # Keep PageRank scores keyed by node index; flattening .values() into an
        # array would misalign scores whenever a chunk has no edges in the graph.
        pagerank_scores = nx.pagerank(sim_graph, weight='weight') if sim_graph.number_of_nodes() else {}

        combined_scores = {}
        for doc in docs:
            idx = doc['index']
            combined_scores[idx] = (
                0.5 * vector_scores.get(idx, 0) +
                0.3 * bm25_scores.get(idx, 0) +
                0.2 * pagerank_scores.get(idx, 0)
            )

        sorted_indices = sorted(combined_scores, key=combined_scores.get, reverse=True)

        return [{'text': self.texts[i], 'method': 'advanced_fusion', 'score': combined_scores[i], 'index': i} for i in sorted_indices[:5]]

    def create_embeddings(self, text_list: List[str]) -> np.ndarray:
        expected_dim = 1024  # the dimension expected by the Annoy index
        embeddings = []
        for text in text_list:
            word_vectors = [self.word2vec_model.wv[token] for token in text.split() if token in self.word2vec_model.wv]
            avg_embedding = np.mean(word_vectors, axis=0, dtype=np.float32) if word_vectors else np.zeros(self.word2vec_model.vector_size, dtype=np.float32)
            # Pad or truncate the 100-dim Word2Vec average to the 1024-dim index space.
            if avg_embedding.shape[0] < expected_dim:
                avg_embedding = np.pad(avg_embedding, (0, expected_dim - avg_embedding.shape[0]), 'constant')
            elif avg_embedding.shape[0] > expected_dim:
                avg_embedding = avg_embedding[:expected_dim]
            embeddings.append(avg_embedding)
        return np.array(embeddings, dtype=np.float32)

    async def generate_response_with_rag(
        self,
        user_query: str,
        model: str = "mistral-small-latest",
        top_k: int = 10,
        response_style: str = "Detailed",
        selected_retrieval_methods: Optional[List[str]] = None,
        selected_reranking_methods: Optional[List[str]] = None
    ) -> Tuple[str, List[str], List[dict]]:
        if not selected_retrieval_methods:
            selected_retrieval_methods = ['annoy', 'tfidf', 'bm25', 'word2vec', 'euclidean', 'jaccard']
        if not selected_reranking_methods:
            selected_reranking_methods = ['reciprocal_rank_fusion', 'weighted_score_fusion', 'advanced_fusion']
        query_embedding = await self.get_text_embedding(user_query)
        retrieved_docs = self.retrieve_documents(user_query, query_embedding, top_k, selected_retrieval_methods)
        reranked_docs = self.rerank_documents(user_query, retrieved_docs, selected_reranking_methods)
        context = "\n\n".join([doc['text'] for doc in reranked_docs[:5]])
        prompt = self.build_prompt(context, user_query, response_style)
        try:
            async_response = await client.chat.stream_async(model=model, messages=[{"role": "user", "content": prompt}])
            response = ""
            async for chunk in async_response:
                response += chunk.data.choices[0].delta.content or ""  # guard against empty deltas
            logging.info("Response generated successfully.")
        except Exception as e:
            logging.error(f"Error generating response: {e}")
            response = "An error occurred while generating the response."
        return response, [doc['text'] for doc in reranked_docs[:5]], reranked_docs[:5]

    def retrieve_documents(
        self,
        user_query: str,
        query_embedding: np.ndarray,
        top_k: int,
        selected_methods: List[str]
    ) -> List[dict]:
        all_docs = []
        for method in selected_methods:
            # Dispatch to the matching retrieve_with_<method> implementation below.
            indices, scores = getattr(self, f"retrieve_with_{method}")(user_query, query_embedding, top_k)
            for idx, score in zip(indices, scores):
                all_docs.append({
                    'text': self.texts[idx],
                    'method': method,
                    'score': score,
                    'index': idx
                })
        return all_docs

    def retrieve_with_annoy(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
        n_results = min(top_k, len(self.texts))
        indices, distances = self.annoy_index.get_nns_by_vector(query_embedding, n_results, include_distances=True)
        # Normalize distances to a [0, 1] score; guard against an empty or all-zero result.
        max_dist = max(distances) if distances else 1.0
        scores = [1.0 - (dist / max_dist) for dist in distances] if max_dist > 0 else [1.0] * len(distances)
        logging.debug(f"Annoy retrieval returned {len(indices)} documents.")
        return indices, scores

    def retrieve_with_tfidf(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
        query_vec = self.tfidf_vectorizer.transform([user_query])
        similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()
        indices = np.argsort(-similarities)[:top_k]
        logging.debug(f"TF-IDF retrieval returned {len(indices)} documents.")
        return indices, similarities[indices].tolist()

    def retrieve_with_bm25(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
        tokenized_query = user_query.split()
        scores = self.bm25.get_scores(tokenized_query)
        indices = np.argsort(-scores)[:top_k]
        logging.debug(f"BM25 retrieval returned {len(indices)} documents.")
        return indices, scores[indices].tolist()

    def retrieve_with_word2vec(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
        query_tokens = user_query.split()
        in_vocab = [self.word2vec_model.wv[token] for token in query_tokens if token in self.word2vec_model.wv]
        if not in_vocab:  # no query token in the Word2Vec vocabulary
            return [], []
        query_vec = np.mean(in_vocab, axis=0)
        expected_dim = query_vec.shape[0]
        doc_vectors = []
        for doc in self.texts:
            word_vectors = [self.word2vec_model.wv[token] for token in doc.split() if token in self.word2vec_model.wv]
            avg_vector = np.mean(word_vectors, axis=0) if word_vectors else np.zeros(expected_dim)
            doc_vectors.append(avg_vector)
        doc_vectors = np.array(doc_vectors)
        similarities = cosine_similarity([query_vec], doc_vectors).flatten()
        indices = np.argsort(-similarities)[:top_k]
        return indices, similarities[indices].tolist()

    def retrieve_with_euclidean(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
        distances = euclidean_distances([query_embedding], self.embeddings).flatten()
        indices = np.argsort(distances)[:top_k]
        # Convert distances (lower is better) into similarities (higher is better)
        # so the fusion steps can treat every method's score uniformly.
        scores = (1.0 / (1.0 + distances[indices])).tolist()
        logging.debug(f"Euclidean retrieval returned {len(indices)} documents.")
        return indices, scores

    def retrieve_with_jaccard(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
        query_set = set(user_query.lower().split())
        scores = []
        for doc in self.texts:
            doc_set = set(doc.lower().split())
            intersection = query_set.intersection(doc_set)
            union = query_set.union(doc_set)
            score = float(len(intersection)) / len(union) if union else 0
            scores.append(score)
        indices = np.argsort(-np.array(scores))[:top_k]
        logging.debug(f"Jaccard retrieval returned {len(indices)} documents.")
        return indices.tolist(), [scores[i] for i in indices]

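As a quick check of the Jaccard measure above, two hypothetical token sets sharing two of six distinct tokens score 2/6:

q = {"chest", "pain", "history"}
d = {"patient", "history", "of", "chest", "discomfort"}
print(len(q & d) / len(q | d))  # 2 / 6 ≈ 0.333
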
    def rerank_documents(
        self,
        user_query: str,
        retrieved_docs: List[dict],
        selected_methods: List[str]
    ) -> List[dict]:
        # Apply the selected rerankers in order, each consuming the previous output.
        reranked_docs = retrieved_docs
        for method in selected_methods:
            if method == 'advanced_fusion':
                reranked_docs = self.advanced_fusion_retrieval(user_query, reranked_docs)
            else:
                reranked_docs = self.reranking_methods[method](user_query, reranked_docs)

        return reranked_docs

    def reciprocal_rank_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
        k = 60  # standard RRF damping constant
        # Rank each document within the method that retrieved it (1 = best).
        method_ranks = {}
        for doc in docs:
            method = doc['method']
            if method not in method_ranks:
                method_ranks[method] = {doc['index']: 1}
            else:
                method_ranks[method][doc['index']] = len(method_ranks[method]) + 1
        # Fuse: each method contributes 1 / (k + rank) for every document it ranked.
        fused_scores = {}
        for doc in docs:
            idx = doc['index']
            if idx not in fused_scores:
                fused_scores[idx] = sum(1 / (k + ranks[idx]) for ranks in method_ranks.values() if idx in ranks)
        reranked_docs = sorted(docs, key=lambda x: fused_scores.get(x['index'], 0), reverse=True)
        for doc in reranked_docs:
            doc['rrf_score'] = fused_scores.get(doc['index'], 0)
        return reranked_docs

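For intuition: with k = 60, a chunk ranked 1st by one retriever and 3rd by another fuses to roughly 0.032, while a chunk seen only once at rank 1 gets about 0.016:

k = 60
print(1 / (k + 1) + 1 / (k + 3))  # ≈ 0.0323
print(1 / (k + 1))                # ≈ 0.0164
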
    def weighted_score_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
        method_weights = {
            'annoy': 0.3,
            'tfidf': 0.2,
            'bm25': 0.2,
            'word2vec': 0.1,
            'euclidean': 0.1,
            'jaccard': 0.1
        }
        fused_scores = {}
        for doc in docs:
            idx = doc['index']
            # The default weight covers methods not in the table, e.g. docs
            # relabelled by an earlier reranking pass.
            weight = method_weights.get(doc['method'], 0.1)
            fused_scores[idx] = fused_scores.get(idx, 0.0) + doc['score'] * weight

        reranked_docs = sorted(docs, key=lambda x: fused_scores[x['index']], reverse=True)
        for doc in reranked_docs:
            doc['wsf_score'] = fused_scores[doc['index']]
        return reranked_docs

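One caveat worth seeing in numbers: the raw scores being mixed live on different scales (cosine-style scores are bounded by 1, BM25 scores are unbounded), so the weights implicitly favour methods with larger magnitudes:

# e.g. a chunk scored 0.80 by annoy and 5.20 by bm25:
print(0.3 * 0.80 + 0.2 * 5.20)  # 1.28 -- the BM25 term dominates despite its lower weight
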
    def semantic_similarity_reranking(self, user_query: str, docs: List[dict]) -> List[dict]:
        in_vocab = [self.word2vec_model.wv[token] for token in user_query.split() if token in self.word2vec_model.wv]
        if not in_vocab:  # no query token in the Word2Vec vocabulary; keep the incoming order
            return docs
        query_embedding = np.mean(in_vocab, axis=0)
        for doc in docs:
            word_vectors = [self.word2vec_model.wv[token] for token in doc['text'].split() if token in self.word2vec_model.wv]
            doc_embedding = np.mean(word_vectors, axis=0) if word_vectors else np.zeros(query_embedding.shape)
            doc['semantic_score'] = cosine_similarity([query_embedding], [doc_embedding])[0][0]
        return sorted(docs, key=lambda x: x['semantic_score'], reverse=True)

    def build_prompt(self, context: str, user_query: str, response_style: str) -> str:
        styles = {
            "detailed": "Provide a comprehensive and detailed answer based on the provided context.",
            "concise": "Provide a brief and concise answer based on the provided context.",
            "creative": "Provide a creative and engaging answer based on the provided context.",
            "technical": "Provide a technical and in-depth answer based on the provided context."
        }

        style_instruction = styles.get(response_style.lower(), styles["detailed"])

        if not context or not self.is_context_relevant(context, user_query):
            prompt = f"""You are an intelligent assistant.

User Question:
{user_query}

Instruction:
The document database does not contain relevant information to answer the question. Please inform the user that no relevant documents were found and refrain from generating an imaginative or unrelated response."""
        else:
            prompt = f"""You are an intelligent assistant.

Context:
{context}

User Question:
{user_query}

Instruction:
{style_instruction}"""

        logging.debug("Prompt constructed for response generation.")
        return prompt

    def is_context_relevant(self, context: str, user_query: str) -> bool:
        # Crude lexical gate: more than 20% of the query's terms must appear in the context.
        context_lower = context.lower()
        user_query_lower = user_query.lower()
        query_terms = set(user_query_lower.split())
        context_terms = set(context_lower.split())
        common_terms = query_terms.intersection(context_terms)
        return len(common_terms) > len(query_terms) * 0.2

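So a four-term query passes the gate as soon as a single term shows up in the retrieved context (hypothetical sets for illustration):

query_terms = {"name", "of", "the", "patient"}
context_terms = {"patient", "name:", "john", "doe"}
print(len(query_terms & context_terms) > len(query_terms) * 0.2)  # 1 > 0.8 -> True
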
# Cell 8: Store embeddings in vector DB and Annoy index
def create_vector_db_and_annoy_index(pdf_path, vector_db_path, annoy_index_path):
    store_embeddings_in_vector_db(pdf_path, vector_db_path, annoy_index_path)
    print("Vector database and Annoy index creation completed.")

# Cell 9: Run the store embeddings function (example)
# Replace 'med.pdf' with your PDF file path.
# It will create 'vector_db.pkl' and 'vector_index.ann'.
# Note: this runs once at import time, embedding the whole PDF on startup.
create_vector_db_and_annoy_index('med.pdf', 'vector_db.pkl', 'vector_index.ann')

# # Cell 10: Query the chatbot with user input
# async def query_chatbot():
#     vector_db_path = "vector_db.pkl"
#     annoy_index_path = "vector_index.ann"
#     chatbot = MistralRAGChatbot(vector_db_path, annoy_index_path)
#
#     user_query = input("Please enter your query: ")
#     response_style = input("Please choose response style (Detailed, Concise, Creative, Technical): ").strip().lower()
#     selected_retrieval_methods = input("Please choose retrieval methods (comma-separated: annoy, tfidf, bm25, euclidean, jaccard): ")
#     selected_reranking_methods = input("Please choose reranking methods (comma-separated: advanced_fusion, reciprocal_rank_fusion, weighted_score_fusion, semantic_similarity): ")
#
#     selected_retrieval_methods_list = [method.strip() for method in selected_retrieval_methods.split(',') if method.strip()]
#     selected_reranking_methods_list = [method.strip() for method in selected_reranking_methods.split(',') if method.strip()]
#
#     response, retrieved_docs, source_info = await chatbot.generate_response_with_rag(
#         user_query=user_query,
#         response_style=response_style,
#         selected_retrieval_methods=selected_retrieval_methods_list,
#         selected_reranking_methods=selected_reranking_methods_list
#     )
#
#     print("\nResponse:")
#     print(response)
#     print("\nRetrieved and Reranked Documents:")
#     for idx, doc_info in enumerate(source_info, start=1):
#         print(f"\nDocument {idx}:")
#         print(f"Content Preview: {doc_info['text'][:200]}...")
#         print(f"Original Retrieval Method: {doc_info['method']}")
#         if 'score' in doc_info:
#             print(f"Original Score: {doc_info['score']:.4f}")
#         for key, value in doc_info.items():
#             if key.endswith('_score') and key != 'score':
#                 print(f"{key.replace('_', ' ').title()}: {value:.4f}")
#
# import asyncio
#
# async def query_chatbot2():
#     vector_db_path = "vector_db.pkl"
#     annoy_index_path = "vector_index.ann"
#     chatbot = MistralRAGChatbot(vector_db_path, annoy_index_path)
#
#     user_query = "what is the name of the patient"
#     response_style = "Concise"
#     selected_retrieval_methods_list = ["tfidf", "bm25"]
#     selected_reranking_methods_list = ["reciprocal_rank_fusion"]
#
#     try:
#         response, retrieved_docs, source_info = await chatbot.generate_response_with_rag(
#             user_query=user_query,
#             response_style=response_style,
#             selected_retrieval_methods=selected_retrieval_methods_list,
#             selected_reranking_methods=selected_reranking_methods_list
#         )
#
#         print("\n--- Response ---")
#         print(response)
#
#         print("\n--- Retrieved and Reranked Documents ---")
#         for idx, doc_info in enumerate(source_info, start=1):
#             print(f"\nDocument {idx}:")
#             print(f"Content Preview: {doc_info['text'][:150]}...")  # Show a preview of the document content
#             print(f"Original Retrieval Method: {doc_info['method']}")
#             if 'score' in doc_info:
#                 print(f"Original Score: {doc_info['score']:.4f}")
#
#             # Display scores from specific reranking methods
#             if 'rrf_score' in doc_info:
#                 print(f"Reciprocal Rank Fusion Score (RRF): {doc_info['rrf_score']:.4f}")
#             if 'wsf_score' in doc_info:
#                 print(f"Weighted Score Fusion (WSF) Score: {doc_info['wsf_score']:.4f}")
#             if 'semantic_score' in doc_info:
#                 print(f"Semantic Similarity Score: {doc_info['semantic_score']:.4f}")
#             if 'pagerank_score' in doc_info:
#                 print(f"PageRank Score: {doc_info['pagerank_score']:.4f}")
#             if 'advanced_fusion_score' in doc_info:
#                 print(f"Advanced Fusion Score: {doc_info['advanced_fusion_score']:.4f}")
#
#     except Exception as e:
#         logging.error(f"Error generating response: {e}")
#         print("\nResponse:")
#         print("An error occurred while generating the response.")
#
# # Call the function in a Jupyter notebook environment
# await query_chatbot()

import gradio as gr

def chatbot_interface(user_query, response_style, selected_retrieval_methods, selected_reranking_methods, chunk_size, overlap):
    vector_db_path = "vector_db.pkl"
    annoy_index_path = "vector_index.ann"

    # Rebuild the embeddings with the requested chunk_size and overlap.
    # Note: this re-embeds the whole PDF on every query, which is slow and costly.
    store_embeddings_in_vector_db('med.pdf', vector_db_path, annoy_index_path, chunk_size, overlap)

    chatbot = MistralRAGChatbot(vector_db_path, annoy_index_path)

    # The retrieval dropdown is multiselect, so Gradio passes a list;
    # the reranking dropdown is single-select and passes a string (or None).
    if isinstance(selected_retrieval_methods, str):
        selected_retrieval_methods_list = [m.strip() for m in selected_retrieval_methods.split(',') if m.strip()]
    else:
        selected_retrieval_methods_list = list(selected_retrieval_methods or [])
    selected_reranking_methods_list = [m.strip() for m in (selected_reranking_methods or "").split(',') if m.strip()]

    response, retrieved_docs, source_info = asyncio.run(chatbot.generate_response_with_rag(
        user_query=user_query,
        response_style=response_style,
        selected_retrieval_methods=selected_retrieval_methods_list,
        selected_reranking_methods=selected_reranking_methods_list
    ))

    formatted_response = f"**Response:**\n{response}\n\n"
    formatted_response += "**Retrieved and Reranked Documents:**\n"
    for idx, doc_info in enumerate(source_info, start=1):
        formatted_response += f"\n**Document {idx}:**\n"
        formatted_response += f"Content Preview: {doc_info['text'][:200]}...\n"
        formatted_response += f"Original Retrieval Method: {doc_info['method']}\n"
        if 'score' in doc_info:
            formatted_response += f"Original Score: {doc_info['score']:.4f}\n"
        for key, value in doc_info.items():
            if key.endswith('_score') and key != 'score':
                formatted_response += f"{key.replace('_', ' ').title()}: {value:.4f}\n"

    return formatted_response

iface = gr.Interface(
    fn=chatbot_interface,
    theme=gr.themes.Soft(),
    inputs=[
        gr.Textbox(lines=5, label="User Query"),
        gr.Dropdown(["Detailed", "Concise", "Creative", "Technical"], label="Response Style"),
        gr.Dropdown(["annoy", "tfidf", "bm25", "euclidean", "jaccard"], label="Retrieval Methods", multiselect=True),
        gr.Dropdown(["advanced_fusion", "reciprocal_rank_fusion", "weighted_score_fusion", "semantic_similarity"], label="Reranking Methods"),
        gr.Slider(minimum=1024, maximum=2048, step=128, value=2048, label="Chunk Size"),
        gr.Slider(minimum=100, maximum=300, step=100, value=200, label="Overlap")
    ],
    outputs=gr.Textbox(label="Chatbot Response"),
    title="Chat with Document"
)

iface.launch()
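For driving the pipeline without the Gradio UI, a minimal smoke test might look like the following (a sketch, kept commented out like Cell 10 above since iface.launch() blocks; it assumes med.pdf has already been indexed and MISTRAL_API_KEY is set):

# bot = MistralRAGChatbot("vector_db.pkl", "vector_index.ann")
# answer, top_texts, info = asyncio.run(bot.generate_response_with_rag(
#     "what is the name of the patient", response_style="Concise"
# ))
# print(answer)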