Update app.py
app.py CHANGED
@@ -1,167 +1,253 @@
 import gradio as gr
 import os
-
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import transformers
+
 from langchain_community.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import Chroma
 from langchain.chains import ConversationalRetrievalChain
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.llms import HuggingFacePipeline
+from langchain.chains import ConversationChain
 from langchain.memory import ConversationBufferMemory
+from langchain_community.llms import HuggingFaceEndpoint
 import spaces
 from pathlib import Path
 import chromadb
 from unidecode import unidecode
-import re
 
-
-
-
-
-if not hf_token:
-    raise ValueError("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
 
-# Log in to Hugging Face
-login(token=hf_token)
 
-
+from transformers import AutoTokenizer
+import transformers
+import torch
+import tqdm
+import accelerate
+import re
 
+# default_persist_directory = './chroma_HF/'
+list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1", \
+    "google/gemma-7b-it","google/gemma-2b-it", \
+    "HuggingFaceH4/zephyr-7b-beta", "HuggingFaceH4/zephyr-7b-gemma-v0.1", \
+    "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2", \
+    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct", \
+    "google/flan-t5-xxl"
+]
 list_llm_simple = [os.path.basename(llm) for llm in list_llm]
 
 @spaces.GPU
+# Load PDF document and create doc splits
 def load_doc(list_file_path, chunk_size, chunk_overlap):
+    # Processing for one document only
+    # loader = PyPDFLoader(file_path)
+    # pages = loader.load()
     loaders = [PyPDFLoader(x) for x in list_file_path]
     pages = []
     for loader in loaders:
         pages.extend(loader.load())
+    # text_splitter = RecursiveCharacterTextSplitter(chunk_size = 600, chunk_overlap = 50)
     text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=chunk_size,
-        chunk_overlap=chunk_overlap)
+        chunk_size = chunk_size,
+        chunk_overlap = chunk_overlap)
     doc_splits = text_splitter.split_documents(pages)
     return doc_splits
 
+
+# Create vector database
 def create_db(splits, collection_name):
-
-    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-
-    embedding = HuggingFaceEmbeddings(
-        model_name="sentence-transformers/all-MiniLM-L6-v2",
-        model_kwargs={"use_auth_token": hf_token}
-    )
+    embedding = HuggingFaceEmbeddings()
     new_client = chromadb.EphemeralClient()
     vectordb = Chroma.from_documents(
         documents=splits,
         embedding=embedding,
         client=new_client,
         collection_name=collection_name,
+        # persist_directory=default_persist_directory
     )
     return vectordb
 
+
+# Load vector database
+def load_db():
+    embedding = HuggingFaceEmbeddings()
+    vectordb = Chroma(
+        # persist_directory=default_persist_directory,
+        embedding_function=embedding)
+    return vectordb
+
+
+# Initialize langchain LLM chain
+def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+    progress(0.1, desc="Initializing HF tokenizer...")
+    # HuggingFacePipeline uses local model
+    # Note: it will download model locally...
+    # tokenizer=AutoTokenizer.from_pretrained(llm_model)
+    # progress(0.5, desc="Initializing HF pipeline...")
+    # pipeline=transformers.pipeline(
+    #     "text-generation",
+    #     model=llm_model,
+    #     tokenizer=tokenizer,
+    #     torch_dtype=torch.bfloat16,
+    #     trust_remote_code=True,
+    #     device_map="auto",
+    #     # max_length=1024,
+    #     max_new_tokens=max_tokens,
+    #     do_sample=True,
+    #     top_k=top_k,
+    #     num_return_sequences=1,
+    #     eos_token_id=tokenizer.eos_token_id
+    # )
+    # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
+
+    # HuggingFaceHub uses HF inference endpoints
+    progress(0.5, desc="Initializing HF Hub...")
+    # Use of trust_remote_code as model_kwargs
+    # Warning: langchain issue
+    # URL: https://github.com/langchain-ai/langchain/issues/6080
+    if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
+        llm = HuggingFaceEndpoint(
+            repo_id=llm_model,
+            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
+            temperature = temperature,
+            max_new_tokens = max_tokens,
+            top_k = top_k,
+            load_in_8bit = True,
+        )
+    elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1","mosaicml/mpt-7b-instruct"]:
+        raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
+        llm = HuggingFaceEndpoint(
+            repo_id=llm_model,
+            temperature = temperature,
+            max_new_tokens = max_tokens,
+            top_k = top_k,
+        )
+    elif llm_model == "microsoft/phi-2":
+        # raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
+        llm = HuggingFaceEndpoint(
+            repo_id=llm_model,
+            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+            temperature = temperature,
+            max_new_tokens = max_tokens,
+            top_k = top_k,
+            trust_remote_code = True,
+            torch_dtype = "auto",
+        )
+    elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
+        llm = HuggingFaceEndpoint(
+            repo_id=llm_model,
+            # model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
+            temperature = temperature,
+            max_new_tokens = 250,
+            top_k = top_k,
+        )
+    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
+        raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
+        llm = HuggingFaceEndpoint(
+            repo_id=llm_model,
+            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+            temperature = temperature,
+            max_new_tokens = max_tokens,
+            top_k = top_k,
+        )
+    else:
+        llm = HuggingFaceEndpoint(
+            repo_id=llm_model,
+            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+            temperature = temperature,
+            max_new_tokens = max_tokens,
+            top_k = top_k,
+        )
+
+    progress(0.75, desc="Defining buffer memory...")
+    memory = ConversationBufferMemory(
+        memory_key="chat_history",
+        output_key='answer',
+        return_messages=True
+    )
+    # retriever=vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
+    retriever=vector_db.as_retriever()
+    progress(0.8, desc="Defining retrieval chain...")
+    qa_chain = ConversationalRetrievalChain.from_llm(
+        llm,
+        retriever=retriever,
+        chain_type="stuff",
+        memory=memory,
+        # combine_docs_chain_kwargs={"prompt": your_prompt})
+        return_source_documents=True,
+        #return_generated_question=False,
+        verbose=False,
+    )
+    progress(0.9, desc="Done!")
+    return qa_chain
+
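The heart of this commit is right here: the old, locally loaded transformers pipeline (removed further down as initialize_global_llm) gives way to initialize_llmchain, which talks to Hugging Face inference endpoints through langchain's HuggingFaceEndpoint wrapper. A minimal sketch of the new path, runnable outside the Space, assuming a valid HF API token is exported in the environment (model choice and prompt are illustrative only):

    from langchain_community.llms import HuggingFaceEndpoint

    # Remote call to a hosted inference endpoint; nothing is downloaded locally.
    llm = HuggingFaceEndpoint(
        repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # any repo id from list_llm
        temperature=0.7,
        max_new_tokens=256,
        top_k=3,
    )
    print(llm.invoke("Summarize retrieval-augmented generation in one sentence."))

Note that two branches above raise gr.Error before their HuggingFaceEndpoint call, so the construction code after those raises is unreachable; the error message, not the endpoint, is the point of those branches.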
+
+# Generate collection name for vector database
+# - Use filepath as input, ensuring unicode text
 def create_collection_name(filepath):
+    # Extract filename without extension
     collection_name = Path(filepath).stem
-
+    # Fix potential issues from naming convention
+    ## Remove space
+    collection_name = collection_name.replace(" ","-")
+    ## ASCII transliterations of Unicode text
     collection_name = unidecode(collection_name)
+    ## Remove special characters
+    #collection_name = re.findall("[\dA-Za-z]*", collection_name)[0]
     collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
+    ## Limit length to 50 characters
     collection_name = collection_name[:50]
+    ## Minimum length of 3 characters
     if len(collection_name) < 3:
         collection_name = collection_name + 'xyz'
+    ## Enforce start and end as alphanumeric character
     if not collection_name[0].isalnum():
         collection_name = 'A' + collection_name[1:]
     if not collection_name[-1].isalnum():
         collection_name = collection_name[:-1] + 'Z'
+    print('Filepath: ', filepath)
+    print('Collection name: ', collection_name)
     return collection_name
 
-def initialize_global_llm(llm_model, temperature, max_tokens, top_k, progress=gr.Progress()):
-    global global_llm, global_tokenizer
-
-    if global_llm is None:
-        progress(0.1, desc="Initializing HF tokenizer...")
-        global_tokenizer = AutoTokenizer.from_pretrained(llm_model, use_auth_token=hf_token)
-
-        progress(0.3, desc="Loading model...")
-        try:
-            model = AutoModelForCausalLM.from_pretrained(
-                llm_model,
-                use_auth_token=hf_token,
-                torch_dtype=torch.float16,
-                device_map="auto"
-            )
-        except RuntimeError as e:
-            if "CUDA out of memory" in str(e):
-                raise gr.Error("GPU memory exceeded. Try a smaller model or reduce batch size.")
-            else:
-                raise e
-
-        progress(0.5, desc="Initializing HF pipeline...")
-        pipeline = transformers.pipeline(
-            "text-generation",
-            model=model,
-            tokenizer=global_tokenizer,
-            torch_dtype=torch.float16,
-            device_map="auto",
-            max_new_tokens=max_tokens,
-            do_sample=True,
-            top_k=top_k,
-            num_return_sequences=1,
-            eos_token_id=global_tokenizer.eos_token_id
-        )
-        global_llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
-
-        progress(0.9, desc="LLM initialization complete!")
-        return "LLM initialized successfully!"
-    else:
-        progress(0.9, desc="Using previously initialized LLM.")
-        return "Using previously initialized LLM."
 
+# Initialize database
 def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
-
-        return None, None, "Error: No files uploaded. Please upload PDF files first."
-
+    # Create list of documents (when valid)
     list_file_path = [x.name for x in list_file_obj if x is not None]
-
-        return None, None, "Error: No valid files found. Please upload PDF files."
-
+    # Create collection_name for vector database
     progress(0.1, desc="Creating collection name...")
     collection_name = create_collection_name(list_file_path[0])
     progress(0.25, desc="Loading document...")
+    # Load document and create splits
     doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
+    # Create or load vector database
     progress(0.5, desc="Generating vector database...")
+    # global vector_db
     vector_db = create_db(doc_splits, collection_name)
     progress(0.9, desc="Done!")
     return vector_db, collection_name, "Complete!"
 
+
 def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+    # print("llm_option",llm_option)
     llm_name = list_llm[llm_option]
-
-
-    memory = ConversationBufferMemory(
-        memory_key="chat_history",
-        output_key='answer',
-        return_messages=True
-    )
-    retriever = vector_db.as_retriever()
-    qa_chain = ConversationalRetrievalChain.from_llm(
-        global_llm,
-        retriever=retriever,
-        chain_type="stuff",
-        memory=memory,
-        return_source_documents=True,
-        verbose=False,
-    )
+    print("llm_name: ",llm_name)
+    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
     return qa_chain, "Complete!"
 
+
 def format_chat_history(message, chat_history):
     formatted_chat_history = []
     for user_message, bot_message in chat_history:
         formatted_chat_history.append(f"User: {user_message}")
         formatted_chat_history.append(f"Assistant: {bot_message}")
     return formatted_chat_history
 
 def conversation(qa_chain, message, history):
     formatted_chat_history = format_chat_history(message, history)
+    #print("formatted_chat_history",formatted_chat_history)
+
+    # Generate response using QA chain
     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
     response_answer = response["answer"]
     if response_answer.find("Helpful Answer:") != -1:
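To make the name sanitization above concrete, here is a trace of create_collection_name on a hypothetical upload (filename invented for illustration); each step matches one of the commented rules, and the result stays within Chroma's collection-name constraints (3-63 characters, alphanumeric at both ends):

    name = create_collection_name("/tmp/Résumé (2024).pdf")
    # Path(...).stem    -> "Résumé (2024)"
    # replace(" ", "-") -> "Résumé-(2024)"
    # unidecode(...)    -> "Resume-(2024)"
    # re.sub(...)       -> "Resume-2024-"
    # trailing character is not alphanumeric, so '-' is swapped for 'Z'
    assert name == "Resume-2024Z"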
@@ -170,12 +256,28 @@ def conversation(qa_chain, message, history):
 
     response_source1 = response_sources[0].page_content.strip()
     response_source2 = response_sources[1].page_content.strip()
     response_source3 = response_sources[2].page_content.strip()
+    # Langchain sources are zero-based
     response_source1_page = response_sources[0].metadata["page"] + 1
     response_source2_page = response_sources[1].metadata["page"] + 1
     response_source3_page = response_sources[2].metadata["page"] + 1
+    # print ('chat response: ', response_answer)
+    # print('DB source', response_sources)
 
+    # Append user message and response to chat history
     new_history = history + [(message, response_answer)]
+    # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
+
+
+def upload_file(file_obj):
+    list_file_path = []
+    for idx, file in enumerate(file_obj):
+        file_path = file_obj.name
+        list_file_path.append(file_path)
+        # print(file_path)
+    # initialize_database(file_path, progress)
+    return list_file_path
+
 
 def demo():
     with gr.Blocks(theme="base") as demo:
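Two notes on this hunk. First, the dict unpacked by conversation() has the shape sketched below because the chain is built with return_source_documents=True (qa_chain as produced by initialize_llmchain; the question text is illustrative):

    response = qa_chain({"question": "What is this document about?", "chat_history": []})
    response["answer"]                   # the generated reply
    docs = response["source_documents"]  # retrieved langchain Documents
    docs[0].page_content                 # chunk text shown in the reference boxes
    docs[0].metadata["page"] + 1         # PyPDF page indices are zero-based, hence the +1

Second, the new upload_file helper loops over file_obj but reads file_obj.name (the list itself) instead of file.name on each pass; since its only caller, upload_btn, stays commented out, the bug is dormant.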
@@ -184,38 +286,50 @@ def demo():
 
         collection_name = gr.State()
 
         gr.Markdown(
-        """<center><h2>
+        """<center><h2>PDF-based chatbot</center></h2>
         <h3>Ask any questions about your PDF documents</h3>""")
         gr.Markdown(
-        """<b>Note:</b> This AI assistant
-
-        This chatbot takes past questions into account and includes document references
+        """<b>Note:</b> This AI assistant, using Langchain and open-source LLMs, performs retrieval-augmented generation (RAG) from your PDF documents. \
+        The user interface explicitly shows multiple steps to help understand the RAG workflow.
+        This chatbot takes past questions into account when generating answers (via conversational memory), and includes document references for clarity purposes.<br>
+        <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate a reply.
+        """)
 
-        with gr.Tab("Step 1 -
-
-
-
-            slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
-            slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
-            llm_progress = gr.Textbox(value="Not initialized", label="LLM initialization status")
-            init_llm_btn = gr.Button("Initialize LLM")
-
-        with gr.Tab("Step 2 - Upload PDF"):
-            document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+        with gr.Tab("Step 1 - Upload PDF"):
+            with gr.Row():
+                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+                # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
 
-        with gr.Tab("Step
-
+        with gr.Tab("Step 2 - Process document"):
+            with gr.Row():
+                db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
             with gr.Accordion("Advanced options - Document text splitter", open=False):
-
-
-
-
+                with gr.Row():
+                    slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+                with gr.Row():
+                    slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+            with gr.Row():
+                db_progress = gr.Textbox(label="Vector database initialization", value="None")
+            with gr.Row():
+                db_btn = gr.Button("Generate vector database")
 
-        with gr.Tab("Step
-
-
+        with gr.Tab("Step 3 - Initialize QA chain"):
+            with gr.Row():
+                llm_btn = gr.Radio(list_llm_simple, \
+                    label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
+            with gr.Accordion("Advanced options - LLM model", open=False):
+                with gr.Row():
+                    slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                with gr.Row():
+                    slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+                with gr.Row():
+                    slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+            with gr.Row():
+                llm_progress = gr.Textbox(value="None",label="QA chain initialization")
+            with gr.Row():
+                qachain_btn = gr.Button("Initialize Question Answering chain")
 
-        with gr.Tab("Step
+        with gr.Tab("Step 4 - Chatbot"):
             chatbot = gr.Chatbot(height=300)
             with gr.Accordion("Advanced - Document references", open=False):
                 with gr.Row():
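One detail worth flagging in the new four-step layout: both radios are declared with type="index", so callbacks receive the selected position rather than the label, which is what lets initialize_LLM map the short display name back to a full repo id. A small illustration (two entries borrowed from list_llm):

    import os

    list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "google/gemma-7b-it"]
    list_llm_simple = [os.path.basename(llm) for llm in list_llm]
    # gr.Radio(list_llm_simple, type="index") hands the callback an int, e.g. 1,
    # and list_llm[1] recovers the repo id "google/gemma-7b-it".
    assert list_llm_simple == ["Mistral-7B-Instruct-v0.2", "gemma-7b-it"]

Also note that db_btn is assigned twice in Step 2, first as the "Vector database type" gr.Radio and then as the "Generate vector database" gr.Button; only the button survives to the event wiring below.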
@@ -233,39 +347,33 @@ def demo():
 
         submit_btn = gr.Button("Submit message")
         clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
 
-        #
-
-
-            inputs=[
-            outputs=[llm_progress]
-        )
-
-        db_btn.click(initialize_database,
-            inputs=[document, slider_chunk_size, slider_chunk_overlap],
+        # Preprocessing events
+        #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
+        db_btn.click(initialize_database, \
+            inputs=[document, slider_chunk_size, slider_chunk_overlap], \
             outputs=[vector_db, collection_name, db_progress])
-
-
-
-
-
-            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+        qachain_btn.click(initialize_LLM, \
+            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
+            outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
+            inputs=None, \
+            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
 
         # Chatbot events
-        msg.submit(conversation,
-            inputs=[qa_chain, msg, chatbot],
-            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+        msg.submit(conversation, \
+            inputs=[qa_chain, msg, chatbot], \
+            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
-        submit_btn.click(conversation,
-            inputs=[qa_chain, msg, chatbot],
-            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+        submit_btn.click(conversation, \
+            inputs=[qa_chain, msg, chatbot], \
+            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
-        clear_btn.click(lambda:[None,"",0,"",0,"",0],
-            inputs=None,
-            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+        clear_btn.click(lambda:[None,"",0,"",0,"",0], \
+            inputs=None, \
+            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
-
     demo.queue().launch(debug=True)
 
+
 if __name__ == "__main__":
-    demo()
+    demo()
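Finally, the reworked wiring chains .click(...).then(...) on qachain_btn so that initializing the QA chain immediately resets the chat panel and the three source boxes. A reduced sketch of that pattern, with the component set cut down to the essentials:

    import gradio as gr

    with gr.Blocks() as app:
        status = gr.Textbox(label="QA chain initialization")
        chatbot = gr.Chatbot(height=300)
        btn = gr.Button("Initialize Question Answering chain")
        # .click() runs its callback first; .then() fires once it completes,
        # here clearing the chatbot after the status box reports back.
        btn.click(lambda: "Complete!", inputs=None, outputs=[status]).then(
            lambda: None, inputs=None, outputs=[chatbot], queue=False)

    app.queue().launch()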