santuchal committed on
Commit
975798a
1 Parent(s): 9c36f5f

Upload 2 files

Files changed (2)
  1. app.py +312 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,312 @@
+ import gradio as gr
+ import os
+
+ from langchain.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.vectorstores import Chroma
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.llms import HuggingFacePipeline
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain.llms import HuggingFaceHub
+
+ from pathlib import Path
+ import chromadb
+
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+ import tqdm
+ import accelerate
+
+
+ # default_persist_directory = './chroma_HF/'
+ list_llm = ["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mistral-7B-Instruct-v0.1", \
+     "HuggingFaceH4/zephyr-7b-beta", "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2", \
+     "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct", \
+     "google/flan-t5-xxl"
+ ]
+ list_llm_simple = [os.path.basename(llm) for llm in list_llm]
+
+ # Load PDF document and create doc splits
+ def load_doc(list_file_path, chunk_size, chunk_overlap):
+     # Processing for one document only
+     # loader = PyPDFLoader(file_path)
+     # pages = loader.load()
+     loaders = [PyPDFLoader(x) for x in list_file_path]
+     pages = []
+     for loader in loaders:
+         pages.extend(loader.load())
+     # text_splitter = RecursiveCharacterTextSplitter(chunk_size = 600, chunk_overlap = 50)
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=chunk_size,
+         chunk_overlap=chunk_overlap)
+     doc_splits = text_splitter.split_documents(pages)
+     return doc_splits
+
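+ # Example (UI defaults, hypothetical file): load_doc(["paper.pdf"], chunk_size=600, chunk_overlap=40)
+ # returns a flat list of LangChain Document chunks, ready for embedding.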
+
+ # Create vector database
+ def create_db(splits, collection_name):
+     embedding = HuggingFaceEmbeddings()
+     new_client = chromadb.EphemeralClient()
+     vectordb = Chroma.from_documents(
+         documents=splits,
+         embedding=embedding,
+         client=new_client,
+         collection_name=collection_name,
+         # persist_directory=default_persist_directory
+     )
+     return vectordb
+
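+ # Note: chromadb.EphemeralClient() keeps the collection in memory only, so the
+ # index is rebuilt on every run (persistence is disabled above).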
+
+ # Load vector database
+ def load_db():
+     embedding = HuggingFaceEmbeddings()
+     vectordb = Chroma(
+         # persist_directory=default_persist_directory,
+         embedding_function=embedding)
+     return vectordb
+
+
+ # Initialize langchain LLM chain
+ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     progress(0.1, desc="Initializing HF tokenizer...")
+     # HuggingFacePipeline uses a local model
+     # Note: it will download the model locally...
+     # tokenizer=AutoTokenizer.from_pretrained(llm_model)
+     # progress(0.5, desc="Initializing HF pipeline...")
+     # pipeline=transformers.pipeline(
+     #     "text-generation",
+     #     model=llm_model,
+     #     tokenizer=tokenizer,
+     #     torch_dtype=torch.bfloat16,
+     #     trust_remote_code=True,
+     #     device_map="auto",
+     #     # max_length=1024,
+     #     max_new_tokens=max_tokens,
+     #     do_sample=True,
+     #     top_k=top_k,
+     #     num_return_sequences=1,
+     #     eos_token_id=tokenizer.eos_token_id
+     # )
+     # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
+
+     # HuggingFaceHub uses HF inference endpoints
+     progress(0.5, desc="Initializing HF Hub...")
+     # Use of trust_remote_code as model_kwargs
+     # Warning: langchain issue
+     # URL: https://github.com/langchain-ai/langchain/issues/6080
+     if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
+         )
+     elif llm_model == "microsoft/phi-2":
+         raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
+         # Unreachable after the raise above; kept for reference:
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+         )
+     elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
+         )
+     elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
+         raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
+         # Unreachable after the raise above; kept for reference:
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+         )
+     else:
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+         )
+
+     progress(0.75, desc="Defining buffer memory...")
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         output_key='answer',
+         return_messages=True
+     )
+     # retriever=vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
+     retriever = vector_db.as_retriever()
+     progress(0.8, desc="Defining retrieval chain...")
+     qa_chain = ConversationalRetrievalChain.from_llm(
+         llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         # combine_docs_chain_kwargs={"prompt": your_prompt},
+         return_source_documents=True,
+         # return_generated_question=False,
+         verbose=False,
+     )
+     progress(0.9, desc="Done!")
+     return qa_chain
+
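+ # Illustrative call (hypothetical question), assuming a vector_db built by create_db():
+ #   qa_chain = initialize_llmchain(list_llm[1], 0.7, 1024, 3, vector_db)
+ #   response = qa_chain({"question": "What is this document about?", "chat_history": []})
+ #   print(response["answer"])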
+
+ # Initialize database
+ def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
+     # Create list of documents (when valid)
+     list_file_path = [x.name for x in list_file_obj if x is not None]
+     # Create collection_name for vector database
+     progress(0.1, desc="Creating collection name...")
+     collection_name = Path(list_file_path[0]).stem
+     # Fix potential issues from naming convention
+     collection_name = collection_name.replace(" ", "-")
+     collection_name = collection_name[:50]
+     # print('list_file_path: ', list_file_path)
+     print('Collection name: ', collection_name)
+     progress(0.25, desc="Loading document...")
+     # Load document and create splits
+     doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
+     # Create or load vector database
+     progress(0.5, desc="Generating vector database...")
+     # global vector_db
+     vector_db = create_db(doc_splits, collection_name)
+     progress(0.9, desc="Done!")
+     return vector_db, collection_name, "Complete!"
+
+
+ def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     # print("llm_option", llm_option)
+     llm_name = list_llm[llm_option]
+     print("llm_name: ", llm_name)
+     qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
+     return qa_chain, "Complete!"
+
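+ # Note: llm_option arrives as an integer because the LLM radio component
+ # below is created with type="index"; it is used to index into list_llm.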
+
+ def format_chat_history(message, chat_history):
+     formatted_chat_history = []
+     for user_message, bot_message in chat_history:
+         formatted_chat_history.append(f"User: {user_message}")
+         formatted_chat_history.append(f"Assistant: {bot_message}")
+     return formatted_chat_history
+
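+ # e.g. format_chat_history("next question", [("Hi", "Hello!")]) -> ["User: Hi", "Assistant: Hello!"]
+ # (the current message is not included here; it is passed to the chain separately)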
+
+ def conversation(qa_chain, message, history):
+     formatted_chat_history = format_chat_history(message, history)
+     # print("formatted_chat_history", formatted_chat_history)
+
+     # Generate response using QA chain
+     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
+     response_answer = response["answer"]
+     response_sources = response["source_documents"]
+     # NB: assumes the retriever returned at least two source documents
+     response_source1 = response_sources[0].page_content.strip()
+     response_source2 = response_sources[1].page_content.strip()
+     # Langchain sources are zero-based
+     response_source1_page = response_sources[0].metadata["page"] + 1
+     response_source2_page = response_sources[1].metadata["page"] + 1
+     # print('chat response: ', response_answer)
+     # print('DB source', response_sources)
+
+     # Append user message and response to chat history
+     new_history = history + [(message, response_answer)]
+     # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
+     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page
+
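+ # The seven return values line up with the Gradio outputs declared in demo():
+ # [qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page]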
+
+ def upload_file(file_obj):
+     list_file_path = []
+     for idx, file in enumerate(file_obj):
+         file_path = file.name
+         list_file_path.append(file_path)
+     # print(file_path)
+     # initialize_database(file_path, progress)
+     return list_file_path
+
+
+ def demo():
+     with gr.Blocks(theme="base") as demo:
+         vector_db = gr.State()
+         qa_chain = gr.State()
+         collection_name = gr.State()
+
+         gr.Markdown(
+             """<center><h2>PDF-based chatbot (powered by LangChain and open-source LLMs)</h2></center>
+             <h3>Ask any questions about your PDF documents, along with follow-ups</h3>
+             <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
+             When generating answers, it takes past questions into account (via conversational memory), and includes document references for clarity purposes.
+             <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate an output.<br>
+             """)
+         with gr.Tab("Step 1 - Document pre-processing"):
+             with gr.Row():
+                 document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+                 # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
+             with gr.Row():
+                 db_type = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
+             with gr.Accordion("Advanced options - Document text splitter", open=False):
+                 with gr.Row():
+                     slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+                 with gr.Row():
+                     slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+             with gr.Row():
+                 db_progress = gr.Textbox(label="Vector database initialization", value="None")
+             with gr.Row():
+                 db_btn = gr.Button("Generate vector database...")
+
+         with gr.Tab("Step 2 - QA chain initialization"):
+             with gr.Row():
+                 llm_btn = gr.Radio(list_llm_simple, \
+                     label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
+             with gr.Accordion("Advanced options - LLM model", open=False):
+                 with gr.Row():
+                     slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                 with gr.Row():
+                     slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+                 with gr.Row():
+                     slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+             with gr.Row():
+                 llm_progress = gr.Textbox(value="None", label="QA chain initialization")
+             with gr.Row():
+                 qachain_btn = gr.Button("Initialize question-answering chain...")
+
+         with gr.Tab("Step 3 - Conversation with chatbot"):
+             chatbot = gr.Chatbot(height=300)
+             with gr.Accordion("Advanced - Document references", open=False):
+                 with gr.Row():
+                     doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
+                     source1_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
+                     source2_page = gr.Number(label="Page", scale=1)
+             with gr.Row():
+                 msg = gr.Textbox(placeholder="Type message", container=True)
+             with gr.Row():
+                 submit_btn = gr.Button("Submit")
+                 clear_btn = gr.ClearButton([msg, chatbot])
+
+         # Preprocessing events
+         # upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
+         db_btn.click(initialize_database, \
+             inputs=[document, slider_chunk_size, slider_chunk_overlap], \
+             outputs=[vector_db, collection_name, db_progress])
+         qachain_btn.click(initialize_LLM, \
+             inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
+             outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0], \
+             inputs=None, \
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page], \
+             queue=False)
+
+         # Chatbot events
+         msg.submit(conversation, \
+             inputs=[qa_chain, msg, chatbot], \
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page], \
+             queue=False)
+         submit_btn.click(conversation, \
+             inputs=[qa_chain, msg, chatbot], \
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page], \
+             queue=False)
+         clear_btn.click(lambda: [None, "", 0, "", 0], \
+             inputs=None, \
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page], \
+             queue=False)
+     demo.queue().launch(debug=True)
+
+
+ if __name__ == "__main__":
+     demo()
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ torch
+ transformers
+ sentence-transformers
+ langchain<0.1.2
+ tqdm
+ accelerate
+ pypdf
+ chromadb
+
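Typical local setup (assuming a standard Python environment): pip install -r requirements.txt, then python app.py to launch the Gradio app.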