nirajandhakal committed on
Commit
2805261
1 Parent(s): ad4311f

Update app.py

Files changed (1)
  1. app.py +579 -52
app.py CHANGED
@@ -1,70 +1,597 @@
 
  import os
  import torch
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
- import PyPDF2
- import sounddevice as sd
- import numpy as np
- from gtts import gTTS
- from io import BytesIO
- import gradio as gr
-
- def load_quantized_model(model_name):
-     model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-     tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-     # Quantize the model
-     model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
-     model.eval()
-
-     return model, tokenizer
-
- def pdf_to_text(pdf_bytes):
-     pdf_file_obj = BytesIO(pdf_bytes)
-     pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)
-     text = ''
-     for page_num in range(pdf_reader.numPages):
-         page_obj = pdf_reader.getPage(page_num)
-         text += page_obj.extractText()
-     pdf_file_obj.close()
-     return text
-
- def generate_audio(model, tokenizer, text):
-     input_ids = torch.tensor(tokenizer.encode(text, return_tensors="pt")).cuda()
-     with torch.no_grad():
-         outputs = model.generate(input_ids, max_length=500, pad_token_id=tokenizer.eos_token_id)
-     output_text = tokenizer.decode(outputs[0])
-     return output_text
-
- def save_and_play_audio(text):
-     tts = gTTS(text=text, lang='en')
-     output_file = "output.mp3"
-     tts.save(output_file)
-
-     data, fs = sd.default.read_audio(output_file)
-     sd.play(data, fs)
-     sd.wait()
-
-     return output_file
-
- def main(pdf_file):
-     # Load the quantized model
-     model, tokenizer = load_quantized_model("microsoft/speecht5_tts")
-
-     # Move the model to the GPU if available
-     if torch.cuda.is_available():
-         model.cuda()
-
-     # Convert the uploaded PDF file to text
-     text = pdf_to_text(pdf_file.read())
-
-     # Generate audio from the text
-     audio_text = generate_audio(model, tokenizer, text)
-
-     # Save and play the audio
-     output_file = save_and_play_audio(audio_text)
-
-     return {"output_file": output_file}
-
  if __name__ == "__main__":
-     app = gr.Interface(main, inputs=gr.inputs.File(type="pdf"), outputs="text")
-     app.launch()

+ import gradio as gr
  import os
+
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import Chroma
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.llms import HuggingFacePipeline
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain_community.llms import HuggingFaceEndpoint
+
+ from pathlib import Path
+ import chromadb
+ from unidecode import unidecode
+
+ from transformers import AutoTokenizer
+ import transformers
  import torch
+ import tqdm
+ import accelerate
+ import re
+
+
+ # default_persist_directory = './chroma_HF/'
+ list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1",
+     "google/gemma-7b-it", "google/gemma-2b-it",
+     "HuggingFaceH4/zephyr-7b-beta", "HuggingFaceH4/zephyr-7b-gemma-v0.1",
+     "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2",
+     "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct",
+     "google/flan-t5-xxl"
+ ]
+ list_llm_simple = [os.path.basename(llm) for llm in list_llm]
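+ # Note: the LLM radio button in the UI uses type="index", so a selection maps back
+ # to the full repo id via list_llm[index]; list_llm_simple holds only display names
+ # (e.g. os.path.basename("microsoft/phi-2") == "phi-2").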
+
+ # Load PDF document and create doc splits
+ def load_doc(list_file_path, chunk_size, chunk_overlap):
+     # Processing for one document only
+     # loader = PyPDFLoader(file_path)
+     # pages = loader.load()
+     loaders = [PyPDFLoader(x) for x in list_file_path]
+     pages = []
+     for loader in loaders:
+         pages.extend(loader.load())
+     # text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=50)
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=chunk_size,
+         chunk_overlap=chunk_overlap)
+     doc_splits = text_splitter.split_documents(pages)
+     return doc_splits
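+ # Illustrative call (hypothetical file names), matching the slider defaults below:
+ # splits = load_doc(["report.pdf", "notes.pdf"], chunk_size=600, chunk_overlap=40)
+ # Each split is a langchain Document; its originating page is in metadata["page"].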
+
+
+ # Create vector database
+ def create_db(splits, collection_name):
+     embedding = HuggingFaceEmbeddings()
+     new_client = chromadb.EphemeralClient()
+     vectordb = Chroma.from_documents(
+         documents=splits,
+         embedding=embedding,
+         client=new_client,
+         collection_name=collection_name,
+         # persist_directory=default_persist_directory
+     )
+     return vectordb
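+ # chromadb.EphemeralClient() keeps the collection in memory only, so the vector
+ # database is rebuilt from the PDFs on every app restart. Illustrative query
+ # (hypothetical collection name and query text):
+ # create_db(splits, "my-docs").similarity_search("what is discussed?", k=3)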
+
+ # Load vector database
+ def load_db():
+     embedding = HuggingFaceEmbeddings()
+     vectordb = Chroma(
+         # persist_directory=default_persist_directory,
+         embedding_function=embedding)
+     return vectordb
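+ # load_db only makes sense with a persist_directory (commented out above) and is
+ # not called anywhere else in this file; presumably kept for a persisted variant.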
+
+
+ # Initialize langchain LLM chain
+ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     progress(0.1, desc="Initializing HF tokenizer...")
+     # HuggingFacePipeline uses a local model
+     # Note: it will download the model locally...
+     # tokenizer = AutoTokenizer.from_pretrained(llm_model)
+     # progress(0.5, desc="Initializing HF pipeline...")
+     # pipeline = transformers.pipeline(
+     #     "text-generation",
+     #     model=llm_model,
+     #     tokenizer=tokenizer,
+     #     torch_dtype=torch.bfloat16,
+     #     trust_remote_code=True,
+     #     device_map="auto",
+     #     # max_length=1024,
+     #     max_new_tokens=max_tokens,
+     #     do_sample=True,
+     #     top_k=top_k,
+     #     num_return_sequences=1,
+     #     eos_token_id=tokenizer.eos_token_id
+     # )
+     # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
+
+     # HuggingFaceEndpoint uses HF Inference Endpoints
+     progress(0.5, desc="Initializing HF Hub...")
+     # Use of trust_remote_code as model_kwargs
+     # Warning: langchain issue
+     # URL: https://github.com/langchain-ai/langchain/issues/6080
+     if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+             load_in_8bit=True,
+         )
+     elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1", "mosaicml/mpt-7b-instruct"]:
+         raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
+         # Unreachable: kept for reference in case the endpoint becomes available
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+         )
+     elif llm_model == "microsoft/phi-2":
+         raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
+         # Unreachable: kept for reference
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+             trust_remote_code=True,
+             torch_dtype="auto",
+         )
+     elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
+             temperature=temperature,
+             max_new_tokens=250,
+             top_k=top_k,
+         )
+     elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
+         raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
+         # Unreachable: kept for reference
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+         )
+     else:
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+         )
+
+     progress(0.75, desc="Defining buffer memory...")
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         output_key='answer',
+         return_messages=True
+     )
+     # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
+     retriever = vector_db.as_retriever()
+     progress(0.8, desc="Defining retrieval chain...")
+     qa_chain = ConversationalRetrievalChain.from_llm(
+         llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         # combine_docs_chain_kwargs={"prompt": your_prompt})
+         return_source_documents=True,
+         # return_generated_question=False,
+         verbose=False,
+     )
+     progress(0.9, desc="Done!")
+     return qa_chain
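+ # Illustrative invocation (hypothetical question); chat_history is also tracked
+ # internally by the ConversationBufferMemory defined above:
+ # result = qa_chain({"question": "What does chapter 2 cover?", "chat_history": []})
+ # result["answer"], result["source_documents"]  # answer text + retrieved chunks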
+
+
+ # Generate collection name for vector database
+ # - Use filepath as input, ensuring unicode text
+ def create_collection_name(filepath):
+     # Extract filename without extension
+     collection_name = Path(filepath).stem
+     # Fix potential issues from naming convention
+     ## Remove space
+     collection_name = collection_name.replace(" ", "-")
+     ## ASCII transliterations of Unicode text
+     collection_name = unidecode(collection_name)
+     ## Remove special characters
+     # collection_name = re.findall("[\dA-Za-z]*", collection_name)[0]
+     collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
+     ## Limit length to 50 characters
+     collection_name = collection_name[:50]
+     ## Minimum length of 3 characters
+     if len(collection_name) < 3:
+         collection_name = collection_name + 'xyz'
+     ## Enforce start and end as alphanumeric character
+     if not collection_name[0].isalnum():
+         collection_name = 'A' + collection_name[1:]
+     if not collection_name[-1].isalnum():
+         collection_name = collection_name[:-1] + 'Z'
+     print('Filepath: ', filepath)
+     print('Collection name: ', collection_name)
+     return collection_name
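+ # Worked example (hypothetical path): "docs/My Report (2024).pdf"
+ #   stem            -> "My Report (2024)"
+ #   spaces/specials -> "My-Report-2024-"
+ #   final fix-up    -> "My-Report-2024Z"  (last char forced alphanumeric)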
+
+
+ # Initialize database
+ def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
+     # Create list of documents (when valid)
+     list_file_path = [x.name for x in list_file_obj if x is not None]
+     # Create collection_name for vector database
+     progress(0.1, desc="Creating collection name...")
+     collection_name = create_collection_name(list_file_path[0])
+     progress(0.25, desc="Loading document...")
+     # Load document and create splits
+     doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
+     # Create or load vector database
+     progress(0.5, desc="Generating vector database...")
+     # global vector_db
+     vector_db = create_db(doc_splits, collection_name)
+     progress(0.9, desc="Done!")
+     return vector_db, collection_name, "Complete!"
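+ # Note: the collection name is derived from the first uploaded file only; chunks
+ # from all uploaded PDFs still land in that single collection.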
+
+
+ def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     # print("llm_option", llm_option)
+     llm_name = list_llm[llm_option]
+     print("llm_name: ", llm_name)
+     qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
+     return qa_chain, "Complete!"
+
+
+ def format_chat_history(message, chat_history):
+     formatted_chat_history = []
+     for user_message, bot_message in chat_history:
+         formatted_chat_history.append(f"User: {user_message}")
+         formatted_chat_history.append(f"Assistant: {bot_message}")
+     return formatted_chat_history
+
+
+ def conversation(qa_chain, message, history):
+     formatted_chat_history = format_chat_history(message, history)
+     # print("formatted_chat_history", formatted_chat_history)
+
+     # Generate response using QA chain
+     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
+     response_answer = response["answer"]
+     if response_answer.find("Helpful Answer:") != -1:
+         response_answer = response_answer.split("Helpful Answer:")[-1]
+     response_sources = response["source_documents"]
+     response_source1 = response_sources[0].page_content.strip()
+     response_source2 = response_sources[1].page_content.strip()
+     response_source3 = response_sources[2].page_content.strip()
+     # Langchain sources are zero-based
+     response_source1_page = response_sources[0].metadata["page"] + 1
+     response_source2_page = response_sources[1].metadata["page"] + 1
+     response_source3_page = response_sources[2].metadata["page"] + 1
+     # print('chat response: ', response_answer)
+     # print('DB source', response_sources)
+
+     # Append user message and response to chat history
+     new_history = history + [(message, response_answer)]
+     # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
+     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
+
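+ # The 9-tuple returned above must stay aligned with the `outputs` lists wired to
+ # msg.submit and submit_btn.click below: chain state, cleared textbox, updated
+ # chat history, then three source snippets each followed by a 1-based page number.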
+
+ def upload_file(file_obj):
+     list_file_path = []
+     for file in file_obj:
+         file_path = file.name
+         list_file_path.append(file_path)
+     # print(file_path)
+     # initialize_database(file_path, progress)
+     return list_file_path
+
+
+ # Build the Gradio Blocks app; custom_css (optional) is applied at construction
+ # and the finished app is returned so that __main__ can launch it.
+ def demo(custom_css=None):
+     with gr.Blocks(theme="base", css=custom_css) as demo:
+         vector_db = gr.State()
+         qa_chain = gr.State()
+         collection_name = gr.State()
+
+         gr.Markdown(
+             """<center>
+             <img src="https://github.com/dhakalnirajan.png" alt="Profile Picture">
+             <h2>PDF-based Chatbot (powered by LangChain and open-source Large Language Models)</h2>
+             <h3>Ask any questions about your PDF documents, along with follow-ups.</h3>
+             <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents.
+             When generating answers, it takes past questions into account (via conversational memory), and includes document references for clarity purposes.
+
+             <b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate an output.
+
+             <div style="display: flex; justify-content: center; align-items: center; margin-top: 2rem; gap: 1rem;">
+             <a href="https://huggingface.co/nirajandhakal" target="_blank" rel="noreferrer"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-%23FF0000.svg?style=for-the-badge&logo=huggingface&logoColor=white"></a>
+             <a href="https://twitter.com/nirajandhakal_7" target="_blank" rel="noreferrer"><img src="https://img.shields.io/badge/Twitter-%231DA1F2.svg?style=for-the-badge&logo=Twitter&logoColor=white"></a>
+             <a href="https://www.linkedin.com/in/nirajandhakal07" target="_blank" rel="noreferrer"><img src="https://img.shields.io/badge/LinkedIn-%230077B5.svg?style=for-the-badge&logo=linkedin&logoColor=white"></a>
+             <a href="https://github.com/dhakalnirajan" target="_blank" rel="noreferrer"><img src="https://img.shields.io/badge/Github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white"></a>
+             </div>
+             </center>"""
+         )
+         with gr.Tab("Step 1 - Document pre-processing"):
+             with gr.Row():
+                 document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+                 # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
+             with gr.Row():
+                 db_choice = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
+             with gr.Accordion("Advanced options - Document text splitter", open=False):
+                 with gr.Row():
+                     slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+                 with gr.Row():
+                     slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+             with gr.Row():
+                 db_progress = gr.Textbox(label="Vector database initialization", value="None")
+             with gr.Row():
+                 db_btn = gr.Button("Generate vector database...")
+
+         with gr.Tab("Step 2 - QA chain initialization"):
+             with gr.Row():
+                 llm_btn = gr.Radio(list_llm_simple,
+                     label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
+             with gr.Accordion("Advanced options - LLM model", open=False):
+                 with gr.Row():
+                     slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                 with gr.Row():
+                     slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+                 with gr.Row():
+                     slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+             with gr.Row():
+                 llm_progress = gr.Textbox(value="None", label="QA chain initialization")
+             with gr.Row():
+                 qachain_btn = gr.Button("Initialize question-answering chain...")
+
+         with gr.Tab("Step 3 - Conversation with chatbot"):
+             chatbot = gr.Chatbot(height=300)
+             with gr.Accordion("Advanced - Document references", open=False):
+                 with gr.Row():
+                     doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
+                     source1_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
+                     source2_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
+                     source3_page = gr.Number(label="Page", scale=1)
+             with gr.Row():
+                 msg = gr.Textbox(placeholder="Type message", container=True)
+             with gr.Row():
+                 submit_btn = gr.Button("Submit")
+                 clear_btn = gr.ClearButton([msg, chatbot])
+
+         # Preprocessing events
+         # upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
+         db_btn.click(initialize_database,
+             inputs=[document, slider_chunk_size, slider_chunk_overlap],
+             outputs=[vector_db, collection_name, db_progress])
+         qachain_btn.click(initialize_LLM,
+             inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
+             outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+
+         # Chatbot events
+         msg.submit(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+         submit_btn.click(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+         clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+     return demo
+
+
  if __name__ == "__main__":
+     # The CSS is defined here as a plain string and handed to demo(), which applies
+     # it via gr.Blocks(css=...); gr.Interface cannot wrap an already-built Blocks app.
+     custom_css = """
+ /* General styling */
+ body {
+     font-family: Arial, sans-serif;
+     line-height: 1.6;
+     color: #333;
+     background-color: #f5f5f5;
+     padding: 20px;
+ }
+
+ h2, h3 {
+     line-height: 1.2;
+ }
+
+ h2 {
+     font-size: 2rem;
+     margin-bottom: 10px;
+ }
+
+ h3 {
+     font-size: 1.2rem;
+     margin-top: 20px;
+ }
+
+ a {
+     color: #007bff;
+     text-decoration: none;
+ }
+
+ a:hover {
+     text-decoration: underline;
+ }
+
+ /* Input elements */
+ input[type="file"] {
+     border: 1px solid #ccc;
+     border-radius: 4px;
+     padding: 5px;
+     outline: none;
+ }
+
+ select {
+     appearance: menulist;
+     background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 14 8'><polygon points='0,0 14,0 7,8'/></svg>");
+     background-repeat: no-repeat;
+     background-position: right 10px center;
+     background-size: 12px;
+     border: 1px solid #ccc;
+     border-radius: 4px;
+     padding: 5px;
+     outline: none;
+     cursor: pointer;
+ }
+
+ slider {
+     width: 100%;
+     margin-bottom: 10px;
+ }
+
+ button {
+     background-color: #007bff;
+     color: white;
+     border: none;
+     border-radius: 4px;
+     padding: 10px;
+     cursor: pointer;
+ }
+
+ button:hover {
+     background-color: #0056b3;
+ }
+
+ button:disabled {
+     opacity: 0.5;
+     cursor: not-allowed;
+ }
+
+ /* Chatbot section */
+ .gradio-Chatbox {
+     border: 1px solid #ddd;
+     border-radius: 4px;
+     padding: 10px;
+     margin-top: 20px;
+ }
+
+ .gradio-Chatbox .gradio-ChatMessage--system {
+     background-color: #eee;
+     padding: 10px;
+     border-radius: 4px;
+     margin-bottom: 10px;
+ }
+
+ .gradio-Chatbox .gradio-ChatMessage--assistant {
+     background-color: #f5f5f5;
+     padding: 10px;
+     border-radius: 4px;
+     margin-bottom: 10px;
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer {
+     margin-top: 10px;
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer input[type="text"] {
+     width: calc(100% - 40px);
+     padding: 10px;
+     border: none;
+     border-radius: 4px;
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer button {
+     width: 30px;
+     height: 30px;
+     padding: 0;
+     min-width: auto;
+     border: none;
+     border-radius: 50%;
+     position: relative;
+     overflow: hidden;
+     box-shadow: 0 0 0 1px rgb(0 0 0 / 10%), 0 2px 4px rgb(0 0 0 / 10%);
+     transition: all 0.2s ease-in-out;
+     background-color: #007bff;
+     color: white;
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer button::before {
+     content: "";
+     position: absolute;
+     left: -50%;
+     top: -50%;
+     width: 200%;
+     height: 200%;
+     background-color: currentColor;
+     opacity: 0;
+     transition: all 0.2s ease-in-out;
+     transform-origin: center;
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer button:focus {
+     box-shadow: 0 0 0 2px rgb(0 0 0 / 20%), 0 2px 4px rgb(0 0 0 / 15%);
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer button:active {
+     transform: translateY(1px);
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer button:hover:not(:focus):not(:active) {
+     filter: brightness(90%);
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer button:hover:not(:focus):not(:active)::before {
+     opacity: 0.2;
+ }
+
+ .gradio-Chatbox .gradio-ChatInputContainer button:active:not(:focus)::before {
+     opacity: 0.5;
+     transform: rotate(-45deg) scaleX(1.5) scaleY(1.3);
+ }
+
+ /* Accordion sections */
+ .accordion-section {
+     margin-top: 20px;
+ }
+
+ /* Progress bars */
+ .progressbar-container {
+     margin-top: 10px;
+ }
+
+ .progressbar-container .progressbar-label {
+     margin-right: 10px;
+ }
+
+ .progressbar-container .progressbar-percentage {
+     float: right;
+ }
+
+ /* Tooltip component */
+ .tooltip-wrapper {
+     position: relative;
+ }
+
+ .tooltip-wrapper .tooltip {
+     visibility: hidden;
+     background-color: #f9f9f9;
+     color: #333;
+     text-align: center;
+     padding: 5px 0;
+     border-radius: 6px;
+     position: absolute;
+     z-index: 1;
+     bottom: 100%;
+     left: 50%;
+     margin-left: -60px;
+     opacity: 0;
+     transition: opacity 0.3s;
+ }
+
+ .tooltip-wrapper .tooltip::after {
+     content: "";
+     position: absolute;
+     top: 100%;
+     left: 50%;
+     margin-left: -5px;
+     border-width: 5px;
+     border-style: solid;
+     border-color: #f9f9f9 transparent transparent transparent;
+ }
+
+ .tooltip-wrapper:hover .tooltip {
+     visibility: visible;
+     opacity: 1;
+ }
+     """
+     demo(custom_css).launch(debug=True)