DHEIVER committed on
Commit 7e34d60 · verified · 1 Parent(s): ccd769b

Update app.py

Files changed (1)
  1. app.py +59 -198
app.py CHANGED
@@ -15,43 +15,38 @@ from pathlib import Path
import chromadb
from unidecode import unidecode

- from transformers import AutoTokenizer
import transformers
import torch
import tqdm
import accelerate
import re

-
-
- # default_persist_directory = './chroma_HF/'
- list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1", \
- "google/gemma-7b-it","google/gemma-2b-it", \
- "HuggingFaceH4/zephyr-7b-beta", "HuggingFaceH4/zephyr-7b-gemma-v0.1", \
- "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2", \
- "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct", \
- "google/flan-t5-xxl"
]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]

- # Load PDF document and create doc splits
def load_doc(list_file_path, chunk_size, chunk_overlap):
- # Processing for one document only
- # loader = PyPDFLoader(file_path)
- # pages = loader.load()
loaders = [PyPDFLoader(x) for x in list_file_path]
pages = []
for loader in loaders:
pages.extend(loader.load())
- # text_splitter = RecursiveCharacterTextSplitter(chunk_size = 600, chunk_overlap = 50)
text_splitter = RecursiveCharacterTextSplitter(
- chunk_size = chunk_size,
- chunk_overlap = chunk_overlap)
doc_splits = text_splitter.split_documents(pages)
return doc_splits

-
- # Create vector database
def create_db(splits, collection_name):
embedding = HuggingFaceEmbeddings()
new_client = chromadb.EphemeralClient()
@@ -60,193 +55,94 @@ def create_db(splits, collection_name):
embedding=embedding,
client=new_client,
collection_name=collection_name,
- # persist_directory=default_persist_directory
)
return vectordb

-
- # Load vector database
- def load_db():
- embedding = HuggingFaceEmbeddings()
- vectordb = Chroma(
- # persist_directory=default_persist_directory,
- embedding_function=embedding)
- return vectordb
-
-
- # Initialize langchain LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
progress(0.1, desc="Initializing HF tokenizer...")
- # HuggingFacePipeline uses local model
- # Note: it will download model locally...
- # tokenizer=AutoTokenizer.from_pretrained(llm_model)
- # progress(0.5, desc="Initializing HF pipeline...")
- # pipeline=transformers.pipeline(
- # "text-generation",
- # model=llm_model,
- # tokenizer=tokenizer,
- # torch_dtype=torch.bfloat16,
- # trust_remote_code=True,
- # device_map="auto",
- # # max_length=1024,
- # max_new_tokens=max_tokens,
- # do_sample=True,
- # top_k=top_k,
- # num_return_sequences=1,
- # eos_token_id=tokenizer.eos_token_id
- # )
- # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
-
- # HuggingFaceHub uses HF inference endpoints
- progress(0.5, desc="Initializing HF Hub...")
- # Use of trust_remote_code as model_kwargs
- # Warning: langchain issue
- # URL: https://github.com/langchain-ai/langchain/issues/6080
- if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
- llm = HuggingFaceEndpoint(
- repo_id=llm_model,
- # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
- temperature = temperature,
- max_new_tokens = max_tokens,
- top_k = top_k,
- load_in_8bit = True,
- )
- elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1","mosaicml/mpt-7b-instruct"]:
- raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
- llm = HuggingFaceEndpoint(
- repo_id=llm_model,
- temperature = temperature,
- max_new_tokens = max_tokens,
- top_k = top_k,
- )
- elif llm_model == "microsoft/phi-2":
- # raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
- llm = HuggingFaceEndpoint(
- repo_id=llm_model,
- # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
- temperature = temperature,
- max_new_tokens = max_tokens,
- top_k = top_k,
- trust_remote_code = True,
- torch_dtype = "auto",
- )
- elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
- llm = HuggingFaceEndpoint(
- repo_id=llm_model,
- # model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
- temperature = temperature,
- max_new_tokens = 250,
- top_k = top_k,
- )
- elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
- raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
- llm = HuggingFaceEndpoint(
- repo_id=llm_model,
- # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
- temperature = temperature,
- max_new_tokens = max_tokens,
- top_k = top_k,
- )
- else:
- llm = HuggingFaceEndpoint(
- repo_id=llm_model,
- # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
- # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
- temperature = temperature,
- max_new_tokens = max_tokens,
- top_k = top_k,
- )
progress(0.75, desc="Defining buffer memory...")
memory = ConversationBufferMemory(
memory_key="chat_history",
output_key='answer',
return_messages=True
)
- # retriever=vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
- retriever=vector_db.as_retriever()
progress(0.8, desc="Defining retrieval chain...")
qa_chain = ConversationalRetrievalChain.from_llm(
llm,
retriever=retriever,
chain_type="stuff",
memory=memory,
- # combine_docs_chain_kwargs={"prompt": your_prompt})
return_source_documents=True,
- #return_generated_question=False,
verbose=False,
)
progress(0.9, desc="Done!")
return qa_chain

-
- # Generate collection name for vector database
- # - Use filepath as input, ensuring unicode text
def create_collection_name(filepath):
- # Extract filename without extension
collection_name = Path(filepath).stem
- # Fix potential issues from naming convention
- ## Remove space
- collection_name = collection_name.replace(" ","-")
- ## ASCII transliterations of Unicode text
collection_name = unidecode(collection_name)
- ## Remove special characters
- #collection_name = re.findall("[\dA-Za-z]*", collection_name)[0]
collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
- ## Limit length to 50 characters
collection_name = collection_name[:50]
- ## Minimum length of 3 characters
if len(collection_name) < 3:
collection_name = collection_name + 'xyz'
- ## Enforce start and end as alphanumeric character
if not collection_name[0].isalnum():
collection_name = 'A' + collection_name[1:]
if not collection_name[-1].isalnum():
collection_name = collection_name[:-1] + 'Z'
- print('Filepath: ', filepath)
- print('Collection name: ', collection_name)
return collection_name

-
- # Initialize database
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
- # Create list of documents (when valid)
list_file_path = [x.name for x in list_file_obj if x is not None]
- # Create collection_name for vector database
progress(0.1, desc="Creating collection name...")
collection_name = create_collection_name(list_file_path[0])
progress(0.25, desc="Loading document...")
- # Load document and create splits
doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
- # Create or load vector database
progress(0.5, desc="Generating vector database...")
- # global vector_db
vector_db = create_db(doc_splits, collection_name)
progress(0.9, desc="Done!")
return vector_db, collection_name, "Complete!"

-
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
- # print("llm_option",llm_option)
llm_name = list_llm[llm_option]
- print("llm_name: ",llm_name)
qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
return qa_chain, "Complete!"

-
def format_chat_history(message, chat_history):
formatted_chat_history = []
for user_message, bot_message in chat_history:
formatted_chat_history.append(f"User: {user_message}")
formatted_chat_history.append(f"Assistant: {bot_message}")
return formatted_chat_history
-

def conversation(qa_chain, message, history):
formatted_chat_history = format_chat_history(message, history)
- #print("formatted_chat_history",formatted_chat_history)
-
- # Generate response using QA chain
response = qa_chain({"question": message, "chat_history": formatted_chat_history})
response_answer = response["answer"]
if response_answer.find("Helpful Answer:") != -1:
@@ -255,29 +151,13 @@ def conversation(qa_chain, message, history):
response_source1 = response_sources[0].page_content.strip()
response_source2 = response_sources[1].page_content.strip()
response_source3 = response_sources[2].page_content.strip()
- # Langchain sources are zero-based
response_source1_page = response_sources[0].metadata["page"] + 1
response_source2_page = response_sources[1].metadata["page"] + 1
response_source3_page = response_sources[2].metadata["page"] + 1
- # print ('chat response: ', response_answer)
- # print('DB source', response_sources)
-
- # Append user message and response to chat history
new_history = history + [(message, response_answer)]
- # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
-
-
- def upload_file(file_obj):
- list_file_path = []
- for idx, file in enumerate(file_obj):
- file_path = file_obj.name
- list_file_path.append(file_path)
- # print(file_path)
- # initialize_database(file_path, progress)
- return list_file_path
-

def demo():
with gr.Blocks(theme="base") as demo:
vector_db = gr.State()
@@ -297,16 +177,15 @@ def demo():
with gr.Tab("Step 1 - Upload PDF"):
with gr.Row():
document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
- # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)

with gr.Tab("Step 2 - Process document"):
with gr.Row():
- db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
with gr.Accordion("Advanced options - Document text splitter", open=False):
with gr.Row():
- slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
with gr.Row():
- slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
with gr.Row():
db_progress = gr.Textbox(label="Vector database initialization", value="None")
with gr.Row():
@@ -314,17 +193,16 @@ def demo():

with gr.Tab("Step 3 - Initialize QA chain"):
with gr.Row():
- llm_btn = gr.Radio(list_llm_simple, \
- label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
with gr.Accordion("Advanced options - LLM model", open=False):
with gr.Row():
- slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
with gr.Row():
- slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
with gr.Row():
- slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
with gr.Row():
- llm_progress = gr.Textbox(value="None",label="QA chain initialization")
with gr.Row():
qachain_btn = gr.Button("Initialize Question Answering chain")
@@ -346,33 +224,16 @@ def demo():
submit_btn = gr.Button("Submit message")
clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")

- # Preprocessing events
- #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
- db_btn.click(initialize_database, \
- inputs=[document, slider_chunk_size, slider_chunk_overlap], \
- outputs=[vector_db, collection_name, db_progress])
- qachain_btn.click(initialize_LLM, \
- inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
- outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
- inputs=None, \
- outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
- queue=False)
-
- # Chatbot events
- msg.submit(conversation, \
- inputs=[qa_chain, msg, chatbot], \
- outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
- queue=False)
- submit_btn.click(conversation, \
- inputs=[qa_chain, msg, chatbot], \
- outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
- queue=False)
- clear_btn.click(lambda:[None,"",0,"",0,"",0], \
- inputs=None, \
- outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
- queue=False)
demo.queue().launch(debug=True)

-
if __name__ == "__main__":
- demo()
import chromadb
from unidecode import unidecode

+ from transformers import AutoTokenizer, pipeline
import transformers
import torch
import tqdm
import accelerate
import re

+ # List of free models that do not require an API key
+ list_llm = [
+ "mistralai/Mistral-7B-Instruct-v0.2",
+ "mistralai/Mistral-7B-Instruct-v0.1",
+ "google/flan-t5-xxl",
+ "HuggingFaceH4/zephyr-7b-beta",
+ "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+ "microsoft/phi-2"
]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]

+ # Function to load the PDF document and split it into chunks
def load_doc(list_file_path, chunk_size, chunk_overlap):
loaders = [PyPDFLoader(x) for x in list_file_path]
pages = []
for loader in loaders:
pages.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(
+ chunk_size=chunk_size,
+ chunk_overlap=chunk_overlap
+ )
doc_splits = text_splitter.split_documents(pages)
return doc_splits

+ # Function to create the vector database
def create_db(splits, collection_name):
embedding = HuggingFaceEmbeddings()
new_client = chromadb.EphemeralClient()

embedding=embedding,
client=new_client,
collection_name=collection_name,
)
return vectordb

+ # Function to initialize the LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
progress(0.1, desc="Initializing HF tokenizer...")

+ # Load the model tokenizer and pipeline
+ tokenizer = AutoTokenizer.from_pretrained(llm_model)
+ progress(0.5, desc="Initializing HF pipeline...")
+ pipeline_model = transformers.pipeline(
+ "text-generation",
+ model=llm_model,
+ tokenizer=tokenizer,
+ torch_dtype=torch.bfloat16,
+ device_map="auto",
+ max_new_tokens=max_tokens,
+ do_sample=True,
+ top_k=top_k,
+ temperature=temperature,
+ )
+ llm = HuggingFacePipeline(pipeline=pipeline_model)
+
progress(0.75, desc="Defining buffer memory...")
memory = ConversationBufferMemory(
memory_key="chat_history",
output_key='answer',
return_messages=True
)
+ retriever = vector_db.as_retriever()
+
progress(0.8, desc="Defining retrieval chain...")
qa_chain = ConversationalRetrievalChain.from_llm(
llm,
retriever=retriever,
chain_type="stuff",
memory=memory,
return_source_documents=True,
verbose=False,
)
progress(0.9, desc="Done!")
return qa_chain

+ # Function to generate the collection name for the vector database
def create_collection_name(filepath):
collection_name = Path(filepath).stem
+ collection_name = collection_name.replace(" ", "-")
collection_name = unidecode(collection_name)
collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
collection_name = collection_name[:50]
if len(collection_name) < 3:
collection_name = collection_name + 'xyz'
if not collection_name[0].isalnum():
collection_name = 'A' + collection_name[1:]
if not collection_name[-1].isalnum():
collection_name = collection_name[:-1] + 'Z'
return collection_name

+ # Function to initialize the database
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
list_file_path = [x.name for x in list_file_obj if x is not None]
progress(0.1, desc="Creating collection name...")
collection_name = create_collection_name(list_file_path[0])
progress(0.25, desc="Loading document...")
doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
progress(0.5, desc="Generating vector database...")
vector_db = create_db(doc_splits, collection_name)
progress(0.9, desc="Done!")
return vector_db, collection_name, "Complete!"

+ # Function to initialize the QA chain
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
llm_name = list_llm[llm_option]
+ print("llm_name: ", llm_name)
qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
return qa_chain, "Complete!"

+ # Function to format the chat history
def format_chat_history(message, chat_history):
formatted_chat_history = []
for user_message, bot_message in chat_history:
formatted_chat_history.append(f"User: {user_message}")
formatted_chat_history.append(f"Assistant: {bot_message}")
return formatted_chat_history

+ # Function to generate the conversation
def conversation(qa_chain, message, history):
formatted_chat_history = format_chat_history(message, history)
response = qa_chain({"question": message, "chat_history": formatted_chat_history})
response_answer = response["answer"]
if response_answer.find("Helpful Answer:") != -1:

response_source1 = response_sources[0].page_content.strip()
response_source2 = response_sources[1].page_content.strip()
response_source3 = response_sources[2].page_content.strip()
response_source1_page = response_sources[0].metadata["page"] + 1
response_source2_page = response_sources[1].metadata["page"] + 1
response_source3_page = response_sources[2].metadata["page"] + 1
new_history = history + [(message, response_answer)]
return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page

+ # Main function to run the interface
def demo():
with gr.Blocks(theme="base") as demo:
vector_db = gr.State()

with gr.Tab("Step 1 - Upload PDF"):
with gr.Row():
document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")

with gr.Tab("Step 2 - Process document"):
with gr.Row():
+ db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
with gr.Accordion("Advanced options - Document text splitter", open=False):
with gr.Row():
+ slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
with gr.Row():
+ slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
with gr.Row():
db_progress = gr.Textbox(label="Vector database initialization", value="None")
with gr.Row():

with gr.Tab("Step 3 - Initialize QA chain"):
with gr.Row():
+ llm_btn = gr.Radio(list_llm_simple, label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
with gr.Accordion("Advanced options - LLM model", open=False):
with gr.Row():
+ slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
with gr.Row():
+ slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
with gr.Row():
+ slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
with gr.Row():
+ llm_progress = gr.Textbox(value="None", label="QA chain initialization")
with gr.Row():
qachain_btn = gr.Button("Initialize Question Answering chain")

submit_btn = gr.Button("Submit message")
clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")

+ # Preprocessing events
+ db_btn.click(initialize_database, inputs=[document, slider_chunk_size, slider_chunk_overlap], outputs=[vector_db, collection_name, db_progress])
+ qachain_btn.click(initialize_LLM, inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], inputs=None, outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+
+ # Chatbot events
+ msg.submit(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+ submit_btn.click(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+ clear_btn.click(lambda:[None,"",0,"",0,"",0], inputs=None, outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+
demo.queue().launch(debug=True)

if __name__ == "__main__":
+ demo()
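
For reference, a minimal standalone sketch of the local-pipeline setup that the updated initialize_llmchain now relies on: a transformers text-generation pipeline wrapped in LangChain's HuggingFacePipeline instead of a hosted HuggingFaceEndpoint. The model name, the langchain_community import path, and the invoke call are assumptions for illustration and are not part of app.py; adjust them to the installed LangChain version.

# Minimal sketch (assumptions: TinyLlama as an example model,
# langchain_community's HuggingFacePipeline wrapper; not part of the commit).
import torch
import transformers
from transformers import AutoTokenizer
from langchain_community.llms import HuggingFacePipeline

llm_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # assumed small model for a local test

# Build the local text-generation pipeline, as initialize_llmchain does above.
tokenizer = AutoTokenizer.from_pretrained(llm_model)
pipe = transformers.pipeline(
    "text-generation",
    model=llm_model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    max_new_tokens=256,
    do_sample=True,
    top_k=3,
    temperature=0.7,
)

# Wrap it so it can be passed to ConversationalRetrievalChain.from_llm.
llm = HuggingFacePipeline(pipeline=pipe)
print(llm.invoke("What does a retrieval-augmented chatbot do?"))

As the old commented-out code already noted, the local-pipeline route downloads and runs the model weights inside the Space itself, so memory and startup time scale with the chosen model rather than depending on the hosted inference endpoints.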