import gradio as gr
import os
from pathlib import Path
import re
from unidecode import unidecode
import chromadb
from langchain_community.vectorstores import Chroma, FAISS, ScaNN, Milvus
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain, ConversationChain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFacePipeline, HuggingFaceEndpoint
from langchain.memory import ConversationBufferMemory
from huggingface_hub import InferenceClient
import torch
# Hugging Face API token (read from the Space / environment secrets)
api_token = os.getenv("HF_TOKEN")

# Inference API client for the no-document chat tab; the token is passed so
# requests to the serverless endpoint are authenticated
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.3",
    token=api_token,
)

list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
def load_doc(list_file_path, chunk_size, chunk_overlap):
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits
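
# --- Sketch (not in the original file): the otherwise-unused Path / re /
# unidecode imports and the `collection_name` state suggest a helper like
# this one, which derives a Chroma-safe collection name from the first PDF's
# filename. The exact sanitization rules below are an assumption.
def create_collection_name(filepath):
    collection_name = Path(filepath).stem
    collection_name = unidecode(collection_name)              # strip accents
    collection_name = re.sub("[^A-Za-z0-9]+", "-", collection_name)
    collection_name = collection_name[:50]                    # Chroma limits name length
    if len(collection_name) < 3:
        collection_name = collection_name + "xyz"             # enforce minimum length
    if not collection_name[0].isalnum():
        collection_name = "A" + collection_name[1:]           # must start alphanumeric
    if not collection_name[-1].isalnum():
        collection_name = collection_name[:-1] + "Z"          # must end alphanumeric
    return collection_name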
def create_db(splits, collection_name, db_type):
    embedding = HuggingFaceEmbeddings()
    if db_type == "ChromaDB":
        new_client = chromadb.EphemeralClient()
        vectordb = Chroma.from_documents(
            documents=splits,
            embedding=embedding,
            client=new_client,
            collection_name=collection_name,
        )
    elif db_type == "FAISS":
        vectordb = FAISS.from_documents(
            documents=splits,
            embedding=embedding
        )
    elif db_type == "ScaNN":
        vectordb = ScaNN.from_documents(
            documents=splits,
            embedding=embedding
        )
    elif db_type == "Milvus":
        vectordb = Milvus.from_documents(
            documents=splits,
            embedding=embedding,
            collection_name=collection_name,
        )
    else:
        raise ValueError(f"Unsupported vector database type: {db_type}")
    return vectordb
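
# --- Sketch (not in the original file): the UI wiring below calls
# `initialize_database(document, chunk_size, chunk_overlap, db_type)` and
# expects (vector_db, collection_name, status) back, but no such function is
# defined in this file. A minimal version under those assumptions:
def initialize_database(list_file_obj, chunk_size, chunk_overlap, db_type, progress=gr.Progress()):
    # gr.Files yields file wrappers; `.name` is assumed to hold the temp path
    list_file_path = [x.name for x in list_file_obj if x is not None]
    progress(0.1, desc="Creating collection name...")
    collection_name = create_collection_name(list_file_path[0])
    progress(0.25, desc="Loading document...")
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    progress(0.5, desc="Generating vector database...")
    vector_db = create_db(doc_splits, collection_name, db_type)
    progress(0.9, desc="Done!")
    return vector_db, collection_name, "Complete!"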
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, initial_prompt, progress=gr.Progress()):
    # The LLM radio uses type="index", so llm_model arrives as a list index;
    # map it back to a repo id before calling the endpoint
    if isinstance(llm_model, int):
        llm_model = list_llm[llm_model]
    progress(0.1, desc="Initializing HF tokenizer...")
    progress(0.5, desc="Initializing HF Hub...")
    llm = HuggingFaceEndpoint(
        repo_id=llm_model,
        huggingfacehub_api_token=api_token,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )
    progress(0.75, desc="Defining buffer memory...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Defining retrieval chain...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    qa_chain({"question": initial_prompt})  # Prime the chain with the initial prompt
    progress(0.9, desc="Done!")
    # Also return a status string, since the click handler expects
    # outputs=[qa_chain, llm_progress]
    return qa_chain, "Complete!"
def initialize_llm_no_doc(llm_model, temperature, max_tokens, top_k, initial_prompt, progress=gr.Progress()):
    # Same index-to-repo-id mapping as in initialize_llmchain
    if isinstance(llm_model, int):
        llm_model = list_llm[llm_model]
    progress(0.1, desc="Initializing HF tokenizer...")
    progress(0.5, desc="Initializing HF Hub...")
    llm = HuggingFaceEndpoint(
        repo_id=llm_model,
        huggingfacehub_api_token=api_token,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )
    progress(0.75, desc="Defining buffer memory...")
    # ConversationChain's default prompt uses "history"/"input", so the memory
    # keeps its default keys rather than "chat_history"/"answer"
    memory = ConversationBufferMemory()
    conversation_chain = ConversationChain(llm=llm, memory=memory, verbose=False)
    conversation_chain({"input": initial_prompt})  # Prime the chain with the initial prompt
    progress(0.9, desc="Done!")
    return conversation_chain
def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history
def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    if "Helpful Answer:" in response_answer:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
def conversation_no_doc(llm, message, history):
    # ConversationChain tracks history through its own memory and expects the
    # "input" key; its answer comes back under "response"
    response = llm({"input": message})
    response_answer = response["response"]
    new_history = history + [(message, response_answer)]
    return llm, gr.update(value=""), new_history
def upload_file(file_obj):
    list_file_path = []
    for file in file_obj:
        list_file_path.append(file.name)
    return list_file_path
def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()
        initial_prompt = gr.State("")
        llm_no_doc = gr.State()
        gr.Markdown(
            """<center><h2>lucIAna</h2></center>
            <h3>Olá, sou a 2ª versão</h3>""")
        gr.Markdown(
            """<b>Note:</b> Esta é a lucIAna, primeira versão da IA para seus documentos PDF.
            Este chatbot leva em consideração perguntas anteriores ao gerar respostas (por meio de memória conversacional) e inclui referências a documentos para fins de clareza.
            """)
with gr.Tab("Step 1 - Upload PDF"): | |
with gr.Row(): | |
document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)") | |
with gr.Tab("Step 2 - Process document"): | |
with gr.Row(): | |
db_type_radio = gr.Radio(["ChromaDB", "FAISS", "ScaNN", "Milvus"], label="Vector database type", value="ChromaDB", type="value", info="Choose your vector database") | |
with gr.Accordion("Advanced options - Document text splitter", open=False): | |
with gr.Row(): | |
slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True) | |
with gr.Row(): | |
slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True) | |
with gr.Row(): | |
db_progress = gr.Textbox(label="Vector database initialization", value="None") | |
with gr.Row(): | |
db_btn = gr.Button("Generate vector database") | |
with gr.Tab("Step 3 - Set Initial Prompt"): | |
with gr.Row(): | |
prompt_input = gr.Textbox(label="Initial Prompt", lines=5, value="Você é um advogado sênior, onde seu papel é analisar e trazer as informações sem inventar, dando a sua melhor opinião sempre trazendo contexto e referência. Aprenda o que é jurisprudência.") | |
with gr.Row(): | |
set_prompt_btn = gr.Button("Set Prompt") | |
with gr.Tab("Step 4 - Initialize QA chain"): | |
with gr.Row(): | |
llm_btn = gr.Radio(list_llm_simple, | |
label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model") | |
with gr.Accordion("Advanced options - LLM model", open=False): | |
with gr.Row(): | |
slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True) | |
with gr.Row(): | |
slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True) | |
with gr.Row(): | |
slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True) | |
with gr.Row(): | |
llm_progress = gr.Textbox(value="None", label="QA chain initialization") | |
with gr.Row(): | |
qachain_btn = gr.Button("Initialize Question Answering chain") | |
with gr.Tab("Step 5 - Chatbot with document"): | |
chatbot = gr.Chatbot(height=300) | |
with gr.Accordion("Advanced - Document references", open=False): | |
with gr.Row(): | |
doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20) | |
source1_page = gr.Number(label="Page", scale=1) | |
with gr.Row(): | |
doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20) | |
source2_page = gr.Number(label="Page", scale=1) | |
with gr.Row(): | |
doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20) | |
source3_page = gr.Number(label="Page", scale=1) | |
with gr.Row(): | |
msg = gr.Textbox(placeholder="Type message (e.g. 'What is this document about?')", container=True) | |
with gr.Row(): | |
submit_btn = gr.Button("Submit message") | |
clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation") | |
with gr.Tab("Step 6 - Chatbot without document"): | |
chatbot_no_doc = gr.Chatbot(height=300) | |
additional_inputs=[ | |
gr.Slider( | |
label="Temperature", | |
value=0.9, | |
minimum=0.0, | |
maximum=1.0, | |
step=0.05, | |
interactive=True, | |
info="Higher values produce more diverse outputs", | |
), | |
gr.Slider( | |
label="Max new tokens", | |
value=256, | |
minimum=0, | |
maximum=1048, | |
step=64, | |
interactive=True, | |
info="The maximum numbers of new tokens", | |
), | |
gr.Slider( | |
label="Top-p (nucleus sampling)", | |
value=0.90, | |
minimum=0.0, | |
maximum=1, | |
step=0.05, | |
interactive=True, | |
info="Higher values sample more low-probability tokens", | |
), | |
gr.Slider( | |
label="Repetition penalty", | |
value=1.2, | |
minimum=1.0, | |
maximum=2.0, | |
step=0.05, | |
interactive=True, | |
info="Penalize repeated tokens", | |
) | |
] | |
with gr.Row(): | |
msg_no_doc = gr.Textbox(placeholder="Type message to chat with lucIAna", container=True) | |
with gr.Row(): | |
submit_btn_no_doc = gr.Button("Submit message") | |
clear_btn_no_doc = gr.ClearButton([msg_no_doc, chatbot_no_doc], value="Clear conversation") | |
gr.ChatInterface( | |
fn=generate, | |
chatbot=chatbot_no_doc, | |
additional_inputs=additional_inputs, | |
title="Mistral 7B v0.3" | |
) | |
        # Preprocessing events
        db_btn.click(initialize_database,
            inputs=[document, slider_chunk_size, slider_chunk_overlap, db_type_radio],
            outputs=[vector_db, collection_name, db_progress])
        # Store the raw prompt text in the initial_prompt State
        set_prompt_btn.click(lambda prompt: prompt,
            inputs=prompt_input,
            outputs=initial_prompt)
        qachain_btn.click(initialize_llmchain,
            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db, initial_prompt],
            outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)

        # Chatbot events with document
        msg.submit(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
        submit_btn.click(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)

        # Initialize LLM without document for conversation
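        # --- Sketch (not in the original wiring): nothing above ever fills the
        # `llm_no_doc` state, so the submit button below would receive None.
        # One assumption-laden way to close that gap is to build the plain
        # conversation chain whenever the prompt is set:
        set_prompt_btn.click(initialize_llm_no_doc,
            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, prompt_input],
            outputs=[llm_no_doc])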
        submit_btn_no_doc.click(conversation_no_doc,
            inputs=[llm_no_doc, msg_no_doc, chatbot_no_doc],
            outputs=[llm_no_doc, msg_no_doc, chatbot_no_doc],
            queue=False)
        clear_btn_no_doc.click(lambda: [None, ""],
            inputs=None,
            outputs=[chatbot_no_doc, msg_no_doc],
            queue=False)

    demo.queue().launch(debug=True, share=True)


if __name__ == "__main__":
    demo()