import gradio as gr
import os
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import HuggingFaceHub
from pathlib import Path
import chromadb
from transformers import AutoTokenizer
import transformers
import torch
import tqdm
import accelerate

# default_persist_directory = './chroma_HF/'

list_llm = ["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2",
    "mistralai/Mistral-7B-Instruct-v0.1", "HuggingFaceH4/zephyr-7b-beta",
    "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2",
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct",
    "tiiuae/falcon-7b-instruct", "google/flan-t5-xxl"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]


# Load PDF documents and create doc splits
def load_doc(list_file_path, chunk_size, chunk_overlap):
    # Processing for one document only
    # loader = PyPDFLoader(file_path)
    # pages = loader.load()
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    # text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=50)
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits


# Create vector database
def create_db(splits, collection_name):
    embedding = HuggingFaceEmbeddings()
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
        # persist_directory=default_persist_directory
    )
    return vectordb


# Load vector database
def load_db():
    embedding = HuggingFaceEmbeddings()
    vectordb = Chroma(
        # persist_directory=default_persist_directory,
        embedding_function=embedding)
    return vectordb
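

# ----------------------------------------------------------------------
# Usage sketch (commented out, not part of the original app): how the
# ingestion helpers above fit together. The PDF path, chunk sizes, and
# collection name below are hypothetical examples.
#
#   doc_splits = load_doc(["./docs/example.pdf"], chunk_size=600, chunk_overlap=50)
#   vector_db = create_db(doc_splits, collection_name="example")
#   retriever = vector_db.as_retriever()
# ----------------------------------------------------------------------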
# Initialize langchain LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    progress(0.1, desc="Initializing HF tokenizer...")
    # HuggingFacePipeline uses a local model
    # Note: it will download the model locally...
    # tokenizer = AutoTokenizer.from_pretrained(llm_model)
    # progress(0.5, desc="Initializing HF pipeline...")
    # pipeline = transformers.pipeline(
    #     "text-generation",
    #     model=llm_model,
    #     tokenizer=tokenizer,
    #     torch_dtype=torch.bfloat16,
    #     trust_remote_code=True,
    #     device_map="auto",
    #     # max_length=1024,
    #     max_new_tokens=max_tokens,
    #     do_sample=True,
    #     top_k=top_k,
    #     num_return_sequences=1,
    #     eos_token_id=tokenizer.eos_token_id
    # )
    # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})

    # HuggingFaceHub uses HF inference endpoints
    progress(0.5, desc="Initializing HF Hub...")
    # Use of trust_remote_code as model_kwargs
    # Warning: langchain issue
    # URL: https://github.com/langchain-ai/langchain/issues/6080
    if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
        llm = HuggingFaceHub(
            repo_id=llm_model,
            model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
        )
    elif llm_model == "microsoft/phi-2":
        raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
        # Unreachable after the raise above; kept as a reference for when trust_remote_code is supported
        llm = HuggingFaceHub(
            repo_id=llm_model,
            model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
        )
    elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
        llm = HuggingFaceHub(
            repo_id=llm_model,
            model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
        )
    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
        raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
        # Unreachable after the raise above; kept as a reference
        llm = HuggingFaceHub(
            repo_id=llm_model,
            model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
        )
    else:
        llm = HuggingFaceHub(
            repo_id=llm_model,
            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
            model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
        )

    progress(0.75, desc="Defining buffer memory...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Defining retrieval chain...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        # combine_docs_chain_kwargs={"prompt": your_prompt},
        return_source_documents=True,
        # return_generated_question=True,
        # verbose=True,
    )
    progress(0.9, desc="Done!")
    return qa_chain
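

# ----------------------------------------------------------------------
# Usage sketch (commented out, not part of the original app): a minimal
# end-to-end call of the chain returned above. The model name, sampling
# parameters, and question are illustrative assumptions; vector_db is a
# database built with create_db().
#
#   qa_chain = initialize_llmchain("HuggingFaceH4/zephyr-7b-beta",
#                                  temperature=0.7, max_tokens=1024, top_k=3,
#                                  vector_db=vector_db)
#   response = qa_chain({"question": "What is this document about?", "chat_history": []})
#   print(response["answer"], response["source_documents"])
# ----------------------------------------------------------------------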
collection_name, "Complete!" def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()): # print("llm_option",llm_option) llm_name = list_llm[llm_option] print("llm_name: ",llm_name) qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress) return qa_chain, "Complete!" def format_chat_history(message, chat_history): formatted_chat_history = [] for user_message, bot_message in chat_history: formatted_chat_history.append(f"User: {user_message}") formatted_chat_history.append(f"Assistant: {bot_message}") return formatted_chat_history def conversation(qa_chain, message, history): formatted_chat_history = format_chat_history(message, history) #print("formatted_chat_history",formatted_chat_history) # Generate response using QA chain response = qa_chain({"question": message, "chat_history": formatted_chat_history}) response_answer = response["answer"] response_sources = response["source_documents"] response_source1 = response_sources[0].page_content.strip() response_source2 = response_sources[1].page_content.strip() # Langchain sources are zero-based response_source1_page = response_sources[0].metadata["page"] + 1 response_source2_page = response_sources[1].metadata["page"] + 1 # print ('chat response: ', response_answer) # print('DB source', response_sources) # Append user message and response to chat history new_history = history + [(message, response_answer)] # return gr.update(value=""), new_history, response_sources[0], response_sources[1] return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page def upload_file(file_obj): list_file_path = [] for idx, file in enumerate(file_obj): file_path = file_obj.name list_file_path.append(file_path) # print(file_path) # initialize_database(file_path, progress) return list_file_path def demo(): with gr.Blocks(theme="base") as demo: vector_db = gr.State() qa_chain = gr.State() collection_name = gr.State() gr.Markdown( """