import streamlit as st
from PIL import Image
from dotenv import load_dotenv
from streamlit_extras.add_vertical_space import add_vertical_space
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain.chains.question_answering import load_qa_chain
from langchain_community.llms import HuggingFaceHub

img = Image.open('image/nexio_logo1.png')
st.set_page_config(page_title="PDF Chatbot App", page_icon=img, layout="centered")

with st.sidebar:
    st.title('🤖 AI PDF Chatbot 💬')
    st.markdown('''
    ## About
    This app is an AI chatbot for PDF files.
    ''')
    add_vertical_space(12)
    st.write('Powered by')
    st.image(image='image/nexio_logo2.png', width=150)

# Load the HuggingFace API key from a local .env file.
load_dotenv()


def main():
    st.header("Chat with PDF 💬")

    # Upload a PDF file.
    pdf = st.file_uploader("Upload your PDF file", type='pdf')

    if pdf is not None:
        # Extract plain text from every page; extract_text() can return None
        # for pages without a text layer, so fall back to an empty string.
        pdf_reader = PdfReader(pdf)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text() or ""

        # Split the text into overlapping chunks for embedding.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
        )
        chunks = text_splitter.split_text(text=text)

        # Embed the chunks and index them in an in-memory Chroma store.
        embeddings = HuggingFaceEmbeddings()
        vector_store = Chroma.from_texts(chunks, embeddings)

        # Accept a user question.
        query = st.text_input("Ask questions about your PDF file:")

        if query:
            # Hub-hosted LLM; used by the commented-out QA chains below.
            llm = HuggingFaceHub(
                repo_id="openai-community/gpt2-medium",
                model_kwargs={"temperature": 1.0, "max_length": 500},
            )
            # Alternative: load the model locally with transformers instead of the Hub.
            #from transformers import AutoModelForCausalLM
            #PATH = 'model/'
            #llm = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")

            # Show the stored chunk most similar to the question.
            docs = vector_store.similarity_search(query)
            st.write(docs[0].page_content)

            # Alternative 1: answer with a "stuff" QA chain over the top hit.
            #docs = vector_store.similarity_search(query=query, k=1)
            #chain = load_qa_chain(llm=llm, chain_type="stuff")
            #response = chain.run(input_documents=docs, question=query)
            #st.write(response)

            # Alternative 2: answer with a RetrievalQA chain. Note that
            # from_chain_type expects a retriever object, not a string of
            # page content, so wrap the store with as_retriever().
            #chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vector_store.as_retriever())
            #response = chain.run(query)
            #st.write(response)


if __name__ == '__main__':
    main()
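
# ---------------------------------------------------------------------------
# Running the app: a minimal sketch, assuming this script is saved as app.py
# (the filename is an assumption) and the HuggingFace token lives in a local
# .env file. HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment
# variable, which load_dotenv() populates from .env:
#
#   pip install streamlit streamlit-extras PyPDF2 python-dotenv \
#       langchain langchain-community chromadb sentence-transformers
#   echo 'HUGGINGFACEHUB_API_TOKEN=hf_...' > .env
#   streamlit run app.py
#
# sentence-transformers backs HuggingFaceEmbeddings (its default model is
# sentence-transformers/all-mpnet-base-v2) and chromadb backs the Chroma
# vector store; neither is imported directly, so both are easy to forget.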