binqiangliu commited on
Commit
61781c7
1 Parent(s): e8926c9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +112 -0
app.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from dotenv import load_dotenv
3
+ from PyPDF2 import PdfReader
4
+ from langchain.text_splitter import CharacterTextSplitter
5
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
6
+ from langchain.embeddings import HuggingFaceEmbeddings, SentenceTransformerEmbeddings
7
+ from langchain import HuggingFaceHub
8
+ from langchain.vectorstores import FAISS
9
+ from langchain.memory import ConversationBufferMemory
10
+ from langchain.chains import ConversationalRetrievalChain
11
+ from langchain.chat_models import ChatOpenAI
12
+ from htmlTemplates import bot_template, user_template, css
13
+ from transformers import pipeline
14
+ import sys
15
+ import os
16
+ from dotenv import load_dotenv
17
+
18
# Load variables from a local .env file (if any) BEFORE reading the token.
# Previously the module-level os.getenv ran before main() called load_dotenv(),
# so a token defined only in .env was silently read as None.
load_dotenv()
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
19
+
20
def get_pdf_text(pdf_files):
    """Concatenate the extracted text of every page of the uploaded PDFs.

    Args:
        pdf_files: iterable of file-like objects (e.g. Streamlit UploadedFile).

    Returns:
        One string holding the text of all pages, in upload order.
    """
    text = ""
    for pdf_file in pdf_files:
        reader = PdfReader(pdf_file)
        for page in reader.pages:
            # extract_text() may return None for image-only/empty pages;
            # guard so the concatenation never raises TypeError.
            text += page.extract_text() or ""
    return text
27
+
28
def get_chunk_text(text):
    """Split raw document text into overlapping chunks for embedding.

    Uses newline-separated chunks of ~1000 characters with a 200-character
    overlap so context is not lost at chunk boundaries.
    """
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
37
+
38
def get_vector_store(text_chunks):
    """Embed the text chunks and index them in an in-memory FAISS store.

    Alternative embedding backends kept from earlier experimentation:
        OpenAIEmbeddings()
        HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
    """
    embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
    return FAISS.from_texts(texts=text_chunks, embedding=embedder)
46
+
47
def get_conversation_chain(vector_store):
    """Build a conversational retrieval chain over *vector_store*.

    Args:
        vector_store: a FAISS store produced by get_vector_store().

    Returns:
        A ConversationalRetrievalChain with buffered chat memory.
    """
    # OpenAI model alternative:
    #llm = ChatOpenAI()
    # HuggingFace model
    llm = HuggingFaceHub(repo_id="google/flan-t5-xxl")
    #llm = HuggingFaceHub(repo_id="tiiuae/falcon-40b-instruct", model_kwargs={"temperature":0.5, "max_length":512})  # timed out
    #llm = HuggingFaceHub(repo_id="meta-llama/Llama-2-70b-hf", model_kwargs={"min_length":100, "max_length":1024,"temperature":0.1})
    #repo_id="HuggingFaceH4/starchat-beta"
    #llm = HuggingFaceHub(repo_id=repo_id,
    #                     model_kwargs={"min_length":100,
    #                                   "max_new_tokens":1024, "do_sample":True,
    #                                   "temperature":0.1,
    #                                   "top_k":50,
    #                                   "top_p":0.95, "eos_token_id":49155})
    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vector_store.as_retriever(),
        memory=memory,
    )
    # NOTE: removed leftover debug output (print/st.write of the chain object);
    # it dumped chain internals, including model configuration, onto the
    # user-facing page.
    return conversation_chain
74
+
75
def handle_user_input(question):
    """Run *question* through the conversation chain and render the history.

    Even-indexed history entries are user turns, odd-indexed are bot turns.
    """
    # Guard: the chain only exists after PDFs were processed in main();
    # calling None would raise TypeError.
    if st.session_state.conversation is None:
        st.warning("Please upload and process your PDF files first.")
        return
    response = st.session_state.conversation({'question': question})
    st.session_state.chat_history = response['chat_history']
    for i, message in enumerate(st.session_state.chat_history):
        template = user_template if i % 2 == 0 else bot_template
        st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
83
+
84
def main():
    """Streamlit entry point: page setup, question box, and PDF-upload sidebar."""
    load_dotenv()
    st.set_page_config(page_title='Chat with Your own PDFs', page_icon=':books:')
    st.write(css, unsafe_allow_html=True)
    # Initialize session state so later reads never KeyError across reruns.
    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None
    st.header('Chat with Your own PDFs :books:')
    question = st.text_input("Ask anything to your PDF: ")
    if question:
        handle_user_input(question)
    with st.sidebar:
        st.subheader("Upload your Documents Here: ")
        pdf_files = st.file_uploader("Choose your PDF Files and Press OK", type=['pdf'], accept_multiple_files=True)
        if st.button("OK"):
            if not pdf_files:
                # Guard: an empty upload would feed an empty chunk list into
                # FAISS.from_texts, which fails.
                st.warning("Please choose at least one PDF file before pressing OK.")
            else:
                with st.spinner("Processing your PDFs..."):
                    # Get PDF Text
                    raw_text = get_pdf_text(pdf_files)
                    # Get Text Chunks
                    text_chunks = get_chunk_text(raw_text)
                    # Create Vector Store
                    vector_store = get_vector_store(text_chunks)
                    st.write("DONE")
                    # Create conversation chain
                    st.session_state.conversation = get_conversation_chain(vector_store)
110
+
111
# Script entry point: run the Streamlit app when executed directly.
if __name__ == '__main__':
    main()