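# Gradio app: build a per-session FAISS vector store from uploaded PDF / DOCX / TXT / ZIP files
# (or PDFs found via DuckDuckGo), then summarize the indexed documents and answer questions
# about them with gpt-3.5-turbo through LangChain.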
import langchain
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import UnstructuredPDFLoader, UnstructuredWordDocumentLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain import HuggingFaceHub
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from zipfile import ZipFile
import gradio as gr
import openpyxl
import os
import shutil
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
import tiktoken
import secrets
import openai
import time
from duckduckgo_search import DDGS
import requests
import tempfile
tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")

# create the length function
def tiktoken_len(text):
    tokens = tokenizer.encode(
        text,
        disallowed_special=()
    )
    return len(tokens)
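
# The splitter below measures chunk length in gpt-3.5-turbo tokens (via tiktoken_len),
# producing ~600-token chunks with a 200-token overlap so context is preserved across splits.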
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=600,
    chunk_overlap=200,
    length_function=tiktoken_len,
    separators=["\n\n", "\n", " ", ""]
)

embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

foo = Document(page_content='foo is fou!', metadata={"source": 'foo source'})
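# 'foo' is a placeholder document used to seed a brand-new FAISS index
# (FAISS.from_documents needs at least one document; see embed_files below).
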
def reset_database(ui_session_id):
    session_id = f"PDFAISS-{ui_session_id}"
    if 'drive' in session_id:
        print("RESET DATABASE: session_id contains 'drive' !!")
        return None
    try:
        shutil.rmtree(session_id)
    except:
        print(f'no {session_id} directory present')
    try:
        os.remove(f"{session_id}.zip")
    except:
        print(f"no {session_id}.zip present")
    return None

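# Duplicate detection: compare the first few chunks of an incoming document against the
# existing index; if their summed similarity-search distances are near zero, the document
# is already stored and is skipped.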
def is_duplicate(split_docs, db):
    epsilon = 0.0
    print(f"DUPLICATE: Treating: {split_docs[0].metadata['source'].split('/')[-1]}")
    for i in range(min(3, len(split_docs))):
        query = split_docs[i].page_content
        docs = db.similarity_search_with_score(query, k=1)
        _, score = docs[0]
        epsilon += score
    print(f"DUPLICATE: epsilon: {epsilon}")
    return epsilon < 0.1

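# Merge pre-split chunks into the session database in batches of 10, building a small
# temporary FAISS index per batch and folding it into the main one with merge_from.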
def merge_split_docs_to_db(split_docs, db, progress, progress_step=0.1):
    progress(progress_step, desc="merging docs")
    if len(split_docs) == 0:
        print("MERGE to db: NO docs!!")
        return db  # nothing to add: return the database unchanged
    filename = split_docs[0].metadata['source']
    if is_duplicate(split_docs, db):
        print(f"MERGE: Document is duplicated: {filename}")
        return db  # already indexed: return the database unchanged
    print(f"MERGE: number of split docs: {len(split_docs)}")
    batch = 10
    for i in range(0, len(split_docs), batch):
        progress(i / len(split_docs), desc=f"added {i} chunks of {len(split_docs)} chunks")
        db1 = FAISS.from_documents(split_docs[i:i+batch], embeddings)
        db.merge_from(db1)
    return db

def merge_pdf_to_db(filename, db, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking pdf')
    doc = UnstructuredPDFLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step += 0.3
    progress(progress_step, 'pdf unpacked')
    return merge_split_docs_to_db(split_docs, db, progress, progress_step)

def merge_docx_to_db(filename, db, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking docx')
    doc = UnstructuredWordDocumentLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step += 0.3
    progress(progress_step, 'docx unpacked')
    return merge_split_docs_to_db(split_docs, db, progress, progress_step)

def merge_txt_to_db(filename, db, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking txt')
    with open(filename) as f:
        docs = text_splitter.split_text(f.read())
    split_docs = [Document(page_content=doc, metadata={'source': filename.split('/')[-1]}) for doc in docs]
    progress_step += 0.3
    progress(progress_step, 'txt unpacked')
    return merge_split_docs_to_db(split_docs, db, progress, progress_step)

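# A ZIP upload is either a previously exported database (it contains index.faiss, in which
# case it is loaded and merged directly) or a plain archive of .pdf/.docx/.txt files, which
# are extracted and indexed one by one.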
def unpack_zip_file(filename, db, progress):
    with ZipFile(filename, 'r') as zipObj:
        contents = zipObj.namelist()
    print(f"unpack zip: contents: {contents}")
    tmp_directory = filename.split('/')[-1].split('.')[-2]
    shutil.unpack_archive(filename, tmp_directory)

    if 'index.faiss' in [item.lower() for item in contents]:
        db2 = FAISS.load_local(tmp_directory, embeddings)
        db.merge_from(db2)
        return db

    for file in contents:
        if file.lower().endswith('.docx'):
            db = merge_docx_to_db(f"{tmp_directory}/{file}", db, progress)
        if file.lower().endswith('.pdf'):
            db = merge_pdf_to_db(f"{tmp_directory}/{file}", db, progress)
        if file.lower().endswith('.txt'):
            db = merge_txt_to_db(f"{tmp_directory}/{file}", db, progress)
    return db

def add_files_to_zip(session_id):
    zip_file_name = f"{session_id}.zip"
    with ZipFile(zip_file_name, "w") as zipObj:
        for root, dirs, files in os.walk(session_id):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                arcname = os.path.relpath(file_path, session_id)
                zipObj.write(file_path, arcname)


## Search files functions ##
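# Query DuckDuckGo for PDFs on the requested topic and return up to max_references
# "TITLE -- BODY -- URL" strings for display in the dropdown.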
def search_docs(topic, max_references):
    print(f"SEARCH PDF : {topic}")
    doc_list = []
    with DDGS() as ddgs:
        i = 0
        for r in ddgs.text('{} filetype:pdf'.format(topic), region='wt-wt', safesearch='On', timelimit='n'):
            #doc_list.append(str(r))
            if i >= max_references:
                break
            doc_list.append("TITLE : " + r['title'] + " -- BODY : " + r['body'] + " -- URL : " + r['href'])
            i += 1
    return doc_list

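# Download each referenced URL (the last token of every dropdown entry) into a named
# temporary file; return either the file objects or just their paths (ret_names=True).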
def store_files(references, ret_names=False):
    url_list = []
    temp_files = []
    for ref in references:
        url_list.append(ref.split(" ")[-1])
    for url in url_list:
        response = requests.get(url)
        if response.status_code == 200:
            filename = url.split('/')[-1]
            if filename.split('.')[-1] == 'pdf':
                filename = filename[:-4]
                print('File name.pdf :', filename)
                temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=filename, suffix='.pdf')
            else:
                print('File name :', filename)
                temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=filename, suffix='.pdf')
            temp_file.write(response.content)
            temp_file.close()
            if ret_names:
                temp_files.append(temp_file.name)
            else:
                temp_files.append(temp_file)
    return temp_files

## Summary functions ##

## Load each doc from the vector store
def load_docs(ui_session_id):
    session_id_global_db = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id_global_db, embeddings)
        print("load_docs after loading global db:", session_id_global_db, len(db.index_to_docstore_id))
    except:
        return f"SESSION: {session_id_global_db} database does not exist", "", ""
    docs = []
    # start at 1 to skip the 'foo' placeholder document used to seed the index
    for i in range(1, len(db.index_to_docstore_id)):
        docs.append(db.docstore.search(db.index_to_docstore_id[i]))
    return docs

# summarize with gpt 3.5 turbo
def summarize_gpt(doc, system='provide a summary of the following document: ', first_tokens=600):
    doc = doc.replace('\n\n\n', '').replace('---', '').replace('...', '').replace('___', '')
    encoded = tokenizer.encode(doc)
    print("\n TOKENIZED : ", encoded)
    decoded = tokenizer.decode(encoded[:min(first_tokens, len(encoded))])
    print("\n DOC SHORTEN", min(first_tokens, len(encoded)), " : ", decoded)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": decoded}
        ]
    )
    return completion.choices[0].message["content"]

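# Walk through the stored chunks, and for each new source document concatenate its first
# three chunks and summarize them with gpt-3.5-turbo, yielding the accumulated summaries as
# they are produced. A failure on the very first call is treated as an invalid API key;
# later failures are assumed to be rate limits and retried after a 20 s pause.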
def summarize_docs_generator(apikey_input, session_id):
    openai.api_key = apikey_input
    docs = load_docs(session_id)
    print("################# DOCS LOADED ##################", "docs type : ", type(docs[0]))
    try:
        fail = docs[0].page_content
    except:
        # load_docs returned an error message instead of documents: surface it to the UI
        yield docs[0]
        return
    source = ""
    summaries = ""
    i = 0
    while i < len(docs):
        doc = docs[i]
        unique_doc = ""
        if source != doc.metadata:
            unique_doc = ''.join([d.page_content for d in docs[i:i+3]])
            print("\n\n****Open AI API called****\n\n")
            if i == 0:
                try:
                    summary = summarize_gpt(unique_doc)
                except:
                    # first call failed: most likely an invalid API key
                    yield "ERROR : Try checking the validity of the provided OpenAI API Key"
                    return
            else:
                try:
                    summary = summarize_gpt(unique_doc)
                except:
                    print("ERROR : There was an error but it is not linked to the validity of the API key, taking a 20s nap")
                    yield summaries + "\n\n °°° OpenAI error, please wait 20 sec of cooldown. °°°"
                    time.sleep(20)
                    summary = summarize_gpt(unique_doc)
            print("SUMMARY : ", summary)
            summaries += f"Source : {doc.metadata['source'].split('/')[-1]}\n{summary} \n\n"
            source = doc.metadata
            yield summaries
        i += 1
    yield summaries

def summarize_docs(apikey_input, session_id):
    gen = summarize_docs_generator(apikey_input, session_id)
    while True:
        try:
            yield str(next(gen))
        except StopIteration:
            return


#### UI Functions ####
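# Build (or extend) the per-session FAISS database from the uploaded files: load the existing
# index or create a fresh one seeded with the 'foo' placeholder, merge each file by type,
# move processed files into the session's store/ folder, then zip the session for download.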
def embed_files(files, ui_session_id, progress=gr.Progress(), progress_step=0.05):
    print(files)
    progress(progress_step, desc="Starting...")
    split_docs = []
    if len(ui_session_id) == 0:
        ui_session_id = secrets.token_urlsafe(16)
    session_id = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id, embeddings)
    except:
        print(f"SESSION: {session_id} database does not exist, create a FAISS db")
        db = FAISS.from_documents([foo], embeddings)
        db.save_local(session_id)
        print(f"SESSION: {session_id} database created")
    print("EMBEDDED, before embedding: ", session_id, len(db.index_to_docstore_id))
    for file_id, file in enumerate(files):
        print("ID : ", file_id, "FILE : ", file)
        file_type = file.name.split('.')[-1].lower()
        source = file.name.split('/')[-1]
        print(f"current file: {source}")
        progress(file_id / len(files), desc=f"Treating {source}")
        db2 = None  # stays None for unsupported file types
        if file_type == 'pdf':
            db2 = merge_pdf_to_db(file.name, db, progress)
        if file_type == 'txt':
            db2 = merge_txt_to_db(file.name, db, progress)
        if file_type == 'docx':
            db2 = merge_docx_to_db(file.name, db, progress)
        if file_type == 'zip':
            db2 = unpack_zip_file(file.name, db, progress)
        if db2 is not None:
            db = db2
        db.save_local(session_id)
        ### move file to store ###
        progress(progress_step, desc='moving file to store')
        directory_path = f"{session_id}/store/"
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
        try:
            shutil.move(file.name, directory_path)
        except:
            pass
    ### load the updated db and zip it ###
    progress(progress_step, desc='loading db')
    db = FAISS.load_local(session_id, embeddings)
    print("EMBEDDED, after embedding: ", session_id, len(db.index_to_docstore_id))
    progress(progress_step, desc='zipping db for download')
    add_files_to_zip(session_id)
    print("EMBEDDED: db zipped")
    progress(progress_step, desc='db zipped')
    return f"{session_id}.zip", ui_session_id

def add_to_db(references, ui_session_id):
    files = store_files(references)
    return embed_files(files, ui_session_id)

def export_files(references):
    files = store_files(references, ret_names=True)
    #paths = [file.name for file in files]
    return files

def display_docs(docs):
    output_str = ''
    for i, doc in enumerate(docs):
        source = doc.metadata['source'].split('/')[-1]
        output_str += f"Ref: {i+1}\n{repr(doc.page_content)}\nSource: {source}\n\n"
    return output_str

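# Retrieval-augmented Q&A: fetch the most similar chunks from the session database and, if an
# OpenAI key is provided, answer with a "stuff" QA chain over those chunks; otherwise return
# only the retrieved references. A running history of queries and answers is accumulated.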
def ask_gpt(query, apikey, history, ui_session_id):
    session_id = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id, embeddings)
        print("ASKGPT after loading", session_id, len(db.index_to_docstore_id))
    except:
        print(f"SESSION: {session_id} database does not exist")
        return f"SESSION: {session_id} database does not exist", "", ""
    docs = db.similarity_search(query)
    history += f"[query]\n{query}\n[answer]\n"
    if apikey == "":
        history += f"None\n[references]\n{display_docs(docs)}\n\n"
        return "No answer from GPT", display_docs(docs), history
    else:
        llm = ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=apikey)
        chain = load_qa_chain(llm, chain_type="stuff")
        answer = chain.run(input_documents=docs, question=query, verbose=True)
        history += f"{answer}\n[references]\n{display_docs(docs)}\n\n"
        return answer, display_docs(docs), history

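# Gradio UI: three tabs (upload/search & index, summarize, ask) sharing one API-key field and
# one session id; the buttons below are wired to the functions defined above.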
with gr.Blocks() as demo:
    gr.Markdown("Upload your documents and question them.")
    with gr.Accordion("Open to enter your API key", open=False):
        apikey_input = gr.Textbox(placeholder="Type here your OpenAI API key to use Summarization and Q&A", label="OpenAI API Key", type='password')
    with gr.Tab("Upload PDF & TXT"):
        with gr.Accordion("Get files from the web", open=False):
            with gr.Column():
                topic_input = gr.Textbox(placeholder="Type your research", label="Research")
                with gr.Row():
                    max_files = gr.Slider(1, 30, step=1, value=10, label="Maximum number of files")
                    btn_search = gr.Button("Search")
                dd_documents = gr.Dropdown(label='List of documents', info='Click to remove from selection', multiselect=True)
                dd_documents.style(container=True)
                with gr.Row():
                    btn_dl = gr.Button("Add these files to the Database")
                    btn_export = gr.Button("⬇ Export selected files ⬇")
        tb_session_id = gr.Textbox(label='session id')
        docs_input = gr.File(file_count="multiple", file_types=[".txt", ".pdf", ".zip", ".docx"])
        db_output = gr.outputs.File(label="Download zipped database")
        btn_generate_db = gr.Button("Generate database")
        btn_reset_db = gr.Button("Reset database")
    with gr.Tab("Summarize PDF"):
        with gr.Column():
            summary_output = gr.Textbox(label='Summarized files')
            btn_summary = gr.Button("Summarize")
            summary_output.style(show_copy_button=True)
    with gr.Tab("Ask PDF"):
        with gr.Column():
            query_input = gr.Textbox(placeholder="Type your question", label="Question")
            btn_askGPT = gr.Button("Answer")
            answer_output = gr.Textbox(label='GPT 3.5 answer')
            answer_output.style(show_copy_button=True)
            sources = gr.Textbox(label='Sources')
            sources.style(show_copy_button=True)
            history = gr.Textbox(label='History')
            history.style(show_copy_button=True)

    topic_input.submit(search_docs, inputs=[topic_input, max_files], outputs=dd_documents)
    btn_search.click(search_docs, inputs=[topic_input, max_files], outputs=dd_documents)
    btn_dl.click(add_to_db, inputs=[dd_documents, tb_session_id], outputs=[db_output, tb_session_id])
    btn_export.click(export_files, inputs=dd_documents, outputs=docs_input)
    btn_generate_db.click(embed_files, inputs=[docs_input, tb_session_id], outputs=[db_output, tb_session_id])
    btn_reset_db.click(reset_database, inputs=[tb_session_id], outputs=[db_output])
    btn_summary.click(summarize_docs, inputs=[apikey_input, tb_session_id], outputs=summary_output)
    btn_askGPT.click(ask_gpt, inputs=[query_input, apikey_input, history, tb_session_id], outputs=[answer_output, sources, history])

demo.queue(concurrency_count=10)
demo.launch(debug=False, share=False)