Duplicate from fffiloni/langchain-chat-with-pdf
Co-authored-by: Sylvain Filoni <fffiloni@users.noreply.huggingface.co>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +88 -0
- requirements.txt +5 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
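These are the stock Git LFS rules that Hugging Face generates for every new repository, routing large binary formats (model weights, archives, tensor dumps) through LFS instead of plain git.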
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Chat with PDF
+emoji: 📄🤖
+colorFrom: purple
+colorTo: pink
+sdk: gradio
+sdk_version: 3.27.0
+app_file: app.py
+pinned: false
+duplicated_from: fffiloni/langchain-chat-with-pdf
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
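The duplicated_from field is what marks this Space as a copy of fffiloni/langchain-chat-with-pdf. The same duplication can also be done programmatically; a minimal sketch, assuming a huggingface_hub version that ships duplicate_space and a write token available as HF_TOKEN:

    from huggingface_hub import duplicate_space

    # Copies the source Space into your own namespace; the target name
    # is inferred from the source repo unless to_id is passed.
    url = duplicate_space("fffiloni/langchain-chat-with-pdf")
    print(url)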
app.py
ADDED
@@ -0,0 +1,88 @@
+import gradio as gr
+
+from langchain.document_loaders import OnlinePDFLoader
+
+from langchain.text_splitter import CharacterTextSplitter
+
+from langchain.llms import HuggingFaceHub
+
+from langchain.embeddings import HuggingFaceHubEmbeddings
+
+from langchain.vectorstores import Chroma
+
+from langchain.chains import RetrievalQA
+
+
+
+def loading_pdf():
+    return "Loading..."
+
+def pdf_changes(pdf_doc, repo_id):
+
+    loader = OnlinePDFLoader(pdf_doc.name)
+    documents = loader.load()
+    text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0)
+    texts = text_splitter.split_documents(documents)
+    embeddings = HuggingFaceHubEmbeddings()
+    db = Chroma.from_documents(texts, embeddings)
+    retriever = db.as_retriever()
+    llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature":0.1, "max_new_tokens":250})
+    global qa
+    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True)
+    return "Ready"
+
+def add_text(history, text):
+    history = history + [(text, None)]
+    return history, ""
+
+def bot(history):
+    response = infer(history[-1][0])
+    history[-1][1] = response['result']
+    return history
+
+def infer(question):
+
+    query = question
+    result = qa({"query": query})
+
+    return result
+
+css="""
+#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
+"""
+
+title = """
+<div style="text-align: center;max-width: 700px;">
+    <h1>Chat with PDF</h1>
+    <p style="text-align: center;">Upload a .PDF from your computer, click the "Load PDF to LangChain" button, <br />
+    when everything is ready, you can start asking questions about the pdf ;)</p>
+    <a style="display:inline-block; margin-left: 1em" href="https://huggingface.co/spaces/fffiloni/langchain-chat-with-pdf?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space%20to%20skip%20the%20queue-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
+</div>
+"""
+
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
+        gr.HTML(title)
+
+        with gr.Column():
+            pdf_doc = gr.File(label="Load a pdf", file_types=['.pdf'], type="file")
+            repo_id = gr.Dropdown(label="LLM", choices=["google/flan-ul2", "OpenAssistant/oasst-sft-1-pythia-12b", "bigscience/bloomz"], value="google/flan-ul2")
+            with gr.Row():
+                langchain_status = gr.Textbox(label="Status", placeholder="", interactive=False)
+                load_pdf = gr.Button("Load pdf to langchain")
+
+        chatbot = gr.Chatbot([], elem_id="chatbot").style(height=350)
+        question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
+        submit_btn = gr.Button("Send message")
+    #load_pdf.click(loading_pdf, None, langchain_status, queue=False)
+    repo_id.change(pdf_changes, inputs=[pdf_doc, repo_id], outputs=[langchain_status], queue=False)
+    load_pdf.click(pdf_changes, inputs=[pdf_doc, repo_id], outputs=[langchain_status], queue=False)
+    question.submit(add_text, [chatbot, question], [chatbot, question]).then(
+        bot, chatbot, chatbot
+    )
+    submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(
+        bot, chatbot, chatbot
+    )
+
+demo.launch()
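Two operational notes on app.py. First, HuggingFaceHub and HuggingFaceHubEmbeddings read the HUGGINGFACEHUB_API_TOKEN environment variable (set it as a Space secret), and because qa is only created inside pdf_changes, infer raises a NameError if a question is submitted before a PDF has been loaded. Second, the chain is built with return_source_documents=True, yet bot only surfaces result['result']; a minimal sketch of a variant that also shows the retrieved chunks (bot_with_sources is a hypothetical name, not part of this commit):

    def bot_with_sources(history):
        # Same call as bot(), but append the chunks the retriever used.
        result = infer(history[-1][0])
        sources = "\n".join(
            doc.page_content[:120] for doc in result["source_documents"]
        )
        history[-1][1] = f"{result['result']}\n\nSources:\n{sources}"
        return history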
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+huggingface_hub
+chromadb
+langchain
+unstructured
+unstructured[local-inference]
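Note that unstructured[local-inference] already pulls in unstructured, so the bare entry is redundant but harmless, and gradio itself is absent because the Space runtime installs it from the sdk/sdk_version fields in README.md. All five dependencies are unpinned, so the code above only matches the langchain import paths that were current at the time of this commit.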