Spaces: hahahahahahahah3
Update app.py
Commit 18ef411 · 1 Parent(s): e2c95cc
app.py CHANGED
@@ -1,63 +1,265 @@
[62 deleted lines of the previous version are collapsed in the diff view; `import gradio as gr` is the only line carried over into the new file]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright @2023 RhapsodyAI, ModelBest Inc. (modelbest.cn)
#
# @author: bokai xu <bokesyo2000@gmail.com>
# @date: 2024/07/13
#


import tqdm
from PIL import Image
import hashlib
import torch
import fitz
import threading
import gradio as gr
import spaces
import os
from transformers import AutoModel
from transformers import AutoTokenizer
import numpy as np
import json

# Expand '~' so the cache lands under the user's home directory instead of a
# literal './~' folder being created next to the app.
cache_dir = os.path.expanduser('~/.cache/huggingface/hub')
os.makedirs(cache_dir, exist_ok=True)

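# Cache layout written by the functions below, one directory per knowledge base
# (paths shown for illustration):
#   <cache_dir>/<md5-of-pdf>/src.pdf    original uploaded PDF
#   <cache_dir>/<md5-of-pdf>/<md5>.png  one rendered image per page
#   <cache_dir>/<md5-of-pdf>/reps.npy   page embeddings, aligned with md5s.txt
#   <cache_dir>/<md5-of-pdf>/md5s.txt   per-page image hashes, one per line
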
def get_image_md5(img: Image.Image):
    # Hash the raw pixel bytes so identical page renders get identical IDs.
    img_byte_array = img.tobytes()
    hash_md5 = hashlib.md5()
    hash_md5.update(img_byte_array)
    hex_digest = hash_md5.hexdigest()
    return hex_digest

def calculate_md5_from_binary(binary_data):
    hash_md5 = hashlib.md5()
    hash_md5.update(binary_data)
    return hash_md5.hexdigest()

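# Illustrative usage (not part of the app): the knowledge-base ID is just the
# MD5 of the uploaded PDF bytes, so re-uploading the same file yields the same
# ID and reuses its cache directory. "example.pdf" is a hypothetical path:
#   with open("example.pdf", "rb") as fh:
#       kb_id = calculate_md5_from_binary(fh.read())
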
@spaces.GPU(duration=100)
def add_pdf_gradio(pdf_file_binary, progress=gr.Progress()):
    global model, tokenizer
    model.eval()

    knowledge_base_name = calculate_md5_from_binary(pdf_file_binary)

    this_cache_dir = os.path.join(cache_dir, knowledge_base_name)
    os.makedirs(this_cache_dir, exist_ok=True)

    with open(os.path.join(this_cache_dir, "src.pdf"), 'wb') as file:
        file.write(pdf_file_binary)

    dpi = 200
    doc = fitz.open("pdf", pdf_file_binary)

    reps_list = []
    images = []
    image_md5s = []

    for page in progress.tqdm(doc):
        # with self.lock: # because we hope one 16G GPU only processes one image at a time
        pix = page.get_pixmap(dpi=dpi)
        image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
        image_md5 = get_image_md5(image)
        image_md5s.append(image_md5)
        with torch.no_grad():
            reps = model(text=[''], image=[image], tokenizer=tokenizer).reps
        reps_list.append(reps.squeeze(0).cpu().numpy())
        images.append(image)

    for idx in range(len(images)):
        image = images[idx]
        image_md5 = image_md5s[idx]
        cache_image_path = os.path.join(this_cache_dir, f"{image_md5}.png")
        image.save(cache_image_path)

    np.save(os.path.join(this_cache_dir, "reps.npy"), reps_list)

    with open(os.path.join(this_cache_dir, "md5s.txt"), 'w') as f:
        for item in image_md5s:
            f.write(item + '\n')

    return knowledge_base_name

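# Illustrative only: a cached knowledge base can be inspected offline. `kb_id`
# stands for the string returned by add_pdf_gradio above:
#   reps = np.load(os.path.join(cache_dir, kb_id, "reps.npy"))  # (num_pages, emb_dim)
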
# @spaces.GPU
def retrieve_gradio(knowledge_base: str, query: str, topk: int):
    global model, tokenizer

    model.eval()

    target_cache_dir = os.path.join(cache_dir, knowledge_base)

    if not os.path.exists(target_cache_dir):
        return None

    md5s = []
    with open(os.path.join(target_cache_dir, "md5s.txt"), 'r') as f:
        for line in f:
            md5s.append(line.rstrip('\n'))

    doc_reps = np.load(os.path.join(target_cache_dir, "reps.npy"))

    query_with_instruction = "Represent this query for retrieving relevant document: " + query
    with torch.no_grad():
        query_rep = model(text=[query_with_instruction], image=[None], tokenizer=tokenizer).reps.squeeze(0).cpu()

    query_md5 = hashlib.md5(query.encode()).hexdigest()

    doc_reps_cat = torch.stack([torch.Tensor(i) for i in doc_reps], dim=0)

    # Score every page embedding against the query embedding with a dot product.
    similarities = torch.matmul(query_rep, doc_reps_cat.T)

    topk_values, topk_doc_ids = torch.topk(similarities, k=topk)

    topk_values_np = topk_values.cpu().numpy()

    topk_doc_ids_np = topk_doc_ids.cpu().numpy()

    similarities_np = similarities.cpu().numpy()

    images_topk = [Image.open(os.path.join(target_cache_dir, f"{md5s[idx]}.png")) for idx in topk_doc_ids_np]

    with open(os.path.join(target_cache_dir, f"q-{query_md5}.json"), 'w') as f:
        f.write(json.dumps(
            {
                "knowledge_base": knowledge_base,
                "query": query,
                "retrieved_docs": [os.path.join(target_cache_dir, f"{md5s[idx]}.png") for idx in topk_doc_ids_np]
            }, indent=4, ensure_ascii=False
        ))

    return images_topk


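# Minimal sketch of the scoring used above (never called by the app): retrieval
# is a plain dot product between the query embedding and every page embedding,
# followed by top-k selection; with unit-normalized vectors this equals cosine
# similarity. Shapes and the function name are illustrative assumptions.
def _toy_topk_search(query_rep: torch.Tensor, doc_reps: torch.Tensor, k: int = 5):
    scores = doc_reps @ query_rep                      # (num_docs,) similarity scores
    values, indices = torch.topk(scores, k=min(k, doc_reps.shape[0]))
    return values, indices
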
def upvote(knowledge_base, query):
    global model, tokenizer

    target_cache_dir = os.path.join(cache_dir, knowledge_base)

    query_md5 = hashlib.md5(query.encode()).hexdigest()

    with open(os.path.join(target_cache_dir, f"q-{query_md5}.json"), 'r') as f:
        data = json.loads(f.read())

    data["user_preference"] = "upvote"

    with open(os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"), 'w') as f:
        f.write(json.dumps(data, indent=4, ensure_ascii=False))

    print("up", os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"))

    gr.Info('Received, babe! Thank you!')

    return


def downvote(knowledge_base, query):
    global model, tokenizer

    target_cache_dir = os.path.join(cache_dir, knowledge_base)

    query_md5 = hashlib.md5(query.encode()).hexdigest()

    with open(os.path.join(target_cache_dir, f"q-{query_md5}.json"), 'r') as f:
        data = json.loads(f.read())

    data["user_preference"] = "downvote"

    with open(os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"), 'w') as f:
        f.write(json.dumps(data, indent=4, ensure_ascii=False))

    print("down", os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"))

    gr.Info('Received, babe! Thank you!')

    return

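# The two handlers above only annotate the logged query record: they read
# q-<query_md5>.json and rewrite it with a "user_preference" field as
# q-<query_md5>-withpref.json in the same knowledge-base directory.
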
device = 'cuda'

print("emb model load begin...")
model_path = 'RhapsodyAI/minicpm-visual-embedding-v0' # replace with your local model path
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
model.eval()
model.to(device)
print("emb model load success!")

print("gen model load begin...")
gen_model_path = 'openbmb/MiniCPM-V-2_6'
gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_path, trust_remote_code=True)
gen_model = AutoModel.from_pretrained(gen_model_path, trust_remote_code=True, attn_implementation='sdpa', torch_dtype=torch.bfloat16)
gen_model.eval()
gen_model.to(device)
print("gen model load success!")

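# Note (assumption, not from this commit): on a machine without CUDA you would
# set device = 'cpu' and likely drop torch_dtype=torch.bfloat16, at a large
# speed cost; as written, the Space expects a GPU via the `spaces` decorators.
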
@spaces.GPU(duration=50)
def answer_question(images, question):
    global gen_model, gen_tokenizer
    # here each element of images is a tuple of (image_path, None).
    images_ = [Image.open(image[0]).convert('RGB') for image in images]
    msgs = [{'role': 'user', 'content': [question, *images_]}]
    answer = gen_model.chat(
        image=None,
        msgs=msgs,
        tokenizer=gen_tokenizer
    )
    print(answer)
    return answer

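# For reference: the gallery passes its value as a list of (image_path, caption)
# tuples, which is why answer_question opens image[0] for each entry above.
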
with gr.Blocks() as app:
    gr.Markdown("# MiniCPMV-RAG-PDFQA: Two Vision Language Models Enable End-to-End RAG")

    gr.Markdown("""
- A Vision Language Model dense retriever ([minicpm-visual-embedding-v0](https://huggingface.co/RhapsodyAI/minicpm-visual-embedding-v0)) **directly reads** your PDFs **without any need for OCR**, produces **multimodal dense representations**, and builds your personal library.
- **Ask a question**: it retrieves the most relevant pages, then [MiniCPM-V-2.6](https://huggingface.co/spaces/openbmb/MiniCPM-V-2_6) answers your question based on the recalled pages, with strong multi-image understanding capability.
- It helps you read a long **visually-intensive** or **text-oriented** PDF document and find the pages that answer your question.
- It helps you build a personal library and retrieve book pages from a large collection of books.
- It works like a human: read, store, retrieve, and answer with full vision.
""")

    gr.Markdown("- Currently the online demo supports PDF documents with fewer than 50 pages, due to the GPU time limit. Deploy on your own machine for longer PDFs and books.")

    with gr.Row():
        file_input = gr.File(type="binary", label="Step 1: Upload PDF")
        file_result = gr.Text(label="Knowledge Base ID (remember it, it is re-usable!)")
        process_button = gr.Button("Process PDF (don't click until the PDF upload succeeds)")

    process_button.click(add_pdf_gradio, inputs=[file_input], outputs=file_result)

    with gr.Row():
        kb_id_input = gr.Text(label="Your Knowledge Base ID (paste your Knowledge Base ID here; it is re-usable)")
        query_input = gr.Text(label="Your Question")
        topk_input = gr.Number(value=5, minimum=1, maximum=10, step=1, label="Number of pages to retrieve")
        retrieve_button = gr.Button("Step 2: Retrieve Pages")

    with gr.Row():
        images_output = gr.Gallery(label="Retrieved Pages")

    retrieve_button.click(retrieve_gradio, inputs=[kb_id_input, query_input, topk_input], outputs=images_output)

    with gr.Row():
        button = gr.Button("Step 3: Answer Question with Retrieved Pages")

    gen_model_response = gr.Textbox(label="MiniCPM-V-2.6's Answer")

    button.click(fn=answer_question, inputs=[images_output, query_input], outputs=gen_model_response)

    with gr.Row():
        downvote_button = gr.Button("🤣Downvote")
        upvote_button = gr.Button("🤗Upvote")

    upvote_button.click(upvote, inputs=[kb_id_input, query_input], outputs=None)
    downvote_button.click(downvote, inputs=[kb_id_input, query_input], outputs=None)

    gr.Markdown("By using this demo, you agree to share your usage data with us for research purposes, to help improve the user experience.")


app.launch()
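# Note (assumption): locally you could expose the app with, e.g.,
# app.launch(server_name="0.0.0.0", server_port=7860); the bare launch() above
# is the form Hugging Face Spaces expects.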