import base64
import os
import re
import time
import zipfile
from pathlib import Path

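# --- One-time environment setup: install magic-pdf, fetch models, write config ---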
# Install the magic-pdf wheel bundled with this app.
os.system('pip install -U magic_pdf-0.9.0a1-py3-none-any.whl')

# Download the PDF-Extract-Kit model weights from the Hugging Face Hub.
from huggingface_hub import snapshot_download

model_dir = snapshot_download('opendatalab/PDF-Extract-Kit')

# Fetch the magic-pdf config template, point it at the downloaded models,
# and switch inference from CPU to CUDA.
os.system('wget https://github.com/opendatalab/MinerU/raw/master/magic-pdf.template.json')
os.system('cp magic-pdf.template.json /home/user/magic-pdf.json')
os.system(f"sed -i 's|/tmp/models|{model_dir}/models|g' /home/user/magic-pdf.json")
os.system("sed -i 's|cpu|cuda|g' /home/user/magic-pdf.json")

# Copy the bundled paddleocr directory to PaddleOCR's default cache location.
os.system('cp -r paddleocr /home/user/.paddleocr')

os.system("pip install -U gradio-pdf")
from gradio_pdf import PDF

import gradio as gr
from loguru import logger

from magic_pdf.libs.hash_utils import compute_sha256
from magic_pdf.rw.AbsReaderWriter import AbsReaderWriter
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
from magic_pdf.tools.common import do_parse, prepare_env


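# Read a PDF from disk as raw bytes using magic-pdf's DiskReaderWriter.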
def read_fn(path):
    disk_rw = DiskReaderWriter(os.path.dirname(path))
    return disk_rw.read(os.path.basename(path), AbsReaderWriter.MODE_BIN)


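# Run magic-pdf on one document and return the Markdown output directory
# together with the timestamped file name used for all derived artifacts.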
def parse_pdf(doc_path, output_dir, end_page_id, ocr):
    os.makedirs(output_dir, exist_ok=True)

    try:
        file_name = f"{Path(doc_path).stem}_{time.time()}"
        pdf_data = read_fn(doc_path)
        if ocr:
            parse_method = "ocr"
        else:
            parse_method = "auto"
        # prepare_env creates the per-document image and Markdown output directories.
        local_image_dir, local_md_dir = prepare_env(output_dir, file_name, parse_method)
        do_parse(
            output_dir,
            file_name,
            pdf_data,
            [],
            parse_method,
            False,
            end_page_id=end_page_id,
        )
        return local_md_dir, file_name
    except Exception as e:
        logger.exception(e)


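# The whole per-document output directory is zipped below so it can be offered
# as a single download in the UI.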
def compress_directory_to_zip(directory_path, output_zip_path):
    """
    Compress the given directory into a single ZIP file.

    :param directory_path: path of the directory to compress
    :param output_zip_path: path of the resulting ZIP file
    """
    try:
        with zipfile.ZipFile(output_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, dirs, files in os.walk(directory_path):
                for file in files:
                    file_path = os.path.join(root, file)
                    # Store each file with a path relative to the directory root.
                    arcname = os.path.relpath(file_path, directory_path)
                    zipf.write(file_path, arcname)
        return 0
    except Exception as e:
        logger.exception(e)
        return -1


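# Helpers that inline the extracted images into the Markdown as base64 data
# URIs, so the rendered preview does not depend on serving image files.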
def image_to_base64(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')


def replace_image_with_base64(markdown_text, image_dir_path):
    # Match Markdown image references: ![alt](relative/path)
    pattern = r'\!\[(?:[^\]]*)\]\(([^)]+)\)'

    def replace(match):
        relative_path = match.group(1)
        full_path = os.path.join(image_dir_path, relative_path)
        base64_image = image_to_base64(full_path)
        return f"![{relative_path}](data:image/jpeg;base64,{base64_image})"

    return re.sub(pattern, replace, markdown_text)


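# End-to-end conversion used by the "Convert" button: parse the PDF, zip the
# output directory, and return rendered Markdown, raw Markdown text, the ZIP
# path, and the layout PDF for preview.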
def to_markdown(file_path, end_pages, ocr):
    # Convert the 1-based page count from the slider to a 0-based end_page_id.
    local_md_dir, file_name = parse_pdf(file_path, './output', end_pages - 1, ocr)
    archive_zip_path = os.path.join("./output", compute_sha256(local_md_dir) + ".zip")
    zip_archive_success = compress_directory_to_zip(local_md_dir, archive_zip_path)
    if zip_archive_success == 0:
        logger.info("Compression succeeded")
    else:
        logger.error("Compression failed")
    md_path = os.path.join(local_md_dir, file_name + ".md")
    with open(md_path, 'r', encoding='utf-8') as f:
        txt_content = f.read()
    md_content = replace_image_with_base64(txt_content, local_md_dir)

    # Layout-annotated PDF written by do_parse next to the Markdown output.
    new_pdf_path = os.path.join(local_md_dir, file_name + "_layout.pdf")

    return md_content, txt_content, archive_zip_path, new_pdf_path


def show_pdf(file):
    return file


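# Delimiters that let gr.Markdown render $$...$$ display math and $...$ inline math.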
latex_delimiters = [{"left": "$$", "right": "$$", "display": True},
                    {"left": "$", "right": "$", "display": False}]


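# Pre-load both the text-mode and OCR-mode model pipelines once at startup so
# the first conversion request does not pay the model-loading cost.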
from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton


def init_model():
    try:
        model_manager = ModelSingleton()
        txt_model = model_manager.get_model(False, False)
        logger.info("txt_model initialized")
        ocr_model = model_manager.get_model(True, False)
        logger.info("ocr_model initialized")
        return 0
    except Exception as e:
        logger.exception(e)
        return -1


model_init = init_model()
logger.info(f"model_init: {model_init}")


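# Static HTML banner rendered at the top of the page.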
with open("header.html", "r") as file: |
|
header = file.read() |
|
|
|
|
|
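# Gradio UI: PDF upload and options on the left, converted Markdown and a
# downloadable ZIP of the full output on the right.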
if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.HTML(header)
        with gr.Row():
            with gr.Column(variant='panel', scale=5):
                pdf_show = gr.Markdown()
                max_pages = gr.Slider(1, 10, 5, step=1, label="Max pages to convert")
                with gr.Row() as bu_flow:
                    is_ocr = gr.Checkbox(label="Force enable OCR")
                    change_bu = gr.Button("Convert")
                    clear_bu = gr.ClearButton([pdf_show], value="Clear")
                pdf_show = PDF(label="Please upload a PDF", interactive=True, height=800)
                with gr.Accordion("Examples:"):
                    example_root = os.path.join(os.path.dirname(__file__), "examples")
                    gr.Examples(
                        examples=[os.path.join(example_root, _) for _ in os.listdir(example_root)
                                  if _.endswith("pdf")],
                        inputs=pdf_show,
                    )

            with gr.Column(variant='panel', scale=5):
                output_file = gr.File(label="Conversion result", interactive=False)
                with gr.Tabs():
                    with gr.Tab("Markdown rendering"):
                        md = gr.Markdown(label="Markdown rendering", height=900, show_copy_button=True,
                                         latex_delimiters=latex_delimiters, line_breaks=True)
                    with gr.Tab("Markdown text"):
                        md_text = gr.TextArea(lines=45, show_copy_button=True)

        change_bu.click(fn=to_markdown, inputs=[pdf_show, max_pages, is_ocr],
                        outputs=[md, md_text, output_file, pdf_show])
        clear_bu.add([md, pdf_show, md_text, output_file, is_ocr])

    demo.launch()