import os

# Install dependencies at startup (common in a Hugging Face Space that has no
# requirements.txt): a CPU-only PyTorch build, transformers from source, and
# pytesseract for the processor's built-in OCR.
os.system('pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu')
os.system('pip install -q git+https://github.com/huggingface/transformers.git')
os.system('pip install pytesseract')
import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from transformers import AutoModelForTokenClassification, AutoProcessor
# The processor runs Tesseract OCR on the input image (apply_ocr=True) to get
# words and their bounding boxes; the model is LayoutLMv3 fine-tuned on FUNSD.
processor = AutoProcessor.from_pretrained("Ammar-alhaj-ali/LayoutLMv3-Fine-Tuning-FUNSD", apply_ocr=True)
model = AutoModelForTokenClassification.from_pretrained("Ammar-alhaj-ali/LayoutLMv3-Fine-Tuning-FUNSD")
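
# Inference-only use (added): eval() disables dropout; gradients are switched
# off inside process_image below.
model.eval()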
# The example images were exported from the FUNSD test split, e.g.:
# from datasets import load_dataset
# dataset = load_dataset("nielsr/funsd-layoutlmv3", split="test")
# Image.open(dataset[2]["image_path"]).convert("RGB").save("img1.png")
# Image.open(dataset[1]["image_path"]).convert("RGB").save("img2.png")
# Image.open(dataset[0]["image_path"]).convert("RGB").save("img3.png")
# Map class indices to FUNSD label names, and each label to a drawing color.
labels = ['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION', 'B-ANSWER', 'I-ANSWER']
id2label = {i: label for i, label in enumerate(labels)}
label2color = {
    'B-HEADER': 'red',
    'I-HEADER': 'red',
    'B-QUESTION': 'red',
    'I-QUESTION': 'red',
    'B-ANSWER': 'blue',
    'I-ANSWER': 'blue',
    'O': 'orange',
}
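
# Added sanity check: every label the model can predict needs a color, or the
# drawing loop in process_image would raise a KeyError.
assert set(labels) <= set(label2color)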

def unnormalize_box(bbox, width, height):
    # LayoutLMv3 boxes are normalized to a 0-1000 grid; scale them back to
    # pixel coordinates for drawing.
    return [
        width * (bbox[0] / 1000),
        height * (bbox[1] / 1000),
        width * (bbox[2] / 1000),
        height * (bbox[3] / 1000),
    ]
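
# Quick check of the scaling (added): a box covering the full 0-1000 grid
# should map back to the full image extent.
assert unnormalize_box([0, 0, 1000, 1000], 762, 1000) == [0.0, 0.0, 762.0, 1000.0]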

def iob_to_label(label):
    # Kept as a pass-through so the full IOB tag (e.g. 'B-HEADER') is shown;
    # a variant could strip the 'B-'/'I-' prefix here instead.
    return label

def process_image(image):
    width, height = image.size

    # Encode: the processor OCRs the image and tokenizes the detected words.
    encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
    offset_mapping = encoding.pop('offset_mapping')

    # Forward pass; no gradients are needed at inference time.
    with torch.no_grad():
        outputs = model(**encoding)

    # Take the highest-scoring label per token.
    predictions = outputs.logits.argmax(-1).squeeze().tolist()
    token_boxes = encoding.bbox.squeeze().tolist()

    # Keep only the first subword of each word: a token is a continuation
    # subword when its character offset does not start at 0.
    is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
    true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
    true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]

    # Draw a colored box and the predicted label over each word.
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(true_predictions, true_boxes):
        predicted_label = iob_to_label(prediction)
        draw.rectangle(box, outline=label2color[predicted_label])
        draw.text((box[0] + 10, box[1] - 10), text=predicted_label, fill=label2color[predicted_label], font=font)
    return image
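
# Local smoke test (added; the file name is just one of the examples below):
# annotated = process_image(Image.open("img1.png").convert("RGB"))
# annotated.save("img1_annotated.png")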
title = "Extracting information from FUNSD using the LayoutLMv3 "
description = "I Fine tuned LayoutLMv3 on FUNSD (Form Understanding in. Noisy Scanned Documents) "
article="<b>References</b><br>[1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. <a href='https://arxiv.org/abs/2204.08387'>Paper Link</a><br>[2]"
examples = [['img1.png'], ['img2.png'], ['img3.png']]
css = """.output_image, .input_image {height: 600px !important}"""
iface = gr.Interface(fn=process_image,
                     inputs=gr.Image(type="pil"),
                     outputs=gr.Image(type="pil", label="annotated image"),
                     title=title,
                     description=description,
                     article=article,
                     examples=examples,
                     css=css,
                     analytics_enabled=True)
iface.queue()
iface.launch(inline=False, share=False, debug=False)
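
# On a Hugging Face Space this script is started automatically; it can also be
# run locally with `python app.py`, provided the Tesseract binary is installed
# for the OCR step (pytesseract is only the Python wrapper).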