import os
# install CPU-only PyTorch at runtime (handy on hosted demos where the environment
# cannot be preconfigured)
os.system('pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu')
import gradio as gr
import numpy as np
import torch
from datasets import load_dataset, Features, Sequence, ClassLabel, Value, Array2D, Array3D
from transformers import AutoProcessor, AutoModelForTokenClassification, LayoutLMv3ForTokenClassification
from PIL import Image, ImageDraw, ImageFont
# Setting up the Hugging Face environment (these packages are normally installed ahead of time):
#   pip install -q git+https://github.com/huggingface/transformers.git
#   pip install -q datasets seqeval h5py
# seqeval is useful for evaluation metrics such as F1 on sequence-labeling tasks.
# this dataset uses the new Image feature :)
dataset = load_dataset("nielsr/funsd-layoutlmv3")
# dataset = load_dataset("G:\\BITS - MTECH\\Sem -4\\Final Report\\code\\dataset")

# save a few test images to disk so they can be used as Gradio examples below
dataset["test"][2]["image"].convert("RGB").save("example1.png")
dataset["test"][1]["image"].convert("RGB").save("example2.png")
dataset["test"][0]["image"].convert("RGB").save("example3.png")
example = dataset["test"][0]
words, boxes, ner_tags = example["tokens"], example["bboxes"], example["ner_tags"]
features = dataset["test"].features
column_names = dataset["test"].column_names
image_column_name = "image"
text_column_name = "tokens"
boxes_column_name = "bboxes"
label_column_name = "ner_tags"
def get_label_list(labels):
    # collect the unique labels across all examples and return them sorted
    unique_labels = set()
    for label in labels:
        unique_labels = unique_labels | set(label)
    label_list = list(unique_labels)
    label_list.sort()
    return label_list
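# e.g. get_label_list([["O", "B-HEADER"], ["O", "B-QUESTION"]])
# -> ['B-HEADER', 'B-QUESTION', 'O']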
if isinstance(features[label_column_name].feature, ClassLabel):
    label_list = features[label_column_name].feature.names
    # No need to convert the labels since they are already ints.
    id2label = {k: v for k, v in enumerate(label_list)}
    label2id = {v: k for k, v in enumerate(label_list)}
else:
    label_list = get_label_list(dataset["train"][label_column_name])
    id2label = {k: v for k, v in enumerate(label_list)}
    label2id = {v: k for k, v in enumerate(label_list)}
num_labels = len(label_list)
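# For FUNSD this gives 7 IOB2 tags; the exact order comes from the dataset's
# ClassLabel feature, expected to be along the lines of:
#   ['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION', 'B-ANSWER', 'I-ANSWER']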
label2color = {'question':'blue', 'answer':'green', 'header':'orange', 'other':'violet'}
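# Small helper (added sketch): the model predicts IOB2 tags such as 'B-QUESTION',
# while label2color above is keyed by plain lower-case names. Strip the 'B-'/'I-'
# prefix and map the bare 'O' tag to 'other'.
def iob_to_label(label):
    label = label[2:]
    if not label:
        return 'other'
    return label.lower()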
def prepare_examples(examples):
    images = examples[image_column_name]
    words = examples[text_column_name]
    boxes = examples[boxes_column_name]
    word_labels = examples[label_column_name]
    encoding = processor(images, words, boxes=boxes, word_labels=word_labels,
                         truncation=True, padding="max_length")
    return encoding
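# The processor tokenizes the words, aligns the word-level boxes and labels to
# subword tokens, and resizes/normalizes the image, so the encoding contains
# input_ids, attention_mask, bbox, labels and pixel_values (see the custom
# Features defined below).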
# apply_ocr=False: the dataset already provides the words and bounding boxes,
# so the processor's built-in OCR is not needed here.
processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
# model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base")
model = LayoutLMv3ForTokenClassification.from_pretrained("microsoft/layoutlmv3-base",
                                                         id2label=id2label,
                                                         label2id=label2id)
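# Note: this loads the *base* checkpoint, so the token-classification head is
# randomly initialized; for meaningful demo predictions, swap in a checkpoint
# fine-tuned on this dataset.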
# we need to define custom features for `set_format` (used later on) to work properly
features = Features({
    'pixel_values': Array3D(dtype="float32", shape=(3, 224, 224)),
    'input_ids': Sequence(feature=Value(dtype='int64')),
    'attention_mask': Sequence(Value(dtype='int64')),
    'bbox': Array2D(dtype="int64", shape=(512, 4)),
    'labels': Sequence(feature=Value(dtype='int64')),
})
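# The shapes mirror the processor settings above: padding="max_length" pads to
# LayoutLMv3's default 512 tokens (hence the (512, 4) bbox array), and images
# are resized to the 224x224 RGB input the model expects.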
# train_dataset = dataset["train"].map(
#     prepare_examples,
#     batched=True,
#     remove_columns=column_names,
#     features=features,
# )
eval_dataset = dataset["test"].map(
    prepare_examples,
    batched=True,
    remove_columns=column_names,
    features=features,
)
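# The custom Features above are what make `set_format` work properly; a minimal
# sketch of how the encoded dataset would be exposed as torch tensors:
#   eval_dataset.set_format("torch")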
def unnormalize_box(bbox, width, height):
    return [
        width * (bbox[0] / 1000),
        height * (bbox[1] / 1000),
        width * (bbox[2] / 1000),
        height * (bbox[3] / 1000),
    ]
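# LayoutLM-style boxes are normalized to a 0-1000 grid; unnormalize_box maps them
# back to pixel coordinates, e.g. unnormalize_box([100, 200, 300, 400], 762, 1000)
# -> [76.2, 200.0, 228.6, 400.0]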
def process_image(image):
    print(type(image))
    # Note: the processor was created with apply_ocr=False, so it cannot read
    # words off an arbitrary upload; this demo therefore annotates the bundled
    # FUNSD example's words/boxes. With OCR enabled, the encoding would instead be:
    # encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
    # offset_mapping = encoding.pop('offset_mapping')
    image = example["image"].convert("RGB")
    words = example["tokens"]
    boxes = example["bboxes"]
    word_labels = example["ner_tags"]
    encoding = processor(image, words, boxes=boxes, word_labels=word_labels,
                         truncation=True, return_offsets_mapping=True, return_tensors="pt")
    offset_mapping = encoding.pop('offset_mapping')
    for k, v in encoding.items():
        print(k, v.shape)
    # forward pass
    with torch.no_grad():
        outputs = model(**encoding)
    # get predictions: take the highest-scoring class per token (argmax) as the
    # predicted label for that token
    logits = outputs.logits
    predictions = logits.argmax(-1).squeeze().tolist()
    labels = encoding["labels"].squeeze().tolist()
    token_boxes = encoding["bbox"].squeeze().tolist()
    width, height = image.size
    # keep one prediction per word: drop subword continuations (offset mapping
    # not starting at character 0) and special/padding tokens (label == -100)
    is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
    true_predictions = [model.config.id2label[pred] for idx, (pred, label) in enumerate(zip(predictions, labels))
                        if not is_subword[idx] and label != -100]
    true_boxes = [unnormalize_box(box, width, height) for idx, (box, label) in enumerate(zip(token_boxes, labels))
                  if not is_subword[idx] and label != -100]
    # draw predictions over the image
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(true_predictions, true_boxes):
        predicted_label = iob_to_label(prediction)  # map e.g. 'B-QUESTION' -> 'question'
        draw.rectangle(box, outline=label2color[predicted_label])
        draw.text((box[0] + 10, box[1] - 10), text=predicted_label, fill=label2color[predicted_label], font=font)
    return image
title = "DocumentAI - Extraction using the LayoutLMv3 model"
description = "Form and invoice extraction - we use Microsoft's LayoutLMv3 trained on an invoice dataset to predict the Biller Name, Biller Address, Biller post_code, Due_date, GST, Invoice_date, Invoice_number, Subtotal and Total. To use it, simply upload an image or pick one of the example images below. Results will show up in a few seconds."
article = """References<br>
[1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. Paper Link<br>
[2] LayoutLMv3 training and inference"""
examples =[['example1.png'],['example2.png'],['example3.png']]
css = """.output_image, .input_image {height: 600px !important}"""
iface = gr.Interface(fn=process_image,
                     inputs=gr.Image(type="pil"),
                     outputs=gr.Image(type="pil", label="annotated image"),
                     title=title,
                     description=description,
                     article=article,
                     examples=examples,
                     css=css,
                     analytics_enabled=True)
iface.queue()  # replaces the deprecated enable_queue=True argument
iface.launch(inline=False, share=False, debug=False)