Ammar-alhaj-ali committed
Commit 4092f98
1 Parent(s): 155e281

Update app.py

Files changed (1):
  app.py +44 -23
app.py CHANGED
@@ -1,29 +1,43 @@
 import os
-os.system('pip install git+https://github.com/huggingface/transformers.git --upgrade')
-# workaround: install old version of pytorch since detectron2 hasn't released packages for pytorch 1.9 (issue: https://github.com/facebookresearch/detectron2/issues/3158)
-os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html')
-
-## install PyTesseract
-os.system('pip install -q pytesseract')
+os.system('pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu')
 
 import gradio as gr
 import numpy as np
+from transformers import AutoModelForTokenClassification
+from datasets.features import ClassLabel
+from transformers import AutoProcessor
+from datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D
+import torch
+from datasets import load_metric
+from transformers import LayoutLMv3ForTokenClassification
+from transformers.data.data_collator import default_data_collator
+
 
+from transformers import AutoModelForTokenClassification
 from datasets import load_dataset
 from PIL import Image, ImageDraw, ImageFont
 
-from transformers import AutoProcessor, AutoModelForTokenClassification
-processor = AutoProcessor.from_pretrained("Ammar-alhaj-ali/LayoutLMv3-Fine-Tuning-FUNSD")
+
+processor = AutoProcessor.from_pretrained("Ammar-alhaj-ali/LayoutLMv3-Fine-Tuning-FUNSD", apply_ocr=True)
 model = AutoModelForTokenClassification.from_pretrained("Ammar-alhaj-ali/LayoutLMv3-Fine-Tuning-FUNSD")
 
 # load image example
-dataset = load_dataset("nielsr/funsd", split="test")
-image = Image.open(dataset[0]["image_path"]).convert("RGB")
-image.save("document.png")
+dataset = load_dataset("darentang/generated", split="test")
+Image.open(dataset[2]["image_path"]).convert("RGB").save("img1.png")
+Image.open(dataset[1]["image_path"]).convert("RGB").save("img2.png")
+Image.open(dataset[0]["image_path"]).convert("RGB").save("img3.png")
 # define id2label, label2color
-labels = dataset.features['ner_tags'].feature.names
+labels = ['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION', 'B-ANSWER', 'I-ANSWER']
 id2label = {v: k for v, k in enumerate(labels)}
-label2color = {'question':'blue', 'answer':'green', 'header':'orange', 'other':'violet'}
+label2color = {
+    "B-HEADER": 'red',
+    "I-HEADER": 'green',
+    "B-QUESTION": 'orange',
+    "I-QUESTION": "blue",
+    "B-ANSWER": 'gray',
+    "I-ANSWER": 'violet',  # fixed: the commit had "I-ANSWERE", a typo that would raise KeyError for I-ANSWER tokens
+    "O": 'orange'
+}
 
 def unnormalize_box(bbox, width, height):
     return [
@@ -33,13 +47,15 @@ def unnormalize_box(bbox, width, height):
         height * (bbox[3] / 1000),
     ]
 
+
 def iob_to_label(label):
-    label = label[2:]
-    if not label:
-        return 'other'
     return label
 
+
+
 def process_image(image):
+
+    print(type(image))
     width, height = image.size
 
     # encode
@@ -62,16 +78,19 @@ def process_image(image):
     draw = ImageDraw.Draw(image)
     font = ImageFont.load_default()
     for prediction, box in zip(true_predictions, true_boxes):
-        predicted_label = iob_to_label(prediction).lower()
+        predicted_label = iob_to_label(prediction)
         draw.rectangle(box, outline=label2color[predicted_label])
         draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font)
 
     return image
 
-title = "Interactive demo: LayoutLMv2"
-description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words into QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm'>Github Repo</a></p>"
-examples =[['img1.png']]
+
+title = "Invoice information extraction using the LayoutLMv3 model"
+description = "I use Microsoft's LayoutLMv3 fine-tuned on an invoice dataset to predict the biller name, biller address, biller post code, due date, GST, invoice date, invoice number, subtotal, and total. To use it, simply upload an image or use one of the example images below. Results will show up in a few seconds."
+
+article = "<b>References</b><br>[1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. <a href='https://arxiv.org/abs/2204.08387'>Paper Link</a><br>[2] <a href='https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3'>LayoutLMv3 training and inference</a>"
+
+examples = [['img1.png'], ['img2.png'], ['img3.png']]
 
 css = """.output_image, .input_image {height: 600px !important}"""
 
@@ -82,5 +101,7 @@ iface = gr.Interface(fn=process_image,
                     description=description,
                     article=article,
                     examples=examples,
-                    css=css)
-iface.launch(debug=True)
+                    css=css,
+                    analytics_enabled=True, enable_queue=True)
+
+iface.launch(inline=False, share=False, debug=False)
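Note: the hunks above elide the middle of process_image (old lines 45-61 / new lines 61-77), where encoding, true_predictions, and true_boxes are produced. For context, here is a minimal sketch of what that step typically looks like with the transformers LayoutLMv3 API; the helper name encode_and_predict is hypothetical, the names true_predictions and true_boxes come from the visible context, and the exact elided code may differ:

import torch

def unnormalize_box(bbox, width, height):
    # LayoutLM-family boxes live on a 0-1000 grid; scale back to pixels.
    # (The hunk above shows only the last coordinate of app.py's version;
    # the other three follow the same width/height scaling pattern.)
    return [
        width * (bbox[0] / 1000),
        height * (bbox[1] / 1000),
        width * (bbox[2] / 1000),
        height * (bbox[3] / 1000),
    ]

def encode_and_predict(image, processor, model, width, height):
    # With apply_ocr=True the processor runs pytesseract itself, so the raw
    # PIL image is enough; the output holds input_ids, attention_mask,
    # bbox, and pixel_values.
    encoding = processor(image, truncation=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**encoding)
    # One class id per token; map ids back to IOB label strings.
    predictions = outputs.logits.argmax(-1).squeeze().tolist()
    token_boxes = encoding["bbox"].squeeze().tolist()
    true_predictions = [model.config.id2label[p] for p in predictions]
    # Scale each token box from the 0-1000 grid to pixel coordinates.
    true_boxes = [unnormalize_box(b, width, height) for b in token_boxes]
    return true_predictions, true_boxes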
 
 
 
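As a quick smoke test, the prediction function from the updated app.py can be exercised directly before launching the Gradio interface. This assumes the example images saved above exist on disk; the output filename is arbitrary:

from PIL import Image

image = Image.open("img1.png").convert("RGB")
annotated = process_image(image)  # returns a PIL image with predicted labels drawn on it
annotated.save("annotated.png")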