Ammar-alhaj-ali committed
Commit a4d7a24
1 Parent(s): 06d7744

Update app.py

Files changed (1):
  1. app.py +9 -23
app.py CHANGED

@@ -1,5 +1,3 @@
-import os
-
 import os
 os.system('pip install git+https://github.com/huggingface/transformers.git --upgrade')
 os.system('pip install pyyaml==5.1')
@@ -8,29 +6,23 @@ os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://do
 
 # install detectron2 that matches pytorch 1.8
 # See https://detectron2.readthedocs.io/tutorials/install.html for instructions
-#os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
+os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
 
 ## install PyTesseract
 os.system('pip install -q pytesseract')
 
-import gradio as gr
-import torch
-import PIL
 import gradio as gr
 import numpy as np
-from transformers import LayoutLMv3ForTokenClassification,LayoutLMv3Processor
+from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification
 from datasets import load_dataset
 from PIL import Image, ImageDraw, ImageFont
 
-
-
-processor = LayoutLMv3Processor.from_pretrained("Ammar-alhaj-ali/LayoutLMv3-Fine-Tuning-FUNSD")
-model = LayoutLMv3ForTokenClassification.from_pretrained("Ammar-alhaj-ali/LayoutLMv3-Fine-Tuning-FUNSD")
+processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
+model = LayoutLMv2ForTokenClassification.from_pretrained("nielsr/layoutlmv2-finetuned-funsd")
 
 # load image example
 dataset = load_dataset("nielsr/funsd", split="test")
 image = Image.open(dataset[0]["image_path"]).convert("RGB")
-#image = Image.open("./invoice.png")
 image.save("document.png")
 # define id2label, label2color
 labels = dataset.features['ner_tags'].feature.names
@@ -80,17 +72,12 @@ def process_image(image):
 
     return image
 
-
-title = "Interactive demo: LayoutLMv3"
-description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740' target='_blank'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
+title = "Interactive demo: LayoutLMv2"
+description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words into QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm'>Github Repo</a></p>"
 examples =[['img1.png']]
 
-css = ".output-image, .input-image {height: 40rem !important; width: 100% !important;}"
-#css = "@media screen and (max-width: 600px) { .output_image, .input_image {height:20rem !important; width: 100% !important;} }"
-# css = ".output_image, .input_image {height: 600px !important}"
-
-css = ".image-preview {height: auto !important;}"
+css = """.output_image, .input_image {height: 600px !important}"""
 
 iface = gr.Interface(fn=process_image,
                      inputs=gr.inputs.Image(type="pil"),
@@ -99,6 +86,5 @@ iface = gr.Interface(fn=process_image,
                      description=description,
                      article=article,
                      examples=examples,
-                     css=css,
-                     enable_queue=True)
+                     css=css)
 iface.launch(debug=True)
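
Note on the elided body: the third hunk's header references process_image(), whose implementation this diff does not show. The sketch below is an illustrative reconstruction of how a LayoutLMv2 FUNSD demo of this shape typically runs inference and draws predictions. The helpers unnormalize_box and iob_to_label, the label2color mapping, and the torch.no_grad() wrapper are assumptions for illustration, not code from this commit.

import torch
import numpy as np
from PIL import ImageDraw, ImageFont

# id2label built from the FUNSD tag names loaded earlier in app.py.
id2label = {i: label for i, label in enumerate(labels)}
# Assumed color scheme for the four entity types (hypothetical, not from the commit).
label2color = {'question': 'blue', 'answer': 'green', 'header': 'orange', 'other': 'violet'}

def unnormalize_box(bbox, width, height):
    # LayoutLMv2 boxes are normalized to a 0-1000 grid; scale back to pixels.
    return [width * (bbox[0] / 1000), height * (bbox[1] / 1000),
            width * (bbox[2] / 1000), height * (bbox[3] / 1000)]

def iob_to_label(label):
    # 'B-QUESTION' / 'I-QUESTION' -> 'question'; 'O' -> 'other'.
    label = label[2:]
    return label.lower() if label else 'other'

def process_image(image):
    width, height = image.size

    # The processor applies Tesseract OCR by default (apply_ocr=True) and
    # returns input_ids, attention_mask, bbox, and image ready for the model.
    encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
    offset_mapping = encoding.pop('offset_mapping')

    with torch.no_grad():
        outputs = model(**encoding)
    predictions = outputs.logits.argmax(-1).squeeze().tolist()
    token_boxes = encoding["bbox"].squeeze().tolist()

    # Keep only the first subword token of each OCR word.
    is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
    true_predictions = [id2label[p] for i, p in enumerate(predictions) if not is_subword[i]]
    true_boxes = [unnormalize_box(b, width, height) for i, b in enumerate(token_boxes) if not is_subword[i]]

    # Draw each predicted label over its bounding box.
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(true_predictions, true_boxes):
        label = iob_to_label(prediction)
        draw.rectangle(box, outline=label2color[label])
        draw.text((box[0] + 10, box[1] - 10), text=label, fill=label2color[label], font=font)

    return image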