nielsr (HF staff) committed
Commit 3480c73
1 Parent(s): b35c806

Fix app.py

Files changed (1): app.py (+18 -10)
app.py CHANGED
@@ -8,21 +8,29 @@ os.system('pip install torch==1.8.0+cu101 torchvision==0.9.0+cu101 -f https://do
 # See https://detectron2.readthedocs.io/tutorials/install.html for instructions
 os.system('pip install -q detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html')
 
+## install PyTesseract
+os.system('sudo apt install tesseract-ocr')
+os.system('pip install -q pytesseract')
+
 import gradio as gr
 import numpy as np
-from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2Tokenizer, LayoutLMV2ForTokenClassification
+from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2TokenizerFast, LayoutLMv2ForTokenClassification
 from datasets import load_dataset
 from PIL import Image, ImageDraw, ImageFont
 
-ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
-
-image = Image.open(ds[0]["file"]).convert("RGB")
-image.save("document.png")
-
 feature_extractor = LayoutLMv2FeatureExtractor.from_pretrained("microsoft/layoutlmv2-base-uncased")
-tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
+tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
 model = LayoutLMv2ForTokenClassification.from_pretrained("nielsr/layoutlmv2-finetuned-funsd")
 
+# load image example
+dataset = load_dataset("nielsr/funsd", split="test")
+image = Image.open(dataset[0]["image_path"]).convert("RGB")
+image.save("document.png")
+# define id2label, label2color
+labels = dataset.features['ner_tags'].feature.names
+id2label = {v: k for v, k in enumerate(labels)}
+label2color = {'question':'blue', 'answer':'green', 'header':'orange', 'other':'violet'}
+
 def unnormalize_box(bbox, width, height):
     return [
         width * (bbox[0] / 1000),
@@ -78,10 +86,10 @@ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.147
 examples =[['document.png']]
 
 iface = gr.Interface(fn=process_image,
-                     inputs=gr.inputs.Image(shape=(480, 480), type="pil"),
-                     outputs=gr.outputs.Image(type='pil', label=f'annotated image'),
+                     inputs=gr.inputs.Image(type="pil"),
+                     outputs=gr.outputs.Image(type="pil", label="annotated image"),
                      title=title,
                      description=description,
                      article=article,
                      examples=examples)
-iface.launch()
+iface.launch(debug=True)
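For context, the hunks above only touch the setup (PyTesseract install, fast tokenizer, FUNSD example image, id2label/label2color) and the Gradio wiring; the process_image function that gr.Interface calls is not part of this diff. The sketch below is one plausible way the module-level objects loaded above (feature_extractor, tokenizer, model, id2label, label2color, unnormalize_box) could be combined for token classification on a document image. It is an assumption, not the code actually in app.py, and the iob_to_label helper is hypothetical.

# Sketch only, not part of this commit. Assumes the module-level objects defined
# above in app.py: feature_extractor, tokenizer, model, id2label, label2color,
# and unnormalize_box. The iob_to_label helper is hypothetical.
import numpy as np
import torch
from PIL import ImageDraw, ImageFont

def iob_to_label(label):
    # hypothetical helper: "B-QUESTION" -> "question", "O" -> "other"
    label = label[2:]
    return label.lower() if label else "other"

def process_image(image):
    width, height = image.size

    # LayoutLMv2FeatureExtractor runs Tesseract OCR by default (apply_ocr=True),
    # returning resized pixel values plus the recognized words and normalized boxes
    features = feature_extractor(image, return_tensors="pt")
    words, boxes = features["words"], features["boxes"]

    # encode the words and word-level boxes with the fast tokenizer
    encoding = tokenizer(text=words, boxes=boxes, return_offsets_mapping=True,
                         truncation=True, return_tensors="pt")
    offset_mapping = encoding.pop("offset_mapping")
    encoding["image"] = features["pixel_values"]

    # forward pass
    with torch.no_grad():
        outputs = model(**encoding)

    # keep the prediction for the first sub-token of each word only
    predictions = outputs.logits.argmax(-1).squeeze().tolist()
    token_boxes = encoding["bbox"].squeeze().tolist()
    is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
    true_predictions = [id2label[p] for i, p in enumerate(predictions) if not is_subword[i]]
    true_boxes = [unnormalize_box(b, width, height) for i, b in enumerate(token_boxes) if not is_subword[i]]

    # draw the predicted entity boxes and labels onto the image returned to Gradio
    draw = ImageDraw.Draw(image)
    font = ImageFont.load_default()
    for prediction, box in zip(true_predictions, true_boxes):
        label = iob_to_label(prediction)
        draw.rectangle(box, outline=label2color[label])
        draw.text((box[0] + 10, box[1] - 10), text=label, fill=label2color[label], font=font)

    return image

One note on the design choice: return_offsets_mapping=True is only supported by fast (Rust-backed) tokenizers, which may be why this commit switches from LayoutLMv2Tokenizer to LayoutLMv2TokenizerFast.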