nielsr HF staff committed on
Commit
2b9a3d1
1 Parent(s): 699bf9a
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -81,9 +81,12 @@ def process_image(image):
81
 
82
  title = "Interactive demo: LayoutLMv2"
83
  description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
84
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm'>Github Repo</a></p>"
85
  examples =[['document.png']]
86
 
 
 
 
87
  iface = gr.Interface(fn=process_image,
88
  inputs=gr.inputs.Image(type="pil"),
89
  outputs=gr.outputs.Image(type="pil", label="annotated image"),
@@ -91,5 +94,5 @@ iface = gr.Interface(fn=process_image,
91
  description=description,
92
  article=article,
93
  examples=examples,
94
- css=".output_image, .input_image {height: 600px !important}")
95
  iface.launch(debug=True)
 
81
 
82
  title = "Interactive demo: LayoutLMv2"
83
  description = "Demo for Microsoft's LayoutLMv2, a Transformer for state-of-the-art document image understanding tasks. This particular model is fine-tuned on FUNSD, a dataset of manually annotated forms. It annotates the words appearing in the image as QUESTION/ANSWER/HEADER/OTHER. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
84
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2012.14740' target='_blank'>LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> | <a href='https://github.com/microsoft/unilm' target='_blank'>Github Repo</a></p>"
85
  examples =[['document.png']]
86
 
87
+ css = ".output_image, .input_image {height: 50rem !important; width: 100% !important;}"
88
+ # css = ".output_image, .input_image {height: 600px !important}"
89
+
90
  iface = gr.Interface(fn=process_image,
91
  inputs=gr.inputs.Image(type="pil"),
92
  outputs=gr.outputs.Image(type="pil", label="annotated image"),
 
94
  description=description,
95
  article=article,
96
  examples=examples,
97
+ css=css)
98
  iface.launch(debug=True)