nielsr HF staff committed on
Commit
fb6be5e
1 Parent(s): 6a94555

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -10,13 +10,12 @@ model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-fin
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
  model.to(device)
12
 
13
- def process_document(image):
14
  # prepare encoder inputs
15
  pixel_values = processor(image, return_tensors="pt").pixel_values
16
 
17
  # prepare decoder inputs
18
  task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
19
- question = "When is the coffee break?"
20
  prompt = task_prompt.replace("{user_input}", question)
21
  decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
22
 
@@ -49,9 +48,12 @@ demo = gr.Interface(
49
  description = "Gradio Demo for Donut, an instance of `VisionEncoderDecoderModel` fine-tuned on DocVQA (document visual question answering). To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
50
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.15664' target='_blank'>Donut: OCR-free Document Understanding Transformer</a> | <a href='https://github.com/clovaai/donut' target='_blank'>Github Repo</a></p>"
51
 
 
 
 
52
  interface = gr.Interface(fn=answer_question,
53
  inputs=[image, question],
54
- outputs=answer,
55
  examples=examples,
56
  title=title,
57
  description=description,
@@ -59,7 +61,7 @@ interface = gr.Interface(fn=answer_question,
59
  enable_queue=True)
60
  interface.launch(debug=True)
61
 
62
- examples=[["example_1.png"]],
63
  cache_examples=False,
64
  )
65
  demo.launch()
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
  model.to(device)
12
 
13
+ def process_document(image, question):
14
  # prepare encoder inputs
15
  pixel_values = processor(image, return_tensors="pt").pixel_values
16
 
17
  # prepare decoder inputs
18
  task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
 
19
  prompt = task_prompt.replace("{user_input}", question)
20
  decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
21
 
 
48
  description = "Gradio Demo for Donut, an instance of `VisionEncoderDecoderModel` fine-tuned on DocVQA (document visual question answering). To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
49
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.15664' target='_blank'>Donut: OCR-free Document Understanding Transformer</a> | <a href='https://github.com/clovaai/donut' target='_blank'>Github Repo</a></p>"
50
 
51
+ image = gr.inputs.Image(type="pil")
52
+ question = gr.inputs.Textbox(label="Question")
53
+
54
  interface = gr.Interface(fn=answer_question,
55
  inputs=[image, question],
56
+ outputs="json",
57
  examples=examples,
58
  title=title,
59
  description=description,
 
61
  enable_queue=True)
62
  interface.launch(debug=True)
63
 
64
+ examples=[["example_1.png", "When is the coffee break?"]],
65
  cache_examples=False,
66
  )
67
  demo.launch()