import gradio as gr
import torch
from transformers import UdopProcessor, UdopForConditionalGeneration

# Load the UDOP processor and model once at startup
repo_id = "microsoft/udop-large"
processor = UdopProcessor.from_pretrained(repo_id)
model = UdopForConditionalGeneration.from_pretrained(repo_id)
def process_document(image, question):
    # The processor runs OCR on the image and prepares the text, layout, and pixel inputs
    encoding = processor(images=image, text=question, return_tensors="pt")
    # Generate the answer autoregressively (inference only, so no gradients needed)
    with torch.no_grad():
        outputs = model.generate(**encoding, max_new_tokens=20)
    generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0]
    return generated_text
description = "Unofficial Gradio demo of UDOP for DocVQA (document visual question answering). To use it, upload an image, type a question, and click 'Submit', or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2212.02623.pdf' target='_blank'>Unifying Vision, Text, and Layout for Universal Document Processing</a> | <a href='https://github.com/microsoft/UDOP' target='_blank'>Github Repo</a></p>"
demo = gr.Interface(
    fn=process_document,
    inputs=[gr.Image(label="Document image"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Response"),
    title="Demo: UDOP for DocVQA",
    description=description,
    article=article,
    examples=[["example_1.png", "When is the coffee break?"]],
    cache_examples=True,
)

demo.launch()
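For a quick check outside the web UI, the prediction function can also be called directly. A minimal sketch, assuming process_document is in scope and example_1.png is the example file bundled with the Space; run it in a separate script or before demo.launch(), since launch() blocks:

from PIL import Image

# Load the bundled example document and ask the same question as the example row
image = Image.open("example_1.png").convert("RGB")
print(process_document(image, "When is the coffee break?"))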