# Hugging Face Spaces demo app: handwritten phone-number OCR.
from transformers import ViTFeatureExtractor, BertTokenizer, VisionEncoderDecoderModel, AutoTokenizer, AutoFeatureExtractor
import gradio as gr

# UI metadata for the Gradio demo.
title = "Handwritten Phone Number OCR"
description = "handwritten_phone_number_OCR is a Vision-Encoder-Decoder model (based on the concept of TrOCR) which uses pre-trained facebook's vit-mae-large as the encoder and xlm-roberta-base as the decoder. \nIt has been trained on MNIST (2M synthetic images)."
# Sample images bundled with the Space; each entry is one input for the interface.
examples = [["demo_image/0313474611_lauf25.png"], ["demo_image/0466975865_nr4ywx.png"], ["demo_image/0473227403_7lnod1.png"], ["demo_image/0728880927_jr987p.png"], ["demo_image/0922853144_1o4ay5.png"]]

# Fine-tuned encoder-decoder OCR model, plus the tokenizer/feature-extractor
# matching its decoder (xlm-roberta-base) and encoder (vit-mae-large) bases.
model = VisionEncoderDecoderModel.from_pretrained("dawars/handwritten_phone_number_OCR")
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-mae-large")
def run_ocr(image):
    """Run OCR on a single input image and return the recognized text.

    Args:
        image: image as provided by the Gradio "image" input component
            (a numpy array / PIL-compatible image — accepted by the
            feature extractor as-is).

    Returns:
        The decoded text string (the recognized phone number).
    """
    # Preprocess the image into the encoder's expected pixel tensor.
    pixel_values = feature_extractor(image, return_tensors="pt").pixel_values
    # Autoregressively generate token ids (greedy decoding by default);
    # max_new_tokens bounds the output length.
    generated_ids = model.generate(pixel_values, max_new_tokens=50)
    # Decode the single generated sequence, dropping special tokens.
    generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text
# Wire the OCR function into a simple image-in / text-out Gradio UI and serve it.
demo = gr.Interface(fn=run_ocr, inputs="image", outputs="text", title=title, description=description, examples=examples)
demo.launch()