import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
import gradio as gr

# Set up the device (GPU if available, otherwise CPU)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the fine-tuned model and processor from the Hugging Face Hub
model = VisionEncoderDecoderModel.from_pretrained("Heramb26/TC-OCR-Custom").to(device)
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-large-handwritten")
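# Note: the model weights come from the fine-tuned "Heramb26/TC-OCR-Custom" checkpoint,
# while the processor (image preprocessing + tokenizer) is reused from
# "microsoft/trocr-large-handwritten", presumably the base model it was fine-tuned from.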

def ocr_image(image):
    """
    Perform OCR on an image using the loaded model.

    :param image: Input PIL image.
    :return: Extracted text.
    """
    # Preprocess the image into pixel values and run autoregressive generation
    pixel_values = processor(image, return_tensors="pt").pixel_values.to(device)
    generated_ids = model.generate(pixel_values)
    # Decode the generated token IDs back into a plain text string
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text
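
# Optional (illustrative sketch, not part of the original app): the call above uses
# greedy decoding by default; standard Hugging Face generation arguments such as a
# length cap and beam search can be passed instead, e.g.
#   generated_ids = model.generate(pixel_values, max_length=64, num_beams=4)
# The values 64 and 4 here are assumed for illustration only.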

# Create the Gradio interface
interface = gr.Interface(
    fn=ocr_image,                 # Function called when an image is uploaded
    inputs=gr.Image(type="pil"),  # Input is an image, passed in as a PIL image (Gradio v3+ API)
    outputs="text",               # Output is the extracted text
    title="OCR Inference",        # Title of the app
    description="Upload an image with handwritten text to extract the text.",
)

# Launch the Gradio app
interface.launch()
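
# Note: on Hugging Face Spaces the default launch() is sufficient; when running
# locally, interface.launch(share=True) would additionally create a temporary
# public link (an optional tweak, not part of the original app).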