Spaces:
Running
Running
File size: 1,424 Bytes
9206f14 134092a 9206f14 0218a3d 9206f14 134092a e705ba3 9206f14 0218a3d 9206f14 0218a3d 9206f14 134092a 8f8b9df 134092a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
import gradio as gr
# Set up the device (GPU or CPU)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the fine-tuned model and processor from the Hugging Face repository.
# NOTE: from_pretrained downloads the weights on first run (network I/O).
# The processor is taken from the base microsoft/trocr-large-handwritten
# checkpoint — presumably the checkpoint this model was fine-tuned from;
# verify against the model card if the tokenizer/feature extractor differ.
model = VisionEncoderDecoderModel.from_pretrained("Heramb26/TC-OCR-Custom").to(device)
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-large-handwritten")
def ocr_image(image):
    """
    Perform OCR on an image using the loaded model.

    :param image: Input PIL image (any mode; normalized to RGB internally).
    :return: Extracted text string.
    """
    # The TrOCR processor expects 3-channel RGB input; Gradio uploads can be
    # RGBA (PNG alpha channel) or grayscale, so normalize the mode first.
    image = image.convert("RGB")
    # Preprocess image and move the tensor to the same device as the model.
    pixel_values = processor(image, return_tensors="pt").pixel_values.to(device)
    # Inference only: disable autograd to avoid building a gradient graph
    # (saves memory and a little time per request).
    with torch.no_grad():
        generated_ids = model.generate(pixel_values)
    # Decode the first (and only) sequence in the batch to plain text.
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text
# Build and launch the Gradio web UI: a single image-upload input wired to
# ocr_image, returning the recognized text.
interface = gr.Interface(
    fn=ocr_image,                 # callback invoked for each uploaded image
    inputs=gr.Image(type="pil"),  # deliver the upload as a PIL image
    outputs="text",               # plain-text result box
    title="OCR Inference",
    description="Upload an image with handwritten text to extract the text.",
)
# Start the local web server for the app.
interface.launch()
|