import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from huggingface_hub import snapshot_download
import os

# Download the fine-tuned checkpoint folder from the Hugging Face Model Hub
# (it is a directory in the repo, so use snapshot_download rather than
# hf_hub_download, which only fetches single files)
repo_root = snapshot_download(repo_id="Heramb26/tr-ocr-custom-checkpoints", allow_patterns="checkpoint-2070/*")
checkpoint_folder = os.path.join(repo_root, "checkpoint-2070")

# Set up the device (GPU or CPU)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the fine-tuned model from the checkpoint folder and the processor from the base TrOCR model
model = VisionEncoderDecoderModel.from_pretrained(checkpoint_folder).to(device)
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-large-handwritten")

def ocr_image(image):
    """
    Perform OCR on an image using the loaded model.
    :param image: Input PIL image.
    :return: Extracted text.
    """
    # TrOCR expects RGB input, so convert grayscale/RGBA images first
    image = image.convert("RGB")
    # Preprocess the image into pixel values, generate token IDs, and decode them to text
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    generated_ids = model.generate(pixel_values)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text

# Example usage
image_path = "path/to/your/image.jpg"  # Update with the path to your image
image = Image.open(image_path)  # Open the image file using PIL
extracted_text = ocr_image(image)  # Perform OCR on the image
print("Extracted Text:", extracted_text)