import os |
|
import xml.etree.ElementTree as ET |
|
from glob import glob |
|
from pathlib import Path, PurePath |
|
|
|
import cv2 |
|
import numpy as np |
|
from datasets import ( |
|
BuilderConfig, |
|
DatasetInfo, |
|
Features, |
|
GeneratorBasedBuilder, |
|
Image, |
|
Split, |
|
SplitGenerator, |
|
Value, |
|
) |
|
from PIL import Image as PILImage |
|
|
|
|
|
class HTRDatasetConfig(BuilderConfig): |
|
"""BuilderConfig for HTRDataset""" |
|
|
|
def __init__(self, **kwargs): |
|
        super().__init__(**kwargs)
|
|
|
|
|
class HTRDataset(GeneratorBasedBuilder): |
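    """Line-level dataset builder: pairs page images with PAGE-XML files and yields cropped text-line examples."""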
|
BUILDER_CONFIGS = [ |
|
HTRDatasetConfig( |
|
name="htr_dataset", |
|
version="1.0.0", |
|
            description="Line dataset for text recognition of historical Swedish",
|
) |
|
] |
|
|
|
def _info(self): |
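        """Declare the features: a unique line key, the cropped line image, and its transcription."""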
|
features = Features({"unique_key": Value("string"), "image": Image(), "transcription": Value("string")}) |
|
return DatasetInfo(features=features) |
|
|
|
def _split_generators(self, dl_manager): |
|
""" |
|
images = dl_manager.download_and_extract( |
|
[ |
|
f"https://huggingface.co/datasets/Riksarkivet/alvsborgs_losen/resolve/main/data/images/alvsborgs_losen_imgs_{i}.tar.gz" |
|
for i in range(1, 3) |
|
] |
|
) |
|
xmls = dl_manager.download_and_extract( |
|
[ |
|
f"https://huggingface.co/datasets/Riksarkivet/alvsborgs_losen/resolve/main/data/page_xmls/alvsborgs_losen_page_xmls_{i}.tar.gz" |
|
for i in range(1, 3) |
|
] |
|
) |
|
""" |
|
|
|
images = dl_manager.download_and_extract( |
|
[ |
|
f"https://huggingface.co/datasets/Riksarkivet/frihetstidens_utskottshandlingar/resolve/main/data/images/frihetstidens_utskottshandlingar_images_{i}.tar.gz" |
|
for i in range(1, 3) |
|
] |
|
) |
|
xmls = dl_manager.download_and_extract( |
|
[ |
|
f"https://huggingface.co/datasets/Riksarkivet/frihetstidens_utskottshandlingar/resolve/main/data/page_xmls/frihetstidens_utskottshandlingar_page_xmls_{i}.tar.gz" |
|
for i in range(1, 3) |
|
] |
|
) |
|
image_extensions = [ |
|
"*.jpg", |
|
"*.jpeg", |
|
"*.png", |
|
"*.gif", |
|
"*.bmp", |
|
"*.tif", |
|
"*.tiff", |
|
"*.JPG", |
|
"*.JPEG", |
|
"*.PNG", |
|
"*.GIF", |
|
"*.BMP", |
|
"*.TIF", |
|
"*.TIFF", |
|
] |
|
        # Collect all images and XML files recursively; glob is case-sensitive on
        # most filesystems, hence the upper- and lower-case extensions above.
        imgs_nested = [glob(os.path.join(x, "**", ext), recursive=True) for ext in image_extensions for x in images]
        imgs_flat = [item for sublist in imgs_nested for item in sublist]
        sorted_imgs = sorted(imgs_flat, key=lambda x: Path(x).stem)

        xmls_nested = [glob(os.path.join(x, "**", "*.xml"), recursive=True) for x in xmls]
        xmls_flat = [item for sublist in xmls_nested for item in sublist]
        sorted_xmls = sorted(xmls_flat, key=lambda x: Path(x).stem)
|
        assert len(sorted_imgs) == len(sorted_xmls), (
            f"Found {len(sorted_imgs)} images but {len(sorted_xmls)} XML files"
        )
        # Both lists are sorted by filename stem, so matching files line up index by index.
        imgs_xmls = list(zip(sorted_imgs, sorted_xmls))
|
|
|
return [ |
|
SplitGenerator( |
|
name=Split.TRAIN, |
|
gen_kwargs={"imgs_xmls": imgs_xmls}, |
|
) |
|
] |
|
|
|
def _generate_examples(self, imgs_xmls): |
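        """Crop each text line from its page image and yield (key, example) pairs."""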
|
for img, xml in imgs_xmls: |
|
            assert Path(img).stem == Path(xml).stem, f"Image/XML stem mismatch: {img} vs {xml}"
            img_filename = Path(img).stem
            # The parent directory name identifies the volume the page comes from.
            volume = PurePath(img).parts[-2]
|
|
|
lines_data = self.parse_pagexml(xml) |
|
|
|
|
|
            image_array = cv2.imread(img)
            # cv2.imread returns None rather than raising on unreadable files.
            if image_array is None:
                print(f"Could not read image: {img}")
                continue
|
|
|
for i, line in enumerate(lines_data): |
|
line_id = str(i).zfill(4) |
|
try: |
|
cropped_image = self.crop_line_image(image_array, line["coords"]) |
|
                except Exception as e:
                    print(f"Failed to crop line {line_id} of {img_filename}: {e}")
                    continue
|
|
|
|
|
|
|
|
|
|
                transcription = line["transcription"]
                # A missing Unicode element parses as None; skip those and empty strings
                # before coercion, since str(None) would slip through as "None".
                if not isinstance(transcription, str) or transcription.strip() == "":
                    print(f"Invalid transcription: {transcription!r}")
                    continue
|
|
|
|
|
unique_key = f"{volume}_{img_filename}_{line_id}" |
|
|
|
                yield unique_key, {
                    "unique_key": unique_key,
                    "image": cropped_image,
                    "transcription": transcription,
                }
|
|
|
def parse_pagexml(self, xml): |
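        """Parse a PAGE-XML file into a list of dicts with line id, polygon coords, and transcription."""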
|
try: |
|
tree = ET.parse(xml) |
|
root = tree.getroot() |
|
        except ET.ParseError as e:
            print(f"Failed to parse {xml}: {e}")
            return []
|
|
|
namespaces = {"ns": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"} |
|
page = root.find("ns:Page", namespaces) |
|
        if page is None:
            print(f"No Page element found in {xml}")
            return []
|
|
|
text_regions = page.findall("ns:TextRegion", namespaces) |
|
lines_data = [] |
|
for region in text_regions: |
|
lines = region.findall("ns:TextLine", namespaces) |
|
|
|
for line in lines: |
|
try: |
|
line_id = line.get("id") |
|
                    coords = line.find("ns:Coords", namespaces).get("points")
                    # "x1,y1 x2,y2 ..." -> [(x1, y1), (x2, y2), ...]
                    coords = [tuple(map(int, p.split(","))) for p in coords.split()]
                    transcription = line.find("ns:TextEquiv/ns:Unicode", namespaces).text
|
|
|
lines_data.append({"line_id": line_id, "coords": coords, "transcription": transcription}) |
|
except Exception as e: |
|
print(e) |
|
continue |
|
|
|
return lines_data |
|
|
|
def crop_line_image(self, img, coords): |
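        """Mask the line polygon out of the page image and return the crop on a white background as a PIL image."""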
|
coords = np.array(coords) |
|
|
|
        # Single-channel mask the size of the page image.
        mask = np.zeros(img.shape[:2], dtype=np.uint8)

        # Fill the line polygon; a malformed polygon raises here, and the caller
        # in _generate_examples skips the line rather than yielding a blank crop.
        cv2.drawContours(mask, [coords], -1, 255, -1, cv2.LINE_AA)

        # Keep only the pixels inside the polygon.
        res = cv2.bitwise_and(img, img, mask=mask)
|
rect = cv2.boundingRect(coords) |
|
|
|
        # Paint everything outside the polygon white, then crop to the polygon's
        # bounding box so the line ends up on a clean white background.
        wbg = np.ones_like(img, np.uint8) * 255
        cv2.bitwise_not(wbg, wbg, mask=mask)
        dst = wbg + res
        cropped = dst[rect[1] : rect[1] + rect[3], rect[0] : rect[0] + rect[2]]
|
|
|
cropped = HTRDataset.cv2_to_pil(cropped) |
|
return cropped |
|
|
|
    @staticmethod
    def np_to_cv2(image_array):
        # Decode an encoded image buffer (e.g. bytes read via np.fromfile) and
        # convert from OpenCV's BGR channel order to RGB.
        image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image_rgb
|
|
|
|
|
    @staticmethod
    def cv2_to_pil(cv2_image):
        # OpenCV arrays are BGR; convert to RGB before wrapping in a PIL image.
        cv2_image_rgb = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)
        pil_image = PILImage.fromarray(cv2_image_rgb)
        return pil_image
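

# A minimal usage sketch (an assumption, not part of the original script): with a
# `datasets` version that still supports dataset loading scripts, saving this file
# as htr_dataset.py should allow something like:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("htr_dataset.py", split="train", trust_remote_code=True)
#     print(ds[0]["unique_key"], ds[0]["transcription"])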
|
|