|
import datasets |
|
import pandas as pd |
|
|
|
# BibTeX citation surfaced in the dataset card / `DatasetInfo.citation`.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {license_plates},
author = {TrainingDataPro},
year = {2023}
}
"""

# Human-readable summary surfaced in `DatasetInfo.description`.
_DESCRIPTION = """\
Over 1.2 million annotated license plates from vehicles around the world.
This dataset is tailored for License Plate Recognition tasks and includes
images from both YouTube and PlatesMania.
Annotation details are provided in the About section below.
"""

# Repository slug on the Hugging Face Hub; reused to build URLs below.
_NAME = 'license_plates'

# Dataset card page on the Hub.
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

# No license string published for this dataset.
_LICENSE = ""

# Base URL of the per-country `.tar.gz` archives; the config name
# (e.g. "Brazil") plus ".tar.gz" is appended in `_split_generators`.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
|
|
|
|
|
class LicensePlates(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro license-plates dataset.

    Each config is one country. A config's data lives in a single
    ``<country>.tar.gz`` archive containing the source images, their
    annotated ("labeled") counterparts, and one CSV file with the
    per-plate annotations.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Brazil"),
        datasets.BuilderConfig(name="Estonia"),
        datasets.BuilderConfig(name="Finland"),
        datasets.BuilderConfig(name="Kazakhstan"),
        datasets.BuilderConfig(name="Lithuania"),
        datasets.BuilderConfig(name="Serbia"),
        datasets.BuilderConfig(name="UAE"),
    ]

    DEFAULT_CONFIG_NAME = "Brazil"

    def _info(self):
        """Return dataset metadata, including the feature schema.

        The schema declared here is the contract that
        ``_generate_examples`` must satisfy key-for-key.
        """
        features = datasets.Features({
            'image': datasets.Image(),
            'labeled_image': datasets.Image(),
            'bbox': datasets.Value('string'),
            'license_plate.id': datasets.Value('string'),
            'license_plate.visibility': datasets.Value('string'),
            'license_plate.rows_count': datasets.Value('uint8'),
            'license_plate.number': datasets.Value('string'),
            'license_plate.serial': datasets.Value('string'),
            'license_plate.country': datasets.Value('string'),
            'license_plate.mask': datasets.Value('string'),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's archive and expose one TRAIN split.

        ``iter_archive`` yields ``(path_inside_archive, file_object)``
        pairs lazily, so the archive is never fully extracted to disk.
        """
        data = dl_manager.download(f"{_DATA}{self.config.name}.tar.gz")
        data = dl_manager.iter_archive(data)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": data},
            ),
        ]

    def _generate_examples(self, data):
        """Yield ``(key, example)`` pairs matching the schema in ``_info``.

        Fixes the previous implementation, which was copy-pasted from an
        unrelated dataset script: it referenced undefined names
        (``image_path``, ``mask``, ``annotations_df`` — immediate
        ``NameError``), yielded keys (``mask``/``id``/``gender``/``age``)
        absent from the declared features, and placed the ``yield``
        inside the CSV branch so at most one example per CSV was emitted.

        Args:
            data: iterator of ``(file_path, file_obj)`` pairs from
                ``dl_manager.iter_archive``.

        Raises:
            FileNotFoundError: if the archive contains no CSV
                annotation file.
        """
        annotations_df = None
        # (path, bytes) for every non-CSV member, kept in archive order.
        image_files = []

        for file_path, file in data:
            if file_path.endswith('.csv'):
                # `file` is a file-like object; pandas reads it directly.
                annotations_df = pd.read_csv(file)
            else:
                # Must copy the bytes out now: iter_archive invalidates
                # the file object once iteration advances.
                image_files.append((file_path, file.read()))

        if annotations_df is None:
            raise FileNotFoundError(
                'No CSV annotation file found in the archive.'
            )

        # NOTE(review): assumes archive members come as
        # (image, labeled_image) pairs aligned row-for-row with the CSV,
        # and that CSV column names equal the feature keys — confirm
        # against the published archive layout before release.
        for idx in range(len(annotations_df)):
            image_path, image_bytes = image_files[2 * idx]
            labeled_path, labeled_bytes = image_files[2 * idx + 1]
            row = annotations_df.iloc[idx]
            yield idx, {
                'image': {'path': image_path, 'bytes': image_bytes},
                'labeled_image': {'path': labeled_path,
                                  'bytes': labeled_bytes},
                'bbox': row['bbox'],
                'license_plate.id': row['license_plate.id'],
                'license_plate.visibility': row['license_plate.visibility'],
                'license_plate.rows_count': row['license_plate.rows_count'],
                'license_plate.number': row['license_plate.number'],
                'license_plate.serial': row['license_plate.serial'],
                'license_plate.country': row['license_plate.country'],
                'license_plate.mask': row['license_plate.mask'],
            }
|
|