# Hugging Face dataset loading script for TrainingDataPro/license_plates.
import datasets
import pandas as pd
# BibTeX citation surfaced in the dataset card / DatasetInfo.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {license_plates},
author = {TrainingDataPro},
year = {2023}
}
"""
# Human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
Over 1.2 million annotated license plates from vehicles around the world.
This dataset is tailored for License Plate Recognition tasks and includes
images from both YouTube and PlatesMania.
Annotation details are provided in the About section below.
"""
# Repository slug on the Hugging Face hub.
_NAME = 'license_plates'
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
# No license declared by the dataset author.
_LICENSE = ""
# Base URL for per-config archives (<config>.tar.gz) and annotations (<config>.csv).
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
class LicensePlates(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro license-plates dataset.

    Each config is a country/source pair (e.g. ``Brazil_youtube``) backed by
    a ``<config>.tar.gz`` image archive and a ``<config>.csv`` annotation
    file under the repository's ``data/`` directory.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Brazil_youtube"),
        datasets.BuilderConfig(name="Estonia_platesmania"),
        datasets.BuilderConfig(name="Finland_platesmania"),
        datasets.BuilderConfig(name="Kazakhstan_platesmania"),
        datasets.BuilderConfig(name="Kazakhstan_youtube"),
        datasets.BuilderConfig(name="Lithuania_platesmania"),
        datasets.BuilderConfig(name="Serbia_platesmania"),
        datasets.BuilderConfig(name="Serbia_youtube"),
        datasets.BuilderConfig(name="UAE_platesmania"),
        datasets.BuilderConfig(name="UAE_youtube"),
    ]
    # BUG FIX: the previous default "Brazil" did not match any config name
    # above, so loading without an explicit config failed. Use the first
    # real config instead.
    DEFAULT_CONFIG_NAME = "Brazil_youtube"

    def _info(self):
        """Return the DatasetInfo (feature schema, citation, homepage)."""
        features = datasets.Features({
            'bbox_id': datasets.Value('uint32'),
            'bbox': datasets.Value('string'),
            'image': datasets.Image(),
            'labeled_image': datasets.Image(),
            'license_plate.id': datasets.Value('string'),
            'license_plate.visibility': datasets.Value('string'),
            'license_plate.rows_count': datasets.Value('uint8'),
            'license_plate.number': datasets.Value('string'),
            'license_plate.serial': datasets.Value('string'),
            'license_plate.country': datasets.Value('string'),
            'license_plate.mask': datasets.Value('string')
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-config archive and CSV; expose a single TRAIN split."""
        data = dl_manager.download(f"{_DATA}{self.config.name}.tar.gz")
        # iter_archive streams (path, file-object) pairs without extracting.
        data = dl_manager.iter_archive(data)
        annotations = dl_manager.download(f'{_DATA}{self.config.name}.csv')
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "data": data,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, data, annotations):
        """Yield (key, example) pairs joining CSV rows with archive images.

        Args:
            data: iterator of (path, file-object) pairs from the tar archive.
            annotations: local path to the downloaded CSV annotation file.
        """
        annotations_df = pd.read_csv(annotations, sep=',', index_col=0)

        # Read the whole archive up front, keyed by bare file name, so each
        # annotation row can look up both its plain and its labeled image.
        images = {}
        for file_path, file in data:
            file_name = file_path.split('/')[-1]
            images[file_name] = (file_path, file.read())

        # These columns are absent from some configs; drop them if present.
        annotations_df.drop(
            columns=['license_plate.region', 'license_plate.color'],
            inplace=True,
            errors='ignore')
        annotations_df.fillna(0, inplace=True)
        annotations_df.sort_values(by='file_name', inplace=True)

        # BUG FIX: the original yielded the stale image-loop counter as the
        # key for every example, producing duplicate keys (DuplicatedKeysError).
        # Enumerate the annotation rows so each example gets a unique key.
        for key, row in enumerate(annotations_df.itertuples(index=True)):
            # row[0] is the CSV index (bbox id), row[1] the file name,
            # row[2] the bbox string, row[3:] the license_plate.* columns.
            image = images[row[1]]
            # rsplit tolerates extra dots in the file name (only the final
            # extension is split off).
            name, ext = row[1].rsplit('.', 1)
            labeled_image = images[f'{name}_labeled.{ext}']
            yield key, {
                'bbox_id': row[0],
                'bbox': row[2],
                "image": {
                    "path": image[0],
                    "bytes": image[1]
                },
                "labeled_image": {
                    "path": labeled_image[0],
                    "bytes": labeled_image[1]
                },
                'license_plate.id': row[3],
                'license_plate.visibility': row[4],
                'license_plate.rows_count': row[5],
                'license_plate.number': row[6],
                'license_plate.serial': row[7],
                'license_plate.country': row[8],
                'license_plate.mask': row[9]
            }
|