import datasets
import numpy as np
import pandas as pd
import PIL.Image
import PIL.ImageOps

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {generated-usa-passeports-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of generated images of USA passports: each original
passport image is paired with three augmented versions. The dataset is useful
for document recognition and verification tasks in business and safety systems.
"""

_NAME = 'generated-usa-passeports-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = "cc-by-nc-nd-4.0"

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


def exif_transpose(img):
    """Rotate/flip an image according to its EXIF orientation tag."""
    if not img:
        return img

    exif_orientation_tag = 274

    # Check for EXIF data (only present on some files)
    if hasattr(img, "_getexif") and isinstance(
            img._getexif(), dict) and exif_orientation_tag in img._getexif():
        exif_data = img._getexif()
        orientation = exif_data[exif_orientation_tag]

        # Handle EXIF Orientation
        if orientation == 1:
            # Normal image - nothing to do!
            pass
        elif orientation == 2:
            # Mirrored left to right
            img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 3:
            # Rotated 180 degrees
            img = img.rotate(180)
        elif orientation == 4:
            # Mirrored top to bottom
            img = img.rotate(180).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 5:
            # Mirrored along top-left diagonal
            img = img.rotate(-90,
                             expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 6:
            # Rotated 90 degrees
            img = img.rotate(-90, expand=True)
        elif orientation == 7:
            # Mirrored along top-right diagonal
            img = img.rotate(90,
                             expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 8:
            # Rotated 270 degrees
            img = img.rotate(90, expand=True)

    return img


def load_image_file(file, mode='RGB'):
    """Load an image file, fix its EXIF orientation and return a numpy array."""
    # Load the image with PIL
    img = PIL.Image.open(file)

    if hasattr(PIL.ImageOps, 'exif_transpose'):
        # Very recent versions of PIL can do the exif transpose internally
        img = PIL.ImageOps.exif_transpose(img)
    else:
        # Otherwise, do the exif transpose ourselves
        img = exif_transpose(img)

    img = img.convert(mode)

    return np.array(img)


class GeneratedUsaPasseportsDataset(datasets.GeneratorBasedBuilder):

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'original': datasets.Image(),
                'us_pass_augmentated_1': datasets.Image(),
                'us_pass_augmentated_2': datasets.Image(),
                'us_pass_augmentated_3': datasets.Image()
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE)

    def _split_generators(self, dl_manager):
        # Download and extract the original and augmented images, plus the
        # semicolon-separated CSV that maps each original to its augmentations.
        original = dl_manager.download_and_extract(f"{_DATA}original.zip")
        augmentation = dl_manager.download_and_extract(
            f"{_DATA}augmentation.zip")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "original": original,
                                        'augmentation': augmentation,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, original, augmentation, annotations):
        annotations_df = pd.read_csv(annotations, sep=';')

        # Each CSV row lists the original image file name followed by the
        # file names of its three augmented versions.
        for row in annotations_df.itertuples():
            yield row[0], {
                'original':
                    load_image_file(f'{original}/{row[1]}'),
                'us_pass_augmentated_1':
                    load_image_file(f'{augmentation}/{row[2]}'),
                'us_pass_augmentated_2':
                    load_image_file(f'{augmentation}/{row[3]}'),
                'us_pass_augmentated_3':
                    load_image_file(f'{augmentation}/{row[4]}')
            }
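
# ---------------------------------------------------------------------------
# Usage sketch (not part of the loading script itself): a minimal example of
# how this builder is typically consumed. The repository id is assumed from
# _HOMEPAGE; recent versions of `datasets` may additionally require
# `trust_remote_code=True` to run script-based loaders like this one.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("TrainingDataPro/generated-usa-passeports-dataset",
#                     split="train")
#   sample = ds[0]
#   sample["original"]               # original passport image
#   sample["us_pass_augmentated_1"]  # first augmented variant
# ---------------------------------------------------------------------------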