feat: add dataset loading script
makeup-detection-dataset.py
ADDED
@@ -0,0 +1,93 @@
import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {makeup-detection-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of photos featuring the same individuals captured in two
distinct scenarios - *with and without makeup*. The dataset contains a diverse
range of individuals of various *ages, ethnicities and genders*. The images
are of high quality, ensuring clarity and detail for each subject.
In photos with makeup, it is applied **to only specific parts** of the face,
such as *eyes, lips, or skin*.
In photos without makeup, individuals have a bare face with no visible
cosmetics or beauty enhancements. These images provide a clear contrast
to the makeup images, allowing for meaningful visual analysis.
"""

_NAME = 'makeup-detection-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class MakeupDetectionDataset(datasets.GeneratorBasedBuilder):
    """Paired photos of the same individuals with and without makeup."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'no_makeup': datasets.Image(),
                'with_makeup': datasets.Image(),
                'part': datasets.Value('string'),
                'gender': datasets.Value('string'),
                'age': datasets.Value('int8'),
                'country': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the two image archives and the CSV with per-pair annotations.
        no_makeup = dl_manager.download(f"{_DATA}no_makeup.tar.gz")
        with_makeup = dl_manager.download(f"{_DATA}with_makeup.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        no_makeup = dl_manager.iter_archive(no_makeup)
        with_makeup = dl_manager.iter_archive(with_makeup)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        'no_makeup': no_makeup,
                                        'with_makeup': with_makeup,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, no_makeup, with_makeup, annotations):
        annotations_df = pd.read_csv(annotations, sep=';')

        # The two archives are aligned, so paired images arrive in the same order;
        # metadata is looked up by the image path in the 'no_makeup' CSV column.
        for idx, ((image_path, image),
                  (makeup_path, makeup)) in enumerate(zip(no_makeup, with_makeup)):
            row = annotations_df.loc[annotations_df['no_makeup'].str.lower() ==
                                     image_path.lower()]
            yield idx, {
                'no_makeup': {
                    'path': image_path,
                    'bytes': image.read()
                },
                'with_makeup': {
                    'path': makeup_path,
                    'bytes': makeup.read()
                },
                'part': row['part'].values[0],
                'gender': row['gender'].values[0],
                'age': row['age'].values[0],
                'country': row['country'].values[0]
            }
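For reference, a minimal usage sketch (not part of this commit): once the script is hosted in the TrainingDataPro/makeup-detection-dataset repository, the TRAIN split it defines should load via load_dataset. The column names below come from the features declared in _info(); depending on your datasets version, trust_remote_code=True may be required for script-based datasets.

# Usage sketch: load the split produced by the script above.
from datasets import load_dataset

# On newer `datasets` releases that still support loading scripts, you may need
# to pass trust_remote_code=True to this call.
dataset = load_dataset("TrainingDataPro/makeup-detection-dataset", split="train")

example = dataset[0]
print(example["gender"], example["age"], example["country"], example["part"])
example["no_makeup"]    # decoded image without makeup
example["with_makeup"]  # paired decoded image with makeup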