import os
import re
import pandas as pd
import datasets
# _CITATION = """\
# @article{ponomarenko_tid2008_2009,
# author = {Ponomarenko, Nikolay and Lukin, Vladimir and Zelensky, Alexander and Egiazarian, Karen and Astola, Jaakko and Carli, Marco and Battisti, Federica},
# title = {{TID2008} -- {A} {Database} for {Evaluation} of {Full}- {Reference} {Visual} {Quality} {Assessment} {Metrics}},
# year = {2009}
# }
# """
_DESCRIPTION = """Pairs of desaturated reference images and their illuminant-changed counterparts for the MNIST, CIFAR, Imagenet and TID13 configurations."""
_HOMEPAGE = ""
_REPO = ""
# _LICENSE = ""
class IlluminantConfig(datasets.BuilderConfig):
    """BuilderConfig for IlluminantChanges."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for IlluminantChanges.

        Args:
            data_url: `string`, URL (or relative path) of the zip archive to download.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
class Databases_IlluminantChanges(datasets.GeneratorBasedBuilder):
    """Pairs of desaturated reference images and their illuminant-changed versions for MNIST, CIFAR, Imagenet and TID13."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
IlluminantConfig(
name="MNIST",
description="MNIST data",
data_url=f"./MNIST.zip",
),
IlluminantConfig(
name="CIFAR",
description="CIFAR data",
data_url=f"./CIFAR.zip",
),
IlluminantConfig(
name="Imagenet",
description="Imagenet data",
data_url=f"./Imagenet.zip",
),
IlluminantConfig(
name="TID13",
description="TID13 data",
data_url=f"./TID13.zip",
),
]
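
    # Usage sketch (illustrative, not part of the released API): assuming this
    # script sits next to the four *.zip archives, a configuration can be loaded
    # locally by name, e.g.:
    #
    #   from datasets import load_dataset
    #   ds = load_dataset("path/to/this_script.py", "TID13", split="train")
    #   pair = ds[0]  # {"reference": <PIL.Image>, "distorted": <PIL.Image>}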
    def _info(self):
        # Specifies the datasets.DatasetInfo object, which describes the features and types of the dataset.
features = datasets.Features(
{
# "images": datasets.Image(),
"reference": datasets.Image(),
"distorted": datasets.Image(),
# "mos": datasets.Value("float")
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
# supervised_keys=("reference", "distorted", "mos"),
homepage=_HOMEPAGE,
# license=_LICENSE,
# citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        # Download and extract the configuration's archive in a single step instead of
        # downloading it twice (once with download() and again with download_and_extract()).
        images_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": images_dir,
                    "split": "train",
                },
            )
        ]
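
    # Expected layout inside each extracted archive (inferred from _generate_examples;
    # the exact Illum file names are an assumption, they only need to contain the
    # image number that links them to "im_orig_desat<N>.png"):
    #
    #   <config name>/
    #       Desat/im_orig_desat<N>.png   # desaturated reference images
    #       Illum/...<N>....png          # illuminant-changed versions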
    def _generate_examples(self, images, split):
        desat_path = os.path.join(images, self.config.name, "Desat")
        illum_path = os.path.join(images, self.config.name, "Illum")
        illum_names = sorted(os.listdir(illum_path))
        illum_paths = [os.path.join(illum_path, name) for name in illum_names]
        # Pair each illuminant-changed image with its desaturated reference via the
        # image number embedded in the file name (not the full path, which may also
        # contain digits, e.g. "TID13" or cache directories).
        img_numbers = [re.findall(r"\d+", name)[0] for name in illum_names]
        desat_paths = [os.path.join(desat_path, f"im_orig_desat{n}.png") for n in img_numbers]
for key, (desat, illum) in enumerate(zip(desat_paths, illum_paths)):
yield key, {
"reference": desat,
"distorted": illum,
            }
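

# A minimal local smoke test, kept as a sketch: it assumes the selected archive
# (here "MNIST.zip") sits next to this script and that the installed `datasets`
# version still supports local loading scripts.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "MNIST", split="train")
    print(len(dataset), dataset[0]["reference"].size, dataset[0]["distorted"].size)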