from typing import List, Dict
from PIL import Image
import numpy as np
import datasets
import pandas as pd
from datasets.download.streaming_download_manager import ArchiveIterable
_CITATION = """\
@article{koller2015continuous,
title={Continuous sign language recognition: Towards large vocabulary statistical recognition systems handling multiple signers},
author={Koller, Oscar and Forster, Jens and Ney, Hermann},
journal={Computer Vision and Image Understanding},
volume={141},
pages={108--125},
year={2015},
publisher={Elsevier}
}
@inproceedings{koller2017re,
title={Re-sign: Re-aligned end-to-end sequence modelling with deep recurrent CNN-HMMs},
author={Koller, Oscar and Zargaran, Sepehr and Ney, Hermann},
booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
pages={4297--4305},
year={2017}
}
"""
_DESCRIPTION = """\
This archive contains two sets of the RWTH-PHOENIX-Weather 2014 corpus:
a) the multisigner set,
b) the signer-independent set.
The signing is recorded by a stationary color camera placed in front of the sign language interpreters. The interpreters wear dark clothes in front of an artificial grey background with a color transition. All videos are recorded at 25 frames per second and each frame is 210 by 260 pixels, showing the interpreter box only.
The corpus is released under a non-commercial CC BY-NC 4.0 license with attribution.
"""
_HOMEPAGE = "https://www-i6.informatik.rwth-aachen.de/~koller/RWTH-PHOENIX/"
_LICENSE = "CC BY-NC 4.0"
# Function to convert image file to numpy array
def image_to_numpy(file):
image = Image.open(file)
return np.array(image)
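# A quick sanity-check sketch of what image_to_numpy returns (not executed here;
# "example_frame.png" is a placeholder path, and the shape assumes the 210x260 px
# full-frame PNGs shipped with the corpus):
#
#     arr = image_to_numpy("example_frame.png")
#     arr.shape   # e.g. (260, 210, 3), dtype uint8, in height x width x channel order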
class RWTHPhoenixWeather2014Config(datasets.BuilderConfig):
"""BuilderConfig for RWTHPhoenixWeather2014Config."""
def __init__(self, main_data_folder, corpus_file_suffix, **kwargs):
"""BuilderConfig for RWTHPhoenixWeather2014Config.
Args:
main_data_folder: name of the RWTHPhoenix variant folder.
**kwargs: keyword arguments forwarded to super.
"""
super(RWTHPhoenixWeather2014Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.main_data_folder = main_data_folder
self.corpus_file_suffix = corpus_file_suffix
class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
"""RWTH-PHOENIX-Weather 2014: Continuous Sign Language Recognition Dataset."""
VERSION = datasets.Version("1.0.0")
DEFAULT_WRITER_BATCH_SIZE = 25
BUILDER_CONFIGS = [
RWTHPhoenixWeather2014Config(
name="multisigner",
description="",
main_data_folder="phoenix-2014-multisigner",
corpus_file_suffix=".corpus.csv"
),
RWTHPhoenixWeather2014Config(
name="signerindependent",
description="",
main_data_folder="phoenix-2014-signerindependent-SI5",
corpus_file_suffix=".SI5.corpus.csv"
),
RWTHPhoenixWeather2014Config(
name="pre-training",
description="",
main_data_folder="phoenix-2014-multisigner",
corpus_file_suffix=".corpus.csv"
),
]
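    # The config name is the second argument a user passes to datasets.load_dataset,
    # e.g. load_dataset(<path to this script or repo>, "multisigner"); see the
    # minimal usage sketch at the bottom of this file.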
def _info(self):
features_dict = {
"id": datasets.Value("string"),
"transcription": datasets.Value("string"),
}
if self.config.name != "pre-training":
features_dict["frames"] = datasets.Sequence(feature=datasets.Array3D(shape=(3, 224, 224), dtype="uint8"))
return datasets.DatasetInfo(
description=_DESCRIPTION + self.config.description,
features=datasets.Features(features_dict),
            # No default supervised_keys.
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
frames = {}
other_data = {}
dataDirMapper = {
datasets.Split.TRAIN: "train",
datasets.Split.VALIDATION: "dev",
datasets.Split.TEST: "test",
}
for split in [
datasets.Split.TRAIN,
datasets.Split.VALIDATION,
datasets.Split.TEST,
]:
base_url = f"data/{self.config.main_data_folder}"
data_csv = dl_manager.download(
f"{base_url}/annotations/manual/{dataDirMapper[split]}{self.config.corpus_file_suffix}")
df = pd.read_csv(data_csv, sep='|')
example_ids = df['id']
annotations = df['annotation']
            # Download one tar archive of full-frame images per example id.
            frame_archive_urls = dl_manager.download([
                f"{base_url}/features/fullFrame-210x260px/{dataDirMapper[split]}/{example_id}.tar"
                for example_id in example_ids
            ])
frames[split] = [
dl_manager.iter_archive(url)
for url in frame_archive_urls
]
            # Key the per-example metadata by its archive iterable so that
            # _generate_examples can look the id and annotation up from the archive itself.
            other_data_split = {}
            for frame, idx, annotation in zip(frames[split], example_ids, annotations):
                other_data_split[frame] = {
                    "id": idx,
                    "annotation": annotation,
                }
            other_data[split] = other_data_split
return [
datasets.SplitGenerator(
name=split,
gen_kwargs={
"frame_archives": frames[split],
"other_data": other_data[split],
},
)
for split in [
datasets.Split.TRAIN,
datasets.Split.VALIDATION,
datasets.Split.TEST,
]
]
    def _generate_examples(self, frame_archives: List[ArchiveIterable], other_data: Dict[ArchiveIterable, dict]):
        """
        Generates the examples for the HuggingFace dataset.
        It takes a list of frame archives and a dict mapping each archive to the remaining
        data of its example; each frame archive thus acts as the key for that example.
        :param frame_archives: list of ArchiveIterables, one per example
        :param other_data: dict from ArchiveIterable to the example's id and annotation
        """
for key, frames in enumerate(frame_archives):
ex = other_data[frames]
result = {
"id": ex['id'],
"transcription": ex['annotation'],
}
            if self.config.name != 'pre-training':
                # Decode every frame image in the archive into a NumPy array.
                result["frames"] = [
                    image_to_numpy(im) for _path, im in frames
                ]
yield key, result
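
# A minimal local smoke test, intended only as a sketch: it assumes a `datasets`
# version that still supports script-based loading (consistent with the
# ArchiveIterable import above) and that the data files referenced in
# _split_generators are reachable relative to this script. The config name and
# split below are examples.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "pre-training", split="validation")
    print(ds[0]["id"], ds[0]["transcription"])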