|
import datasets |
|
import pandas as pd |
|
|
|
# BibTeX entries for the two corpus reference papers (Koller et al., CVIU 2015
# and CVPR 2017); surfaced verbatim in the generated DatasetInfo.
_CITATION = """\
@article{koller2015continuous,
title={Continuous sign language recognition: Towards large vocabulary statistical recognition systems handling multiple signers},
author={Koller, Oscar and Forster, Jens and Ney, Hermann},
journal={Computer Vision and Image Understanding},
volume={141},
pages={108--125},
year={2015},
publisher={Elsevier}
}

@inproceedings{koller2017re,
title={Re-sign: Re-aligned end-to-end sequence modelling with deep recurrent CNN-HMMs},
author={Koller, Oscar and Zargaran, Sepehr and Ney, Hermann},
booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
pages={4297--4305},
year={2017}
}
"""

# Human-readable corpus description; the active config's description is
# appended to this in _info().
_DESCRIPTION = """\
This archive contains two sets of the RWTH-Weather-Phoenix 2014 corpus

a) the multisigner set
b) the signer independent set.

The signing is recorded by a stationary color camera placed in front of the sign language interpreters. Interpreters wear dark clothes in front of an artificial grey background with color transition. All recorded videos are at 25 frames per second and the size of the frames is 210 by 260 pixels. Each frame shows the interpreter box only.
It is released under non-commercial cc 4.0 license with attribution.
"""

# Official project page hosting the corpus and its documentation.
_HOMEPAGE = "https://www-i6.informatik.rwth-aachen.de/~koller/RWTH-PHOENIX/"

# Creative Commons Attribution-NonCommercial 4.0 (matches the description above).
_LICENSE = "CC BY-NC 4.0"
|
|
|
|
|
class RWTHPhoenixWeather2014Config(datasets.BuilderConfig):
    """BuilderConfig for RWTH-PHOENIX-Weather 2014 dataset variants."""

    def __init__(self, main_data_folder, corpus_file_suffix, **kwargs):
        """BuilderConfig for RWTHPhoenixWeather2014.

        Args:
            main_data_folder: name of the RWTHPhoenix variant folder
                (e.g. "phoenix-2014-multisigner").
            corpus_file_suffix: suffix of the per-split annotation CSV file
                (e.g. ".corpus.csv" or ".SI5.corpus.csv").
            **kwargs: keyword arguments forwarded to super.
        """
        # Python 3 zero-argument super(); version is pinned for all configs.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.main_data_folder = main_data_folder
        self.corpus_file_suffix = corpus_file_suffix
|
|
|
|
|
class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
    """RWTH-PHOENIX-Weather 2014: Continuous Sign Language Recognition Dataset."""

    VERSION = datasets.Version("1.0.0")
    # Frame sequences are large; keep writer batches small to bound memory use.
    DEFAULT_WRITER_BATCH_SIZE = 25

    # All splits this builder produces, in a single place so
    # _split_generators does not repeat the list.
    _SPLITS = (
        datasets.Split.TRAIN,
        datasets.Split.VALIDATION,
        datasets.Split.TEST,
    )

    # Maps a datasets split to the corresponding directory/file prefix on disk.
    _SPLIT_DIR = {
        datasets.Split.TRAIN: "train",
        datasets.Split.VALIDATION: "dev",
        datasets.Split.TEST: "test",
    }

    BUILDER_CONFIGS = [
        RWTHPhoenixWeather2014Config(
            name="multisigner",
            description="",
            main_data_folder="phoenix-2014-multisigner",
            corpus_file_suffix=".corpus.csv",
        ),
        RWTHPhoenixWeather2014Config(
            name="signerindependent",
            description="",
            main_data_folder="phoenix-2014-signerindependent-SI5",
            corpus_file_suffix=".SI5.corpus.csv",
        ),
        # Text-only variant: same annotations as "multisigner" but examples
        # carry no video frames (see _info / _generate_examples).
        RWTHPhoenixWeather2014Config(
            name="pre-training",
            description="",
            main_data_folder="phoenix-2014-multisigner",
            corpus_file_suffix=".corpus.csv",
        ),
    ]

    def _info(self):
        """Build DatasetInfo; the frame feature is present only for video configs."""
        features_dict = {
            "id": datasets.Value("string"),
            "transcription": datasets.Value("string"),
        }
        # The text-only "pre-training" config omits the video frames entirely.
        if self.config.name != "pre-training":
            features_dict["frames"] = datasets.Sequence(feature=datasets.Image())

        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(features_dict),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download per-split annotation CSVs and (for video configs) frame archives.

        For the text-only "pre-training" config the per-example frame .tar
        archives are NOT downloaded — the original code fetched them even
        though _generate_examples never reads them for that config.
        """
        example_ids = {}
        annotations = {}
        frames = {}

        base_url = f"data/{self.config.main_data_folder}"

        for split in self._SPLITS:
            data_csv = dl_manager.download(
                f"{base_url}/annotations/manual/"
                f"{self._SPLIT_DIR[split]}{self.config.corpus_file_suffix}")

            # Corpus CSVs are pipe-separated with 'id' and 'annotation' columns.
            df = pd.read_csv(data_csv, sep='|')

            example_ids[split] = df['id']
            annotations[split] = df['annotation']

            if self.config.name == "pre-training":
                # Skip the (large) frame downloads; placeholders keep the
                # three sequences zip-compatible in _generate_examples.
                frames[split] = [None] * len(example_ids[split])
                continue

            # One .tar archive of full-frame images per example id.
            frame_archive_urls = dl_manager.download([
                f"{base_url}/features/fullFrame-210x260px/"
                f"{self._SPLIT_DIR[split]}/{example_id}.tar"
                for example_id in example_ids[split]
            ])

            frames[split] = [
                dl_manager.iter_archive(url) for url in frame_archive_urls
            ]

        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "example_ids": example_ids[split],
                    "annotations": annotations[split],
                    "frames": frames[split],
                },
            )
            for split in self._SPLITS
        ]

    def _generate_examples(self, example_ids, annotations, frames):
        """Yield (key, example) pairs; frames are included only for video configs.

        Args:
            example_ids: sequence of corpus example ids (one per example).
            annotations: sequence of gloss transcriptions, parallel to ids.
            frames: parallel sequence of archive iterators yielding
                (path, file-like) pairs, or placeholders for "pre-training".
        """
        include_frames = self.config.name != 'pre-training'
        for key, (example_id, annotation, frame_iter) in enumerate(
                zip(example_ids, annotations, frames)):
            result = {
                "id": example_id,
                "transcription": annotation,
            }
            if include_frames:
                # Materialize each frame as path + raw bytes for the Image feature.
                result["frames"] = [
                    {"path": path, "bytes": image.read()}
                    for path, image in frame_iter
                ]
            yield key, result
|
|