|
from typing import List, Dict |
|
|
|
from PIL import Image |
|
import numpy as np |
|
import datasets |
|
import pandas as pd |
|
from datasets.download.streaming_download_manager import ArchiveIterable |
|
|
|
_CITATION = """\ |
|
@article{koller2015continuous, |
|
title={Continuous sign language recognition: Towards large vocabulary statistical recognition systems handling multiple signers}, |
|
author={Koller, Oscar and Forster, Jens and Ney, Hermann}, |
|
journal={Computer Vision and Image Understanding}, |
|
volume={141}, |
|
pages={108--125}, |
|
year={2015}, |
|
publisher={Elsevier} |
|
} |
|
|
|
@inproceedings{koller2017re, |
|
title={Re-sign: Re-aligned end-to-end sequence modelling with deep recurrent CNN-HMMs}, |
|
author={Koller, Oscar and Zargaran, Sepehr and Ney, Hermann}, |
|
booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, |
|
pages={4297--4305}, |
|
year={2017} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
This archive contains two sets of the RWTH-Weather-Phoenix 2014 corpus |
|
|
|
a) the multisigner set |
|
b) the signer independent set. |
|
|
|
The signing is recorded by a stationary color camera placed in front of the sign language interpreters. Interpreters wear dark clothes in front of an artificial grey background with color transition. All recorded videos are at 25 frames per second and the size of the frames is 210 by 260 pixels. Each frame shows the interpreter box only. |
|
It is released under non-commercial cc 4.0 license with attribution. |
|
""" |
|
|
|
_HOMEPAGE = "https://www-i6.informatik.rwth-aachen.de/~koller/RWTH-PHOENIX/" |
|
|
|
_LICENSE = "CC BY-NC 4.0" |
|
|
|
|
|
def image_to_numpy(file):
    """Decode the image stored in ``file`` into a numpy pixel array.

    :param file: path or binary file-like object readable by PIL.
    :return: numpy array holding the decoded pixel data.
    """
    return np.array(Image.open(file))
|
|
|
|
|
class RWTHPhoenixWeather2014Config(datasets.BuilderConfig):
    """BuilderConfig for one variant of RWTH-PHOENIX-Weather 2014."""

    def __init__(self, main_data_folder, corpus_file_suffix, **kwargs):
        """Create a config describing where one corpus variant lives.

        Args:
            main_data_folder: name of the RWTHPhoenix variant folder.
            corpus_file_suffix: suffix of the per-split corpus CSV filename
                (e.g. ".corpus.csv" or ".SI5.corpus.csv").
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.main_data_folder = main_data_folder
        self.corpus_file_suffix = corpus_file_suffix
|
|
|
|
|
class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
    """RWTH-PHOENIX-Weather 2014: Continuous Sign Language Recognition Dataset."""

    VERSION = datasets.Version("1.0.0")
    # Each example can carry a whole video's worth of frames, so keep the
    # Arrow writer batches small — presumably to bound memory; TODO confirm.
    DEFAULT_WRITER_BATCH_SIZE = 25

    # Three configs: the multisigner set, the signer-independent (SI5) set,
    # and "pre-training", which reuses the multisigner annotations but emits
    # no video frames (see _info and _generate_examples).
    BUILDER_CONFIGS = [
        RWTHPhoenixWeather2014Config(
            name="multisigner",
            description="",
            main_data_folder="phoenix-2014-multisigner",
            corpus_file_suffix=".corpus.csv"
        ),
        RWTHPhoenixWeather2014Config(
            name="signerindependent",
            description="",
            main_data_folder="phoenix-2014-signerindependent-SI5",
            corpus_file_suffix=".SI5.corpus.csv"
        ),
        RWTHPhoenixWeather2014Config(
            name="pre-training",
            description="",
            main_data_folder="phoenix-2014-multisigner",
            corpus_file_suffix=".corpus.csv"
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo; only non-"pre-training" configs declare frames."""
        features_dict = {
            "id": datasets.Value("string"),
            "transcription": datasets.Value("string"),
        }

        if self.config.name != "pre-training":
            # NOTE(review): the declared per-frame shape is (3, 224, 224)
            # (CHW), but the description says source frames are 210x260 px
            # and image_to_numpy returns PIL's HWC layout with no visible
            # resize/transpose in this file — confirm the cast actually
            # succeeds or that preprocessing happens elsewhere.
            features_dict["frames"] = datasets.Sequence(feature=datasets.Array3D(shape=(3, 224, 224), dtype="uint8"))

        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(features_dict),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the corpus CSV and one frame tar archive per example.

        For every split this builds a list of ArchiveIterables (one tar of
        frames per example) plus a mapping from each ArchiveIterable to its
        id/annotation record, and passes both to _generate_examples.
        """
        # split -> list of ArchiveIterable, one per example.
        frames = {}
        # split -> {ArchiveIterable: {"id": ..., "annotation": ...}}.
        other_data = {}

        # Map datasets split names onto the on-disk directory names.
        dataDirMapper = {
            datasets.Split.TRAIN: "train",
            datasets.Split.VALIDATION: "dev",
            datasets.Split.TEST: "test",
        }

        for split in [
            datasets.Split.TRAIN,
            datasets.Split.VALIDATION,
            datasets.Split.TEST,
        ]:
            base_url = f"data/{self.config.main_data_folder}"

            data_csv = dl_manager.download(
                f"{base_url}/annotations/manual/{dataDirMapper[split]}{self.config.corpus_file_suffix}")

            # Corpus file is '|'-separated; the 'id' and 'annotation'
            # columns are the only ones read below.
            df = pd.read_csv(data_csv, sep='|')

            example_ids = df['id']
            annotations = df['annotation']

            # One tar archive of frame images per example id.
            # NOTE(review): archives are downloaded even for the
            # "pre-training" config, whose examples never read frames —
            # presumably intentional, but worth confirming.
            frame_archive_urls = dl_manager.download([
                f"{base_url}/features/fullFrame-210x260px/{dataDirMapper[split]}/{id}.tar"
                for id in example_ids
            ])

            frames[split] = [
                dl_manager.iter_archive(url)
                for url in frame_archive_urls
            ]

            other_data_split = {}

            # Key the metadata by the ArchiveIterable object itself (object
            # identity), so _generate_examples can find the record that
            # belongs to each archive it iterates.
            for frame, idx, annotation, in zip(frames[split], example_ids, annotations):
                other_data_split[frame] = {
                    "id": idx,
                    "annotation": annotation,
                }

            other_data[split] = other_data_split

        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "frame_archives": frames[split],
                    "other_data": other_data[split],
                },
            )
            for split in [
                datasets.Split.TRAIN,
                datasets.Split.VALIDATION,
                datasets.Split.TEST,
            ]
        ]

    def _generate_examples(self, frame_archives: List[ArchiveIterable], other_data: Dict[ArchiveIterable, dict]):
        """
        _generate_examples generates examples for the HuggingFace dataset.
        It takes a list of frame_archives and the corresponding dict of other data.
        Each frame_archive acts as a key for the further data.

        :param frame_archives: list of ArchiveIterables
        :param other_data: Dict from ArchiveIterables to other data
        """
        for key, frames in enumerate(frame_archives):
            # Look up the id/annotation record keyed by this archive object
            # (identity-based lookup set up in _split_generators).
            ex = other_data[frames]

            result = {
                "id": ex['id'],
                "transcription": ex['annotation'],
            }

            # "pre-training" is annotation-only: skip decoding frames.
            if self.config.name != 'pre-training':
                # iter_archive yields (path, file-like) pairs; decode each
                # frame into a numpy array in archive order.
                result["frames"] = [
                    image_to_numpy(im) for p, im in frames
                ]

            yield key, result
|
|