"""VATEX is a large-Scale (826K captions for 41.3K video clips), multilingual (English and Chinese) dataset for video-and-language research. |
|
The dataset covers 600 fine-grained human activities.""" |
|
|
|
import os |
|
import json |
|
import datasets |
|
|
|
|
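# Illustrative usage only (not part of the loading logic). Assuming this script is
# saved locally as `vatex.py`, it can be loaded through the `datasets` library:
#
#     from datasets import load_dataset
#
#     vatex = load_dataset("./vatex.py", "v1.1", split="train")
#     print(vatex[0]["videoID"], vatex[0]["enCap"][0])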
|
_CITATION = """ |
|
@InProceedings{Wang_2019_ICCV, |
|
author = {Wang, Xin and Wu, Jiawei and Chen, Junkun and Li, Lei and Wang, Yuan-Fang and Wang, William Yang}, |
|
title = {VaTeX: A Large-Scale, High-Quality Multilingual Dataset for Video-and-Language Research}, |
|
booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, |
|
month = {October}, |
|
year = {2019} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
VATEX is a large-scale multilingual video description dataset, which contains over 41,250 videos and 825,000 captions |
|
in both English and Chinese. VATEX is characterized by the following major unique properties. |
|
First, it contains both English and Chinese descriptions at scale, which can support many multilingual studies |
|
that are constrained by monolingual datasets. Secondly, VATEX has a high number of clip-sentence pairs |
|
with each video clip annotated with multiple unique sentences, and every caption is unique in |
|
the whole corpus. Third, VATEX contains more comprehensive yet representative video content, |
|
covering 600 human activities in total. Furthermore, both the English and Chinese corpora in |
|
VATEX are lexically richer and thus allow more natural and diverse caption generation. |
|
""" |
|
|
|
_HOMEPAGE = "https://eric-xw.github.io/vatex-website/index.html" |
|
|
|
_LICENSE = "CC BY 4.0" |
|
|
|
_URL_BASE = "https://eric-xw.github.io/vatex-website/data/" |
|
|
|
_VARIANTS = [ |
|
"v1.1", |
|
"v1.0", |
|
] |
|
|
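# Each annotation file downloaded in `_split_generators` is a JSON list of records.
# Rough shape for orientation only (field contents abridged; the
# *_without_annotations.json test files carry no caption lists):
#
#     {
#         "videoID": "<youtube_id>_<start>_<end>",
#         "enCap": ["<English caption>", ...],
#         "chCap": ["<Chinese caption>", ...]
#     }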
|
class Vatex(datasets.GeneratorBasedBuilder):
    """Builder for the VATEX multilingual video description dataset."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name=name) for name in _VARIANTS]
    DEFAULT_CONFIG_NAME = "v1.1"
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "videoID": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "start": datasets.Value("int32"),
                    "end": datasets.Value("int32"),
                    "enCap": datasets.features.Sequence(datasets.Value("string")),
                    "chCap": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
|
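    # Caption availability per split, as encoded in the URL table and in
    # `_generate_examples` below:
    #   - train / validation: English and Chinese captions (both configs)
    #   - public_test: English captions only in v1.1; none in v1.0
    #   - private_test (v1.1 only): released without annotations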
    def _split_generators(self, dl_manager):
        urls = {
            "v1.1": {
                "train": os.path.join(_URL_BASE, "vatex_training_v1.0.json"),
                "validation": os.path.join(_URL_BASE, "vatex_validation_v1.0.json"),
                "public_test": os.path.join(_URL_BASE, "vatex_public_test_english_v1.1.json"),
                "private_test": os.path.join(_URL_BASE, "vatex_private_test_without_annotations.json"),
            },
            "v1.0": {
                "train": os.path.join(_URL_BASE, "vatex_training_v1.0.json"),
                "validation": os.path.join(_URL_BASE, "vatex_validation_v1.0.json"),
                "public_test": os.path.join(_URL_BASE, "vatex_public_test_without_annotations.json"),
            },
        }

        files_path = dl_manager.download_and_extract(urls)

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": files_path[self.config.name]["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": files_path[self.config.name]["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("public_test"),
                gen_kwargs={
                    "filepath": files_path[self.config.name]["public_test"],
                    "split": "public_test",
                },
            ),
        ]

        if self.config.name == "v1.1":
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split("private_test"),
                    gen_kwargs={
                        "filepath": files_path[self.config.name]["private_test"],
                        "split": "private_test",
                    },
                )
            )
        return splits
|
    def _generate_examples(self, filepath, split):
        """Yields examples from one annotation file."""
        with open(filepath, encoding="utf-8") as json_file:
            annotations = json.load(json_file)
            for idx, instance in enumerate(annotations):
                # The raw videoID has the form "<youtube_id>_<start>_<end>"; the YouTube id
                # itself may contain underscores, so keep only the last two fields as times.
                videoID = instance["videoID"]
                splitted = videoID.split("_")
                start, end = int(splitted[-2]), int(splitted[-1])
                videoID = "_".join(splitted[:-2])

                if split in ["train", "validation"]:
                    enCap = instance["enCap"]
                    chCap = instance["chCap"]
                elif split == "public_test" and self.config.name == "v1.1":
                    # The v1.1 public test file ships English captions only.
                    enCap = instance["enCap"]
                    chCap = []
                else:
                    # Test files released without annotations carry no captions.
                    enCap, chCap = [], []

                yield idx, {
                    "videoID": videoID,
                    "path": f"https://www.youtube.com/watch?v={videoID}",
                    "start": start,
                    "end": end,
                    "enCap": enCap,
                    "chCap": chCap,
                }