# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""VATEX is a large-Scale (826K captions for 41.3K video clips), multilingual (English and Chinese) dataset for video-and-language research.
The dataset covers 600 fine-grained human activities."""
import os
import json
import datasets
# BibTeX entry for the VATEX paper (Wang et al., ICCV 2019); surfaced in the dataset card.
_CITATION = """
@InProceedings{Wang_2019_ICCV,
author = {Wang, Xin and Wu, Jiawei and Chen, Junkun and Li, Lei and Wang, Yuan-Fang and Wang, William Yang},
title = {VaTeX: A Large-Scale, High-Quality Multilingual Dataset for Video-and-Language Research},
booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
month = {October},
year = {2019}
}
"""
# Long-form description shown on the dataset card.
_DESCRIPTION = """\
VATEX is a large-scale multilingual video description dataset, which contains over 41,250 videos and 825,000 captions
in both English and Chinese. VATEX is characterized by the following major unique properties.
First, it contains both English and Chinese descriptions at scale, which can support many multilingual studies
that are constrained by monolingual datasets. Secondly, VATEX has a high number of clip-sentence pairs
with each video clip annotated with multiple unique sentences, and every caption is unique in
the whole corpus. Third, VATEX contains more comprehensive yet representative video content,
covering 600 human activities in total. Furthermore, both the English and Chinese corpora in
VATEX are lexically richer and thus allow more natural and diverse caption generation.
"""
# Official project page for the dataset.
_HOMEPAGE = "https://eric-xw.github.io/vatex-website/index.html"
# License under which the annotations are distributed.
_LICENSE = "CC BY 4.0"
# Base URL for the per-split annotation JSON files; note the trailing slash.
_URL_BASE = "https://eric-xw.github.io/vatex-website/data/"
# Dataset versions exposed as builder configs; v1.1 adds a private test split
# and an English-annotated public test split.
_VARIANTS = [
    "v1.1",
    "v1.0",
]
class Vatex(datasets.GeneratorBasedBuilder):
    """Builder for VATEX: 826K English/Chinese captions over 41.3K video clips.

    Each example references a YouTube clip (via its video ID plus a start/end
    second range encoded in the annotation's ``videoID`` field) together with
    its English and Chinese caption lists, when available for the split.
    """

    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]
    DEFAULT_CONFIG_NAME = "v1.1"

    def _info(self):
        """Return dataset metadata and the feature schema of one example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "videoID": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "start": datasets.Value("int32"),
                    "end": datasets.Value("int32"),
                    "enCap": datasets.features.Sequence(datasets.Value("string")),
                    "chCap": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split annotation files and declare the splits.

        Both configs share the train/validation annotations; v1.1 additionally
        exposes an English-annotated public test split and a caption-free
        private test split.
        """
        # Build URLs by plain concatenation (the base already ends in "/").
        # os.path.join must NOT be used here: on Windows it would join with
        # backslashes and produce invalid URLs.
        urls = {
            "v1.1": {
                "train": _URL_BASE + "vatex_training_v1.0.json",
                "validation": _URL_BASE + "vatex_validation_v1.0.json",
                "public_test": _URL_BASE + "vatex_public_test_english_v1.1.json",
                "private_test": _URL_BASE + "vatex_private_test_without_annotations.json",
            },
            "v1.0": {
                "train": _URL_BASE + "vatex_training_v1.0.json",
                "validation": _URL_BASE + "vatex_validation_v1.0.json",
                "public_test": _URL_BASE + "vatex_public_test_without_annotations.json",
            },
        }
        # Download data for all splits once and for all since they are tiny JSON files.
        files_path = dl_manager.download_and_extract(urls)
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": files_path[self.config.name]["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": files_path[self.config.name]["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("public_test"),
                gen_kwargs={
                    "filepath": files_path[self.config.name]["public_test"],
                    "split": "public_test",
                },
            ),
        ]
        # Only v1.1 ships a private test split.
        if self.config.name == "v1.1":
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split("private_test"),
                    gen_kwargs={
                        "filepath": files_path[self.config.name]["private_test"],
                        "split": "private_test",
                    },
                )
            )
        return splits

    def _generate_examples(self, filepath, split):
        """Yield ``(index, example)`` pairs from one split's annotation JSON.

        The raw ``videoID`` encodes the clip as ``<youtube_id>_<start>_<end>``;
        it is split back into the bare YouTube ID and integer second offsets.
        Caption availability depends on the split: train/validation carry both
        languages, the v1.1 public test carries English only, and all other
        test splits carry no captions.
        """
        with open(filepath, encoding="utf-8") as json_file:
            annotations = json.load(json_file)
            for idx, instance in enumerate(annotations):
                videoID = instance["videoID"]
                # The last two "_"-separated fields are the clip's start/end
                # seconds; the YouTube ID itself may contain underscores.
                splitted = videoID.split("_")
                start, end = int(splitted[-2]), int(splitted[-1])
                videoID = "_".join(splitted[:-2])
                if split in ["train", "validation"]:
                    enCap = instance["enCap"]
                    chCap = instance["chCap"]
                elif split == "public_test" and self.config.name == "v1.1":
                    # v1.1 public test is annotated in English only.
                    enCap = instance["enCap"]
                    chCap = []
                else:
                    # Test splits shipped without annotations.
                    enCap, chCap = [], []
                yield idx, {
                    "videoID": videoID,
                    "path": f"https://www.youtube.com/watch?v={videoID}",
                    "start": start,
                    "end": end,
                    "enCap": enCap,
                    "chCap": chCap,
                }