import json
import random

import datasets


_DESCRIPTION = """\
A video-centric instruction-tuning dataset involving timestamps for Video Large Language Models.
"""

_HOMEPAGE = "https://github.com/RenShuhuai-Andy/TimeChat"

_LICENSE = ""

_SEED = 1234

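# Per-config annotation files. The relative paths below are passed to
# `dl_manager.download_and_extract` in `_split_generators`; the "instruction"
# entries are left empty in this listing.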
_URLS = {
    "charades": {
        "train": "./data/temporal_video_grounding/charades/instruct_tvg_12.4k_charades.json",
        "instruction": "",
    },
    "didemo": {
        "train": "./data/temporal_video_grounding/didemo/instruct_tvg_33.0k_didemo.json",
        "instruction": "",
    },
    "queryd": {
        "train": "./data/temporal_video_grounding/queryd/instruct_tvg_14.6k_queryd.json",
        "instruction": "",
    },
    "hirest_grounding": {
        "train": "./data/temporal_video_grounding/hirest/instruct_tvg_0.5k_hirest.json",
        "instruction": "",
    },
    "qvhighlights": {
        "train": "./data/video_highlight_detection/qvhighlights/instruct_vhd_6.9k_qvhighlights.json",
        "instruction": "",
    },
    "youcook2": {
        "train": "./data/dense_video_captioning/youcook2/instruct_dvc_1.2k_youcook2.json",
        "instruction": "",
    },
    "anet": {
        "train": "./data/dense_video_captioning/anet/instruct_dvc_10.0k_anet.json",
        "instruction": "",
    },
    "vitt": {
        "train": "./data/dense_video_captioning/vitt/instruct_dvc_5.1k_vitt.json",
        "instruction": "",
    },
    "tvsum": {
        "train": "./data/video_summarization/tvsum/instruct_vhd_50_tvsum.json",
        "instruction": "",
    },
    "summe": {
        "train": "./data/video_summarization/summe/instruct_vhd_50_tvsum.json",
        "instruction": "",
    },
    "coin": {
        "train": "./data/step_localization/coin/instruct_action_9.0k_coin.json",
        "instruction": "",
    },
    "hirest_step": {
        "train": "./data/step_localization/hirest_step/instruct_action_0.5k_hirest.json",
        "instruction": "",
    },
    "yttemporal": {
        "train": "./data/transcribed_speech_generation/yttemporal/instruct_tsp_31.6k_yttemporal.json",
        "instruction": "",
    },
}

_CITATION = ""


class TimeITDataset(datasets.GeneratorBasedBuilder):
    """TimeIT: a video-centric, timestamp-aware instruction-tuning dataset for Video LLMs, covering
    temporal video grounding, video highlight detection, dense video captioning, video summarization,
    step localization, and transcribed speech generation."""

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="charades", version=VERSION, description="Charades-STA dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="didemo", version=VERSION, description="DiDeMo dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="queryd", version=VERSION, description="QuerYD dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="hirest_grounding", version=VERSION, description="HiREST_grounding dataset for Temporal Video Grounding"
        ),
        datasets.BuilderConfig(
            name="qvhighlights", version=VERSION, description="QVHighlights dataset for Video Highlight Detection"
        ),
        datasets.BuilderConfig(
            name="youcook2", version=VERSION, description="YouCook2 dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="anet", version=VERSION, description="ActivityNet Captions dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="vitt", version=VERSION, description="ViTT dataset for Dense Video Captioning"
        ),
        datasets.BuilderConfig(
            name="tvsum", version=VERSION, description="TVSum dataset for Video Summarization"
        ),
        datasets.BuilderConfig(
            name="summe", version=VERSION, description="SumMe dataset for Video Summarization"
        ),
        datasets.BuilderConfig(
            name="coin", version=VERSION, description="COIN dataset for Step Localization"
        ),
        datasets.BuilderConfig(
            name="hirest_step", version=VERSION, description="HiREST_step dataset for Step Localization"
        ),
        datasets.BuilderConfig(
            name="yttemporal", version=VERSION, description="YT-Temporal dataset for Transcribed Speech Generation"
        ),
    ]

    DEFAULT_CONFIG_NAME = "youcook2"

    def _info(self):
        features = datasets.Features(
            {
                "video": datasets.Value("string"),
                "QA": datasets.Sequence(
                    {
                        "q": datasets.Value("string"),
                        "a": datasets.Value("string"),
                    },
                ),
            }
        )
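        # Illustrative record shape implied by the schema above (field values here are
        # made up; real annotations come from the JSON files listed in _URLS):
        #   {"video": "some_clip.mp4", "QA": [{"q": "<timestamped question>", "a": "<timestamped answer>"}]}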
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        ret = []

        random.seed(_SEED)

        ret.append(
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                    "instruction_path": data_dir["instruction"],
                    "data_dir": data_dir,
                },
            )
        )
        return ret

    def _generate_examples(self, filepath, split, instruction_path, data_dir=None):
        # Instruction templates are not used when yielding examples below; the
        # "instruction" entries in _URLS are empty in this listing, so only load
        # them when a path is actually provided.
        instructions = json.load(open(instruction_path)) if instruction_path else None

        # Each annotation file is a JSON list of records matching the features schema.
        with open(filepath) as f:
            timeitdata = json.load(f)

        for i, d in enumerate(timeitdata):
            yield i, d
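

# --- Usage sketch (illustrative, not part of the loader) --------------------
# A minimal way to exercise this builder locally, assuming the annotation JSON
# files referenced in _URLS exist under ./data/ next to this script. Loading a
# dataset script may require trust_remote_code=True (and a `datasets` version
# that still supports script-based datasets).
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="youcook2", trust_remote_code=True)
    print(ds["train"][0])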