Datasets:

Languages:
English
ArXiv:
License:
ShuhuaiRen committed on
Commit
9ca3dce
1 Parent(s): f81b6ee

Upload 5 files

Browse files
data/temporal_video_grounding/charades/charades_annotation/charades_sta_test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/temporal_video_grounding/charades/charades_annotation/charades_sta_train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/temporal_video_grounding/charades/charades_annotation/get_coco_format.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import argparse
3
+ import os
4
+ from copy import deepcopy
5
+ import pdb
6
+ import numpy as np
7
+ import random
8
+ from pathlib import Path
9
+
10
+
11
# read json files
def read_json(path):
    """Load a COCO-format JSON file and return its "annotations" list.

    The file is expected to be a JSON object with an "annotations" key;
    a KeyError is raised if that key is absent.
    """
    with open(path, "r") as handle:
        payload = json.load(handle)
    return payload["annotations"]
17
+
18
+
19
def read_jsonl(path):
    """Read a JSON-Lines file and return a list of the parsed records.

    Each line must be a complete JSON document; surrounding whitespace
    is stripped before parsing.
    """
    with open(path, "r") as handle:
        return [json.loads(row.strip()) for row in handle]
26
+
27
+
28
+
29
def write_json(data, path):
    """Serialize *data* as JSON and write it to *path* (overwriting)."""
    with open(path, "w") as handle:
        json.dump(data, handle)
33
+
34
+
35
def read_txt(path):
    """Parse a Charades-STA annotation file into COCO-style caption records.

    Each input line has the form
        <video_id> <start> <end>##<caption>
    e.g. "AO8RW 0.0 6.9##a person is putting a book on a shelf."

    Returns a list of dicts with keys "image_id" (video filename with a
    ".mp4" suffix appended), "caption", "timestamp" ([start, end] as
    floats), and "id" (the original line index — ids may be
    non-contiguous because lines with captions shorter than 2 chars are
    skipped).
    """
    data = []
    with open(path, "r") as fin:
        lines = fin.readlines()
    for i, line in enumerate(lines):
        line = line.strip("\n")
        # Split only at the FIRST "##": split("##")[-1] would truncate a
        # caption that itself contains "##" to its last segment.
        header, _, cap = line.partition("##")
        if len(cap) < 2:
            continue
        # Whitespace split() tolerates runs of spaces; split(" ") yields
        # empty tokens (and a float("") crash) on double spaces.
        terms = header.split()
        vid = terms[0] + ".mp4"
        start_time = float(terms[1])
        end_time = float(terms[2])
        data.append({"image_id": vid, "caption": cap,
                     "timestamp": [start_time, end_time], "id": i})
    return data
51
+
52
+
53
def filter_sent(sent):
    """Clean a caption: trim surrounding spaces and drop '#' characters.

    Returns the cleaned string, or False when the trimmed text is
    shorter than two characters.
    """
    cleaned = sent.strip(" ")
    if len(cleaned) < 2:
        return False
    return cleaned.replace("#", "")
59
+
60
+
61
# Script entry point: convert raw Charades-STA annotation .txt files into
# COCO-style caption JSON files (<split>.caption_coco_format.json) under
# --outpath. Only the "charades" dataset is currently supported.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='charades')  # anet
    parser.add_argument('--anno_path', default='/home/yaolinli/dataset/Charades/charades_annotation/')
    parser.add_argument('--video_path', default='/home/yaolinli/dataset/Charades/videos')  # ActivityNet_asr_denseCap/anet_6fps_224
    parser.add_argument('--outpath', default='./')
    args = parser.parse_args()
    '''output data example:
    {
    "annotations": [
        {
        "image_id": "3MSZA.mp4",
        "caption": "person turn a light on.",
        "timestamp": [24.3, 30.4],
        }],
    }
    '''

    for split in ["train", "test"]:  # "val", "test"
        if args.dataset == "charades":
            # Annotation files are named charades_sta_<split>.txt and live
            # directly under --anno_path.
            filename = f"charades_sta_{split}.txt"
            annos = read_txt(os.path.join(args.anno_path, filename))
            data = {}
            data["annotations"] = annos

        else:
            # NOTE(review): exits with status 0 even though this is an
            # error path — callers cannot detect the failure; confirm
            # whether a non-zero exit code is intended.
            print("Do not support this dataset!")
            exit(0)

        print(f"==> {args.dataset} dataset \t# examples num: {len(annos)}")
        out_name = "{}.caption_coco_format.json".format(split)
        # Create the output directory on demand so the script works on a
        # fresh checkout.
        Path(args.outpath).mkdir(parents=True, exist_ok=True)
        write_json(data, os.path.join(args.outpath, out_name))
data/temporal_video_grounding/charades/charades_annotation/test.caption_coco_format.json ADDED
The diff for this file is too large to render. See raw diff
 
data/temporal_video_grounding/charades/charades_annotation/train.caption_coco_format.json ADDED
The diff for this file is too large to render. See raw diff