Datasets:

ArXiv:
License:
holylovenia committed on
Commit
950e79e
1 Parent(s): ae325f9

Upload wit.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. wit.py +274 -0
wit.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
import csv
import sys
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks
25
+
26
+ _CITATION = """\
27
+ @inproceedings{10.1145/3404835.3463257,
28
+ author = {Srinivasan, Krishna and Raman, Karthik and Chen, Jiecao and Bendersky, Michael and Najork, Marc},
29
+ title = {WIT: Wikipedia-Based Image Text Dataset for Multimodal Multilingual Machine Learning},
30
+ year = {2021},
31
+ isbn = {9781450380379},
32
+ publisher = {Association for Computing Machinery},
33
+ address = {New York, NY, USA},
34
+ url = {https://doi.org/10.1145/3404835.3463257},
35
+ doi = {10.1145/3404835.3463257},
36
+ booktitle = {Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval},
37
+ pages = {2443–2449},
38
+ numpages = {7},
39
+ keywords = {dataset, multimodal, machine learning, wikipedia, multilingual, image-text retrieval, neural networks},
40
+ location = {Virtual Event, Canada},
41
+ series = {SIGIR '21}
42
+ }
43
+ """
44
+
45
+ _DATASETNAME = "wit"
46
+
47
+ _DESCRIPTION = """\
48
+ Wikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset.
49
+ WIT is composed of a curated set of 37.6 million entity rich image-text examples with
50
+ 11.5 million unique images across 108 Wikipedia languages. There are more than 12k
51
+ examples in each of 108 languages, with 53 languages having 100k image-text pairs.
52
+ Nine languages are spoken in the Southeast Asian region.
53
+ Since the dataset contains multiple references, following Section 3.2 of the dataset's
54
+ paper, the `seacrowd_imtext` subsets specify which reference is used for each data
55
+ instance's texts via context in metadata.
56
+ """
57
+
58
+ _HOMEPAGE = "https://github.com/google-research-datasets/wit"
59
+
60
+ _LANGUAGES = {"ceb": "ceb", "fil": "fil", "ind": "id", "jav": "jv", "zlm": "zlm", "mya": "my", "tha": "th", "vie": "vi", "war": "war"}
61
+
62
+ _LANGUAGE_CODES = list(_LANGUAGES.values())
63
+
64
+ _LICENSE = Licenses.CC_BY_SA_3_0.value
65
+
66
+ _LOCAL = False
67
+
68
+ _URLS = {
69
+ "train_0": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00000-of-00010.tsv.gz",
70
+ "train_1": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00001-of-00010.tsv.gz",
71
+ "train_2": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00002-of-00010.tsv.gz",
72
+ "train_3": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00003-of-00010.tsv.gz",
73
+ "train_4": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00004-of-00010.tsv.gz",
74
+ "train_5": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00005-of-00010.tsv.gz",
75
+ "train_6": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00006-of-00010.tsv.gz",
76
+ "train_7": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00007-of-00010.tsv.gz",
77
+ "train_8": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00008-of-00010.tsv.gz",
78
+ "train_9": "https://storage.googleapis.com/gresearch/wit/wit_v1.train.all-00009-of-00010.tsv.gz",
79
+ "test_0": "https://storage.googleapis.com/gresearch/wit/wit_v1.test.all-00000-of-00005.tsv.gz",
80
+ "test_1": "https://storage.googleapis.com/gresearch/wit/wit_v1.test.all-00001-of-00005.tsv.gz",
81
+ "test_2": "https://storage.googleapis.com/gresearch/wit/wit_v1.test.all-00002-of-00005.tsv.gz",
82
+ "test_3": "https://storage.googleapis.com/gresearch/wit/wit_v1.test.all-00003-of-00005.tsv.gz",
83
+ "test_4": "https://storage.googleapis.com/gresearch/wit/wit_v1.test.all-00004-of-00005.tsv.gz",
84
+ "val_0": "https://storage.googleapis.com/gresearch/wit/wit_v1.val.all-00000-of-00005.tsv.gz",
85
+ "val_1": "https://storage.googleapis.com/gresearch/wit/wit_v1.val.all-00001-of-00005.tsv.gz",
86
+ "val_2": "https://storage.googleapis.com/gresearch/wit/wit_v1.val.all-00002-of-00005.tsv.gz",
87
+ "val_3": "https://storage.googleapis.com/gresearch/wit/wit_v1.val.all-00003-of-00005.tsv.gz",
88
+ "val_4": "https://storage.googleapis.com/gresearch/wit/wit_v1.val.all-00004-of-00005.tsv.gz",
89
+ }
90
+
91
+ _SUPPORTED_TASKS = [Tasks.IMAGE_CAPTIONING]
92
+
93
+ _SOURCE_VERSION = "1.0.0"
94
+
95
+ _SEACROWD_VERSION = "2024.06.20"
96
+
97
+
98
+ class WITDataset(datasets.GeneratorBasedBuilder):
99
+ """
100
+ WIT is an image-text dataset from https://huggingface.co/datasets/google/wit.
101
+ """
102
+
103
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
104
+ SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
105
+
106
+ BUILDER_CONFIGS = (
107
+ [
108
+ SEACrowdConfig(
109
+ name=f"{_DATASETNAME}_source",
110
+ version=datasets.Version(_SOURCE_VERSION),
111
+ description=f"{_DATASETNAME} source schema for all 9 languages",
112
+ schema="source",
113
+ subset_id=f"{_DATASETNAME}",
114
+ )
115
+ ]
116
+ + [
117
+ SEACrowdConfig(
118
+ name=f"{_DATASETNAME}_seacrowd_imtext",
119
+ version=datasets.Version(_SEACROWD_VERSION),
120
+ description=f"{_DATASETNAME} SEACrowd schema for all 9 languages",
121
+ schema="seacrowd_imtext",
122
+ subset_id=f"{_DATASETNAME}",
123
+ )
124
+ ]
125
+ + [
126
+ SEACrowdConfig(
127
+ name=f"{_DATASETNAME}_{lang}_source",
128
+ version=datasets.Version(_SOURCE_VERSION),
129
+ description=f"{_DATASETNAME}_{lang} source schema",
130
+ schema="source",
131
+ subset_id=f"{_DATASETNAME}_{lang}",
132
+ )
133
+ for lang in _LANGUAGES
134
+ ]
135
+ + [
136
+ SEACrowdConfig(
137
+ name=f"{_DATASETNAME}_{lang}_seacrowd_imtext",
138
+ version=datasets.Version(_SEACROWD_VERSION),
139
+ description=f"{_DATASETNAME}_{lang} SEACrowd schema",
140
+ schema="seacrowd_imtext",
141
+ subset_id=f"{_DATASETNAME}_{lang}",
142
+ )
143
+ for lang in _LANGUAGES
144
+ ]
145
+ )
146
+
147
+ def _info(self) -> datasets.DatasetInfo:
148
+ if self.config.schema == "source":
149
+ features = datasets.Features(
150
+ {
151
+ "language": datasets.Value("string"),
152
+ "page_url": datasets.Value("string"),
153
+ "image_url": datasets.Value("string"),
154
+ "page_title": datasets.Value("string"),
155
+ "section_title": datasets.Value("string"),
156
+ "hierarchical_section_title": datasets.Value("string"),
157
+ "caption_reference_description": datasets.Value("string"),
158
+ "caption_attribution_description": datasets.Value("string"),
159
+ "caption_alt_text_description": datasets.Value("string"),
160
+ "mime_type": datasets.Value("string"),
161
+ "original_height": datasets.Value("int32"),
162
+ "original_width": datasets.Value("int32"),
163
+ "is_main_image": datasets.Value("bool"),
164
+ "attribution_passes_lang_id": datasets.Value("bool"),
165
+ "page_changed_recently": datasets.Value("bool"),
166
+ "context_page_description": datasets.Value("string"),
167
+ "context_section_description": datasets.Value("string"),
168
+ }
169
+ )
170
+ elif self.config.schema == "seacrowd_imtext":
171
+ features = schemas.image_text_features()
172
+ else:
173
+ raise ValueError(f"Invalid schema: '{self.config.schema}'")
174
+
175
+ return datasets.DatasetInfo(
176
+ description=_DESCRIPTION,
177
+ features=features,
178
+ homepage=_HOMEPAGE,
179
+ license=_LICENSE,
180
+ citation=_CITATION,
181
+ )
182
+
183
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
184
+ """
185
+ Returns SplitGenerators.
186
+ """
187
+
188
+ train_paths = dl_manager.download_and_extract([v for k, v in _URLS.items() if "train" in k])
189
+ test_paths = dl_manager.download_and_extract([v for k, v in _URLS.items() if "test" in k])
190
+ val_paths = dl_manager.download_and_extract([v for k, v in _URLS.items() if "val" in k])
191
+
192
+ return [
193
+ datasets.SplitGenerator(
194
+ name=datasets.Split.TRAIN,
195
+ gen_kwargs={
196
+ "filepaths": train_paths,
197
+ "split": "train",
198
+ },
199
+ ),
200
+ datasets.SplitGenerator(
201
+ name=datasets.Split.TEST,
202
+ gen_kwargs={
203
+ "filepaths": test_paths,
204
+ "split": "test",
205
+ },
206
+ ),
207
+ datasets.SplitGenerator(
208
+ name=datasets.Split.VALIDATION,
209
+ gen_kwargs={
210
+ "filepaths": val_paths,
211
+ "split": "validation",
212
+ },
213
+ ),
214
+ ]
215
+
216
+ def _generate_examples(self, filepaths: Path, split: str) -> Tuple[int, Dict]:
217
+ """
218
+ Yields examples as (key, example) tuples.
219
+ """
220
+ subset_id = self.config.subset_id.split("_")
221
+ if len(subset_id) > 1:
222
+ language_list = subset_id[1]
223
+ if language_list in _LANGUAGES:
224
+ language_list = [_LANGUAGES[language_list]]
225
+ else:
226
+ language_list = _LANGUAGE_CODES
227
+
228
+ idx = 0
229
+ for file in filepaths:
230
+ with open(
231
+ file,
232
+ "r",
233
+ encoding="utf-8",
234
+ newline="",
235
+ ) as f:
236
+ data = csv.DictReader(
237
+ f,
238
+ delimiter="\t",
239
+ quoting=csv.QUOTE_ALL,
240
+ )
241
+ if self.config.schema == "seacrowd_imtext":
242
+ for d in data:
243
+ if d["language"] in language_list:
244
+ text = None
245
+ context = None
246
+ if d["caption_reference_description"] != "":
247
+ text = d["caption_reference_description"]
248
+ context = "caption_reference_description"
249
+ elif d["caption_attribution_description"] != "":
250
+ text = d["caption_attribution_description"]
251
+ context = "caption_attribution_description"
252
+ else:
253
+ text = d["caption_alt_text_description"]
254
+ context = "caption_alt_text_description"
255
+ x = {
256
+ "id": idx,
257
+ "image_paths": [d["image_url"]],
258
+ "texts": text,
259
+ "metadata": {
260
+ "context": context,
261
+ "labels": None,
262
+ },
263
+ }
264
+ yield idx, x
265
+ idx += 1
266
+
267
+ elif self.config.schema == "source":
268
+ for d in data:
269
+ if d["language"] in language_list:
270
+ x = {k: v if v != "" and k in self.info.features else None for k, v in d.items()}
271
+ yield idx, x
272
+ idx += 1
273
+ else:
274
+ raise ValueError(f"Invalid schema: '{self.config.schema}'")