albertvillanova and lhoestq committed

Commit 0bb028f
1 parent: ce994f5

Upload dataset + remove script (#3)

- Upload dataset (29f23282b10ae67524312674a28f65892329aa0c)
- Upload dataset (ec82bd00b5fe48276aca95c0e23a08ce2301770a)
- Delete conceptual_captions.py (5ecc0c80db882861e8e161c0a9504500826a2a0d)
- nit (76e94d5db028aa350f09083be754f95609a08958)


Co-authored-by: Quentin Lhoest <[email protected]>

README.md CHANGED
@@ -37,21 +37,6 @@ dataset_info:
     num_examples: 15840
   download_size: 0
   dataset_size: 626076394
-- config_name: unlabeled
-  features:
-  - name: image_url
-    dtype: string
-  - name: caption
-    dtype: string
-  splits:
-  - name: train
-    num_bytes: 584520156
-    num_examples: 3318333
-  - name: validation
-    num_bytes: 2698726
-    num_examples: 15840
-  download_size: 567211172
-  dataset_size: 587218882
 - config_name: labeled
   features:
   - name: image_url
@@ -66,10 +51,37 @@ dataset_info:
     sequence: float64
   splits:
   - name: train
-    num_bytes: 1199330856
+    num_bytes: 1199325228
     num_examples: 2007090
-  download_size: 1282463277
-  dataset_size: 1199330856
+  download_size: 532762865
+  dataset_size: 1199325228
+- config_name: unlabeled
+  features:
+  - name: image_url
+    dtype: string
+  - name: caption
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 584517500
+    num_examples: 3318333
+  - name: validation
+    num_bytes: 2698710
+    num_examples: 15840
+  download_size: 375258708
+  dataset_size: 587216210
+configs:
+- config_name: labeled
+  data_files:
+  - split: train
+    path: labeled/train-*
+- config_name: unlabeled
+  data_files:
+  - split: train
+    path: unlabeled/train-*
+  - split: validation
+    path: unlabeled/validation-*
+  default: true
 ---
 
 # Dataset Card for Conceptual Captions
@@ -153,7 +165,7 @@ def fetch_images(batch, num_threads, timeout=None, retries=0):
 
 
 num_threads = 20
-dset = load_dataset("conceptual_captions")
+dset = load_dataset("google-research-datasets/conceptual_captions")
 dset = dset.map(fetch_images, batched=True, batch_size=100, fn_kwargs={"num_threads": num_threads})
 ```
 
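The hunk above updates the `load_dataset` call in the dataset card's image-fetching example, and its header shows the signature of a `fetch_images` helper defined earlier in the card. For context, here is a minimal sketch of a helper matching that signature; the threaded `urllib` + `PIL` body and the `USER_AGENT` constant are assumptions for illustration, not necessarily the card's exact code:

```python
import io
import urllib.request
from concurrent.futures import ThreadPoolExecutor
from functools import partial

import PIL.Image

USER_AGENT = "datasets-image-fetcher"  # hypothetical placeholder; set your own


def fetch_single_image(image_url, timeout=None, retries=0):
    # Try the download up to `retries + 1` times; return None on failure.
    image = None
    for _ in range(retries + 1):
        try:
            request = urllib.request.Request(image_url, headers={"user-agent": USER_AGENT})
            with urllib.request.urlopen(request, timeout=timeout) as req:
                image = PIL.Image.open(io.BytesIO(req.read()))
            break
        except Exception:
            pass
    return image


def fetch_images(batch, num_threads, timeout=None, retries=0):
    # Download every URL in the batch concurrently and attach the results.
    fetch_one = partial(fetch_single_image, timeout=timeout, retries=retries)
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        batch["image"] = list(executor.map(fetch_one, batch["image_url"]))
    return batch
```

With `batched=True` and `batch_size=100`, each `map` call receives 100 rows, so the pool downloads up to `num_threads` images of a batch at a time.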
conceptual_captions.py DELETED
@@ -1,159 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """Conceptual Captions dataset."""
-
- import csv
- import textwrap
-
- import datasets
-
-
- _DESCRIPTION = """\
- Google's Conceptual Captions dataset has more than 3 million images, paired with natural-language captions.
- In contrast with the curated style of the MS-COCO images, Conceptual Captions images and their raw descriptions are harvested from the web,
- and therefore represent a wider variety of styles. The raw descriptions are harvested from the Alt-text HTML attribute associated with web images.
- The authors developed an automatic pipeline that extracts, filters, and transforms candidate image/caption pairs, with the goal of achieving a balance of cleanliness,
- informativeness, fluency, and learnability of the resulting captions.
- """
-
- _HOMEPAGE = "http://data.statmt.org/cc-100/"
-
- _LICENSE = """\
- The dataset may be freely used for any purpose, although acknowledgement of
- Google LLC ("Google") as the data source would be appreciated. The dataset is
- provided "AS IS" without any warranty, express or implied. Google disclaims all
- liability for any damages, direct or indirect, resulting from the use of the
- dataset.
- """
-
- _CITATION = """\
- @inproceedings{sharma2018conceptual,
-   title = {Conceptual Captions: A Cleaned, Hypernymed, Image Alt-text Dataset For Automatic Image Captioning},
-   author = {Sharma, Piyush and Ding, Nan and Goodman, Sebastian and Soricut, Radu},
-   booktitle = {Proceedings of ACL},
-   year = {2018},
- }
- """
-
- _URLS = {
-     "unlabeled": {
-         "train": "https://storage.googleapis.com/gcc-data/Train/GCC-training.tsv?_ga=2.191230122.-1896153081.1529438250",
-         "validation": "https://storage.googleapis.com/gcc-data/Validation/GCC-1.1.0-Validation.tsv?_ga=2.141047602.-1896153081.1529438250",
-     },
-     "labeled": {
-         "train": "https://storage.googleapis.com/conceptual-captions-v1-1-labels/Image_Labels_Subset_Train_GCC-Labels-training.tsv?_ga=2.234395421.-20118413.1607637118",
-     },
- }
-
- _DESCRIPTIONS = {
-     "unlabeled": textwrap.dedent(
-         """\
-         The basic version of the dataset split into Training, Validation, and Test splits.
-         The Training split consists of 3,318,333 image-URL/caption pairs, with a total number of 51,201 total token types in the captions (i.e., total vocabulary).
-         The average number of tokens per caption is 10.3 (standard deviation of 4.5), while the median is 9.0 tokens per caption.
-         The Validation split consists of 15,840 image-URL/caption pairs, with similar statistics.
-         """
-     ),
-     "labeled": textwrap.dedent(
-         """\
-         A subset of 2,007,090 image-URL/caption pairs from the training set with machine-generated image labels.
-         The image labels are obtained using the Google Cloud Vision API.
-         Each image label has a machine-generated identifier (MID) corresponding to the label's Google Knowledge Graph entry and a confidence score for its presence in the image.
-
-         Note: 2,007,528 is the number of image-URL/caption pairs specified by the authors, but some rows are missing labels, so they are not included.
-         """
-     ),
- }
-
-
- class ConceptualCaptions(datasets.GeneratorBasedBuilder):
-     """Builder for Conceptual Captions dataset."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig("unlabeled", version=VERSION, description=_DESCRIPTIONS["unlabeled"]),
-         datasets.BuilderConfig("labeled", version=VERSION, description=_DESCRIPTIONS["labeled"]),
-     ]
-
-     DEFAULT_CONFIG_NAME = "unlabeled"
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "image_url": datasets.Value("string"),
-                 "caption": datasets.Value("string"),
-             },
-         )
-         if self.config.name == "labeled":
-             features.update(
-                 {
-                     "labels": datasets.Sequence(datasets.Value("string")),
-                     "MIDs": datasets.Sequence(datasets.Value("string")),
-                     "confidence_scores": datasets.Sequence(datasets.Value("float64")),
-                 }
-             )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_data = dl_manager.download(_URLS[self.config.name])
-         splits = [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"annotations_file": downloaded_data["train"]},
-             ),
-         ]
-         if self.config.name == "unlabeled":
-             splits += [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     gen_kwargs={"annotations_file": downloaded_data["validation"]},
-                 ),
-             ]
-         return splits
-
-     def _generate_examples(self, annotations_file):
-         if self.config.name == "unlabeled":
-             with open(annotations_file, encoding="utf-8") as f:
-                 for i, row in enumerate(csv.reader(f, delimiter="\t")):
-                     # Sanity check
-                     assert len(row) == 2
-                     caption, image_url = row
-                     yield i, {
-                         "image_url": image_url,
-                         "caption": caption,
-                     }
-         else:
-             with open(annotations_file, encoding="utf-8") as f:
-                 for i, row in enumerate(csv.reader(f, delimiter="\t")):
-                     caption, image_url, labels, MIDs, confidence_scores = row
-                     if not labels:
-                         continue
-                     yield i, {
-                         "image_url": image_url,
-                         "caption": caption,
-                         "labels": labels.split(","),
-                         "MIDs": MIDs.split(","),
-                         "confidence_scores": [float(x) for x in confidence_scores.split(",")],
-                     }
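With the script deleted and the parquet shards uploaded, `load_dataset` now reads the data files directly instead of executing `conceptual_captions.py`. A minimal sanity check, using the repo id and config names from the README diff above (the expected row counts are the ones listed in the YAML):

```python
from datasets import load_dataset

# Config names come from the YAML diff above; "unlabeled" remains the default.
unlabeled = load_dataset("google-research-datasets/conceptual_captions", "unlabeled")
labeled = load_dataset("google-research-datasets/conceptual_captions", "labeled")

print(unlabeled)  # expected splits: train (3,318,333 rows), validation (15,840 rows)
print(labeled)    # expected split: train (2,007,090 rows)
```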
labeled/train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f8c506418aee5fd95f02408def637cff308a56eff0f807460278c39f4997a6f
+ size 177587991
labeled/train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b547c4eab4c583a62d10c308ccf43cc551bfa17001040b4eca13008269f8776d
+ size 177634889
labeled/train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a90574bcb9c13add6a42dcf0bb7dc32c894e3dcf411f046e19a5db32759c93a2
+ size 177539985
unlabeled/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6c39ef1013b2b87292c4e36d99f1a2c8b3f9505eb5e415d4c12a6921ded818c
+ size 186743581
unlabeled/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:beb0e44e069b5e2c59ee1f3f3b50d7a19df10fd8bd1f88354c0167caa81a8ab2
+ size 186741058
unlabeled/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef68829402d122158866d3fb9a273df3b53b58c1466bbe8a49b3c3c88078fc05
+ size 1774069
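The blocks above are Git LFS pointer files for the parquet shards that now back the two configs. As an illustration, a single shard can also be read directly; this sketch assumes `pandas` with `huggingface_hub` installed so that `hf://` paths resolve through fsspec:

```python
import pandas as pd  # hf:// paths require huggingface_hub to be installed

# Read the smallest shard: the validation split of the unlabeled config.
df = pd.read_parquet(
    "hf://datasets/google-research-datasets/conceptual_captions/unlabeled/validation-00000-of-00001.parquet"
)
print(len(df))              # expected: 15840 rows, per the YAML diff above
print(df.columns.tolist())  # expected: ['image_url', 'caption']
```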