Datasets:

Modalities:
Image
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
531be5e
1 Parent(s): 8bbdd6c

Convert dataset to Parquet (#7)

Browse files

- Convert dataset to Parquet (fac2b38db3152cc29d4efc37fa60b7bb9ea836ad)
- Delete loading script (6c37056d74a722e673a966d16ca2c70e0a313a80)

README.md CHANGED
@@ -20,6 +20,7 @@ task_ids:
20
  paperswithcode_id: fashion-mnist
21
  pretty_name: FashionMNIST
22
  dataset_info:
 
23
  features:
24
  - name: image
25
  dtype: image
@@ -37,16 +38,23 @@ dataset_info:
37
  '7': Sneaker
38
  '8': Bag
39
  '9': Ankle boot
40
- config_name: fashion_mnist
41
  splits:
42
  - name: train
43
- num_bytes: 31296655
44
  num_examples: 60000
45
  - name: test
46
- num_bytes: 5233818
47
  num_examples: 10000
48
- download_size: 30878645
49
- dataset_size: 36530473
 
 
 
 
 
 
 
 
50
  ---
51
 
52
  # Dataset Card for FashionMNIST
 
20
  paperswithcode_id: fashion-mnist
21
  pretty_name: FashionMNIST
22
  dataset_info:
23
+ config_name: fashion_mnist
24
  features:
25
  - name: image
26
  dtype: image
 
38
  '7': Sneaker
39
  '8': Bag
40
  '9': Ankle boot
 
41
  splits:
42
  - name: train
43
+ num_bytes: 31049107.0
44
  num_examples: 60000
45
  - name: test
46
+ num_bytes: 5192560.0
47
  num_examples: 10000
48
+ download_size: 36106894
49
+ dataset_size: 36241667.0
50
+ configs:
51
+ - config_name: fashion_mnist
52
+ data_files:
53
+ - split: train
54
+ path: fashion_mnist/train-*
55
+ - split: test
56
+ path: fashion_mnist/test-*
57
+ default: true
58
  ---
59
 
60
  # Dataset Card for FashionMNIST
fashion_mnist.py DELETED
@@ -1,144 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """FashionMNIST Data Set"""
18
-
19
-
20
- import struct
21
-
22
- import numpy as np
23
-
24
- import datasets
25
- from datasets.tasks import ImageClassification
26
-
27
-
28
- _CITATION = """\
29
- @article{DBLP:journals/corr/abs-1708-07747,
30
- author = {Han Xiao and
31
- Kashif Rasul and
32
- Roland Vollgraf},
33
- title = {Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning
34
- Algorithms},
35
- journal = {CoRR},
36
- volume = {abs/1708.07747},
37
- year = {2017},
38
- url = {http://arxiv.org/abs/1708.07747},
39
- archivePrefix = {arXiv},
40
- eprint = {1708.07747},
41
- timestamp = {Mon, 13 Aug 2018 16:47:27 +0200},
42
- biburl = {https://dblp.org/rec/bib/journals/corr/abs-1708-07747},
43
- bibsource = {dblp computer science bibliography, https://dblp.org}
44
- }
45
- """
46
-
47
- _DESCRIPTION = """\
48
- Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of
49
- 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image,
50
- associated with a label from 10 classes. We intend Fashion-MNIST to serve as a direct drop-in
51
- replacement for the original MNIST dataset for benchmarking machine learning algorithms.
52
- It shares the same image size and structure of training and testing splits.
53
- """
54
-
55
- _HOMEPAGE = "https://github.com/zalandoresearch/fashion-mnist"
56
- _LICENSE = "https://raw.githubusercontent.com/zalandoresearch/fashion-mnist/master/LICENSE"
57
-
58
- _URL = "https://github.com/zalandoresearch/fashion-mnist/raw/master/data/fashion/"
59
- _URLS = {
60
- "train_images": "train-images-idx3-ubyte.gz",
61
- "train_labels": "train-labels-idx1-ubyte.gz",
62
- "test_images": "t10k-images-idx3-ubyte.gz",
63
- "test_labels": "t10k-labels-idx1-ubyte.gz",
64
- }
65
-
66
- _NAMES = [
67
- "T - shirt / top",
68
- "Trouser",
69
- "Pullover",
70
- "Dress",
71
- "Coat",
72
- "Sandal",
73
- "Shirt",
74
- "Sneaker",
75
- "Bag",
76
- "Ankle boot",
77
- ]
78
-
79
-
80
- class FashionMnist(datasets.GeneratorBasedBuilder):
81
- """FashionMNIST Data Set"""
82
-
83
- BUILDER_CONFIGS = [
84
- datasets.BuilderConfig(
85
- name="fashion_mnist",
86
- version=datasets.Version("1.0.0"),
87
- description=_DESCRIPTION,
88
- )
89
- ]
90
-
91
- def _info(self):
92
- return datasets.DatasetInfo(
93
- description=_DESCRIPTION,
94
- features=datasets.Features(
95
- {
96
- "image": datasets.Image(),
97
- "label": datasets.features.ClassLabel(names=_NAMES),
98
- }
99
- ),
100
- supervised_keys=("image", "label"),
101
- homepage=_HOMEPAGE,
102
- citation=_CITATION,
103
- task_templates=[ImageClassification(image_column="image", label_column="label")],
104
- )
105
-
106
- def _split_generators(self, dl_manager):
107
- urls_to_download = {key: _URL + fname for key, fname in _URLS.items()}
108
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
109
-
110
- return [
111
- datasets.SplitGenerator(
112
- name=datasets.Split.TRAIN,
113
- gen_kwargs={
114
- "filepath": (downloaded_files["train_images"], downloaded_files["train_labels"]),
115
- "split": "train",
116
- },
117
- ),
118
- datasets.SplitGenerator(
119
- name=datasets.Split.TEST,
120
- gen_kwargs={
121
- "filepath": (downloaded_files["test_images"], downloaded_files["test_labels"]),
122
- "split": "test",
123
- },
124
- ),
125
- ]
126
-
127
- def _generate_examples(self, filepath, split):
128
- """This function returns the examples in the raw form."""
129
- # Images
130
- with open(filepath[0], "rb") as f:
131
- # First 16 bytes contain some metadata
132
- _ = f.read(4)
133
- size = struct.unpack(">I", f.read(4))[0]
134
- _ = f.read(8)
135
- images = np.frombuffer(f.read(), dtype=np.uint8).reshape(size, 28, 28)
136
-
137
- # Labels
138
- with open(filepath[1], "rb") as f:
139
- # First 8 bytes contain some metadata
140
- _ = f.read(8)
141
- labels = np.frombuffer(f.read(), dtype=np.uint8)
142
-
143
- for idx in range(size):
144
- yield idx, {"image": images[idx], "label": int(labels[idx])}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fashion_mnist/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb36d8b5b74a491bce99357dcee62a32d347a356ea2a35d1c9057e7a7435ca5a
3
+ size 5175617
fashion_mnist/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5769a70af055b15ada8bd3dad9057557554fdd30e17cba45e7ef0f8bf79c5bb
3
+ size 30931277