Convert dataset to Parquet #1
opened by tanganke

Files changed:
- .DS_Store +0 -0
- README.md +18 -10
- kmnist.py +0 -122
- raw/t10k-labels-idx1-ubyte.gz → kmnist/test-00000-of-00001.parquet +2 -2
- raw/train-images-idx3-ubyte.gz → kmnist/train-00000-of-00001.parquet +2 -2
- kmnist_classmap.csv +0 -11
- raw/t10k-images-idx3-ubyte.gz +0 -3
- raw/train-labels-idx1-ubyte.gz +0 -3
.DS_Store
DELETED
Binary file (6.15 kB)
README.md
CHANGED
@@ -1,4 +1,10 @@
 ---
+language:
+- ja
+size_categories:
+- 10K<n<100K
+task_categories:
+- image-classification
 dataset_info:
   config_name: kmnist
   features:
@@ -20,19 +26,21 @@ dataset_info:
           '9': を
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 26807717.0
     num_examples: 60000
   - name: test
-    num_bytes:
+    num_bytes: 4478963.0
     num_examples: 10000
-  download_size:
-  dataset_size:
-
-
-
-
-
-
+  download_size: 30674033
+  dataset_size: 31286680.0
+configs:
+- config_name: kmnist
+  data_files:
+  - split: train
+    path: kmnist/train-*
+  - split: test
+    path: kmnist/test-*
+  default: true
 ---
 
 # KMNIST Dataset
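With the Parquet shards and the new `configs` block in place, the dataset loads without the deleted loading script. A minimal sketch, assuming the repository id is `tanganke/kmnist` (inferred from the PR author; substitute the actual repo id):

```python
from datasets import load_dataset

# The repo id below is an assumption; replace it with the actual dataset repository.
ds = load_dataset("tanganke/kmnist")

print(ds)                     # DatasetDict with train (60,000) and test (10,000) splits
example = ds["train"][0]
print(example["image"].size)  # PIL image, 28x28
print(example["label"])       # integer class id, 0-9
```

Because `default: true` is set on the `kmnist` config, no config name needs to be passed.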
kmnist.py
DELETED
@@ -1,122 +0,0 @@
|
|
1 |
-
import struct
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
|
5 |
-
import datasets
|
6 |
-
from datasets.tasks import ImageClassification
|
7 |
-
|
8 |
-
_CITATION = R"""
|
9 |
-
@article{DBLP:journals/corr/abs-1812-01718,
|
10 |
-
author = {Tarin Clanuwat and
|
11 |
-
Mikel Bober{-}Irizar and
|
12 |
-
Asanobu Kitamoto and
|
13 |
-
Alex Lamb and
|
14 |
-
Kazuaki Yamamoto and
|
15 |
-
David Ha},
|
16 |
-
title = {Deep Learning for Classical Japanese Literature},
|
17 |
-
journal = {CoRR},
|
18 |
-
volume = {abs/1812.01718},
|
19 |
-
year = {2018},
|
20 |
-
url = {http://arxiv.org/abs/1812.01718},
|
21 |
-
eprinttype = {arXiv},
|
22 |
-
eprint = {1812.01718},
|
23 |
-
timestamp = {Thu, 14 Oct 2021 09:15:14 +0200},
|
24 |
-
biburl = {https://dblp.org/rec/journals/corr/abs-1812-01718.bib},
|
25 |
-
bibsource = {dblp computer science bibliography, https://dblp.org}
|
26 |
-
}
|
27 |
-
"""
|
28 |
-
|
29 |
-
_URL = "./raw/"
|
30 |
-
_URLS = {
|
31 |
-
"train_images": "train-images-idx3-ubyte.gz",
|
32 |
-
"train_labels": "train-labels-idx1-ubyte.gz",
|
33 |
-
"test_images": "t10k-images-idx3-ubyte.gz",
|
34 |
-
"test_labels": "t10k-labels-idx1-ubyte.gz",
|
35 |
-
}
|
36 |
-
|
37 |
-
|
38 |
-
class KMNIST(datasets.GeneratorBasedBuilder):
|
39 |
-
|
40 |
-
BUILDER_CONFIGS = [
|
41 |
-
datasets.BuilderConfig(
|
42 |
-
name="kmnist",
|
43 |
-
version=datasets.Version("1.0.0"),
|
44 |
-
)
|
45 |
-
]
|
46 |
-
|
47 |
-
def _info(self):
|
48 |
-
return datasets.DatasetInfo(
|
49 |
-
features=datasets.Features(
|
50 |
-
{
|
51 |
-
"image": datasets.Image(),
|
52 |
-
"label": datasets.features.ClassLabel(
|
53 |
-
names=[
|
54 |
-
"お",
|
55 |
-
"き",
|
56 |
-
"す",
|
57 |
-
"つ",
|
58 |
-
"な",
|
59 |
-
"は",
|
60 |
-
"ま",
|
61 |
-
"や",
|
62 |
-
"れ",
|
63 |
-
"を",
|
64 |
-
]
|
65 |
-
),
|
66 |
-
}
|
67 |
-
),
|
68 |
-
supervised_keys=("image", "label"),
|
69 |
-
homepage="https://github.com/rois-codh/kmnist",
|
70 |
-
citation=_CITATION,
|
71 |
-
task_templates=[
|
72 |
-
ImageClassification(
|
73 |
-
image_column="image",
|
74 |
-
label_column="label",
|
75 |
-
)
|
76 |
-
],
|
77 |
-
)
|
78 |
-
|
79 |
-
def _split_generators(self, dl_manager):
|
80 |
-
urls_to_download = {key: _URL + fname for key, fname in _URLS.items()}
|
81 |
-
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
82 |
-
return [
|
83 |
-
datasets.SplitGenerator(
|
84 |
-
name=datasets.Split.TRAIN,
|
85 |
-
gen_kwargs={
|
86 |
-
"filepath": (
|
87 |
-
downloaded_files["train_images"],
|
88 |
-
downloaded_files["train_labels"],
|
89 |
-
),
|
90 |
-
"split": "train",
|
91 |
-
},
|
92 |
-
),
|
93 |
-
datasets.SplitGenerator(
|
94 |
-
name=datasets.Split.TEST,
|
95 |
-
gen_kwargs={
|
96 |
-
"filepath": (
|
97 |
-
downloaded_files["test_images"],
|
98 |
-
downloaded_files["test_labels"],
|
99 |
-
),
|
100 |
-
"split": "test",
|
101 |
-
},
|
102 |
-
),
|
103 |
-
]
|
104 |
-
|
105 |
-
def _generate_examples(self, filepath, split):
|
106 |
-
"""This function returns the examples in the raw form."""
|
107 |
-
# Images
|
108 |
-
with open(filepath[0], "rb") as f:
|
109 |
-
# First 16 bytes contain some metadata
|
110 |
-
_ = f.read(4)
|
111 |
-
size = struct.unpack(">I", f.read(4))[0]
|
112 |
-
_ = f.read(8)
|
113 |
-
images = np.frombuffer(f.read(), dtype=np.uint8).reshape(size, 28, 28)
|
114 |
-
|
115 |
-
# Labels
|
116 |
-
with open(filepath[1], "rb") as f:
|
117 |
-
# First 8 bytes contain some metadata
|
118 |
-
_ = f.read(8)
|
119 |
-
labels = np.frombuffer(f.read(), dtype=np.uint8)
|
120 |
-
|
121 |
-
for idx in range(size):
|
122 |
-
yield idx, {"image": images[idx], "label": str(labels[idx])}
|
raw/t10k-labels-idx1-ubyte.gz → kmnist/test-00000-of-00001.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6a0b2095400a7816350bb46b79425d822f37c0f16bad93cc7fefcfe793a8d472
+size 4396347
raw/train-images-idx3-ubyte.gz → kmnist/train-00000-of-00001.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e9eb80e7baa826bbe4bdc3f34302bd7a3ec4c9ef115f5b19472438b93f440842
+size 26277686
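The renamed LFS pointers now track the Parquet shards themselves. A quick sketch of inspecting a shard locally with pyarrow; the paths are the ones committed in this PR, and the schema comment describes how `datasets` typically serializes `Image` and `ClassLabel` columns rather than a guaranteed layout:

```python
import pyarrow.parquet as pq

# Read the committed train shard directly (path as in this PR).
table = pq.read_table("kmnist/train-00000-of-00001.parquet")

print(table.num_rows)  # expected: 60000 training examples
print(table.schema)    # typically: image as struct<bytes, path>, label as an integer class id
```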
kmnist_classmap.csv
DELETED
@@ -1,11 +0,0 @@
index,codepoint,char
0,U+304A,お
1,U+304D,き
2,U+3059,す
3,U+3064,つ
4,U+306A,な
5,U+306F,は
6,U+307E,ま
7,U+3084,や
8,U+308C,れ
9,U+3092,を
raw/t10k-images-idx3-ubyte.gz
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:edd7a857845ad6bb1d0ba43fe7e794d164fe2dce499a1694695a792adfac43c5
size 3041136
raw/train-labels-idx1-ubyte.gz
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e38f9ebcd0f3ebcdec7fc8eabdcdaef93bb0df8ea12bee65224341c8183d8e17
size 29497