Witold Wydmański committed on
Commit
e3a713a
1 Parent(s): 067cde5

feat: add loading script

Browse files
.gitattributes CHANGED
@@ -56,3 +56,5 @@ reutersidf10k_test.csv filter=lfs diff=lfs merge=lfs -text
56
  test.csv filter=lfs diff=lfs merge=lfs -text
57
  reutersidf10k_train.csv filter=lfs diff=lfs merge=lfs -text
58
  train.csv filter=lfs diff=lfs merge=lfs -text
 
 
 
56
  test.csv filter=lfs diff=lfs merge=lfs -text
57
  reutersidf10k_train.csv filter=lfs diff=lfs merge=lfs -text
58
  train.csv filter=lfs diff=lfs merge=lfs -text
59
+ test.npy filter=lfs diff=lfs merge=lfs -text
60
+ train.npy filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ __pycache__
reuters10k.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import datasets
4
+ from sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler
5
+ import numpy as np
6
+
7
+
8
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
# NOTE(review): this is still the dataset-script template placeholder,
# not a real citation for Reuters10K — replace before publishing.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# Base URL of the raw files in this dataset repository; split filenames
# ("train.npy" / "test.npy") are appended to it in _split_generators.
_URL = "https://huggingface.co/datasets/wwydmanski/reuters10k/raw/main/"
21
class Reuters10K(datasets.GeneratorBasedBuilder):
    """Reuters10K dataset loading script.

    Each split is a pickled ``.npy`` file wrapping a dict with keys
    ``'data'`` (a 2-D feature matrix) and ``'label'`` (per-row labels).
    Features are min-max scaled to [0, 1] before being yielded.
    """

    VERSION = datasets.Version("0.0.1")

    def _info(self):
        # NOTE(review): no `features` schema is declared, so `datasets`
        # infers column types from the generated examples. Consider
        # declaring them explicitly (Sequence(Value(...)) / ClassLabel).
        return datasets.DatasetInfo(
            description="Reuters10K dataset",
            version=Reuters10K.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Download both split files and declare the TRAIN/TEST splits."""
        train_url = _URL + "train.npy"
        test_url = _URL + "test.npy"

        # download_and_extract() called with a list returns a list of local
        # file paths in the same order — NOT a directory. The original code
        # treated the result as a directory and joined filenames onto it
        # (and pointed the TEST split at a nonexistent "dev.npy"), which
        # always produced invalid paths.
        train_path, test_path = dl_manager.download_and_extract(
            [train_url, test_url]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one split's ``.npy`` file.

        Args:
            filepath: Local path to a pickled numpy file holding a 0-d
                object array that wraps a dict with 'data' and 'label'.
        """
        # allow_pickle is required because the file stores a dict, not a
        # plain array; .item() (called once, not per-field) unwraps it.
        dataset = np.load(filepath, allow_pickle=True).item()

        features = dataset["data"]
        labels = dataset["label"]

        # Scale each feature column to [0, 1]. NOTE(review): the scaler is
        # fit per split, so train and test are scaled independently —
        # confirm this matches the intended preprocessing.
        features = MinMaxScaler().fit_transform(features)

        for idx, (x, y) in enumerate(zip(features, labels)):
            yield idx, {"features": x, "label": y}
test.csv → test.npy RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:389a149bd99b460c8b37ac6ade993d7b99134d425a5912a0defa9fa4d319c7fb
3
- size 100050000
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f85c13eac62427cfbbd8d7f5de51e91d70daa16868be2caa128cb036eb82e81
3
+ size 32392405
train.csv → train.npy RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fca58a1599dd252c228704f1cf1b966a30088f0795c23fded9e400e91cdfb2c8
3
- size 500250000
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d30184c950cc71f8fa943c0c61564db94ee090206508ea6279cf7032f4c53a07
3
+ size 161971527