MatsRooth committed
Commit d16e9f3
1 Parent(s): c835363

Upload 3 files


Python code that creates the dataset.

Files changed (3)
  1. down_on_copy.py +67 -0
  2. down_on_create.py +80 -0
  3. down_on_hub.py +14 -0
down_on_copy.py ADDED
@@ -0,0 +1,67 @@
import datasets
import os
import shutil
#from datasets import load_dataset
train0, validation0, test0 = datasets.load_dataset("superb", "ks", split=["train", "validation", "test"])

labels = train0.features["label"].names
label2id = {x: labels.index(x) for x in labels}
# id2label = {str(id): label for label, id in label2id.items()}

down_id = label2id['down']
on_id = label2id['on']

train0_down = train0.filter(lambda example: example['label'] == down_id)
train0_on = train0.filter(lambda example: example['label'] == on_id)

# Recreate the per-label target directories, then hard-link each audio file
# into the directory named after its label.
shutil.rmtree('/home/mr249/ac_h/down_on/data/train/down', ignore_errors=True)
os.mkdir('/home/mr249/ac_h/down_on/data/train/down')
shutil.rmtree('/home/mr249/ac_h/down_on/data/train/on', ignore_errors=True)
os.mkdir('/home/mr249/ac_h/down_on/data/train/on')

for e in train0_down:
    p = e['audio']['path']
    newpath = '/home/mr249/ac_h/down_on/data/train/down/{}'.format(os.path.basename(p))
    os.link(p, newpath)

for e in train0_on:
    p = e['audio']['path']
    newpath = '/home/mr249/ac_h/down_on/data/train/on/{}'.format(os.path.basename(p))
    os.link(p, newpath)

validation0_down = validation0.filter(lambda example: example['label'] == down_id)
validation0_on = validation0.filter(lambda example: example['label'] == on_id)

shutil.rmtree('/home/mr249/ac_h/down_on/data/validation/down', ignore_errors=True)
os.mkdir('/home/mr249/ac_h/down_on/data/validation/down')
shutil.rmtree('/home/mr249/ac_h/down_on/data/validation/on', ignore_errors=True)
os.mkdir('/home/mr249/ac_h/down_on/data/validation/on')

for e in validation0_down:
    p = e['audio']['path']
    newpath = '/home/mr249/ac_h/down_on/data/validation/down/{}'.format(os.path.basename(p))
    os.link(p, newpath)

for e in validation0_on:
    p = e['audio']['path']
    newpath = '/home/mr249/ac_h/down_on/data/validation/on/{}'.format(os.path.basename(p))
    os.link(p, newpath)

test0_down = test0.filter(lambda example: example['label'] == down_id)
test0_on = test0.filter(lambda example: example['label'] == on_id)

shutil.rmtree('/home/mr249/ac_h/down_on/data/test/down', ignore_errors=True)
os.mkdir('/home/mr249/ac_h/down_on/data/test/down')
shutil.rmtree('/home/mr249/ac_h/down_on/data/test/on', ignore_errors=True)
os.mkdir('/home/mr249/ac_h/down_on/data/test/on')

for e in test0_down:
    p = e['audio']['path']
    newpath = '/home/mr249/ac_h/down_on/data/test/down/{}'.format(os.path.basename(p))
    os.link(p, newpath)

for e in test0_on:
    p = e['audio']['path']
    newpath = '/home/mr249/ac_h/down_on/data/test/on/{}'.format(os.path.basename(p))
    os.link(p, newpath)
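The three per-split blocks above repeat the same filter/rmtree/mkdir/link logic. A more compact equivalent is sketched below; it assumes the same paths, and the helper name link_split and the switch to os.makedirs are mine, not part of the commit:

import datasets
import os
import shutil

ROOT = '/home/mr249/ac_h/down_on/data'

def link_split(ds, split, label_name, label_id):
    # Recreate ROOT/split/label_name, then hard-link the matching audio files into it.
    outdir = os.path.join(ROOT, split, label_name)
    shutil.rmtree(outdir, ignore_errors=True)
    os.makedirs(outdir)
    for e in ds.filter(lambda ex: ex['label'] == label_id):
        p = e['audio']['path']
        os.link(p, os.path.join(outdir, os.path.basename(p)))

splits = datasets.load_dataset("superb", "ks", split=["train", "validation", "test"])
label2id = {name: i for i, name in enumerate(splits[0].features["label"].names)}
for split_name, ds in zip(["train", "validation", "test"], splits):
    for label_name in ('down', 'on'):
        link_split(ds, split_name, label_name, label2id[label_name])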
down_on_create.py ADDED
@@ -0,0 +1,80 @@
import datasets
#from datasets import load_dataset
train0, validation0, test0 = datasets.load_dataset("superb", "ks", split=["train", "validation", "test"])

labels = train0.features["label"].names
label2id = {x: labels.index(x) for x in labels}
id2label = {str(id): label for label, id in label2id.items()}

down_id = label2id['down']
on_id = label2id['on']

# This filters 51094 rows down to 3706 rows, with features ['file', 'audio', 'label'].
train1 = train0.filter(lambda example: example['label'] == down_id or example['label'] == on_id)

# 521 rows
validation1 = validation0.filter(lambda example: example['label'] == down_id or example['label'] == on_id)
# 499 rows
test1 = test0.filter(lambda example: example['label'] == down_id or example['label'] == on_id)

train1.to_csv('/home/mr249/ac_h/do1/tmp/train1.csv')
validation1.to_csv('/home/mr249/ac_h/do1/tmp/validation1.csv')
test1.to_csv('/home/mr249/ac_h/do1/tmp/test1.csv')

# Fix the labels. TODO: fold this step into this Python program instead of
# shelling out to awk. Run from /home/mr249/ac_h/do1:
# cat tmp/train1.csv | awk -f ../script/fix-down-on-labels.awk > train.csv
# cat tmp/validation1.csv | awk -f ../script/fix-down-on-labels.awk > validation.csv
# cat tmp/test1.csv | awk -f ../script/fix-down-on-labels.awk > test.csv

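# An in-Python alternative to the awk step (a sketch only; it assumes
# fix-down-on-labels.awk simply remaps the SUPERB label ids to 0 for 'down'
# and 1 for 'on', which is what the ClassLabel cast below expects):
# relabel = {down_id: 0, on_id: 1}
# train1 = train1.map(lambda e: {'label': relabel[e['label']]})
# validation1 = validation1.map(lambda e: {'label': relabel[e['label']]})
# test1 = test1.map(lambda e: {'label': relabel[e['label']]})
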
# Create new datasets from the csv files.

train2 = datasets.Dataset.from_csv('/home/mr249/ac_h/do1/train.csv', 'train')
validation2 = datasets.Dataset.from_csv('/home/mr249/ac_h/do1/validation.csv', 'validation')
test2 = datasets.Dataset.from_csv('/home/mr249/ac_h/do1/test.csv', 'test')

# Above, the second argument names the split. It may not be relevant.
# validation2.split
# NamedSplit('validation')

# Add the label names. This does not change the examples, which keep ints as labels.
new_features = train2.features.copy()
new_features["label"] = datasets.ClassLabel(names=['down', 'on'], id=None)
train2 = train2.cast(new_features)
validation2 = validation2.cast(new_features)
test2 = test2.cast(new_features)

# Combine the splits into a DatasetDict.
down_on = datasets.DatasetDict({
    "train": train2,
    "validation": validation2,
    "test": test2,
})

# Save to disk.
down_on.save_to_disk('/home/mr249/ac_h/down_on')

# Load from disk. (We prefer to load from the Hub, so this is commented out.)
# down_on2 = datasets.load_from_disk('/home/mr249/ac_h/down_on')

# Push to the Hub.
# See https://huggingface.co/docs/datasets/upload_dataset
from huggingface_hub import login
login()  # prompts for the access token
down_on.push_to_hub("MatsRooth/down_on", private=False, embed_external_files=True)

# Now the dataset can be loaded from the hub!
train3 = datasets.load_dataset("MatsRooth/down_on", split="train")

# When running training on this dataset, the audio paths are wrong:
# FileNotFoundError: [Errno 2] No such file or directory: "{'bytes': None, 'path':
# '/home/mr249/.cache/huggingface/datasets/downloads/extracted/5836831ec57281eff9b
# 1882385bf370d016058e6cba1fd7ff1dcb68cd8cddefd/down/28ed6bc9_nohash_4.wav'}"

# train3.features['label']
# ClassLabel(names=['down', 'on'], id=None)
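# The failure looks like a CSV artifact: to_csv() stringifies the 'audio'
# column, so after from_csv() each entry is the repr of a dict rather than a
# decodable Audio feature, and the trainer tries to open that string as a
# path. A possible workaround (a sketch, not verified here; the audiofolder
# route in down_on_copy.py + down_on_hub.py sidesteps the CSV round trip
# entirely): rebuild the audio feature from the 'file' path column before
# pushing, assuming that column survived as plain paths.
# train2 = train2.cast_column('file', datasets.Audio(sampling_rate=16000))
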
down_on_hub.py ADDED
@@ -0,0 +1,14 @@
# See https://huggingface.co/docs/datasets/audio_dataset
from datasets import load_dataset

# Upload the down_on dataset to the Hugging Face Hub.

# This magically does the right thing to create the dataset, including
# setting the label names and IDs.
ds = load_dataset("audiofolder", data_dir="down_on/data")

from huggingface_hub import login
login()
ds.push_to_hub("MatsRooth/down_on", private=False, embed_external_files=True)
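The audiofolder loader infers the class labels from each file's parent directory, so the tree that down_on_copy.py builds under down_on/data (train/{down,on}, validation/{down,on}, test/{down,on}) is the only configuration it needs. A quick sanity check after loading (a sketch; the expected output follows from the ClassLabel seen above):

print(ds["train"].features["label"])  # expect: ClassLabel(names=['down', 'on'], id=None)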