import datasets
train0, validation0, test0 = datasets.load_dataset("superb", "ks", split=["train", "validation", "test"])

labels = train0.features["label"].names
label2id = {label: i for i, label in enumerate(labels)}
id2label = {str(id): label for label, id in label2id.items()}

down_id = label2id['down']
on_id = label2id['on']

# This filters 51094 rows to 3706 rows, with features ['file', 'audio', 'label']
train1 = train0.filter(lambda example: example['label'] in (down_id, on_id))

# 521 rows
validation1 = validation0.filter(lambda example: example['label'] in (down_id, on_id))
# 499 rows
test1 = test0.filter(lambda example: example['label'] in (down_id, on_id))

train1.to_csv('/home/mr249/ac_h/do1/tmp/train1.csv')
validation1.to_csv('/home/mr249/ac_h/do1/tmp/validation1.csv')
test1.to_csv('/home/mr249/ac_h/do1/tmp/test1.csv')

# Fix the labels. TODO: fold this step into this Python program (a possible
# Python version is sketched below). This step is arguably in the wrong place.
# Run the awk script from /home/mr249/ac_h/do1:
# cat tmp/train1.csv | awk -f ../script/fix-down-on-labels.awk > train.csv
# cat tmp/validation1.csv | awk -f ../script/fix-down-on-labels.awk > validation.csv
# cat tmp/test1.csv | awk -f ../script/fix-down-on-labels.awk > test.csv
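#
# A rough Python equivalent of the awk step, as an untested sketch. ASSUMPTION:
# fix-down-on-labels.awk only remaps the original superb label ids to 0 ('down')
# and 1 ('on') in the label column. If so, the same remap could be done on the
# datasets directly, before writing the CSVs:
# def remap_label(example):
#     example['label'] = 0 if example['label'] == down_id else 1
#     return example
# train1 = train1.map(remap_label)
# validation1 = validation1.map(remap_label)
# test1 = test1.map(remap_label)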

# Create new datasets from the csv files

train2 = datasets.Dataset.from_csv('/home/mr249/ac_h/do1/train.csv', split='train')
validation2 = datasets.Dataset.from_csv('/home/mr249/ac_h/do1/validation.csv', split='validation')
test2 = datasets.Dataset.from_csv('/home/mr249/ac_h/do1/test.csv', split='test')
 
# Above, the split argument names the split. It may not be relevant.
# validation2.split 
# NamedSplit('validation')

# Add the label names. It does not change the examples, which have ints as labels.
new_features = train2.features.copy()
new_features["label"] = datasets.ClassLabel(names=['down', 'on'],id=None)
train2 = train2.cast(new_features)
validation2 = validation2.cast(new_features)
test2 = test2.cast(new_features)
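
# Quick check (expected results shown as comments): the cast changes the feature
# metadata, while each example still stores an integer label.
# train2.features['label']
# ClassLabel(names=['down', 'on'], id=None)
# train2.features['label'].int2str(train2[0]['label'])
# 'down'  (or 'on', depending on the first example)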

# Combine them into a DatasetDict.
down_on = datasets.DatasetDict({
    "train": train2,
    "validation": validation2,
    "test": test2,
})

# Save to disk
down_on.save_to_disk('/home/mr249/ac_h/down_on')

# Load from disk
# However, we prefer to load from the hub.
# down_on2 = datasets.load_from_disk('/home/mr249/ac_h/down_on')

# Push to hub
# See https://huggingface.co/docs/datasets/upload_dataset
from huggingface_hub import login
login()
# It prompts for the token
down_on.push_to_hub("MatsRooth/down_on", private=False, embed_external_files=True)

# Now the dataset can be loaded from the hub!
train3 = datasets.load_dataset("MatsRooth/down_on", split="train")

# When running training on this dataset, loading the audio fails. The 'audio'
# column apparently went through the CSV round trip as the string representation
# of a dict, so the loader treats the whole string as a file name:
# FileNotFoundError: [Errno 2] No such file or directory: "{'bytes': None, 'path':
# '/home/mr249/.cache/huggingface/datasets/downloads/extracted/5836831ec57281eff9b
# 1882385bf370d016058e6cba1fd7ff1dcb68cd8cddefd/down/28ed6bc9_nohash_4.wav'}"
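#
# One possible repair, sketched but untested: parse the dict string back into a
# path and cast the column to an Audio feature. This assumes the referenced .wav
# files still exist in the local cache. A cleaner fix may be to skip the CSV
# round trip entirely and remap the labels on train1/validation1/test1 directly
# (see the sketch after the awk commands above).
# import ast
# def extract_path(example):
#     example['audio'] = ast.literal_eval(example['audio'])['path']
#     return example
# train3 = train3.map(extract_path)
# train3 = train3.cast_column('audio', datasets.Audio(sampling_rate=16000))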


# train3.features['label']
# ClassLabel(names=['down', 'on'], id=None)