Matilde committed on
Commit f38bf80
1 Parent(s): 2141506

Create homo_ita.py

Files changed (1)
  1. homo_ita.py +84 -0
homo_ita.py ADDED
@@ -0,0 +1,84 @@
+# coding=utf-8
+# Copyright 2021 Artem Ploujnikov
+
+
+# Lint as: python3
+import json
+
+import datasets
+
+_DESCRIPTION = """\
+Grapheme-to-Phoneme training, validation and test sets
+"""
+
+_BASE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space/resolve/main/dataset"
+_HOMEPAGE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space"
+_NA = "N/A"
+_SPLIT_TYPES = ["train", "valid", "test"]
+_DATA_TYPES = ["lexicon", "sentence", "homograph"]
+_SPLITS = [
+    f"{data_type}_{split_type}"
+    for data_type in _DATA_TYPES
+    for split_type in _SPLIT_TYPES
+]
+
+
+class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
+    def __init__(self, base_url=None, splits=None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.base_url = base_url or _BASE_URL
+        self.splits = splits or _SPLITS
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "sample_id": datasets.Value("string"),
+                    "speaker_id": datasets.Value("string"),
+                    "origin": datasets.Value("string"),
+                    "char": datasets.Value("string"),
+                    "phn": datasets.Sequence(datasets.Value("string")),
+                    "homograph": datasets.Value("string"),
+                    "homograph_wordid": datasets.Value("string"),
+                    "homograph_char_start": datasets.Value("int32"),
+                    "homograph_char_end": datasets.Value("int32"),
+                    "homograph_phn_start": datasets.Value("int32"),
+                    "homograph_phn_end": datasets.Value("int32"),
+                },
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE_URL,
+        )
+
+    def _get_url(self, split):
+        return f"{self.base_url}/{split}.json"
+
+    def _split_generator(self, dl_manager, split):
+        url = self._get_url(split)
+        path = dl_manager.download_and_extract(url)
+        return datasets.SplitGenerator(
+            name=split, gen_kwargs={"datapath": path, "datatype": split},
+        )
+
+    def _split_generators(self, dl_manager):
+        return [self._split_generator(dl_manager, split) for split in self.splits]
+
+    def _generate_examples(self, datapath, datatype):
+        with open(datapath, encoding="utf-8") as f:
+            data = json.load(f)
+        for sentence_counter, (sample_id, item) in enumerate(data.items()):
+            resp = {
+                "sample_id": sample_id,
+                "speaker_id": str(item.get("speaker_id") or _NA),
+                "origin": item["origin"],
+                "char": item["char"],
+                "phn": item["phn"],
+                "homograph": item.get("homograph", _NA),
+                "homograph_wordid": item.get("homograph_wordid", _NA),
+                "homograph_char_start": item.get("homograph_char_start", 0),
+                "homograph_char_end": item.get("homograph_char_end", 0),
+                "homograph_phn_start": item.get("homograph_phn_start", 0),
+                "homograph_phn_end": item.get("homograph_phn_end", 0),
+            }
+            yield sentence_counter, resp
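
For reference, a minimal sketch of how the added script could be exercised (not part of the diff above). It assumes a `datasets` release that still runs local loading scripts; on recent versions `load_dataset` additionally requires `trust_remote_code=True`. Split names are the `_DATA_TYPES` x `_SPLIT_TYPES` products defined in `_SPLITS`, e.g. `homograph_train`.

import datasets

# Load one of the nine generated splits; the builder downloads
# <base_url>/homograph_train.json and yields one example per JSON record.
ds = datasets.load_dataset("homo_ita.py", split="homograph_train")

sample = ds[0]
print(sample["char"])       # grapheme (character) sequence
print(sample["phn"])        # phoneme tokens, a list of strings
print(sample["homograph"])  # homograph word, or "N/A" where absent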