Update homo_ita.py
homo_ita.py CHANGED (+4 -21)
@@ -1,9 +1,4 @@
-
-# Copyright 2021 Artem Ploujnikov
-
-
-# Lint as: python3
-import json
+import csv
 
 import datasets
 
@@ -11,11 +6,11 @@ _DESCRIPTION = """\
 Grapheme-to-Phoneme training, validation and test sets
 """
 
-_BASE_URL = "https://huggingface.co/datasets/
-_HOMEPAGE_URL = "https://huggingface.co/datasets/
+_BASE_URL = "https://huggingface.co/datasets/Matilde/Homo_ita/tree/main"
+_HOMEPAGE_URL = "https://huggingface.co/datasets/Matilde/Homo_ita"
 _NA = "N/A"
 _SPLIT_TYPES = ["train", "valid", "test"]
-_DATA_TYPES = ["lexicon", "sentence"
+_DATA_TYPES = ["lexicon", "sentence"]
 _SPLITS = [
     f"{data_type}_{split_type}"
     for data_type in _DATA_TYPES
@@ -39,12 +34,6 @@ class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
                     "origin": datasets.Value("string"),
                     "char": datasets.Value("string"),
                     "phn": datasets.Sequence(datasets.Value("string")),
-                    "homograph": datasets.Value("string"),
-                    "homograph_wordid": datasets.Value("string"),
-                    "homograph_char_start": datasets.Value("int32"),
-                    "homograph_char_end": datasets.Value("int32"),
-                    "homograph_phn_start": datasets.Value("int32"),
-                    "homograph_phn_end": datasets.Value("int32"),
                 },
             ),
             supervised_keys=None,
@@ -74,11 +63,5 @@ class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
                 "origin": item["origin"],
                 "char": item["char"],
                 "phn": item["phn"],
-                "homograph": item.get("homograph", _NA),
-                "homograph_wordid": item.get("homograph_wordid", _NA),
-                "homograph_char_start": item.get("homograph_char_start", 0),
-                "homograph_char_end": item.get("homograph_char_end", 0),
-                "homograph_phn_start": item.get("homograph_phn_start", 0),
-                "homograph_phn_end": item.get("homograph_phn_end", 0),
            }
            yield sentence_counter, resp
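For orientation: the commit swaps the json import for csv, fills in the Matilde/Homo_ita URLs, and drops the six homograph_* columns from both the declared features and the yielded examples, so each example keeps only origin, char and phn. Below is a minimal loading sketch, not part of the commit; it assumes the loader is published as "Matilde/Homo_ita" and that the names built by _SPLITS (e.g. "sentence_train") are exposed as split names, neither of which is confirmed by the hunks shown here.

```python
# Minimal usage sketch (not from the commit). Assumptions: the loader is published
# as "Matilde/Homo_ita" and the _SPLITS names (e.g. "sentence_train") are split names.
from datasets import load_dataset

# Depending on the datasets version, trust_remote_code=True may be needed
# to run a script-based loader such as homo_ita.py.
ds = load_dataset("Matilde/Homo_ita", split="sentence_train", trust_remote_code=True)

# After this change each example carries only three fields:
#   origin: string, char: string, phn: sequence of strings
sample = ds[0]
print(sample["origin"], sample["char"], sample["phn"])
```

The hunk that actually switches the file parsing from JSON to CSV is not shown in this diff; purely as an illustration of what a csv-based _generate_examples could look like (the column names are assumptions based on the yielded fields):

```python
# Illustrative sketch only; the real parsing change is not visible in this diff,
# and the CSV column names ("origin", "char", "phn") are hypothetical.
import csv

def _generate_examples_sketch(datapath):
    with open(datapath, encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for sentence_counter, item in enumerate(reader):
            resp = {
                "origin": item["origin"],
                "char": item["char"],
                # phn is declared as a Sequence of strings, so a flat CSV cell
                # would need to be split into tokens here.
                "phn": item["phn"].split(),
            }
            yield sentence_counter, resp
```

Keeping the Features declaration and the dict yielded in _generate_examples in sync, as this commit does for the removed homograph_* keys, avoids schema mismatches when the examples are encoded.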