sunlixu commited on
Commit
1e6f1c0
1 Parent(s): 8352b2f

Upload Uyghur.py

Browse files
Files changed (1) hide show
  1. Uyghur.py +152 -0
Uyghur.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ from datasets import DownloadManager, DatasetInfo
3
+ from datasets.tasks import AutomaticSpeechRecognition
4
+ import os
5
+ import csv
6
+ import json
7
+
8
+ # TODO: Add BibTeX citation
9
+ # Find for instance the citation on arxiv or on the dataset repo/website
10
+ _CITATION = """\
11
+ @InProceedings{huggingface:dataset,
12
+ title = {A great new dataset},
13
+ author={huggingface, Inc.
14
+ },
15
+ year={2022}
16
+ }
17
+ """
18
+ # TODO: Add description of the dataset here
19
+ # You can copy an official description
20
+ _DESCRIPTION = """\
21
+ This new dataset is from Xinjiang University and to do some ASR in low resource.
22
+ """
23
+
24
+ # TODO: Add a link to an official homepage for the dataset here
25
+ _HOMEPAGE = "http://www.openslr.org/22"
26
+
27
+ # TODO: Add the licence for the dataset here if you can find it
28
+ _LICENSE = ""
29
+
30
+ # TODO: Add link to the official dataset URLs here
31
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
32
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
33
+
34
+ _URL = "https://huggingface.co/datasets/sunlixu/Uyghur/"
35
+
36
+ _DL_URLS = {
37
+ "train":
38
+ 'train.tar.gz',
39
+ "test":
40
+ "test.tar.gz",
41
+ "cv":
42
+ "cv.tar.gz",
43
+ "all": {
44
+ "train":
45
+ 'train.tar.gz',
46
+ "test":
47
+ "test.tar.gz",
48
+ "cv":
49
+ "cv.tar.gz"
50
+ },
51
+ }
52
+
53
+
54
class UyghurASRConfig(datasets.BuilderConfig):
    """BuilderConfig for the Uyghur ASR dataset.

    Fix: the original docstring said "BuilderConfig for SQUAD" — a leftover
    from the dataset-script template this file was copied from.
    """

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to
                ``datasets.BuilderConfig`` (e.g. ``name``, ``version``,
                ``description``).
        """
        super(UyghurASRConfig, self).__init__(**kwargs)
62
+
63
class UyghurASR(datasets.GeneratorBasedBuilder):
    """Uyghur speech dataset (Xinjiang University) for low-resource ASR.

    Each example pairs a 16 kHz WAV file with its transcript; the speaker id
    is recovered from the utterance id (the part before the first ``_``).
    """

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        UyghurASRConfig(name="train", description="'train' speech."),
        UyghurASRConfig(name="test", description="'test' speech."),
        UyghurASRConfig(name="cv", description="'cv' speech."),
        UyghurASRConfig(name="all", description="all"),
    ]

    def _info(self) -> DatasetInfo:
        """Return feature schema and metadata for this dataset."""
        return datasets.DatasetInfo(
            description="Uyghur_20",
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage='https://huggingface.co/datasets/sunlixu/Uyghur/',
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download (and, when not streaming, extract) the split archives.

        Two fixes over the original:
        * For the single-split configs ("train"/"test"/"cv"),
          ``_DL_URLS[name]`` is a plain string, so the old
          ``archive_path["train"]`` indexing raised ``TypeError`` — only the
          "all" config ever worked. URLs are now normalized to a dict first.
        * ``local_extracted_archive`` was computed but never used; the
          *downloaded tar path* was passed in its place, so non-streaming
          example paths pointed at the archive file instead of the extracted
          directory. The extracted path is now forwarded.
        """
        urls = _DL_URLS[self.config.name]
        if not isinstance(urls, dict):
            # Single-split config: wrap so the code below is uniform.
            urls = {self.config.name: urls}
        archive_path = dl_manager.download(urls)
        # In streaming mode nothing is extracted; examples are read straight
        # from the tar stream via iter_archive below.
        local_extracted_archive = (
            dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        )
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "cv": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(split),
                    "files": dl_manager.iter_archive(archive_path[split]),
                },
            )
            for split in urls
        ]

    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a Uyghur archive.

        Args:
            files: iterator of ``(path, file_object)`` pairs from
                ``dl_manager.iter_archive``.
            local_extracted_archive: extracted archive directory, or ``None``
                when streaming (then the bare in-archive filename is kept).

        Yields:
            ``(key, example)`` pairs matching the features in ``_info``.
        """
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".wav"):
                # Utterance id is the basename without the .wav suffix.
                id_ = path.split("/")[-1][: -len(".wav")]
                audio_data[id_] = f.read()
            elif path.endswith(".txt"):
                # Transcript file: one "<id> <text>" line per utterance.
                for line in f:
                    if line:
                        line = line.decode("utf-8").strip()
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.wav"
                        # assumes ids look like "<speaker>_<utt>" — TODO confirm
                        speaker_id = id_.split('_')[0]
                        # NOTE(review): joins the bare wav name onto the
                        # extraction root; verify the archives keep wavs at
                        # the top level rather than in a subdirectory.
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "file": audio_file,
                                "text": transcript,
                            }
                        )
            # Flush once every buffered wav has a matching transcript; audio
            # bytes are embedded so streaming mode needs no extracted files.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []