Datasets:
parquet-converter
committed on
Commit
•
6c0c09c
1
Parent(s):
8008549
Update parquet files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- Docs/LEXICON.txt +0 -0
- Docs/LOCAL LEXICON.txt +0 -745
- Docs/Part 1 & 2.md +0 -80
- Docs/Part 3 - 6.md +0 -144
- Docs/Phonetic Inventory.pdf +0 -0
- Docs/Technical Reference (NSC).md +0 -267
- Part 1/Channel_0-00.tar +0 -3
- Part 1/Channel_0-01.tar +0 -3
- Part 1/Channel_0-02.tar +0 -3
- Part 1/Channel_0-03.tar +0 -3
- Part 1/Channel_0-04.tar +0 -3
- Part 1/Channel_0-05.tar +0 -3
- Part 1/Channel_1-00.tar +0 -3
- Part 1/Channel_1-01.tar +0 -3
- Part 1/Channel_1-02.tar +0 -3
- Part 1/Channel_1-03.tar +0 -3
- Part 1/Channel_1-04.tar +0 -3
- Part 1/Channel_1-05.tar +0 -3
- Part 1/Channel_2-00.tar +0 -3
- Part 1/Channel_2-01.tar +0 -3
- Part 1/Channel_2-02.tar +0 -3
- Part 1/Channel_2-03.tar +0 -3
- Part 1/Channel_2-04.tar +0 -3
- Part 1/Channel_2-05.tar +0 -3
- Part 2/Channel_0-00.tar +0 -3
- Part 2/Channel_0-01.tar +0 -3
- Part 2/Channel_0-02.tar +0 -3
- Part 2/Channel_0-03.tar +0 -3
- Part 2/Channel_0-04.tar +0 -3
- Part 2/Channel_1-00.tar +0 -3
- Part 2/Channel_1-01.tar +0 -3
- Part 2/Channel_1-02.tar +0 -3
- Part 2/Channel_1-03.tar +0 -3
- Part 2/Channel_1-04.tar +0 -3
- Part 2/Channel_1-05.tar +0 -3
- Part 2/Channel_1-06.tar +0 -3
- Part 2/Channel_2-00.tar +0 -3
- Part 2/Channel_2-01.tar +0 -3
- Part 2/Channel_2-02.tar +0 -3
- Part 2/Channel_2-03.tar +0 -3
- Part 2/Channel_2-04.tar +0 -3
- Part 2/Channel_2-05.tar +0 -3
- Part 3/DifferentRooms-00.tar +0 -3
- Part 3/DifferentRooms-01.tar +0 -3
- Part 3/DifferentRooms-02.tar +0 -3
- Part 3/DifferentRooms-03.tar +0 -3
- Part 3/DifferentRooms-04.tar +0 -3
- Part 3/DifferentRooms-05.tar +0 -3
- Part 3/DifferentRooms-06.tar +0 -3
- Part 3/DifferentRooms-07.tar +0 -3
Docs/LEXICON.txt
DELETED
The diff for this file is too large to render.
See raw diff
|
|
Docs/LOCAL LEXICON.txt
DELETED
@@ -1,745 +0,0 @@
|
|
1 |
-
a la a la
|
2 |
-
Abbas abas
|
3 |
-
Achar at͡ʃa
|
4 |
-
Adli adli
|
5 |
-
Admiralty ɛdməɹəti
|
6 |
-
Adni adni
|
7 |
-
ah a
|
8 |
-
Ahmad amad
|
9 |
-
Aiman a͡iman
|
10 |
-
Airport ɛpɔt
|
11 |
-
Aisha a͡iʃa
|
12 |
-
Akmal akmal
|
13 |
-
Albert ɛlbət
|
14 |
-
Ali ali
|
15 |
-
Aliwal aliwal
|
16 |
-
Aljunied ald͡ʒunid
|
17 |
-
Allenby ɛlənbi
|
18 |
-
Amil amil
|
19 |
-
Amina amina
|
20 |
-
Amirah amiɹa
|
21 |
-
Ammar amar
|
22 |
-
Amoy amoi
|
23 |
-
Ampang ampaŋ
|
24 |
-
Ang aŋ
|
25 |
-
Angin aŋgin
|
26 |
-
Anika anika
|
27 |
-
Aniket aniket
|
28 |
-
Anish aniʃ
|
29 |
-
Anishka aniʃka
|
30 |
-
Anita anita
|
31 |
-
Ann Siang ansiaŋ
|
32 |
-
Anusha anuʃa
|
33 |
-
Apam apam
|
34 |
-
Aqilah akila
|
35 |
-
Arab ɛrəb
|
36 |
-
Armenian aminiən
|
37 |
-
Arnav aɹnav
|
38 |
-
Arul aɹul
|
39 |
-
Ashok aʃok
|
40 |
-
Ashraf aʃɹaf
|
41 |
-
Assam asam
|
42 |
-
Avenue ɛvənju
|
43 |
-
ayam ajam
|
44 |
-
ayer aje
|
45 |
-
Baghdad bagdad
|
46 |
-
Bahru baɹu
|
47 |
-
bak bʼaɹ̆k̚
|
48 |
-
Bakau baka͡u
|
49 |
-
Bakso bakso
|
50 |
-
Balakrishnan balakɹiʃnan
|
51 |
-
Bali bali
|
52 |
-
Balik balik
|
53 |
-
Ban ban
|
54 |
-
Bangau baŋa͡u
|
55 |
-
Bangkit baŋkit
|
56 |
-
bao p̚aw
|
57 |
-
Bartley batli
|
58 |
-
Basah basa
|
59 |
-
Batisah batisa
|
60 |
-
Batok batok
|
61 |
-
batu batu
|
62 |
-
Bay be
|
63 |
-
Bayfront befɹan
|
64 |
-
Bayshore beʃɔ
|
65 |
-
Beauty bjuti
|
66 |
-
Bedok bədoʔ
|
67 |
-
Bee bi
|
68 |
-
beehoon biː huːn
|
69 |
-
begedil bəˈgədɪl
|
70 |
-
belakang bəlakaŋ
|
71 |
-
Belilios bɛlilios
|
72 |
-
Bencoolen bɛnkulən
|
73 |
-
Bendemeer bɛndəmiə
|
74 |
-
Besar bəsa
|
75 |
-
Bilis bilis
|
76 |
-
Biryani biɹjani
|
77 |
-
Bishan biʃan
|
78 |
-
Blangah blaŋga
|
79 |
-
Bobo bobo
|
80 |
-
Boon bun
|
81 |
-
Botanic botɛnik
|
82 |
-
Boulevard boləvad
|
83 |
-
Braddell bɹɛdəl
|
84 |
-
Bras bɹas
|
85 |
-
Bright bɹa͡it
|
86 |
-
briyani brijani
|
87 |
-
Buah bua
|
88 |
-
Buangkok buaŋkɔk
|
89 |
-
Bubur b@b@ɹ
|
90 |
-
Bugis bugis
|
91 |
-
bukit buket
|
92 |
-
Buona Vista buonavista
|
93 |
-
Bussorah busora
|
94 |
-
by ba͡i
|
95 |
-
cafes kɛfɛs
|
96 |
-
cai t͡sʰa͡i
|
97 |
-
Caldecott kɛldəkɔt
|
98 |
-
Canai ʃana͡i
|
99 |
-
Canberra kɛnbəɹə
|
100 |
-
Canning kɛniŋ
|
101 |
-
Cashew kɛʃju
|
102 |
-
Casuarina kɛʒərina
|
103 |
-
Cavan kɛvən
|
104 |
-
Cecil sisəl
|
105 |
-
ChaCha t͡ʃat͡ʃa
|
106 |
-
Chai t͡ʃa͡i
|
107 |
-
Chander t͡ʃɛndə
|
108 |
-
Chandran t͡ʃadɹan
|
109 |
-
Chang ʒaŋ
|
110 |
-
Changi t͡ʃaŋi
|
111 |
-
chap d͡ʒap
|
112 |
-
char ʧa
|
113 |
-
Chee t͡ʃi
|
114 |
-
Chen t͡ʃən
|
115 |
-
Chendol t͡sɛndol
|
116 |
-
Cheng t͡ʃeŋ
|
117 |
-
Cheong t͡ʃiɔŋ
|
118 |
-
Cheow t͡ʃia͡u
|
119 |
-
Chew t͡ʃu
|
120 |
-
Chian t͡ʃian
|
121 |
-
Chiang t͡ʃiaŋ
|
122 |
-
Chim t͡ʃim
|
123 |
-
Chin t͡ʃin
|
124 |
-
Chinatown t͡ʃa͡inəta͡un
|
125 |
-
Chinese t͡ʃa͡inis
|
126 |
-
Ching t͡ʃiŋ
|
127 |
-
choa ʧua
|
128 |
-
Chong t͡ʃɔŋ
|
129 |
-
Choon t͡ʃun
|
130 |
-
Chor t͡ʃɔ
|
131 |
-
Chow t͡ʃa͡u
|
132 |
-
Choy t͡ʃɔ͡i
|
133 |
-
chu ʧu
|
134 |
-
Chuan t͡ʃuan
|
135 |
-
Chwee t͡ʃwi
|
136 |
-
Circle səkəl
|
137 |
-
City siti
|
138 |
-
Clarke kla
|
139 |
-
Clementi klɛmənti
|
140 |
-
Close klos
|
141 |
-
Coast kos
|
142 |
-
Commonwealth kɔmənwɛlθ
|
143 |
-
Compassvale kɔmpəsvel
|
144 |
-
Coniston kɔnistən
|
145 |
-
Coral kɔɹəl
|
146 |
-
Cove kov
|
147 |
-
Craig kɹeɡ
|
148 |
-
creaminess kriminəs
|
149 |
-
Crescent krɛsənt
|
150 |
-
Curry kaɹi
|
151 |
-
dai da͡i
|
152 |
-
Dakota dakota
|
153 |
-
Damai dama͡i
|
154 |
-
Dashreen daʃɹin
|
155 |
-
decor dɛkɔ
|
156 |
-
Deepak dipak
|
157 |
-
Deepika dipika
|
158 |
-
Desker dɛskə
|
159 |
-
Dhal dal
|
160 |
-
Dhoby Ghaut dobigɔt
|
161 |
-
di di
|
162 |
-
dim tin
|
163 |
-
Dorset dɔɹsɛt
|
164 |
-
Dou dou
|
165 |
-
Dover dovə
|
166 |
-
Downtown da͡unta͡un
|
167 |
-
Drive dra͡iv
|
168 |
-
dung duŋ
|
169 |
-
Dunlop danlop
|
170 |
-
Dunsfold dansfold
|
171 |
-
Durian djuɹiɛn
|
172 |
-
Duxton dakstən
|
173 |
-
East ist
|
174 |
-
Eber ɛbə
|
175 |
-
Edge ed͡ʒ
|
176 |
-
egg eg
|
177 |
-
Elok ɛlok
|
178 |
-
En ən
|
179 |
-
Eng eŋ
|
180 |
-
Erskine əɹskain
|
181 |
-
Esplanade ɛsplənad
|
182 |
-
Expo ɛkspo
|
183 |
-
Faber febə
|
184 |
-
Fajar fad͡ʒa
|
185 |
-
Fan fan
|
186 |
-
Farah faɹa
|
187 |
-
Farmway famwe
|
188 |
-
Farrer fɛɹə
|
189 |
-
Fatimah fatima
|
190 |
-
Fatt fat
|
191 |
-
Fazirah faziɹa
|
192 |
-
Fengshan fəŋʃan
|
193 |
-
Fernvale fənvel
|
194 |
-
Flanders flɛndəs
|
195 |
-
Foch fɔt͡ʃ
|
196 |
-
Folkestone fəukstəun
|
197 |
-
Foo fu
|
198 |
-
Fort fɔt
|
199 |
-
Founders\ fa͡undəz
|
200 |
-
Fun fan
|
201 |
-
fung fɔŋ
|
202 |
-
gah ga
|
203 |
-
gai k̚ai
|
204 |
-
Gambir gambiə
|
205 |
-
Ganges grend͡ʒ
|
206 |
-
Gao ka͡u
|
207 |
-
Garden gadən
|
208 |
-
Gate get
|
209 |
-
gau ga͡u
|
210 |
-
Gemmill gəmjul
|
211 |
-
Geylang gelaŋ
|
212 |
-
Giam kiam
|
213 |
-
glam gəlam
|
214 |
-
gombak gombak
|
215 |
-
gongfu gɔŋfu
|
216 |
-
Gopal gopal
|
217 |
-
Goreng goɹɛŋ
|
218 |
-
Granges gaŋəs
|
219 |
-
GRC d͡ʒiarsi
|
220 |
-
Great gɹet
|
221 |
-
Grove grov
|
222 |
-
Gu gu
|
223 |
-
Gu Zao Wei You Tiao gu t͡sau wei jow tiau
|
224 |
-
Guang guaŋ
|
225 |
-
Gul gal
|
226 |
-
Gula gula
|
227 |
-
Gunner ganə
|
228 |
-
Guok guok
|
229 |
-
Hae he
|
230 |
-
Haikal ha͡ikəl
|
231 |
-
Haji had͡ʒi
|
232 |
-
hakka haka
|
233 |
-
Halia halia
|
234 |
-
Halifax hɛlifɛks
|
235 |
-
Hall hɔl
|
236 |
-
Ham ham
|
237 |
-
Han han
|
238 |
-
Hang haŋ
|
239 |
-
Hao ha͡u
|
240 |
-
Har ha
|
241 |
-
Harbourfront habəfɹan
|
242 |
-
Haresh haɹeʃ
|
243 |
-
Hastings hɛstiŋs
|
244 |
-
Havelock hɛvəlɔk
|
245 |
-
Haw hɔ
|
246 |
-
Heights ha͡its
|
247 |
-
Henderson hɛndəsən
|
248 |
-
Heng xəŋ
|
249 |
-
Hertford həɹtfɔɹd
|
250 |
-
Hiang hiaŋ
|
251 |
-
Highway ha͡iwe
|
252 |
-
Hill hil
|
253 |
-
Hillview hilviu
|
254 |
-
Hindoo hindu
|
255 |
-
Hitam hitam
|
256 |
-
Hoe ho
|
257 |
-
Hokkien hɔkiɛn
|
258 |
-
Holland hɔlən
|
259 |
-
Hong hɔŋ
|
260 |
-
Hongkong hɔŋkɔŋ
|
261 |
-
Hoon hun
|
262 |
-
Hor hɔ
|
263 |
-
horlicks hɔliks
|
264 |
-
Horne hɔn
|
265 |
-
Hougang a͡ugaŋ
|
266 |
-
Hoy hɔi
|
267 |
-
Hua hua
|
268 |
-
Huan huan
|
269 |
-
Huat huat
|
270 |
-
Huay we
|
271 |
-
Huda huda
|
272 |
-
Huei hue
|
273 |
-
Hui hue
|
274 |
-
Huzaifi huza͡ifi
|
275 |
-
Huzairi huza͡iɹi
|
276 |
-
Hyderabad ha͡idəɹəbad
|
277 |
-
Ibrahim ibɹahim
|
278 |
-
ice ais
|
279 |
-
Ikan ikan
|
280 |
-
Ikmal ikmal
|
281 |
-
Imran imɹan
|
282 |
-
India indiə
|
283 |
-
Indranee indɹani
|
284 |
-
Iqbal ikbal
|
285 |
-
Irfan iɹfan
|
286 |
-
Irrawaddy iɹawadi
|
287 |
-
Irving əviŋ
|
288 |
-
istana istanə
|
289 |
-
Jalan d͡ʒalan
|
290 |
-
Jambol d͡ʒambol
|
291 |
-
Jasim d͡ʒasim
|
292 |
-
Java d͡ʒava
|
293 |
-
Jebat d͡ʒəbat
|
294 |
-
Jelapang d͡ʒəlapaŋ
|
295 |
-
Jellicoe d͡ʒɛliko
|
296 |
-
Jervois d͡ʒərvɔ͡is
|
297 |
-
Ji t͡ɕi
|
298 |
-
Jia d͡ʒia
|
299 |
-
Jiak d͡ʒiak
|
300 |
-
Jian d͡ʒiɛn
|
301 |
-
jiang dʒiang
|
302 |
-
Jiao t͡ɕia͡u
|
303 |
-
Jie d͡ʒiɛ
|
304 |
-
Jing d͡ʒiŋ
|
305 |
-
John d͡ʒɔn
|
306 |
-
Joo d͡ʒu
|
307 |
-
Jun d͡ʒun
|
308 |
-
Jurong ʤuɹɔŋ
|
309 |
-
kacang katʃaŋ
|
310 |
-
Kadaloor kadaluɹ
|
311 |
-
Kah ka
|
312 |
-
Kai ka͡i
|
313 |
-
kakak kakakʔ
|
314 |
-
kaki kaki
|
315 |
-
kambing kambeŋ
|
316 |
-
kampong kampoŋ
|
317 |
-
Kandahar kandəha
|
318 |
-
kang kaŋ
|
319 |
-
Kangkar kaŋka
|
320 |
-
Kapoor kapuɹ
|
321 |
-
Karaage ka. ra. ge
|
322 |
-
Katong katɔŋ
|
323 |
-
Kay kɛ
|
324 |
-
Kaya kaja
|
325 |
-
Kayu kaju
|
326 |
-
Keat kiət
|
327 |
-
Kee ki
|
328 |
-
Keerti kəɹti
|
329 |
-
Kelantan kəlantan
|
330 |
-
Kembangan kəmbaŋan
|
331 |
-
Keng kɛŋ
|
332 |
-
Kent kɛnt
|
333 |
-
Keppel kɛpəl
|
334 |
-
Kerbau kəba͡u
|
335 |
-
Keris kəris
|
336 |
-
Keropok kəɹopok
|
337 |
-
Ketupat kətupat
|
338 |
-
Kew kiu
|
339 |
-
Kia kia
|
340 |
-
Kiat kiɛt
|
341 |
-
Killiney kiləni
|
342 |
-
Kim kim
|
343 |
-
King kiŋ
|
344 |
-
Kio kio
|
345 |
-
Kirk kəɹk
|
346 |
-
Kit kit
|
347 |
-
Kitchener kit͡ʃənə
|
348 |
-
Koon kun
|
349 |
-
kopi k̚owp̚i
|
350 |
-
kosong kosoŋ
|
351 |
-
Kovan kovən
|
352 |
-
Kranji kɹanʤi
|
353 |
-
Kreta krɛta
|
354 |
-
Ku ku
|
355 |
-
Kuala kuala
|
356 |
-
kueh kue
|
357 |
-
Kumar kumaɹ
|
358 |
-
Kung kəŋ
|
359 |
-
Kupang kupaŋ
|
360 |
-
Kurau kuɹa͡u
|
361 |
-
kut kut
|
362 |
-
Kway kwe
|
363 |
-
Labrador lɛbɹədɔ
|
364 |
-
Lakeside leksa͡id
|
365 |
-
laksa lăɹ̆kʼsa
|
366 |
-
Lane len
|
367 |
-
Lap lap
|
368 |
-
lapis laɹpis
|
369 |
-
Larut laɹut
|
370 |
-
Laut laut
|
371 |
-
Lavender lɛvəndə
|
372 |
-
Lay le͡i
|
373 |
-
Layar laja
|
374 |
-
Lebar leba
|
375 |
-
Lee li
|
376 |
-
Lemak ləmakʔ
|
377 |
-
Lembu ləmbu
|
378 |
-
Lengkok lɛŋkɔk
|
379 |
-
Lentor lɛntɔ
|
380 |
-
Leong liɔŋ
|
381 |
-
Leonie liɔni
|
382 |
-
Lian liɛn
|
383 |
-
Liang liaŋ
|
384 |
-
Lim lim
|
385 |
-
Limau lima͡u
|
386 |
-
Ling liŋ
|
387 |
-
Link link
|
388 |
-
Little litəl
|
389 |
-
Liu liu
|
390 |
-
lo lo
|
391 |
-
Loke lɔk
|
392 |
-
long lɔŋ
|
393 |
-
lontong lontoŋ
|
394 |
-
Loop lup
|
395 |
-
lor lɔː
|
396 |
-
Lorong lorɔŋ
|
397 |
-
Loyang lojaŋ
|
398 |
-
Lua lua
|
399 |
-
Mackerrow mɛkəɹou
|
400 |
-
Macpherson mɛkfəsən
|
401 |
-
Madras madɹas
|
402 |
-
Maggie mɛgi
|
403 |
-
mai mai
|
404 |
-
Mak mak
|
405 |
-
Mala mala
|
406 |
-
Malan malan
|
407 |
-
Malu malu
|
408 |
-
Mantou manto
|
409 |
-
Mao ma͡u
|
410 |
-
Margaret magərət
|
411 |
-
Marina məɹina
|
412 |
-
Marine məɹin
|
413 |
-
Marsiling masəliŋ
|
414 |
-
Marymount mɛɹima͡un
|
415 |
-
Mas mas
|
416 |
-
masjid masd͡ʒed
|
417 |
-
mati mati
|
418 |
-
Mattar mata
|
419 |
-
Maude ma͡ud
|
420 |
-
Maxwell mɛkswɛl
|
421 |
-
Mayam majam
|
422 |
-
Mayflower mefla͡uwə
|
423 |
-
Mayo majo
|
424 |
-
McCallum mɛkkeləm
|
425 |
-
mee miː
|
426 |
-
Mei mɛi
|
427 |
-
Melaka məlaka
|
428 |
-
Melayu məlaju
|
429 |
-
Memorial məmɔɹiəl
|
430 |
-
Merah mɛɹa
|
431 |
-
merepek mərepeʔ
|
432 |
-
Mergui mɛrgui
|
433 |
-
Meridian məɹidiən
|
434 |
-
Merino mərino
|
435 |
-
Mian miɛn
|
436 |
-
milo maɪlo
|
437 |
-
min min
|
438 |
-
Ming miŋ
|
439 |
-
Mo mo
|
440 |
-
Mohamed mohamad
|
441 |
-
Morse moɹs
|
442 |
-
Mosque mosk
|
443 |
-
Mount ma͡unt
|
444 |
-
Mountbatten ma͡unbɛtən
|
445 |
-
Muah mua
|
446 |
-
Muhammed Muhammed
|
447 |
-
Mui Mui
|
448 |
-
Murray məre
|
449 |
-
Murtabak mətabak
|
450 |
-
Muthu mutu
|
451 |
-
Naan nan
|
452 |
-
Nabil nabil
|
453 |
-
Nabilah nabila
|
454 |
-
Nadia nadia
|
455 |
-
Najib nad͡ʒib
|
456 |
-
Nankin nankin
|
457 |
-
Napier nɛpiə
|
458 |
-
Nasi nasi
|
459 |
-
Nauser na͡usəɹ
|
460 |
-
Nazihah naziha
|
461 |
-
Nee ni
|
462 |
-
Newton njutən
|
463 |
-
Ngoh ŋɔ
|
464 |
-
Nibong nibɔŋ
|
465 |
-
Nicoll nikol
|
466 |
-
Nikita nikita
|
467 |
-
Niven nivən
|
468 |
-
Normanton nɔrməntən
|
469 |
-
Norris noɹis
|
470 |
-
North nɔf
|
471 |
-
Novena novina
|
472 |
-
Nur nuɹ
|
473 |
-
Nyonya njonja
|
474 |
-
o o
|
475 |
-
Oasis oesis
|
476 |
-
Ondeh ɔnde
|
477 |
-
one-north wannɔθ
|
478 |
-
Orchard ɔt͡ʃəd
|
479 |
-
Orh ɔ
|
480 |
-
Otah ota
|
481 |
-
Outram utrəm
|
482 |
-
padang padaŋ
|
483 |
-
pagar paga
|
484 |
-
Pagoda pagoda
|
485 |
-
Pahang pahaŋ
|
486 |
-
pahat pahat
|
487 |
-
Palmer pa͡umə
|
488 |
-
Panjang pand͡ʒaŋ
|
489 |
-
pantat pantat
|
490 |
-
Par pa
|
491 |
-
Parade pəɹed
|
492 |
-
Park pak
|
493 |
-
pasir pasə
|
494 |
-
Paya paja
|
495 |
-
Payoh pajo
|
496 |
-
Pei pei
|
497 |
-
pek peʔ
|
498 |
-
Pekin pekin
|
499 |
-
Pending pəndiŋ
|
500 |
-
peng peŋ
|
501 |
-
Pengat pəŋgat
|
502 |
-
penyet peɪɲjɛt
|
503 |
-
Petir pətiə
|
504 |
-
Phoenix finiks
|
505 |
-
Piah pia
|
506 |
-
Pie pa͡i
|
507 |
-
Pier piə
|
508 |
-
Ping piŋ
|
509 |
-
Pioneer pa͡iniə
|
510 |
-
Pipit pipit
|
511 |
-
Piring piɹiŋ
|
512 |
-
Pisang pisaŋ
|
513 |
-
Place ples
|
514 |
-
Pleasant plɛzənt
|
515 |
-
Png pəŋk
|
516 |
-
po bo
|
517 |
-
Poh po
|
518 |
-
Point pɔint
|
519 |
-
Pok pɔk
|
520 |
-
Pooja pud͡ʒa
|
521 |
-
Popiah popia
|
522 |
-
Potong potoŋ
|
523 |
-
prata p̚ɹaːɹːta
|
524 |
-
Preston pɹɛstən
|
525 |
-
Prinsep pɹinsɛp
|
526 |
-
Promenade pɹɔmənad
|
527 |
-
pulau pulau
|
528 |
-
Pulut pulut
|
529 |
-
Punggol pɔŋgol
|
530 |
-
Purvis pərvis
|
531 |
-
Puteh putɛ
|
532 |
-
Putri putɹi
|
533 |
-
Putu putu
|
534 |
-
Qi tɕʰi
|
535 |
-
Qian tɕʰiɛn
|
536 |
-
Qing tɕʰiŋ
|
537 |
-
Quay kki
|
538 |
-
Queenstown kwinsta͡un
|
539 |
-
Radhika ɹadihika
|
540 |
-
Radin radin
|
541 |
-
Raffles ɹɛfəls
|
542 |
-
Raja rad͡ʒa
|
543 |
-
Rajah rad͡ʒa
|
544 |
-
Rajesh ɹad͡ʒɛʃ
|
545 |
-
Ranggung ɹaŋgɔŋ
|
546 |
-
Rangoon ɹaŋgun
|
547 |
-
Rebus bus
|
548 |
-
Redhill ɹedhil
|
549 |
-
Ren rən
|
550 |
-
rendang rəndaŋ
|
551 |
-
Renjong ɹənd͡ʒɔŋ
|
552 |
-
Reservoir ɹɛsəvɔ
|
553 |
-
rezeki rəzəki
|
554 |
-
Rhu ɹu
|
555 |
-
Ridge ɹiʤ
|
556 |
-
ris rɪs
|
557 |
-
Riviera ɹiviɛɹa
|
558 |
-
Rizwan ɹizwan
|
559 |
-
Road ɹod
|
560 |
-
Rochor ɹot͡ʃo
|
561 |
-
rojak ɹodʒaɹʔ
|
562 |
-
Rong ɹɔŋ
|
563 |
-
Rotan ɹotan
|
564 |
-
roti ɹoti
|
565 |
-
Rou ɹou
|
566 |
-
Rowell ɹowɛl
|
567 |
-
Rui rue
|
568 |
-
Rumbia ɹumbia
|
569 |
-
Russels ɹasəls
|
570 |
-
Sa sa
|
571 |
-
Saba sa. ba
|
572 |
-
Sago sago
|
573 |
-
Sam sam
|
574 |
-
sambal sambal
|
575 |
-
Samudera samudɛɹa
|
576 |
-
Sanjib sand͡ʒib
|
577 |
-
satay saɹ̆tei
|
578 |
-
Saunders sɔndərs
|
579 |
-
Seah sia
|
580 |
-
Segar səga
|
581 |
-
Selegie səligi
|
582 |
-
Sembawang səmbawaŋ
|
583 |
-
Seng sɛŋ
|
584 |
-
Sengkang seŋkaŋ
|
585 |
-
Senja sənd͡ʒa
|
586 |
-
sentosa səntosə
|
587 |
-
Seow sia͡u
|
588 |
-
Serangoon ʃɛntən
|
589 |
-
Sha ʃa
|
590 |
-
Shahirah ʃahiɹa
|
591 |
-
shallot ʃɛlət
|
592 |
-
shallots ʃɛləts
|
593 |
-
Shan ʃan
|
594 |
-
Shanmugam ʃanmugam
|
595 |
-
Shao ʃa͡u
|
596 |
-
Sharifah ʃaɹifa
|
597 |
-
Sharma ʃaɹma
|
598 |
-
Sheng ʃəŋ
|
599 |
-
Shenton ʃɛntən
|
600 |
-
Shreya ʃɹeja
|
601 |
-
Shriya ʃɹija
|
602 |
-
Si si
|
603 |
-
Siak siak
|
604 |
-
Siam siam
|
605 |
-
Siang siaŋ
|
606 |
-
siew siu
|
607 |
-
Siglap siglap
|
608 |
-
Simei sime
|
609 |
-
Sinai sina͡i
|
610 |
-
Sing siŋ
|
611 |
-
Singh siŋh
|
612 |
-
Siti siti
|
613 |
-
Sixth siks
|
614 |
-
SMC ɛsɛmsi
|
615 |
-
Somerset saməsɜt
|
616 |
-
Soo su
|
617 |
-
Sophia sofia
|
618 |
-
Soto soto
|
619 |
-
Sotong sɔtɔŋ
|
620 |
-
sous su
|
621 |
-
South sa͡uθ
|
622 |
-
Springleaf spɹiŋlif
|
623 |
-
Square skuɛ
|
624 |
-
Srishti sɹiʃti
|
625 |
-
Stadium stediəm
|
626 |
-
Stevens stivəns
|
627 |
-
Street stɹit
|
628 |
-
Sturdee stəɹdi
|
629 |
-
Suan suan
|
630 |
-
sultan sultan
|
631 |
-
sum sam
|
632 |
-
Sumang sumaŋ
|
633 |
-
Sungei suŋa͡i
|
634 |
-
Suresh suɹɛʃ
|
635 |
-
Suria suria
|
636 |
-
Sussex sasɛks
|
637 |
-
Syazwan ʃazwan
|
638 |
-
Syed sa͡id
|
639 |
-
Sze sə
|
640 |
-
ta da
|
641 |
-
tai tai
|
642 |
-
tak taʔ
|
643 |
-
Taman taman
|
644 |
-
Tampines tɛmpənis
|
645 |
-
Tan tan
|
646 |
-
Tanah tana
|
647 |
-
Tandoori tandɔɹi
|
648 |
-
Tang taŋ
|
649 |
-
tanjong tandʒoŋ
|
650 |
-
Tarik ɹik
|
651 |
-
Tat tat
|
652 |
-
Tau da͡uh
|
653 |
-
Taufik ta͡ufik
|
654 |
-
Tauhu ta͡uhu
|
655 |
-
Teck tɛk
|
656 |
-
Tee ti
|
657 |
-
teh tei
|
658 |
-
telok təloʔ
|
659 |
-
Tempeh tempe
|
660 |
-
Teng teŋ
|
661 |
-
Teo tio
|
662 |
-
Terrace tɛɹəs
|
663 |
-
Thanggam taŋgəm
|
664 |
-
the θə
|
665 |
-
Thomson tɔmsən
|
666 |
-
Thosai tose
|
667 |
-
Tiam tiam
|
668 |
-
Tian tjan
|
669 |
-
Tiao tiau
|
670 |
-
Tiong tiɔŋ
|
671 |
-
Tng təŋ
|
672 |
-
Toa to
|
673 |
-
Toh to
|
674 |
-
Tong tɔŋ
|
675 |
-
Tongkang Tongkang
|
676 |
-
Tow ta͡u
|
677 |
-
tower ta͡uə
|
678 |
-
Tras tɹas
|
679 |
-
Trengganu tɹɛŋganu
|
680 |
-
Truro tɹuɹo
|
681 |
-
Tuas tuas
|
682 |
-
Tuck tak
|
683 |
-
Tutu tutu
|
684 |
-
Ubi ubi
|
685 |
-
Upper apə
|
686 |
-
Vadai vada͡i
|
687 |
-
Veerasamy viɹasami
|
688 |
-
Verdun vədən
|
689 |
-
vide vid
|
690 |
-
View viu
|
691 |
-
Vijaya vid͡ʒaja
|
692 |
-
Villa vila
|
693 |
-
Village vileʤ
|
694 |
-
Walk wɔk
|
695 |
-
Wan wan
|
696 |
-
Wang waŋ
|
697 |
-
Wanton wantan
|
698 |
-
Watt wat
|
699 |
-
Way we
|
700 |
-
Wee wikiɛt
|
701 |
-
Wei wei
|
702 |
-
Wen wən
|
703 |
-
West wɛs
|
704 |
-
Weyhill wejhil
|
705 |
-
Whye wa͡i
|
706 |
-
Wilkie wilki
|
707 |
-
Winstedt winstet
|
708 |
-
Wishart wiʃat
|
709 |
-
Woking wokiŋ
|
710 |
-
Woodlands wudləns
|
711 |
-
Woodleigh wudle
|
712 |
-
World wəl
|
713 |
-
Xian siɛn
|
714 |
-
xiao siau
|
715 |
-
Xilin silin
|
716 |
-
Xin sin
|
717 |
-
Xuan Xuan
|
718 |
-
Yan jɛn
|
719 |
-
Yang jaŋ
|
720 |
-
Yati jati
|
721 |
-
Yazid jazid
|
722 |
-
Yee i
|
723 |
-
Yew ju
|
724 |
-
Yi ji
|
725 |
-
Ying jiŋ
|
726 |
-
Yio jo
|
727 |
-
Yishun iʃun
|
728 |
-
Yoke jok
|
729 |
-
Yong yɔŋ
|
730 |
-
You ju
|
731 |
-
Yu Yu
|
732 |
-
Yuan juɛn
|
733 |
-
Yuhua jhua
|
734 |
-
Yun jun
|
735 |
-
Yusuf jusof
|
736 |
-
Zakirah zakira
|
737 |
-
Zao t͡sau
|
738 |
-
Zhe t͡ʃə
|
739 |
-
Zheng ʒəŋd͡ʒiɛn
|
740 |
-
Zhi ʒə
|
741 |
-
Zhong ʒoŋ
|
742 |
-
Zi t͡sə
|
743 |
-
Zion zion
|
744 |
-
Zuk zuk
|
745 |
-
Zulfadli zulfadli
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Docs/Part 1 & 2.md
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
# Part 1 & 2 Description
|
2 |
-
|
3 |
-
Part 1 features about 1000 hours of prompted recordings of phonetically-balanced scripts from about 1000 local English speakers.
|
4 |
-
|
5 |
-
Part 2 presents about 1000 hours of prompted recordings of sentences randomly generated from words based on people, food, location, brands, etc, from about 1000 local English speakers as well. Transcriptions of the recordings have been done orthographically and are available for download.
|
6 |
-
|
7 |
-
Parts 1 and 2 were recorded in quiet rooms using 3 microphones: a headset/ standing microphone (channel 0), a boundary microphone (channel 1), and a mobile phone (channel 3). Recordings that are available for download here have been down-sampled to 16kHz. Details of the microphone models used for each speaker as well as some corresponding non-personal and anonymized information can be found in the ~~accompanying spreadsheets.~~ `SpeakerMetaData` folder.
|
8 |
-
|
9 |
-
## Transcription Documentation
|
10 |
-
|
11 |
-
The audio recordings in Part 1 and Part 2 have been transcribed orthographically, as accurately as possible. ~~Transcriptions can be found in the \SCRIPT folders of every channel. Transcripts follow a 6-digit format, where the first number denote the channel number. The next four numbers correspond to the speaker ID that mark each audio recording folder (in \WAVE), and the last number denotes the session number.~~
|
12 |
-
|
13 |
-
---
|
14 |
-
|
15 |
-
**Reprocessed Version**: Part 1 and 2 is structured in the following manner:
|
16 |
-
|
17 |
-
- `SP{SpeakerID}-CH{ChannelID}-RC{RecordingID}.flac`
|
18 |
-
- `SP{SpeakerID}-CH{ChannelID}-RC{RecordingID}.json`
|
19 |
-
|
20 |
-
Each json file contains the following content:
|
21 |
-
|
22 |
-
```json
|
23 |
-
{
|
24 |
-
"SpeakerID": 0000,
|
25 |
-
"ChannelID": 0,
|
26 |
-
"SessionID": 0000,
|
27 |
-
"RecordingID": 0000,
|
28 |
-
"original_text": "This is a sample text",
|
29 |
-
"read_text": "transcribed sample text",
|
30 |
-
}
|
31 |
-
```
|
32 |
-
|
33 |
-
The `.flac` contains a copy of a losslessly compressed version of the 16kHz `.wav` file that was originally present in the source files.
|
34 |
-
|
35 |
-
*End of Edit.*
|
36 |
-
|
37 |
-
---
|
38 |
-
|
39 |
-
Transcriptions have been done verbatim and are generally in lowercase. Only proper nouns (such as names, addresses, months, etc.) and abbreviations have been capitalized. Additionally, initialisms are also separated by spaces (e.g. G S T and E R P).
|
40 |
-
|
41 |
-
Number sequences are spelled out according to what has been said, and punctuation marks are generally omitted unless they are a necessary part of a written form, such as the apostrophe in Mary’s. Where punctuation marks have been read out, transcriptions were done corresponding to what was read (e.g. ham + cheese would be transcribed as ham plus cheese if that was what was read).
|
42 |
-
|
43 |
-
Unintelligible speech is marked by two asterisks (**).
|
44 |
-
|
45 |
-
Distinguishable non-speech acoustic events have also been marked by the following tags.
|
46 |
-
|
47 |
-
| Tags | Definition |
|
48 |
-
|---|---|
|
49 |
-
| `<FIL/>` | Fillers (e.g. oh, ah, uh, um) |
|
50 |
-
| `<SPK/>` | Noises from the speaker that are not intended in the prompted text, such as coughing, sighing or loud breathing. |
|
51 |
-
| `<STA/>` | Constant background noises, such as rain or traffic noise. |
|
52 |
-
| `<NON/>` | Intermittent non-human noises such as door slams or mouse clicks. |
|
53 |
-
| `<NPS/>` | Noises that were made by someone other than the speaker. |
|
54 |
-
|
55 |
-
## Prompt Acknowledgements (Part 1)
|
56 |
-
|
57 |
-
Some texts within Part 1 have been extracted from the websites set out below for the purpose of phonetic coverage and the generation of audio recordings and transcripts.
|
58 |
-
|
59 |
-
IMDA acknowledges that the texts have been adapted from:
|
60 |
-
[ChannelNewsAsia](https://www.channelnewsasia.com/news/singapore/)
|
61 |
-
[Today Online](https://www.todayonline.com/singapore)
|
62 |
-
[AsiaOne](https://www.asiaone.com/singapore)
|
63 |
-
[BusinessTimes](https://www.businesstimes.com.sg)
|
64 |
-
[MSN (Singapore)](https://www.msn.com/en-sg/news)
|
65 |
-
[Yahoo News (Singapore)](https://sg.news.yahoo.com)
|
66 |
-
[Coconuts.co](https://coconuts.co/singapore/news)
|
67 |
-
[EnterpriseSG](https://ie.enterprisesg.gov.sg)
|
68 |
-
[Mothership.SG](https://mothership.sg)
|
69 |
-
[Sport Singapore](https://www.sportsingapore.gov.sg)
|
70 |
-
[Tech in asia](https://www.techinasia.com/)
|
71 |
-
[TheBestSingapore](https://www.thebestsingapore.com)
|
72 |
-
[Straits Times](https://www.straitstimes.com/singapore)
|
73 |
-
[The New Paper](https://www.tnp.sg/news/singapore)
|
74 |
-
[IMDA](https://www.imda.gov.sg)
|
75 |
-
|
76 |
-
## Prompt Acknowledgements (Part 2)
|
77 |
-
|
78 |
-
The audio uttered by speakers with accompanying transcripts in Part 2 originated from scripts which had been randomly generated from words based on people, food, location, brands, etc.
|
79 |
-
|
80 |
-
All statements in the scripts are entirely fictitious and any portions of the randomly generated scripts which may refer to actual situations/events is entirely coincidental.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Docs/Part 3 - 6.md
DELETED
@@ -1,144 +0,0 @@
|
|
1 |
-
# Part 3, 4 & 5 Documentation
|
2 |
-
|
3 |
-
## Part 3
|
4 |
-
|
5 |
-
Part 3 consists of about 1000 hours of conversational data recorded from about 1000 local English speakers, split into pairs.
|
6 |
-
The data includes conversations covering daily life and of speakers playing games provided.
|
7 |
-
|
8 |
-
Recordings were split into 2 environments. In the Same Room environment, where speakers were in the same room,
|
9 |
-
the recordings were done using 2 microphones: a close-talk mic and a boundary mic.
|
10 |
-
|
11 |
-
In the Separate Room environment, speakers were separated into individual rooms.
|
12 |
-
The recordings were done using 2 microphones in each room: a standing mic and a telephone.
|
13 |
-
|
14 |
-
## Part 4
|
15 |
-
|
16 |
-
In part 4, speakers were encouraged as best as possible to switch from Singapore English to their Mother Tongue languages.
|
17 |
-
These recordings were done under two environments. In the Same Room recordings, speakers sit at least two metres apart and record using their mobile phones.
|
18 |
-
In the Different Room environment, speakers would speak to each other via Zoom on their laptops, recording using their mobile phones.
|
19 |
-
|
20 |
-
## Part 5
|
21 |
-
|
22 |
-
In Part 5, speakers were made to speak following the 4 styles: Debate, Finance topics, Positive Emotion and Negative Emotions.
|
23 |
-
All recordings were done in a Separate room session, via Zoom, where the audio is recorded using the mobile phone.
|
24 |
-
|
25 |
-
## Part 6
|
26 |
-
|
27 |
-
In Part 6, speakers were made to speak following the 3 styles within either of the 3 designs:
|
28 |
-
|
29 |
-
- Design 1 (holiday/hotel/restaurant)
|
30 |
-
- Design 2 (Bank, Telco, Insurance)
|
31 |
-
- Design 3 (HDB, MOE, MSF)
|
32 |
-
|
33 |
-
All recordings were done in a Separate room session, via Zoom, where the audio is recorded using the mobile phone.
|
34 |
-
|
35 |
-
## Part 3 formats (Reprocessed Version)
|
36 |
-
|
37 |
-
For Part 3 (Same Room):
|
38 |
-
|
39 |
-
- `SE<SessionID>-SP<SpeakerID>.flac` [The Audio from the SpeakerID]
|
40 |
-
- `SE<SessionID>-SP_Boundary.flac` [The Audio from the Boundary Microphone. Applicable only to same room part 3]
|
41 |
-
- `SE<SessionID>-SP<SpeakerID>.jsonl` [The Transcript from the SpeakerID in a jsonl format]
|
42 |
-
|
43 |
-
For Part 3 (Separate Room):
|
44 |
-
|
45 |
-
- `SE<SessionID>-SP<SpeakerID>-IVR.flac` [The Audio from the Telephone Microphone (IVR / Interactive Voice Response). Applicable only to part 3]
|
46 |
-
- `SE<SessionID>-SP<SpeakerID>-Standing.flac` [The Audio from the Standing Microphone. Applicable only to separate room part 3]
|
47 |
-
- `SE<SessionID>-SP<SpeakerID>.jsonl` [The Transcript from the SpeakerID in a jsonl format]
|
48 |
-
|
49 |
-
## Part 4 formats (Reprocessed Version)
|
50 |
-
|
51 |
-
- `SE<SessionID>-SP<SpeakerID>.flac` [The Audio from the SpeakerID]
|
52 |
-
- `SE<SessionID>-SP<SpeakerID>.jsonl` [The Transcript from the SpeakerID in a jsonl format]
|
53 |
-
|
54 |
-
## Part 5 formats (Reprocessed Version)
|
55 |
-
|
56 |
-
For `debate.tar` file:
|
57 |
-
|
58 |
-
- `SE<SessionID>-Pt<PartID>-SP<SpeakerID>.flac` [The Audio from the SpeakerID]
|
59 |
-
- `SE<SessionID>-Pt<PartID>-SP<SpeakerID>.jsonl` [The Transcript from the SpeakerID in a jsonl format]
|
60 |
-
|
61 |
-
`PartID` is a 2 digit number from 1-3 (e.g. 01, 02, 03)
|
62 |
-
|
63 |
-
For `FinanceEmotion.tar` file:
|
64 |
-
|
65 |
-
- `SE<SessionID>-Pt<PartID>-SP<SpeakerID>.flac` [The Audio from the SpeakerID]
|
66 |
-
- `SE<SessionID>-Pt<PartID>-SP<SpeakerID>.jsonl` [The Transcript from the SpeakerID in a jsonl format]
|
67 |
-
|
68 |
-
`PartID` can be either: `Fin` (Finance), `Pos` (Positive), `Neg` (Negative)
|
69 |
-
|
70 |
-
## Part 6 formats (Reprocessed Version)
|
71 |
-
|
72 |
-
For all 3 tar files, they follow the following format:
|
73 |
-
|
74 |
-
- `SE<SessionID>-Pt<PartID>-SP<SpeakerID>.flac` [The Audio from the SpeakerID]
|
75 |
-
- `SE<SessionID>-Pt<PartID>-SP<SpeakerID>.jsonl` [The Transcript from the SpeakerID in a jsonl format]
|
76 |
-
|
77 |
-
`PartID` can be either:
|
78 |
-
|
79 |
-
- `Bnk` (Banks)
|
80 |
-
- `Ins` (Insurance)
|
81 |
-
- `Tel` (Telco)
|
82 |
-
|
83 |
-
- `Hdb` ([HDB](https://en.wikipedia.org/wiki/Housing_and_Development_Board))
|
84 |
-
- `Moe` (Ministry of Education)
|
85 |
-
- `Msf` (Ministry of Social and Family Development)
|
86 |
-
|
87 |
-
- `Hol` (Holiday / Travel Agency)
|
88 |
-
- `Hot` (Hotels)
|
89 |
-
- `Res` (Restaurants)
|
90 |
-
|
91 |
-
## Across all formats (jsonl files)
|
92 |
-
|
93 |
-
Each line in the jsonl contains the following format:
|
94 |
-
|
95 |
-
```json
|
96 |
-
{"start":0.0001, "end":0.0001, "text":"", "spkid": 3525}
|
97 |
-
```
|
98 |
-
|
99 |
-
Note that `spkid` may not be provided for all jsonl files. However `start`, `end` and `text` are always provided.
|
100 |
-
|
101 |
-
Additionally, the notation for `<Z>` (Invalid and only if the lines contain that text only) has been removed from the transcript as it is not relevant. (e.g. When other people are speaking.)
|
102 |
-
|
103 |
-
If the line only contains a `<S>`, it is additionally removed as well. (applicable to Part 5 and Beyond)
|
104 |
-
|
105 |
-
## Notations (For Part 4, Part 5 and Part 6)
|
106 |
-
|
107 |
-
| TYPE | CONVENTION | EXAMPLE |
|
108 |
-
|-----------------------------|--------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
|
109 |
-
| 1. Numbers | full form | today is first jan twenty eighteen `<s/>` total is eleven dollars eighteen cents `<s/>` cool `<s/>` |
|
110 |
-
| Symbols | full form | |
|
111 |
-
| 2. Titles | full form (unless abbreviated) | doctor li-nan met mister muhamad-nor-hisham `<c/>` miss (um) madam aarti and professor ang at this year's meet up `<s/>` |
|
112 |
-
| 3. Acronyms | underscore `_` | he wanna study in n_a_f_a `<c/>` but couldn't get in `<s/>` so he's now gonna join n_t_u to study l_m_s `<c/>` linguistics i mean `<s/>` |
|
113 |
-
| Contractions | as is (unless unabbreviated) | |
|
114 |
-
| 4. Multi-word nouns | hyphen `-` | you just came back from hong-kong `<c/>` right `<s/>` are you now staying in ang-mo-kio `<c/>` near s_t-engineering there `<s/>` |
|
115 |
-
| 5. Discourse particles | square bracket `[...]` | [oh] he still wanna eat [ah] `<s/>` [wah] why he like that [one] `<s/>` damn greedy [lah] he `<s/>` |
|
116 |
-
| 6. Fillers | round bracket `(...)` | (uh) yah `<c/>` it's near h_s_s `<s/>` to get there `<c/>` (um) i think you can turn left and (er) actually turn right and walk straight `<s/>` you should reach in (erm) five minutes `<s/>` |
|
117 |
-
| 7. Interjections | Exclamation mark `!...` | !walao! why the hell do we need to study English [right] |
|
118 |
-
| 8. Paralinguistic Phenomena | (ppb) breath | (ppc) (um) there was once i confidently strut down the catwalk `<c/>` as though i'm (ppo) naomi-campbell `<s/>` then i (ppb) trip over a stone and (ppl) fell flat on my face `<s/>` (ppl) |
|
119 |
-
| | (ppc) cough | |
|
120 |
-
| | (ppl) laugh | |
|
121 |
-
| | (ppo) others | |
|
122 |
-
| 9. Other languages | hashtag `#...#` | she went #pasar malam# to buy #roti-john# but #tak sedap# `<s/>` lucky #muah-chee# #shiok# `<c/>` else she'll be #pek chek# `<s/>` |
|
123 |
-
| 10. Unclear words | `<UNK>` | i went to `<UNK>` `<c/>` (er) or was it (erm) `<UNK>` `<s/>` i can't remember the name `<s/>` |
|
124 |
-
| 11. Incomplete words | tilde `~` | [oh] i don't understand the abbrev~ abbrev~ (uh) abbreviation that they use nowadays `<s/>` |
|
125 |
-
| 12. Short pause (longer than 1000 ms) | `<s>` | |
|
126 |
-
| ~~13. Invalid~~ | `<Z>` | |
|
127 |
-
|
128 |
-
## Prompt Acknowledgements (Part 3 Only)
|
129 |
-
|
130 |
-
The conversational data, which includes the audio and accompanying transcripts, in Part 3 were collected from speakers who were speaking to each other in pairs.
|
131 |
-
|
132 |
-
Some speakers were given extracts from card games, and picture games to play to elicit more natural conversation.
|
133 |
-
|
134 |
-
IMDA acknowledges that the following games were used:
|
135 |
-
|
136 |
-
1. Smol talk – www.starknicked.com
|
137 |
-
2. Hypothetically Fun - http://www.hypotheticallyfun.com/
|
138 |
-
3. Baker, R., & Hazan, V. (2011). DiapixUK: task materials for the elicitation of multiple spontaneous speech dialogs. Behavior research methods, 43(3), 761-770.
|
139 |
-
|
140 |
-
Speakers were advised not to divulge private information about themselves or others,
|
141 |
-
as well as to avoid sensitive topics and conversations, by referencing the local radio broadcast guidelines.
|
142 |
-
|
143 |
-
Any sentiments and opinions that may have been expressed by the speakers remain their own, and do not represent the view of IMDA.
|
144 |
-
IMDA is not responsible for the accuracy/truth of any of the statements made by the speakers.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Docs/Phonetic Inventory.pdf
DELETED
Binary file (360 kB)
|
|
Docs/Technical Reference (NSC).md
DELETED
@@ -1,267 +0,0 @@
|
|
1 |
-
# National Speech Corpus
|
2 |
-
|
3 |
-
*Technical Reference for the Creation of Speech Corpora for Technology*
|
4 |
-
|
5 |
-
## Introduction
|
6 |
-
|
7 |
-
The advancement of machine learning methods aided by the improvement in computing power has boosted the creation of new technological capabilities and industries. Though the field of data collection and annotation is not new, the new wave of technological advancements has certainly drawn attention to the demand for large amounts of high-quality data and the opportunities available in this industry.
|
8 |
-
The purpose of this technical reference is to provide an effective source of information for which local companies that are interested in pursuing the creation of large-scale speech datasets, or speech corpora, can leverage on.
|
9 |
-
|
10 |
-
### What is a speech corpus?
|
11 |
-
|
12 |
-
Speech corpora consist of speech audio files, corresponding text transcriptions of these audio files, as well as a lexicon. Speech corpora, especially large-scale ones that are used for speech-enabled applications, often require intensive resources to build. This is why certain languages, varieties, and even accents tend to perform better in these applications than the others. Some prominent corpora used in the research community include the Wall Street Journal read speech corpus (Robinson, Fransen, Pye, Foote, and Renals, 1995) and Switchboard spontaneous speech corpus (Godfrey, Holliman, and McDaniel, 1992) which are often used for benchmarking automatic speech recognition systems and algorithms, while CMU ARCTIC is commonly referenced in text-to-speech research (Kominek and Black, 2003).
|
13 |
-
|
14 |
-
### The National Speech Corpus
|
15 |
-
|
16 |
-
A point of reference that will be made throughout this technical reference is the National Speech Corpus (NSC). NSC was initiated in November 2017 by the Infocomm Media Development Authority of Singapore (IMDA) to provide companies and research institutions that are interested in developing speech-enabled tech applications with large-scale Singapore English accented speech datasets that are open and publicly available for use under the Singapore Open Data License. The corpus was initiated as part of the IMDA Industry Transformation Map to spur on speech-enabled tech applications and research efforts in Singapore since the construction of corpora is often resource-intensive and cannot be easily undertaken by start-ups or companies that wish to take advantage of these emerging technologies but are outside of the industry.
|
17 |
-
|
18 |
-
As of December 2019, NSC has contributed more than 3000 hours of Singapore English speech data and their corresponding transcriptions and lexicons, of which over 2000 hours are read speech, and more than 1000 hours conversational speech. Around half of the read speech corpora are recorded with phonetically balanced scripts to provide a baseline of English sentences read in the Singapore accent, while the other half focuses on named entities in Singapore such as food names and place names so as to aid technology providers who wish to localise their applications to the Singapore context. NSC has helped local technology providers such as Sentient.io to give their algorithms and solutions an edge over what international companies offer. For access to the corpus and latest updates, visit <www.imda.gov.sg/nationalspeechcorpus>.
|
19 |
-
|
20 |
-
### Scope of this reference
|
21 |
-
|
22 |
-
It is assumed that readers already possess a good grasp of related topics and notable works in the field of speech technology. Readers should also have some linguistic knowledge especially in terms of phonetics and phonology, as these would be essential especially when designing scripts for read speech corpora and in the construction of lexicons. Recommended texts for further reading are listed in Section 8.
|
23 |
-
|
24 |
-
It is important to keep in mind that different speech applications place different requirements and constraints on the speech corpora to be used, such as in terms of the number of speakers to record, or the amount of data to record from each speaker. This technical reference covers the creation of corpora, from the design to the post-processing of recorded speech data, mainly with the needs of speech recognition applications in mind.
|
25 |
-
|
26 |
-
Though the chapters are ordered according to the workflow of corpora building, they should be read and considered in entirety, and not as separate steps. The planning and consideration of practical, logistical, and ethical issues is essential to the success of the corpus.
|
27 |
-
|
28 |
-
## Designing read speech corpora
|
29 |
-
|
30 |
-
A read speech corpus refers simply to one where speakers are recorded reading a script or set list of words, phrases, or sentences. One of the main advantages of constructing read speech corpora is that these tend to be relatively easier to obtain and allow for more control over the content. On the other hand, one of the biggest trade-offs is the mismatch between the style of speech, where read speech corpora is ill-suited to train models that are meant to perform on more conversational or natural, spontaneous speech.
|
31 |
-
|
32 |
-
Of the 2000 hours of read speech in NSC, around half was recorded using a phonetically balanced script, and the other half a script that features words that are pertinent to the Singapore context. These will be elaborated in the following Sections 2.2 and 2.3. We will first look at the considerations for designing a phoneme inventory which often works in tandem with the designing of the scripts.
|
33 |
-
|
34 |
-
Phoneme inventory
|
35 |
-
Phonemes refer to the distinguishing or contrasting units of sounds in a language, and a phoneme inventory simply refers to the set of sounds. English has been described to possess around 44 phonemes, though the number varies by dialect and definition. For instance, there are around 20 vowel phonemes in British English and 15 to 19 for American English (Bizzocchi, 2017). For Singapore English, Deterding (2007) tabulated 22 consonants and 8 vowels, which the NSC has adapted upon to include the addition of two affricates and six diphthongs. Given that there are also non-English words found in the corpus, an additional 11 consonants and 1 vowel were included in the phoneme inventory.
|
36 |
-
|
37 |
-
The phoneme inventory is used as the basic units in acoustic modelling and places limits on the selection of the scripts used for the recording, as well as in the construction of the lexicon in the later part of the corpus-building (Section 6.2). It also ties in with the phonetic characteristics of the corpus, which will be covered in the next section.
|
38 |
-
|
39 |
-
### Phonetic balance and richness
|
40 |
-
|
41 |
-
For robust study of a particular dialect (in the linguistic sense) or accent, the phonetic coverage of the corpus is often taken into consideration. This is done by calculating the phone (i.e. phoneme), diphone (adjacent pair of phones), and triphone (combination of three consecutive phones) statistics of the corpus. A corpus is phonetically balanced when the coverage of phones, diphones, and triphones that occur in the corpus aligns with the frequency of occurrence in natural usage. In contrast, a corpus is phonetically rich when all the phones, diphones, and triphones that occur in a language are uniformly distributed (i.e. roughly same amount of training data) in the corpus. Speech corpora that are meant for the purposes of speech recognition have the primary need to be phonetically balanced, while those that are designed for speech synthesis need to be phonetically rich.
|
42 |
-
|
43 |
-
The NSC includes a phonetically balanced portion to provide a baseline of English speech spoken in the Singapore accent. A minimum of 1000 hours of speech was targeted to be recorded from 1000 speakers (i.e. 1 hour of read speech from 1 speaker). To do so, all speakers had to read a set of 200 sentences that were designed to include the phonemes that have been documented to occur in Singapore English. They also had to read another set of 600 sentences that were randomly selected from a large pool of 72,000 sentences that were crawled from a range of online local news sources. Each sentence could be repeated a maximum of 8 times so as to obtain more speaker variation and more coverage of linguistic phenomena.
|
44 |
-
|
45 |
-
When designing prompts, it is recommended that the number of words in each sentence be kept between 10 to 20 words. Prompts that are too short risk not having enough linguistic phenomena being captured in the corpus, while prompts that are too long are difficult for speakers to read without too much disfluencies. It is also important to check and obtain the necessary permissions for published texts before using them as prompts.
|
46 |
-
|
47 |
-
### Local words / Named Entities
|
48 |
-
|
49 |
-
Names are often a challenge for speech-enabled applications, given their highly diverse nature and the general difficulty in capturing the range of names that are and will be used comprehensively. This means that names tend to be out-of-vocabulary, i.e. they do not appear in the training data. While researchers in named entity recognition are still looking for new and better ways to solve this problem, the ability of technology providers and their speech applications to localise and tailor to the local context will no doubt play an influential role in the usability, and consequently, the take-up rate of the applications.
|
50 |
-
|
51 |
-
To give an example, even though the working language in Singapore is English, a large proportion of “local words” such as food names, place names, people’s names are not in English but originate instead from the other languages that Singaporeans speak, like Chinese, Hokkien, Malay, and Tamil. Speech applications that are not trained with such data would only be able to recognize and synthesize these named entities with the nearest English variants that may not even be phonemically similar, affecting the usability of applications that rely on accurate recognition or synthesis of named entities, including hands-free calling and navigation systems.
|
52 |
-
|
53 |
-
To aid technology providers working to tailor their products for Singaporean consumers, the NSC has provided more than 1000 hours of read speech data recorded using prompts with local named entities. Lists of words belonging to the aforementioned categories, in addition to other categories such as brand names and abbreviations (which are used frequently in Singaporean discourse and speech), were compiled and parsed into grammars to automatically generate scripts to be read as prompts for NSC. Speakers read a total of 896 randomly selected sentences. A common challenge with such corpora is that speakers often make errors when reading unfamiliar words or phrases (Hughes et al., 2010), though patterned errors, such as Malay and Indian speakers in NSC pronouncing /y/ in Chinese names as /ju/, could still prove useful for developers.
|
54 |
-
|
55 |
-
## Designing conversational speech corpora
|
56 |
-
|
57 |
-
Unlike a read speech corpus, a conversational or spontaneous speech corpus contains speech data that is produced freely and naturally. Past findings have revealed that speech recognition systems that are solely trained on read speech do not perform as well as systems trained on spontaneous speech when faced with spontaneous speech input, unless it can be ensured that the input is consistently fluent, that is, no fillers, false starts, repairs, or long pauses (Butzberger, Murveit, Shriberg, and Price, 1992). The match between the speaking styles of the training and test data is therefore important in developing and providing effective speech-enabled applications.
|
58 |
-
|
59 |
-
However, there are also some disadvantages to building conversational or spontaneous speech corpora. Unlike read speech, spontaneous speech means that there is less control over what occurs in the data, and exerting control on what speakers can or cannot say may affect spontaneity instead (Furui and Kawahara, 2007). Transcribing the data also requires significantly more human effort and labour, which is why it is expensive to collect and build spontaneous speech corpora.
|
60 |
-
|
61 |
-
The 1000 hours of conversational speech recorded for NSC were recorded in two modes: same-room, where speakers talked to their partners face-to-face, and telephone, where speakers were put into different rooms and talked to each other through the telephones provided. Each recording session was two hours long – assuming each speaker would contribute around an hour of data – with an additional buffer of 15 minutes to cater for a short break and long silences that sometimes occur during conversations. Speakers were encouraged to bring a family member or a friend whom they could speak at least two hours with as their partner for the recording as people tend to be more open to talking freely around someone they are familiar or comfortable with. However, not all speakers are able to bring a partner, and this is an important point to take note of for future conversational speech corpora as well. These speakers were paired up with another speaker, i.e. a stranger, in NSC, and their relationship was recorded in the metadata.
|
62 |
-
|
63 |
-
Conventionally, conversational speech corpora may record speakers conversing freely. However, to minimize the occurrences of silences throughout the session, three methods of elicitation were used. The first was a spot-the-difference task using diapix materials from Baker and Hazan (2011) where speakers were asked to spot twelve differences in two similar pictures without looking at each other’s pictures. This served as a warm-up to the recording environment while eliciting descriptive and directional phrases, and generally took between 10 to 20 minutes to complete. Next, speakers played a conversational card game where they had to take turns asking each other questions on the cards, which lasted around 45 minutes to the whole of the recording session, depending on how much the speakers could elaborate on the questions. Finally, for the speakers who still had some time before the end of the recording session, they were given a list of topic prompts as reference for discussion.
|
64 |
-
|
65 |
-
An analysis by Tan (2019) of the performance of the three elicitation methods on the quality of conversations found that while diapix tasks may be designed with target words in mind, there would usually be a speaker who takes the lead in suggesting answers and the other confirms. Likewise, speakers generally stuck close to the questions on the conversational card games, resulting in a smaller set of lexical items with repeated tokens as compared to free-talk prompts that were the easiest to execute but unpredictable in terms of what the speakers would say. In a way, conversational card games also allowed for equal contribution as speakers took turns to ask and answer questions, and thus may be a useful way of eliciting balanced and more controlled conversations.
|
66 |
-
|
67 |
-
## Recruitment of speakers
|
68 |
-
|
69 |
-
### Demographic considerations
|
70 |
-
|
71 |
-
While corpora that are built for the purposes of speech synthesis require a large amount of data from a few individuals, speech recognition corpora require speech data from a wide variety of speakers. An understanding of the targeted linguistic variety and the underlying sociolinguistic variables is therefore crucial to the representative quality of the corpus.
|
72 |
-
|
73 |
-
According to the targeted number of speakers, decide on the demographic distribution that needs to be fulfilled. For NSC, a few demographic variables were prioritised, specifically gender, age, and ethnicity. The distribution of male and female speakers needed to be largely equal. Three target age groups were also used – 18 to 30 years old, targeted at around 50% of the corpus, 31 to 45 years old, at 30%, and over 46 years old at 20%. To avoid an over-representation of Chinese Singaporeans – which comprises 74.3% of the 4.0 million Singaporean population in 2017 – in the corpus, a target of 50% Chinese, 25% Malay and 25% Indians was implemented, with some allowance for speakers who do not fall into these categories.
|
74 |
-
|
75 |
-
Considerations were also made with regard to the educational level of the speakers so that there would be representation especially from speakers of lower educational backgrounds, but this was difficult to achieve due to the increasingly educated population of Singapore.
|
76 |
-
|
77 |
-
### Recruitment process
|
78 |
-
|
79 |
-
Different corpora with different purposes naturally require different types of participants. For read speech, it would be more important to recruit or prioritise speakers who are able to read sentences fluently without too much practice, and without too many mistakes. It may thus prove worthwhile to conduct interviews to assess the literacy and fluency levels of the speakers prior to confirming their participation in the recordings. One simple way to do so is through phone interviews, where a set of test sentences are sent to the speaker shortly before or during the phone interview, and the speaker is asked to read out the sentences on the spot.
|
80 |
-
|
81 |
-
After determining the targeted demographics of the corpus, list out the criteria that would be used when assessing the suitability of the participant.
|
82 |
-
|
83 |
-
In recruiting the speakers for NSC, some criteria were used in the selection. Speakers firstly had to be at least 18 years of age so that they are legally (according to the law in Singapore) able to consent to participating in the recordings. To ensure that the speakers grew up in contact with the prototypical local accent, the following criteria were also used:
|
84 |
-
|
85 |
-
- Must be a Singapore citizen raised in Singapore, or
|
86 |
-
- Residents who have lived in Singapore for at least 18 years, and
|
87 |
-
- Have undergone a formal education in English in Singapore **public schools** for at least 6 years
|
88 |
-
|
89 |
-
### Participant data collection
|
90 |
-
|
91 |
-
For researchers and developers working in the field of speech technology, participant data is an important and essential source of information that allows them to understand the speech recordings with greater depth so as to derive useful insights and develop better applications. As such, the collection of participant data should be conducted in a purposeful manner, considering what data is likely to be necessary and useful for the target users of the corpus, and the various resources that are needed to store and publish the information in a manner that still protects the anonymity of the participants. The following is a non-exhaustive list of participant information that may be useful in the context of speech corpora as they are sociolinguistic variables that can explain reasons for language variation that possibly occur in the corpus:
|
92 |
-
|
93 |
-
- Gender
|
94 |
-
- Age
|
95 |
-
- Educational attainment
|
96 |
-
- Socioeconomic status
|
97 |
-
- Linguistic repertoire and proficiency
|
98 |
-
- First language(s) (languages first acquired when born)
|
99 |
-
- Region in which the speaker was brought up in
|
100 |
-
|
101 |
-
Table 1 lists out some types of information that are sensitive, i.e. high likelihood of identifying the corpus speakers, and solutions for presenting the information during publishing such that anonymity is still preserved. Keep in mind that information that is not sensitive on its own (e.g. gender, ethnicity) may be combined with other information such that it becomes possible to deduce the identity of the speakers.
|
102 |
-
|
103 |
-
| Sensitive information | Solution |
|
104 |
-
|:---:|:---:|
|
105 |
-
| Name | Give each speaker a unique speaker ID instead |
|
106 |
-
| Exact address | Replace with state, city, or town. The larger the grouping, the less identifiable it will be |
|
107 |
-
| Contact information | Omit from publishing and protect in a secure and confidential manner |
|
108 |
-
| Age | Replace with age groups |
|
109 |
-
| Income | Replace with income brackets and expect that participants may not be willing to provide such information |
|
110 |
-
|
111 |
-
For conversational speech recordings, it would also be useful to make a note of the partner’s speaker ID and the relationship that the speakers have with each other to facilitate further data selection and analysis. Keeping field notes of notable occurrences during each recording is also a good practice to maintain.
|
112 |
-
|
113 |
-
## Recording setup
|
114 |
-
|
115 |
-
### Recording equipment
|
116 |
-
|
117 |
-
For the read speech corpora in NSC, three microphones were used: a close-talk or standing microphone, a boundary microphone, and a mobile phone. The first two microphones were connected to a laptop through an audio interface or audio card while the mobile phone was placed near the speaker. The audio interface used was one from the Focusrite Scarlett series. An in-house recording software displayed the prompts on the laptop and speakers were taught how to navigate and record themselves reading the prompts.
|
118 |
-
|
119 |
-
The microphone set-up also differed according to the mode of recording that the speakers participated in for the conversational speech portion of NSC. For same-room recording, a close-talk microphone was given to each speaker, and a boundary microphone was placed on the table to record both speakers. For telephone recording, standing microphones were placed in each room in front of the speaker in addition to the telephone set that was connected internally through VoIP using an Interactive Voice Response system. Similar to the previous set-up, all microphones apart from the telephone were connected to a laptop through an audio interface. Adobe Audition – a digital audio workstation – was used to initiate and end the recordings.
|
120 |
-
|
121 |
-
When deciding your recording set-up, keep in mind the following points. Far-field microphones are omni-directional, which means all speakers can be recorded at the same time. However, these microphones also record ambient and environmental noises, as well as noises unintentionally generated by speakers when they fidget or knock on the tables. This will increase the difficulty of speaker separation (for multi-party data) and transcription, and possibly reduce the recording quality.
|
122 |
-
|
123 |
-
On the other hand, close-talk and standing microphones provide the highest audio quality which can allow for the use of automatic transcriptions prior to manual transcription. A pitfall is that they are prone to aspiration and spurts when they are positioned too closely to the speakers. Some speakers may also find close-talk microphones uncomfortable, resulting in more fidgeting and thus affecting the recording quality. However, close-talk microphones are more discrete as compared to standing microphones placed directly in front of the speakers.
|
124 |
-
|
125 |
-
Given that different microphones have different strengths and weaknesses, it is worth considering setting up multiple microphones in your recording environment as the weaknesses of certain microphones may be compensated by the strengths of another. For instance, data recorded by close-talk microphones could be used to train the automatic transcription of far-field microphone recordings, given that the audio times are in sync. A multi-microphone set-up will also allow for different kinds of audio data to be captured.
|
126 |
-
|
127 |
-
### Recording environment
|
128 |
-
|
129 |
-
The NSC was recorded mainly in quiet office spaces, though some speakers recorded in professional studios which provided more soundproofing from external noise.
|
130 |
-
|
131 |
-
Some points to take note of when testing the acoustics of the room:
|
132 |
-
|
133 |
-
- Background noise such as from air-conditioning should be limited (noise may be added to clean audio through post-processing if noisy audio is the intended purpose of recording)
|
134 |
-
- Be aware of the noises generated from outside the room at different times of the day or week, e.g. traffic noises, building plumbing, opening/closing of doors from other rooms, etc.
|
135 |
-
- Echo or reverberation levels of the room will add extra noises in the recordings. Check beforehand using software that can help you to determine and measure the RT60 levels, or perform a clapping test to obtain a rough estimate
|
136 |
-
|
137 |
-
The recordings should also be held in clean and professional spaces as far as possible. This ties in with the ethics of the recordings and ensures the safety of both recording staff and speakers. For conversational speech, comfortable environments aid in relaxing and opening up speakers to talk more freely, and so it may be worthwhile to decorate the room to look like a resting or leisure spot. Obvious microphones and recording equipment set-up may become a constant reminder to the speakers that they are being recorded and may lead to some people altering their natural way of speaking, and hence this is also a point to note when choosing microphones and setting up the recording environment.
|
138 |
-
|
139 |
-
Ensure also that the materials placed in the recording room serve a purpose. Having extra materials such as prompts or posters that are unintended for the particular group of speakers could result in wasted effort or incongruency with the other sets of data recorded. You may also need to weigh the pros and cons of the speakers carrying personal electronic devices such as smartphones or tablets during the recording – whether these devices will help to elicit more natural conversations or distract the speakers away from the conversation instead.
|
140 |
-
|
141 |
-
### Recording process
|
142 |
-
|
143 |
-
#### Prior to the recording
|
144 |
-
|
145 |
-
It is always good practice to reconfirm the recording session with the participants before the day of the recording. For conversational speech formats that require speakers to attend in pairs or groups, expect and prepare for late attendees or no-shows in advance.
|
146 |
-
|
147 |
-
Prior to the start of the recording, ensure that all recording equipment are in working condition. Conduct a detailed briefing to let the speakers understand what they can expect to happen throughout the session, as well as what is expected of the speakers. For instance, a non-exhaustive list of information that would be good to cover:
|
148 |
-
|
149 |
-
- Purpose of the recording and what the speech data would be used for
|
150 |
-
- Purpose of collecting participant information
|
151 |
-
- Duration of the session and availability of breaks
|
152 |
-
- Any tasks that they would need to carry out
|
153 |
-
- Expectations or requirements regarding speech styles, languages, topics to talk about or avoid talking about, etc.
|
154 |
-
|
155 |
-
Written consent should be given before the start of the session, and speakers should be informed that they have the right to leave the recording session at any point in time. Ensure that documents filled in and signed by the speakers are placed securely in a designated position where only authorised personnel are able to access. As these documents tend to ask for identifying and sensitive information, it is crucial to ensure that both hardcopy and softcopy versions are stored safely and confidentially. A leak of information could bring about legal repercussions.
|
156 |
-
|
157 |
-
It is encouraged to provide some bottled water for speakers, especially in long recordings of more than 30 minutes to ensure that speakers are not only physically comfortable when speaking but also that the audio quality does not get affected.
|
158 |
-
|
159 |
-
#### During and after the recording
|
160 |
-
|
161 |
-
A dilemma in corpus building concerns whether there should be a recording assistant present in the room during recording. A common reason not to have a recording assistant is that having a third party, effectively a stranger, may lead some people to alter their natural ways of speaking, or even avoid personal topics altogether, regardless of whether it is deliberate or not. On the other hand, having a recording assistant means that common recording audio issues (such as when speakers adjust the microphones too close to themselves) can be easily and quickly rectified. A recording assistant who is trained as an interviewer may also help to elicit conversations, effectively minimising silences that naturally occur during spontaneous conversations. It is thus up to the corpus builder to decide what best fits the goals and purposes of the speech corpus.
|
162 |
-
|
163 |
-
Regardless of the type of speech data being recorded, it is best that speakers are given ample breaks throughout to rest their voices and buffer the length of the recording session accordingly. An acceptable duration of a recording session is around 1 hour, after which physical fatigue is likely to set in and in turn affect the audio quality.
|
164 |
-
|
165 |
-
After the end of the recording session, ensure that the recordings have been properly saved in the correct format and destination, and prepare the prompts and recording room for the next session.
|
166 |
-
|
167 |
-
## Data processing
|
168 |
-
|
169 |
-
The work of processing recording data is perhaps the most laborious part of creating a speech corpus. As with all other steps in the building of corpora, decisions relating to how the data would be processed should be discussed in the initial development stage. A major part of data processing deals with transcribing the data, and though there are tools available such as speech recognition software to derive a base transcript first, delivering high quality transcripts still requires a great deal of human effort. In the following subsections, we will focus on some best practices for ensuring quality transcriptions as well as the building of the lexicon, otherwise known as the pronunciation dictionary. Audio processing will not be covered in this reference, though it may still be a necessary step to take depending on the audio quality and/or needs of the corpus.
|
170 |
-
|
171 |
-
### Transcribing the data
|
172 |
-
|
173 |
-
Prior to transcribing the data, transcription rules should be discussed and agreed upon. How many levels of transcriptions (orthographic, phonetic, prosodic, boundary markings and time alignments, etc.) and how broad or narrow the transcription needs to be would depend on the intended purposes and goals of the corpus. Commonly used transcription software includes Praat (Boersma and Weenink, 2020) and ELAN (Version 5.9; 2020). Transcribers should be trained and be familiar with the transcription rules so as to ensure accuracy and consistency in their transcriptions. A proper system in place to keep track of who has transcribed what would help when the need to rectify transcriptions arises. Each transcription work should be checked by at least another transcriber, and conflicting or varying arguments should be discussed to decide on the most appropriate transcription. This is known as multi-annotator agreement. Some may choose to have this process done in teams of threes or fives, with the majority argument taken as the checked transcription. Finally, transcribers should be native speakers of the language variety that the corpus is built for. This is especially important for corpora that require a high degree of local knowledge.
|
174 |
-
|
175 |
-
For read speech, most of the work required has to do with checking. Though speakers generally read according to the prompts, reducing the laboriousness of transcribing, disfluencies and misreadings or mispronunciations may still occur occasionally. These would need to be accounted for in the transcriptions.
|
176 |
-
|
177 |
-
Conversational speech, on the other hand, requires more human effort. Automatic transcriptions may be done first to generate a base transcription that can help transcribers speed up their work. Though often taken for granted, a major reason to why humans are able to disambiguate words easily from a string of sounds is due to the contextual and world knowledge that one possesses. As such, transcribers should be responsible for the entirety of a recording, rather than working in snippets of speech or on one speaker’s recording but not the partner’s. Transcription rules for conversational speech would also need to include the standardization of spelling of words that do not have a conventionalised form in general dictionaries. These could range from slang words or local lexical items, where transcribers are likely to vary widely in their spelling preferences.
|
178 |
-
|
179 |
-
The following is a non-exhaustive list of phenomena that you may wish to consider in deciding your transcription rules:
|
180 |
-
|
181 |
-
- Pauses
|
182 |
-
- Boundary-marking e.g. end of sentence/ commas
|
183 |
-
- Mid-sentence hesitations
|
184 |
-
- Numbers
|
185 |
-
- Symbols e.g. ($ spelt full form as *dollar*)
|
186 |
-
- Titles (e.g. mister, doctor, professor)
|
187 |
-
- Acronyms (e.g. SAFRA)
|
188 |
-
- Initialisms (e.g. IMDA)
|
189 |
-
- Multi-word nouns
|
190 |
-
- Discourse particles (e.g. lah / la / luh)
|
191 |
-
- Capitalisations
|
192 |
-
- Fillers (e.g. uh / er)
|
193 |
-
- Other languages
|
194 |
-
- Unclear words / segments
|
195 |
-
- Incomplete words / false starts / repairs
|
196 |
-
- Mispronunciations
|
197 |
-
- Identifying / sensitive information
|
198 |
-
- Paralinguistic phenomena (e.g. laughs, coughs, breaths)
|
199 |
-
- Non-speech acoustic events
|
200 |
-
|
201 |
-
### Lexicon
|
202 |
-
|
203 |
-
A lexicon, or a pronunciation dictionary, provides phonetic transcriptions to **all** the words found in the corpus. This is typically organized in a two-column text file with the orthographic form on the left and the phonetic transcription on the right. Though the International Phonetic Alphabet provides a comprehensive alphabet to transcribe phonetic sounds, the signs are often not easily machine-readable. As such, lexicons for speech applications have used other ASCII phonetic scripts such as ARPAbet or SAMPA. ARPAbet was developed and used in many American English systems and in the CMU Pronouncing Dictionary (“The CMU Pronouncing Dictionary”, n.d.). On the other hand, SAMPA was developed first for European languages, with 24 languages – including Cantonese and Thai – being covered at this point of writing (“SAMPA computer readable phonetic alphabet”, n.d.). A variant of SAMPA known as X-SAMPA was also developed to cover all of the symbols used in the International Phonetic Alphabet.
|
204 |
-
|
205 |
-
Similar to orthographic transcriptions, it is possible to generate phonetic transcriptions automatically such as through grapheme-to-phoneme (g2p) converters or dictionaries as a base for transcribers to work on. As mentioned earlier in Section 2.1, the phoneme inventory is crucial to the construction of the lexicon as it effectively places constraints on the automatic and manual transcriptions that will be obtained. Increasingly, pretrained g2p converters have been developed using state-of-the-art machine learning techniques and thus may help in generating base pronunciations more effectively, even for multilingual corpora.
|
206 |
-
|
207 |
-
In the case of NSC, a major challenge to building the lexicon has to do with the loanwords that typically occur in Singaporean speech, as mentioned earlier in Section 2.3. Given that there is no official standardized pronunciation of these non-English named entities, ethnic variation, especially from speakers who do not speak the language that a particular word may originate from, has resulted in multiple pronunciations for many of the loanwords covered in the corpus (Koh et al., 2019). These differing pronunciations have to be taken into consideration, even as they deviate from the canonical, especially if it has become part of the speakers’ variety. It is therefore highly encouraged for corpora that cater to multilingual societies to have a diverse team of transcribers who are native speakers of the local variety so that there are credible sources to base the transcriptions on.
|
208 |
-
|
209 |
-
### Checking and evaluation
|
210 |
-
|
211 |
-
As errors are guaranteed to occur, it is important to put in place a system for quality checking of the transcripts and lexicons. Spell-checking and formatting scripts may help to reduce the workload of correcting minor errors but are likely to miss out on more significant errors such as where the transcriptions are spelt correctly but ultimately do not match what is being said. These errors are often only picked up on during manual checking, and hence we reiterate here the importance of keeping track of each transcriber’s work such that consistent errors may be corrected by batches without causing unforeseen errors or delays in the delivery.
|
212 |
-
|
213 |
-
Finally, some corpora, especially those created for automatic speech recognition, may require an additional step of evaluation in the form of a baseline automatic speech recognition system so as to validate the quality of the data. This first requires portioning the corpus into train, test, and if required, development sets as well. An acoustic model and language model would also need to be built and trained from the corpus on available toolkits such as Kaldi (Povey et al., 2011), before testing the corpus on the automatic speech recognition system to derive benchmarking or performance results, which are often tabulated in terms of word error rate. Some further readings have been provided in Section 8 for readers who are interested to know more about acoustic and language modelling.
|
214 |
-
|
215 |
-
## Conclusion
|
216 |
-
|
217 |
-
In this reference, several considerations underlying the design, execution, and post-processing of speech data were discussed. Though the reference is written with speech corpora for speech recognition applications in mind, some general pointers regarding the recording and post-processing of speech data are still applicable to the building of speech corpora tailored to other aims. Given that building a speech corpus is a non-trivial task, in terms of both finances and labour, effort should be taken to engage local stakeholders as much as possible to ensure that the corpus represents the targeted linguistic variety or accent well.
|
218 |
-
|
219 |
-
## Recommended reading
|
220 |
-
|
221 |
-
Hardcastle, W. J., Laver, J., & Gibbon, F. E. (2010). The handbook of phonetic sciences. Chichester, U.K: Wiley-Blackwell.
|
222 |
-
|
223 |
-
Gibbon, D., Moore, R., & Winski, R. (Eds.). (1998). *Spoken language system and corpus design.* Berlin, Boston: De Gruyter Mouton.
|
224 |
-
|
225 |
-
Jurafsky, D., & Martin, J. (2009). *Speech and language processing: an introduction to natural language processing, computational linguistics, and speech recognition (2nd ed.).* Upper Saddle River, N.J: Pearson Prentice Hall.
|
226 |
-
|
227 |
-
O’Keeffe, A., & McCarthy, M. (2010). *The Routledge handbook of corpus linguistics (1st ed.).* London; Routledge.
|
228 |
-
|
229 |
-
Schultz, T., & Kirchhoff, K. (2006). *Multilingual speech processing.* Elsevier Academic Press.
|
230 |
-
|
231 |
-
Singapore Open Data License (v. 1.0) <https://data.gov.sg/open-data-licence>
|
232 |
-
|
233 |
-
Wynne, M. (Ed.). (2005). *Developing linguistic corpora: a guide to good practice.* Oxford: Oxbow Books. Available online from <http://ota.ox.ac.uk/documents/creating/dlc/> [Accessed 2020-05-23].
|
234 |
-
|
235 |
-
## References
|
236 |
-
|
237 |
-
Baker, R., & Hazan, V. (2011). DiapixUK: Task materials for the elicitation of multiple spontaneous speech dialogs. Behavior Research Methods, 43(3), 761–770.
|
238 |
-
|
239 |
-
Bizzocchi, A. L. (2017). How many phonemes does the English language have? International Journal on Studies in English Language and Literature (IJSELL), 5(10), 36–46.
|
240 |
-
|
241 |
-
Boersma, P., & Weenink, D. (2020). Praat: doing phonetics by computer [Computer program]. Version 6.1.15, retrieved 20 May 2020 from <http://www.praat.org/>
|
242 |
-
|
243 |
-
Butzberger, J., Murveit, H., Shriberg, E., & Price, P. (1992). Spontaneous speech effects in large vocabulary speech recognition applications. In Proceedings of the workshop on Speech and Natural Language (HLT ’91). (pp. 339–343.). Association for Computational Linguistics, USA.
|
244 |
-
|
245 |
-
Deterding, D. (2007). Singapore English. Edinburgh University Press.
|
246 |
-
|
247 |
-
ELAN (Version 5.9) [Computer software]. (2020). Nijmegen: Max Planck Institute for Psycholinguistics, The Language Archive. Retrieved from <https://archive.mpi.nl/tla/elan>
|
248 |
-
|
249 |
-
Furui S. & Kawahara T. (2008) Transcription and distillation of spontaneous speech. In Benesty J., Sondhi M.M., Huang Y.A. (Eds.), Springer Handbook of Speech Processing. Springer Handbooks. (pp. 627-652). Springer, Berlin, Heidelberg
|
250 |
-
|
251 |
-
Godfrey, J. J., Holliman, E. C., & McDaniel, J. (1992). SWITCHBOARD: telephone speech corpus for research and development. Proceedings of ICASSP-92: 1992 IEEE International Conference on Acoustics, Speech, and Signal Processing, 1, 517-520.
|
252 |
-
|
253 |
-
Hughes, T., Nakajima, K., Ha, L., Vasu, A., Moreno, P. J., & LeBeau, M. (2010). Building transcribed speech corpora quickly and cheaply for many languages. INTERSPEECH 2010, 1914-1917.
|
254 |
-
|
255 |
-
Koh, J. X., Mislan, A., Khoo, K., Ang, B., Ang, W., Ng, C., & Tan, Y.-Y. (2019). Building the Singapore English National Speech Corpus. INTERSPEECH 2019, 321–325.
|
256 |
-
|
257 |
-
Kominek, J., & Black, A. W. (2003). CMU Arctic databases for speech synthesis. (Report no. CMU-LTI-03-177). Retrieved from <https://www.lti.cs.cmu.edu/sites/default/files/CMU-LTI-03-177-T.pdf>
|
258 |
-
|
259 |
-
Povey, D., Ghoshal, A., Boulianne, G., Burget, L., Glembek, O., Goel, N., Hannemann, M., Motlíček, P., Qian, Y., Schwarz, P., Silovský, J., Stemmer, G., & Vesel, K. (2011). The Kaldi speech recognition toolkit. *IEEE 2011 Workshop on Automatic Speech Recognition and Understanding.*
|
260 |
-
|
261 |
-
Robinson, T., Fransen, J., Pye, D., Foote, F., & Renals, S. (1995). WSJCAMO: a British English speech corpus for large vocabulary continuous speech recognition. *Proceedings of ICASSP-95: 1995 IEEE International Conference on Acoustics, Speech, and Signal Processing, 1,* 81-84.
|
262 |
-
|
263 |
-
SAMPA computer readable phonetic alphabet. (n.d.). Retrieved from <https://www.phon.ucl.ac.uk/home/sampa/>
|
264 |
-
|
265 |
-
Tan, Y-Y. (2019). Spontaneous speech elicitation for large speech corpus in multilingual Singapore. Proceedings of the LPSS. Academia Sinica, Taipei Taiwan.
|
266 |
-
|
267 |
-
The CMU Pronouncing Dictionary. (n.d.) Retrieved from <http://www.speech.cs.cmu.edu/cgi-bin/cmudict>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Part 1/Channel_0-00.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:a54ae955243f6f64f66f200e2158bff2d3d7c2c2bd1bd3699665a1b34425b879
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_0-01.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:5ce8aed1dbae8c12a12205b192cb805d714769c1245435b22e839beda038dcf7
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_0-02.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:66e32a75aac1f9c1f2d5172c37bfa74d63ec7dad0f0efadf3eaa39c1ae781e05
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_0-03.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:8068d5276d6dbf01e384dde8170f8eecc3a9b3c9a294d0627220cf6ebad20d55
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_0-04.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:15523feb7a99d145c9e9b67d7503a3b27ae9af6ca95b552538d51d87cc191b0f
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_0-05.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:0f349072be7403cdb219d3981a6585b6fb85e20a41cfd3c2199447a2bddf2b97
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_1-00.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:14653e91559e166defcea6ac364e33a1c5545f5a7eaa87ed28355967a97ccae0
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_1-01.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:6c0bc01cc7f12b129aea7180ce1692e21c38cb69a0b4f3357b1f434ff5fb48d0
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_1-02.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:e681bc65ee414ec99ad5cf0c2ed6259d35a27f19288ea81a2b70ea49fa3197a6
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_1-03.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:f69f7e18cd75a196ecbe4631b864b53bfe00df233a326244cd52a712efbd3eb3
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_1-04.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:ee5e78a78ae58fee83d92806cbfb9a5d36f341e0e23beec62e15e5ba01ac6940
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_1-05.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:c4dd285cfbcc1be82533a75ce5a6fcaa8d6645a8c3708b30464b86817da845a4
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_2-00.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:d01ab4388fce70e26e0a69d14e7d4c44cb7a4351085c3151b1cf89b36d8857be
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_2-01.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:3ea3737159eb4c16733e1d1f1c84426454fddac12de9eb790422870fafb8558f
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_2-02.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:2140cbf50fdf31c298e94d461e11c4f040e82c1785633f0085437fe6d8f85276
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_2-03.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:348af73567d69ea9dd83d70db719dd6944b3c8fd983427f24082f4cb4bb3b9f0
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_2-04.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:12efd6af2c02147b41c851d24e27457a7eafec8dca187e1854a8629f65f0e70e
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 1/Channel_2-05.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:0ca864b458ef21ea3a6a2e29c87765dced6fb0dbe7972f8cf6589d24bb73e688
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_0-00.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:d9fdfe9d825591b88d75b6301a30ae8c1b0021616e37dc26449053dfa4b28ca1
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_0-01.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:aedb429ba16d630d87d47f9259b6a19a258d250c5b41a393b48eef68e1dbae7f
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_0-02.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:51d608f7aec91e1d271d995fd091c313dc246475d00e94b69cd4327487553ac6
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_0-03.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:030bfa3171280050020ef1c418915e49ea7aea33992995bf76d5117481a153cd
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_0-04.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:22b602251e427989d4cc0b5553ccfa29e57d5091399a3f6ce2a129724e5c6329
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_1-00.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:9a60b4205c53eae2699e2f088dc0b0e8aa7569092fe6da586dd9e13562fcea3e
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_1-01.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:24b89e2dc05ebe8aea5a9b632ba1d34744b3dc05cf9fbe7fb1ee6c34b912f656
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_1-02.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:dff3353176537818f0553aebf8aa25ccc59f4a5b203c715cf897b1e7814b8771
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_1-03.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:7894f178ed1ffb8a3f43f645a6d96537717d7db277cd5b3e94a984c8a2daaea2
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_1-04.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:1e784d4557d814917f21acce19a39c8553990dea71472c163a01b79a128f17bc
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_1-05.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:25c8767e22e11c8b2e76ff7ed36b08f68a9ea94f290856e915ac26ea7d6734eb
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_1-06.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:89e289c938c4b8c0f66e62b32093c08ade9179f4a637df8dfffd342bf42f99fa
|
3 |
-
size 1492910080
|
|
|
|
|
|
|
|
Part 2/Channel_2-00.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:adbfcaf6ca3ca650d2e4c349f72b2c10453f4854588a2fd7ff3577383140414c
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_2-01.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:65236944aa927a2d44c3653dbfc70b1150b328bfa0218407a9f780e36e3c04da
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_2-02.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:b799e3f826bef0d4edfbe1101db6bfbf4faeaf570015e35af5b83085622a1537
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_2-03.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:64a3ff557376d164e5f594aad24f7ed0af8f1442700fafeb2d7f92f13a542621
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_2-04.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:b586befc59b229b06d53e6b26b8db27b6666411924793f9f98c6d5339599eadd
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 2/Channel_2-05.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:21b554a22e005b633b8476af7c8909642b3888652ff5d16b2d5db4e329da4cf1
|
3 |
-
size 5781678080
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-00.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:55171a9cf3d3073107596c63aa0f8d67aa7ca1079ceae9d9eda01743b7d4554c
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-01.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:f450e999fe44ef7498c7dfe369453a708f8ef0a0d4a74bd2776e75001d8de3ff
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-02.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:1c19974b4967fdba179f3fc907bb8159ef946a5f685ea84cf181db03ca284bfa
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-03.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:e49757a716e11403dc74a6a8901ffb50d45447c6fbf08f4595a50cf79364b08c
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-04.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:abc43760601ea2c011675367c8d7d90124e0ae215240f2d3cd394ac39349ec56
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-05.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:ebc72df55a9f7d9ee12bb3a2082db68f86aa7587c516e77002c2c057417082d3
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-06.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:625c90f6a7742d9de3841f03c3bb1c5d77f954157a90252cf739e8d7e7c1190f
|
3 |
-
size 10737418240
|
|
|
|
|
|
|
|
Part 3/DifferentRooms-07.tar
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:c5d0a79e36dda7adbd38be82be5621bc8afb3147c1bf0a933e6e978477125c3c
|
3 |
-
size 5892249600
|
|
|
|
|
|
|
|