arxyzan committed on
Commit def5026
1 Parent(s): 2465e0b

Delete common-voice-13-fa.py

Files changed (1)
  1. common-voice-13-fa.py +0 -136
common-voice-13-fa.py DELETED
@@ -1,136 +0,0 @@
-import csv
-import os
-
-import datasets
-from tqdm import tqdm
-
-
-_DESCRIPTION = """\
-Persian portion of the common voice 13 dataset, gathered and maintained by Hezar AI.
-"""
-
-_CITATION = """\
-@inproceedings{commonvoice:2020,
-  author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
-  title = {Common Voice: A Massively-Multilingual Speech Corpus},
-  booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
-  pages = {4211--4215},
-  year = 2020
-}
-"""
-
-_HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
-
-_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
-
-_BASE_URL = "https://huggingface.co/datasets/hezarai/common-voice-13-fa/resolve/main/"
-
-_AUDIO_URL = _BASE_URL + "audio/{split}.zip"
-
-_TRANSCRIPT_URL = _BASE_URL + "transcripts/{split}.tsv"
-
-
-class CommonVoiceFaConfig(datasets.BuilderConfig):
-    """BuilderConfig for CommonVoice."""
-
-    def __init__(self, **kwargs):
-        super(CommonVoiceFaConfig, self).__init__(**kwargs)
-
-
-class CommonVoice(datasets.GeneratorBasedBuilder):
-    DEFAULT_WRITER_BATCH_SIZE = 1000
-
-    BUILDER_CONFIGS = [
-        CommonVoiceFaConfig(
-            name="commonvoice-13-fa",
-            version="1.0.0",
-            description=_DESCRIPTION,
-        )
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "client_id": datasets.Value("string"),
-                "path": datasets.Value("string"),
-                "audio": datasets.features.Audio(sampling_rate=48_000),
-                "sentence": datasets.Value("string"),
-                "up_votes": datasets.Value("int64"),
-                "down_votes": datasets.Value("int64"),
-                "age": datasets.Value("string"),
-                "gender": datasets.Value("string"),
-                "accent": datasets.Value("string"),
-                "locale": datasets.Value("string"),
-                "segment": datasets.Value("string"),
-                "variant": datasets.Value("string"),
-            }
-        )
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            version=self.config.version,
-        )
-
-    def _split_generators(self, dl_manager):
-        splits = ("train", "validation", "test")
-        audio_urls = {split: _AUDIO_URL.format(split=split) for split in splits}
-
-        archive_paths = dl_manager.download(audio_urls)
-        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
-
-        transcript_urls = {split: _TRANSCRIPT_URL.format(split=split) for split in splits}
-        transcript_paths = dl_manager.download_and_extract(transcript_urls)
-
-        split_generators = []
-        split_names = {
-            "train": datasets.Split.TRAIN,
-            "validation": datasets.Split.VALIDATION,
-            "test": datasets.Split.TEST,
-        }
-        for split in splits:
-            split_generators.append(
-                datasets.SplitGenerator(
-                    name=split_names.get(split, split),
-                    gen_kwargs={
-                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
-                        "archives": [dl_manager.iter_archive(archive_paths.get(split))],
-                        "transcript_path": transcript_paths[split],
-                    },
-                ),
-            )
-
-        return split_generators
-
-    def _generate_examples(self, local_extracted_archive_paths, archives, transcript_path):
-        data_fields = list(self._info().features.keys())
-        metadata = {}
-        with open(transcript_path, encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-            for row in tqdm(reader, desc="Reading metadata..."):
-                if not row["path"].endswith(".mp3"):
-                    row["path"] += ".mp3"
-                # accent -> accents in CV 8.0
-                if "accents" in row:
-                    row["accent"] = row["accents"]
-                    del row["accents"]
-                # if data is incomplete, fill with empty values
-                for field in data_fields:
-                    if field not in row:
-                        row[field] = ""
-                metadata[row["path"]] = row
-
-        for i, audio_archive in enumerate(archives):
-            for path, file in audio_archive:
-                _, filename = os.path.split(path)
-                if filename in metadata:
-                    result = dict(metadata[filename])
-                    # set the audio feature and the path to the extracted file
-                    path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
-                    result["audio"] = {"path": path, "bytes": file.read()}
-                    result["path"] = path
-                    yield path, result
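
For context, the deleted file is a standard `datasets` loading script: `_split_generators` downloads one zip of MP3s and one TSV of transcripts per split, and `_generate_examples` joins the two by audio file name. A minimal usage sketch of how such a script is consumed, assuming the `hezarai/common-voice-13-fa` repo id referenced in `_BASE_URL` and a `datasets` release that still executes loading scripts (newer versions require `trust_remote_code=True`):

# Sketch only; the dataset repo may have since moved to pre-built data files.
from datasets import load_dataset

# Regular mode: audio archives are downloaded and extracted locally.
train_ds = load_dataset("hezarai/common-voice-13-fa", split="train")

# Streaming mode: the zip archives are iterated without extraction.
stream = load_dataset("hezarai/common-voice-13-fa", split="train", streaming=True)
sample = next(iter(stream))
print(sample["sentence"], sample["audio"]["sampling_rate"])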