pere commited on
Commit
bd5017c
1 Parent(s): a716b55

Saving transcriptions for split train step 500.

Browse files
.gitattributes CHANGED
@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
59
+ *.csv filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---
3
+ YAML tags:
4
+ annotations_creators:
5
+ - no-annotation
6
+ language_creators:
7
+ - found
8
+ license:
9
+ - other
10
+ multilinguality:
11
+ - multilingual
12
+ pretty_name: ncc_speech_v7
13
+ size_categories:
14
+ - 2G<n<1B
15
+ source_datasets:
16
+ - original
17
+ task_categories:
18
+ - automatic-speech-recognition
19
+ task_ids:
20
+ - language-modeling
21
+ configs:
22
+ - config_name: None
23
+ description: "This dataset does not need any config file."
24
+ ---
25
+
26
+ ## Dataset Card: NbAiLab/distil_raw_ncc_speech_v7
27
+ - Internal dataset created as input for creating Pseudo Labels.
28
+
29
+ ## General Information
30
+ The dataset is based on ncc_speech_v7 (Norwegian Colossal Corpus - Speech). It is then filtered by only including entries where the text language is Norwegian, and where the source is not from "nrk_translate".
31
+
32
+
33
+ ## Potential Use Cases
34
+ The ncc_speech_v7 corpus can be used for various purposes, including but not limited to:
35
+
36
+ - Training Automatic Speech Recognition models.
37
+ - Building text-to-speech systems.
38
+ - Research in speech recognition and natural language processing.
39
+ - Developing language models.
40
+
41
+ ## License
42
+ The ncc_speech_v7 corpus has a private license.
43
+
44
+ ## Citation
45
+ The corpus was created and cleaned by Freddy Wetjen, Rolv-Arild Braaten, Angelina Zanardi and Per Egil Kummervold. No publication has so far been published based on this corpus.
46
+
distil_raw_ncc_speech_v7.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import io
import json
import tarfile

import datasets
from google.cloud import storage

# Boilerplate text surfaced through datasets.DatasetInfo.
_CITATION = """\
# Citation details
"""

_DESCRIPTION = """\
This database was created from NB deposit recordings
"""

_HOMEPAGE = "https://ai.nb.no"

# Google Cloud Storage bucket and shard-path template. Each shard is a
# pair of objects: "<path>.json" (metadata) and "<path>.tar.gz" (audio).
_GCS_BUCKET = "nb-datasets"
_GCS_BASE_PATH = "distil_raw_ncc_speech_v7/data/{split}/ncc_speech_v7-{lang_code}-{shard_idx:04d}-{shard_total:04d}"

# Shard counts per split, keyed by language configuration name.
_SHARDS = {
    "no": {
        datasets.Split.TRAIN: 256,
        datasets.Split.VALIDATION: 1,
        datasets.Split.TEST: 1,
    },
}

# Per-source validation/test splits; each one is a single shard.
_SOURCES = [
    "audio_books_no",
    "clean_audio_books_no",
    "clean_stortinget_no",
    "norwegian_fleurs",
    "nrk_no",
    "nst",
    "stortinget_no",
]
_SHARDS["no"].update(dict.fromkeys([f"validation_{s}" for s in _SOURCES], 1))
_SHARDS["no"].update(dict.fromkeys([f"test_{s}" for s in _SOURCES], 1))
33
class distil_raw_ncc_speech_v7Config(datasets.BuilderConfig):
    """BuilderConfig for distil_raw_ncc_speech_v7.

    Adds no options beyond the base ``datasets.BuilderConfig``; it exists
    only so the builder has a named config type.
    """

    def __init__(self, *args, **kwargs):
        # Delegate everything to the base class.
        super().__init__(*args, **kwargs)
37
class distil_raw_ncc_speech_v7(datasets.GeneratorBasedBuilder):
    """Stream the distil_raw_ncc_speech_v7 corpus from Google Cloud Storage.

    Each shard consists of a JSON-lines metadata object and a ``.tar.gz``
    archive of mp3 files; examples are produced by joining the two on the
    example ``id``. Optional ``post_processors`` callables can filter or
    transform each example before it is yielded.
    """

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        distil_raw_ncc_speech_v7Config(
            name="no",
            version=datasets.Version("1.0.1"),
            description="ncc_speech Norwegian",
        ),
    ]

    def __init__(self, *args, post_processors=None, **kwargs):
        # Normalize to a list so _generate_examples can always iterate.
        # A bare None becomes [None], meaning "yield examples unchanged".
        if not isinstance(post_processors, (tuple, list)):
            post_processors = [post_processors]
        self.post_processors = post_processors
        super().__init__(*args, **kwargs)

    def _info(self):
        """Return the DatasetInfo with the full feature schema.

        NOTE(review): the original file defined ``_info`` twice; the first
        (stub) definition was dead code shadowed by this one and has been
        removed. The duplicate ``"text_en"`` feature key has also been
        dropped (a dict literal keeps only one entry per key anyway).
        """
        sampling_rate = 16000
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("string"),
                "group_id": datasets.Value("string"),
                "source": datasets.Value("string"),
                "audio_language": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=sampling_rate),
                "audio_duration": datasets.Value("int32"),
                "previous_text": datasets.Value("string"),
                "text_en": datasets.Value("string"),
                "text_language": datasets.Value("string"),
                "text": datasets.Value("string"),
                "timestamped_text_en": datasets.Value("string"),
                "timestamped_text": datasets.Value("string"),
                "wav2vec_wer": datasets.Value("float32"),
                "whisper_wer": datasets.Value("float32"),
                "verbosity_level": datasets.Value("int32"),
                "file": datasets.Value("string"),
                "channels": datasets.Value("int32"),
                "frequency": datasets.Value("int32"),
                "language": datasets.Value("string"),
                "task": datasets.Value("string"),
                "_post_processor": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Build one SplitGenerator per split with (metadata, archive) GCS paths.

        Uses ``self.config.name`` for both the shard table and the path
        template instead of the previously hard-coded ``"no"`` so additional
        language configs would work without edits.
        """
        lang_code = self.config.name
        data_urls = {}
        splits = _SHARDS[lang_code].keys()
        for split in splits:
            shard_total = _SHARDS[lang_code][split]
            paths = []
            # Shard indices are 1-based, hence range(1, total + 1).
            for shard_idx in range(1, shard_total + 1):
                gcs_path = _GCS_BASE_PATH.format(
                    split=split,
                    lang_code=lang_code,
                    shard_idx=shard_idx,
                    shard_total=shard_total,
                )
                paths.append((f"{gcs_path}.json", f"{gcs_path}.tar.gz"))
            data_urls[split] = paths

        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepaths": data_urls[split]},
            )
            for split in splits
        ]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs by joining JSONL metadata with tarred mp3s.

        Args:
            filepaths: list of (metadata_path, archive_path) GCS object names.
        """
        storage_client = storage.Client()
        data_fields = list(self._info().features.keys())
        data_fields.remove("audio")  # audio comes from the tar, not metadata

        for metadata_path, archive_path in filepaths:
            bucket = storage_client.bucket(_GCS_BUCKET)

            # Metadata is newline-delimited JSON; index records by their "id".
            metadata_content = {}
            for record in bucket.blob(metadata_path).download_as_text().split("\n"):
                if record.strip():
                    metadata_object = json.loads(record)
                    metadata_content[metadata_object["id"]] = metadata_object

            archive_bytes = io.BytesIO(bucket.blob(archive_path).download_as_bytes())

            with tarfile.open(fileobj=archive_bytes, mode="r") as tar:
                for audio_file in tar.getmembers():
                    if not (audio_file.isfile() and audio_file.name.endswith(".mp3")):
                        continue
                    # Strip only the trailing ".mp3" (guaranteed present by the
                    # endswith check above); the previous .replace(".mp3", "")
                    # would also have removed interior occurrences.
                    metadata_key = audio_file.name[:-len(".mp3")]
                    fields = {
                        key: metadata_content[metadata_key].get(key, "")
                        for key in data_fields
                    }
                    fields["file"] = fields["id"] + ".mp3"
                    fields["channels"] = 1
                    fields["frequency"] = 16000
                    fields["task"] = "transcribe"
                    fields["language"] = fields["text_language"]
                    fields["_post_processor"] = None
                    audio_bytes = tar.extractfile(audio_file).read()
                    metadata_dict = {
                        "id": metadata_key,
                        "audio": {"bytes": audio_bytes, "path": audio_file.name},
                        **fields,
                    }
                    for func in self.post_processors:
                        if func is None:
                            # No post-processing: yield the example as-is.
                            yield metadata_key, metadata_dict
                        else:
                            # Tag the example with the post-processor's name;
                            # fall back to the object's id for anonymous callables.
                            func_name = func.__name__ if func.__name__ else hex(id(func)).replace("0x", "lambda-")
                            result = func(metadata_dict)
                            if result:
                                result["_post_processor"] = func_name
                                yield f"{metadata_key}_{func_name}", result
train-transcription.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:62f8e34bc23ba8cb8efd1bf2da31b7a8f8cc32724dc06dfd5dc23fbb0d8920d4
3
+ size 12622913