pere committed
Commit 9d03d1b
1 Parent(s): 7b26bf2

first submit

This view is limited to 50 files because it contains too many changes; see the raw diff for the rest.
Files changed (50)
  1. NPSC.py +207 -0
  2. README.md +139 -1
  3. data/eval/20170209.json +0 -0
  4. data/eval/20170209_16K_mp3.tar.gz +3 -0
  5. data/eval/20170209_48K_mp3.tar.gz +3 -0
  6. data/eval/20180109.json +0 -0
  7. data/eval/20180109_16K_mp3.tar.gz +3 -0
  8. data/eval/20180109_48K_mp3.tar.gz +3 -0
  9. data/eval/20180201.json +0 -0
  10. data/eval/20180201_16K_mp3.tar.gz +3 -0
  11. data/eval/20180201_48K_mp3.tar.gz +3 -0
  12. data/eval/20180307.json +0 -0
  13. data/eval/20180307_16K_mp3.tar.gz +3 -0
  14. data/eval/20180307_48K_mp3.tar.gz +3 -0
  15. data/eval/20180611.json +0 -0
  16. data/eval/20180611_16K_mp3.tar.gz +3 -0
  17. data/eval/20180611_48K_mp3.tar.gz +3 -0
  18. data/test/20170207.json +0 -0
  19. data/test/20170207_16K_mp3.tar.gz +3 -0
  20. data/test/20170207_48K_mp3.tar.gz +3 -0
  21. data/test/20171122.json +0 -0
  22. data/test/20171122_16K_mp3.tar.gz +3 -0
  23. data/test/20171122_48K_mp3.tar.gz +3 -0
  24. data/test/20171219.json +0 -0
  25. data/test/20171219_16K_mp3.tar.gz +3 -0
  26. data/test/20171219_48K_mp3.tar.gz +3 -0
  27. data/test/20180530.json +0 -0
  28. data/test/20180530_16K_mp3.tar.gz +3 -0
  29. data/test/20180530_48K_mp3.tar.gz +3 -0
  30. data/train/20170110.json +0 -0
  31. data/train/20170110_16K_mp3.tar.gz +3 -0
  32. data/train/20170110_48K_mp3.tar.gz +3 -0
  33. data/train/20170208.json +0 -0
  34. data/train/20170208_16K_mp3.tar.gz +3 -0
  35. data/train/20170208_48K_mp3.tar.gz +3 -0
  36. data/train/20170215.json +0 -0
  37. data/train/20170215_16K_mp3.tar.gz +3 -0
  38. data/train/20170215_48K_mp3.tar.gz +3 -0
  39. data/train/20170216.json +0 -0
  40. data/train/20170216_16K_mp3.tar.gz +3 -0
  41. data/train/20170216_48K_mp3.tar.gz +3 -0
  42. data/train/20170222.json +0 -0
  43. data/train/20170222_16K_mp3.tar.gz +3 -0
  44. data/train/20170222_48K_mp3.tar.gz +3 -0
  45. data/train/20170314.json +0 -0
  46. data/train/20170314_16K_mp3.tar.gz +3 -0
  47. data/train/20170314_48K_mp3.tar.gz +3 -0
  48. data/train/20170322.json +0 -0
  49. data/train/20170322_16K_mp3.tar.gz +3 -0
  50. data/train/20170322_48K_mp3.tar.gz +3 -0
NPSC.py ADDED
@@ -0,0 +1,207 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""NPSC: Norwegian Parliament Speech Corpus"""
+
+import io
+import json
+import tarfile
+import datasets
+from datasets.tasks import AutomaticSpeechRecognition
+
+
+_CITATION = """\
+@inproceedings{johansen2019ner,
+    title={},
+    author={},
+    booktitle={LREC 2022},
+    year={2022},
+    url={https://arxiv.org/abs/}
+}
+"""
+
+_DESCRIPTION = """\
+The Norwegian Parliament Speech Corpus (NPSC) is a corpus for training Norwegian ASR (Automatic Speech Recognition) models. The corpus was created by Språkbanken at the National Library of Norway.
+
+NPSC is based on sound recordings from meetings in the Norwegian Parliament. These talks are orthographically transcribed to either Norwegian Bokmål or Norwegian Nynorsk. In addition to the data actually included in this dataset, a significant amount of metadata is included in the original corpus. Through the speaker id there is additional information about the speaker, such as gender, age, and place of birth (i.e. dialect). Through the proceedings id the corpus can be linked to the official proceedings from the meetings.
+
+In total, the corpus contains sound recordings from 40 entire days of meetings. This amounts to 140 hours of speech, 65,000 sentences or 1.2 million words.
+"""
+
+_HOMEPAGE = "https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-58/"
+
+# Example: https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/train/20170110_48K_mp3.tar.gz
+_DATA_URL = "https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/{split}/{shard}_{config}.tar.gz"
+# Example: https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/test/20170207.json
+_METADATA_URL = "https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/{split}/{shard}.json"
+
+_SHARDS = {
+    "eval": ["20170209", "20180109", "20180201", "20180307", "20180611"],
+    "test": ["20170207", "20171122", "20171219", "20180530"],
+    "train": ["20170110", "20170208", "20170215", "20170216", "20170222", "20170314", "20170322", "20170323", "20170403", "20170405", "20170419", "20170426", "20170503", "20170510", "20170516", "20170613", "20170615", "20171007", "20171012", "20171018", "20171024", "20171208", "20171211", "20171213", "20180316", "20180321", "20180404", "20180410", "20180411", "20180601", "20180613", "20180615"],
+}
+
+
+class NpscConfig(datasets.BuilderConfig):
+    """BuilderConfig for NPSC."""
+
+    def __init__(self, *args, **kwargs):
+        """BuilderConfig for NPSC.
+
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(NpscConfig, self).__init__(*args, **kwargs)
+
+
+class Npsc(datasets.GeneratorBasedBuilder):
+    """NPSC dataset."""
+
+    DEFAULT_WRITER_BATCH_SIZE = 1000
+    BUILDER_CONFIGS = [
+        NpscConfig(
+            name="48K_mp3",
+            version=datasets.Version("1.0.0"),
+            description="NPSC with samples in 48KHz stereo mp3",
+        ),
+        NpscConfig(
+            name="16K_mp3",
+            version=datasets.Version("1.0.0"),
+            description="NPSC with samples in 16KHz mono mp3",
+        ),
+        NpscConfig(
+            name="48K_mp3_bokmaal",
+            version=datasets.Version("1.0.0"),
+            description="NPSC with Bokmål samples in 48KHz stereo mp3",
+        ),
+        NpscConfig(
+            name="16K_mp3_bokmaal",
+            version=datasets.Version("1.0.0"),
+            description="NPSC with Bokmål samples in 16KHz mono mp3",
+        ),
+        NpscConfig(
+            name="48K_mp3_nynorsk",
+            version=datasets.Version("1.0.0"),
+            description="NPSC with Nynorsk samples in 48KHz stereo mp3",
+        ),
+        NpscConfig(
+            name="16K_mp3_nynorsk",
+            version=datasets.Version("1.0.0"),
+            description="NPSC with Nynorsk samples in 16KHz mono mp3",
+        ),
+    ]
+
+    def _info(self):
+        sampling_rate = 16_000 if self.config.name.startswith("16K") else 48_000
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "sentence_id": datasets.Value("int32"),
+                    "meeting_date": datasets.Value("string"),
+                    "sentence_order": datasets.Value("int32"),
+                    "speaker_id": datasets.Value("int32"),
+                    "speaker_name": datasets.Value("string"),
+                    "sentence_text": datasets.Value("string"),
+                    "sentence_language_code": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "start_time": datasets.Value("int32"),
+                    "end_time": datasets.Value("int32"),
+                    "normsentence_text": datasets.Value("string"),
+                    "transsentence_text": datasets.Value("string"),
+                    "translated": datasets.Value("int32"),
+                    "audio": datasets.features.Audio(sampling_rate=sampling_rate),
+
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+            task_templates=[
+                AutomaticSpeechRecognition(
+                    audio_column="audio",
+                    transcription_column="sentence_text",
+                )
+            ],
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        data_urls = {}
+        config_name = self.config.name
+        if config_name.endswith("bokmaal") or config_name.endswith("nynorsk"):
+            config_name, *_ = config_name.rsplit("_", 1)
+        for split in ["train", "eval", "test"]:
+            data_urls[split] = []
+            for shard in _SHARDS[split]:
+                data_urls[split] += [(
+                    _METADATA_URL.format(split=split, shard=shard),
+                    _DATA_URL.format(split=split, shard=shard, config=config_name)
+                )]
+        train_downloaded_data = dl_manager.download(data_urls["train"])
+        validation_downloaded_data = dl_manager.download(data_urls["eval"])
+        test_downloaded_data = dl_manager.download(data_urls["test"])
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={
+                    "filepaths": train_downloaded_data,
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={
+                    "filepaths": validation_downloaded_data,
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={
+                    "filepaths": test_downloaded_data,
+                }
+            ),
+        ]
+
+    def _generate_examples(self, filepaths):
+        """Yields examples."""
+        data_fields = list(self._info().features.keys())
+        data_fields.remove("audio")
+        lang_code = None
+        if self.config.name.endswith("bokmaal"):
+            lang_code = "nb-no"
+        elif self.config.name.endswith("nynorsk"):
+            lang_code = "nn-no"
+        for metadata_path, archive_path in filepaths:
+            metadata = {}
+            with open(metadata_path) as metadata_file:
+                for line in metadata_file.read().split("\n"):
+                    if line:
+                        metadata_object = json.loads(line)
+                        if "path" in metadata_object:
+                            metadata_key = metadata_object["path"].split("/", 1)[-1]
+                            metadata[metadata_key] = metadata_object
+            with open(archive_path, "rb") as archive_fs:
+                archive_bytes = io.BytesIO(archive_fs.read())
+                with tarfile.open(fileobj=archive_bytes, mode="r") as tar:
+                    for audio_file in tar.getmembers():
+                        if audio_file.isfile():
+                            metadata_key = audio_file.name.split(".mp3", 1)[0].split("/", 1)[-1]
+                            audio_bytes = tar.extractfile(audio_file).read()
+                            audio_dict = {"bytes": audio_bytes, "path": audio_file.name}
+                            fields = {key: metadata[metadata_key][key] for key in data_fields}
+                            if lang_code:
+                                if lang_code == fields.get("sentence_language_code", "").lower():
+                                    yield metadata_key, {"audio": audio_dict, **fields}
+                            else:
+                                yield metadata_key, {"audio": audio_dict, **fields}
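
The script above pairs each per-meeting metadata file (JSON lines) with a tarball of MP3 clips and joins the two on the relative file path. To spot-check a single shard without going through `datasets`, the same pairing can be reproduced by hand. The sketch below is untested and simply reuses the URL patterns and key-matching logic from `NPSC.py`, fetching the smaller 16K test shard for 20170207:

```python
import io
import json
import tarfile
import urllib.request

# One shard pair, following the URL templates defined in NPSC.py (16K variant for a smaller download).
META_URL = "https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/test/20170207.json"
DATA_URL = "https://huggingface.co/datasets/NbAiLab/NPSC/resolve/main/data/test/20170207_16K_mp3.tar.gz"

meta_path, _ = urllib.request.urlretrieve(META_URL)
data_path, _ = urllib.request.urlretrieve(DATA_URL)

# One JSON object per line, keyed on the path without its leading directory,
# exactly as _generate_examples does.
metadata = {}
with open(meta_path) as f:
    for line in f:
        if line.strip():
            obj = json.loads(line)
            if "path" in obj:
                metadata[obj["path"].split("/", 1)[-1]] = obj

# Pull the first audio clip out of the tarball and look up its transcription.
with open(data_path, "rb") as f:
    archive = io.BytesIO(f.read())
with tarfile.open(fileobj=archive, mode="r") as tar:
    member = next(m for m in tar.getmembers() if m.isfile())
    key = member.name.split(".mp3", 1)[0].split("/", 1)[-1]
    print(member.name, "->", metadata[key]["sentence_text"])
```
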
README.md CHANGED
@@ -1,3 +1,141 @@
 ---
-license: apache-2.0
+annotations_creators:
+- no-annotation
+language_creators:
+- found
+language:
+- 'no'
+- nb
+- nn
+license:
+- cc0-1.0
+multilinguality:
+- monolingual
+size_categories:
+- 2G<n<1B
+source_datasets:
+- original
+task_categories:
+- automatic-speech-recognition
+- audio-classification
+pretty_name: NPSC
+tags:
+- speech-modeling
 ---
+# Dataset Card for NbAiLab/NPSC
+
+
+## Table of Contents
+- [Dataset Description](#dataset-description)
+- [Dataset Summary](#dataset-summary)
+- [Data Fields](#data-fields)
+- [Dataset Creation](#dataset-creation)
+- [Statistics](#statistics)
+- [Document Types](#document-types)
+- [Languages](#languages)
+- [Publish Period](#publish-period)
+- [Considerations for Using the Data](#considerations-for-using-the-data)
+- [Social Impact of Dataset](#social-impact-of-dataset)
+- [Discussion of Biases](#discussion-of-biases)
+- [Other Known Limitations](#other-known-limitations)
+- [Additional Information](#additional-information)
+- [Dataset Curators](#dataset-curators)
+- [Licensing Information](#licensing-information)
+- [Citation Information](#citation-information)
+
+## Dataset Description
+- **Homepage:** https://www.nb.no/sprakbanken/
+- **Repository:** https://www.nb.no/sprakbanken/ressurskatalog/oai-nb-no-sbr-58/
+- **Paper:** https://www.nb.no/sprakbanken/
+- **Point of Contact:** [Per Erik Solberg](mailto:[email protected])
+
+The Norwegian Parliamentary Speech Corpus (NPSC) is a speech corpus made by the Norwegian Language Bank at the National Library of Norway in 2019-2021. The NPSC consists of recordings of speech from Stortinget, the Norwegian parliament, and corresponding orthographic transcriptions in Norwegian Bokmål and Norwegian Nynorsk. All transcriptions are done manually by trained linguists or philologists, and the manual transcriptions are subsequently proofread to ensure consistency and accuracy. Entire days of parliamentary meetings are transcribed in the dataset.
+
+This repository contains a version of the NPSC in the 🤗 Datasets format. Note that the official release of the dataset, which can be found in [the repository of the Norwegian Language Bank](https://www.nb.no/sprakbanken/ressurskatalog/oai-nb-no-sbr-58/), contains more information than the version found here, including word-level metadata, metadata about the speakers, and detailed documentation.
+
+## How to Use
+```python
+# Loads the 16K Bokmål corpus in streaming mode
+from datasets import load_dataset
+data = load_dataset("NbAiLab/NPSC", "16K_mp3_bokmaal", streaming=True)
+```
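
Streaming mode returns one iterable per split ("train", "validation" and "test"), so examples can be pulled lazily without downloading the full corpus. A minimal sketch of reading the first training example, assuming an audio backend that can decode MP3 is installed:

```python
from itertools import islice

from datasets import load_dataset

data = load_dataset("NbAiLab/NPSC", "16K_mp3_bokmaal", streaming=True)
for sample in islice(data["train"], 1):
    print(sample["sentence_text"])
    audio = sample["audio"]  # decoded to a dict with "path", "array" and "sampling_rate"
    print(audio["sampling_rate"], len(audio["array"]))
```
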
+
+## Dataset Summary
+The NPSC dataset contains JSON lines with language training data; the data loader adds the audio data to this structure. Here is an example JSON object:
+```json
+{
+    "sentence_id": 49853,
+    "sentence_order": 0,
+    "speaker_id": 32,
+    "meeting_date": "20170110",
+    "speaker_name": "Olemic Thommessen",
+    "sentence_text": "Stortingets møte er lovlig satt",
+    "sentence_language_code": "nb-NO",
+    "text": "Stortingets møte er lovlig satt",
+    "start_time": 320246,
+    "end_time": 323590,
+    "normsentence_text": "Stortingets møte er lovlig satt",
+    "transsentence_text": "Stortingets møte er lovleg sett",
+    "translated": 1,
+    "audio": {"path": "audio/20170110-095504_320246_323590.wav", "array": [.......]}
+}
+```
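
Each record covers one sentence-level segment, and its clip length follows directly from the millisecond timestamps; for the example record above:

```python
# Timestamps from the example record, given in milliseconds
start_time, end_time = 320246, 323590
print((end_time - start_time) / 1000)  # 3.344 seconds of audio for this sentence
```
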
+
+## Data Fields
+| **Key** | **Type** | **Description** |
+|:--------|:---------|:----------------|
+| **sentence_id** | Integer | Unique identifier of the sentence |
+| **sentence_order** | Integer | A number indicating the order of the sentences in the meeting |
+| **speaker_id** | Integer | The ID of the speaker. This can be linked to the original dataset, which contains thorough demographic and dialectal information about the speaker. |
+| **meeting_date** | String | The date of the meeting in the format __yyyymmdd__ |
+| **speaker_name** | String | Name of the speaker. All speakers were members of the Norwegian Parliament or members of the Norwegian Government at the meeting date |
+| **sentence_text** | String | The sentence text: the transcribed text string of the sentence in non-normalized form. This is the text of the manual transcriptions, without any postprocessing (apart from corrections of known errors). It may contain interrupted words, non-standard words and function words with a pronunciation deviating from the written form. Detailed metadata about the words in the sentence can be found in the word-tokenized version of the corpus in the official release of the dataset. |
+| **sentence_language_code** | String | The language code of the sentence. The following alternatives exist in the file: ['nb-NO', 'nn-NO', 'en-US'] |
+| **text** | String | Sentence text. This is a copy of "sentence_text". It is included here to make it more convenient to interleave with other datasets. |
+| **start_time** | Integer | The start time of the sentence in milliseconds. This time is relative to the start of the audio file of the entire meeting, which can be accessed in the official release |
+| **end_time** | Integer | End time. See the comment above. |
+| **normsentence_text** | String | Normalized sentence text. In this version of the transcription, numbers and dates are written in digits in standardized formats, and common abbreviations are used. These modifications to the original transcriptions are produced automatically using normalization grammars |
+| **transsentence_text** | String | Translated sentence text. Whenever the original transcription is in Bokmål (nb-NO), this field contains a machine-translated version in Nynorsk (nn-NO), and vice versa |
+| **translated** | Integer | A flag indicating whether a machine-translated version has been produced or not. Sentences in en-US have not been translated |
+| **audio** | Dictionary | The data loader decodes the associated audio file and provides it as a dictionary containing 'path', 'array' and 'sampling_rate' |
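
The `*_bokmaal` and `*_nynorsk` configs filter on `sentence_language_code` inside the loading script; the same selection can be made by hand on one of the base configs. A sketch, assuming a streaming load as shown earlier:

```python
from datasets import load_dataset

data = load_dataset("NbAiLab/NPSC", "16K_mp3", streaming=True)

def nynorsk_only(stream):
    """Yield only sentences transcribed in Nynorsk, mirroring the *_nynorsk configs."""
    for sample in stream:
        if sample["sentence_language_code"].lower() == "nn-no":
            yield sample

for sample in nynorsk_only(data["validation"]):
    print(sample["meeting_date"], sample["speaker_name"], sample["sentence_text"])
    break
```
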
+
+
+#### Initial Data Collection
+The procedure for the dataset creation is described in detail in our paper.
+
+
+## Statistics
+| Feature | Value |
+|:--------|------:|
+| Duration, pauses included | 140.3 hours |
+| Duration, pauses not included | 125.7 hours |
+| Word count | 1.2 million |
+| Sentence count | 64,531 |
+| Language distribution | Nynorsk: 12.8% |
+| | Bokmål: 87.2% |
+| Gender distribution | Female: 38.3% |
+| | Male: 61.7% |
+
+
+## Considerations for Using the Data
+This corpus contains speech data. All recordings are of members of Parliament in a public setting, and can be distributed without any restrictions.
+
+### Dataset Creators and Curators
+The content of the dataset was created by the Norwegian Language Bank (Språkbanken) at the National Library of Norway. [Javier de la Rosa](mailto:[email protected]), [Freddy Wetjen](mailto:[email protected]), [Per Egil Kummervold](mailto:[email protected]), and [Andre Kaasen](mailto:[email protected]) all contributed to making this into a HuggingFace Dataset. Thanks to the HuggingFace team for assistance.
+
+## License
+The sound and the transcriptions are released under the [CC0 license](https://creativecommons.org/publicdomain/zero/1.0/). The curation of the HuggingFace Dataset is released under the [CC BY-SA 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/).
+
+### Citation Information
+The following article gives detailed information about the corpus. Please refer to the article and this page if you are using this dataset:
+```
+
+@misc{solberg2022norwegian,
+    title={The Norwegian Parliamentary Speech Corpus},
+    author={Per Erik Solberg and Pablo Ortiz},
+    year={2022},
+    eprint={2201.10881},
+    archivePrefix={arXiv},
+    primaryClass={cs.CL}
+}
+```
data/eval/20170209.json ADDED
The diff for this file is too large to render. See raw diff
 
data/eval/20170209_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6401168c4c3582e343f78376222df65183d2907ff6e98c47f8215b83aaaf9cfc
+size 14590351
data/eval/20170209_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ed1e606946794021528407e05b11a4d7af2ebbb597f7fea13ece4dfe3083b4d
+size 73709123
data/eval/20180109.json ADDED
The diff for this file is too large to render. See raw diff
 
data/eval/20180109_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a6e9a2238ec00a6ae95f75b747ea4da99d3bdef15dcfbd33dab044b10d03270
+size 16090115
data/eval/20180109_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7467f1e2d200c20c587624b152a6ed580f084f650927dbe9e166bb940935de6
+size 82225926
data/eval/20180201.json ADDED
The diff for this file is too large to render. See raw diff
 
data/eval/20180201_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f48972a7569b73b3b3531c6bf7f0aad6d488700fbfd9a7f30714f21706221ba
+size 13296011
data/eval/20180201_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b6e48833dbd46a30a38ab751da341739abfbcbe1a4f21db4678163efc4766be
+size 67332012
data/eval/20180307.json ADDED
The diff for this file is too large to render. See raw diff
 
data/eval/20180307_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a29a77510a5b36663c50a6caafffbaf3d9348a95695840fdc7b77a32f69f894
+size 34573705
data/eval/20180307_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07270d2b5e7f014acf776f339e7562e85cc5873003d9c13d6c6d488632438ce8
+size 175623569
data/eval/20180611.json ADDED
The diff for this file is too large to render. See raw diff
 
data/eval/20180611_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f24d7107a589a46b99eae366fb1a0d7ecd208ec8ce4ea4980e1a31813b1c45b6
+size 62765043
data/eval/20180611_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:227cb6309e24c5752f2bcafbf7e508ecf527a66f6480dcad8f5d3713e4c93c19
+size 318923983
data/test/20170207.json ADDED
The diff for this file is too large to render. See raw diff
 
data/test/20170207_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c635a843a3d6809903732402421efbf843b4c311dca44d1c557c61a0c912193
+size 11602500
data/test/20170207_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6074a745c657768e471adc27245bbdaf81d93c574ae31ffb0c1c6e1438e33e4e
+size 59167326
data/test/20171122.json ADDED
The diff for this file is too large to render. See raw diff
 
data/test/20171122_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b27d503518ffb699159988b3e896484d9dfe1f04b2d0ef16df5da79f5827f777
+size 30671734
data/test/20171122_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87ec8d8b33f7d61e171e22d59c9dc6364eaef5b045bb9af2a36963906189914d
+size 155438682
data/test/20171219.json ADDED
The diff for this file is too large to render. See raw diff
 
data/test/20171219_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d24123c2e6da4053d50133c36d5571c9e62c460980c76385ddac370f23932e53
+size 63037509
data/test/20171219_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa99ef609b4ff2b8415f98f45f540b2b3cd528b595a3363abad77beeee3537ea
+size 326640954
data/test/20180530.json ADDED
The diff for this file is too large to render. See raw diff
 
data/test/20180530_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44822703dae2d6334a84c57b39cbf2522eedeb841e658afbc751bcaf2953e5d4
+size 27201057
data/test/20180530_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5200656274e752cc13a46bca697f660eb2804d77355be5f9369b69cb2c17100
+size 138540914
data/train/20170110.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train/20170110_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e77744f2014826f81cae68de2eb7cf44f0a4b60a17293b872fd762631baf4092
+size 44734335
data/train/20170110_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:595116e928622961bcdea6f9715b7197dd9c85b5f8da5ffc7823904aad93f89a
+size 226819893
data/train/20170208.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train/20170208_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8601b9e4dd021cae2892ea2aa65af2e29a7c852deacda25ac3f93271ba95379
+size 24160285
data/train/20170208_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e4532d067561859194877fff5c354aef862ba58398fcfec0b05b3bc78350ec0
+size 122588726
data/train/20170215.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train/20170215_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ae6c7fde0180c87182660ee0a96ef35c1a8a3e766b3bb4d327a1936eece3ff8
+size 36336266
data/train/20170215_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd62caf39f83e521800b033fdf87df45a7337dde874dc0ad97415e1cd8b7f1a4
+size 183726650
data/train/20170216.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train/20170216_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3d4d0ee292669b984ca739b2060aec21bbdb643f690ab5186ce14588752e31e
+size 15082505
data/train/20170216_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad5676c77a60481fc0be8804b9b0e6d3c329265e652a0ff74581caabfba1642f
+size 76742902
data/train/20170222.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train/20170222_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc59e741602432b26f54947239c017e5dd18bcffff0f2d5c78d23e51859c4fa6
+size 50044405
data/train/20170222_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c839d30cc8fd7e27a3e2750777ddf1963df701afa2eaa4c4de68c3bfb8188371
+size 253967991
data/train/20170314.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train/20170314_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75b700dc7ef1bcd7c898ebd8842ffc041b780c28f6ea30775b551a86f2c0a4e8
+size 30641469
data/train/20170314_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b73e90000080fadcab27a387702a86fd09f9db3de2c218dd60d09c35a237bdc8
+size 155183579
data/train/20170322.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train/20170322_16K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5188b7f24f7bc3ceb101b7cd44a103c95c12ece600ca2854b203d8961a40bc02
+size 37750938
data/train/20170322_48K_mp3.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d57c2eee940534585db03006882fbf71001f8f58a56a5eb27ac84c844229dac7
+size 191764343