|
from typing import List |
|
import os |
|
import csv |
|
import ast |
|
import gzip |
|
import json |
|
|
|
import datasets |
|
from datasets.utils.logging import get_logger |
|
|
|
logger = get_logger(__name__) |
|
|
|
_URL = "https://asappresearch.github.io/slue-toolkit/" |
|
|
|
_DL_URLS = { |
|
"slue-hvb": "data/slue-hvb_blind.zip", |
|
"slue-sqa5": "data/slue-sqa5_blind.zip", |
|
"slue-vp_nel": "data/slue-vp_nel_blind.zip", |
|
} |
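# The archive paths above are relative; the download manager resolves them
# against the location where this loading script (and its data/ folder) is hosted.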
|
|
|
_LICENSE = """ |
|
======================================================= |
|
The license of this script |
|
MIT License |
|
Copyright (c) 2023 ASAPP Inc. |
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
|
of this software and associated documentation files (the "Software"), to deal |
|
in the Software without restriction, including without limitation the rights |
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
|
copies of the Software, and to permit persons to whom the Software is |
|
furnished to do so, subject to the following conditions: |
|
The above copyright notice and this permission notice shall be included in all |
|
copies or substantial portions of the Software. |
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|
SOFTWARE. |
|
======================================================= |
|
SLUE-HVB dataset contains a subset of the Gridspace-Stanford Harper Valley speech dataset, and this subset remains under the original CC-BY-4.0 license. See also the original license notice (https://github.com/cricketclub/gridspace-stanford-harper-valley/blob/master/LICENSE)
|
|
|
Additionally, we provide dialog act classification annotations, which are released under the same CC-BY-4.0 license.
|
======================================================= |
|
SLUE-SQA-5 Dataset |
|
|
|
SLUE-SQA-5 Dataset contains question texts and answer strings (the question_text, normalized_question_text, and answer_spans columns in the .tsv files) from the following datasets:
|
* SQuAD1.1 (for questions whose question_id starts with ‘squad-’) |
|
* Natural Questions (for questions whose question_id starts with ‘nq-’) |
|
* WebQuestions (for questions whose question_id starts with ‘wq-’) |
|
* CuratedTREC (for questions whose question_id starts with ‘trec-’) |
|
* TriviaQA (for questions whose question_id starts with ‘triviaqa-’) |
|
Additionally, we provide audio recordings (.wav files in “question” directories) of these questions. |
|
|
|
For questions from TriviaQA (questions whose question_id starts with ‘triviaqa-’), their question texts, answer strings, and audio recordings are licensed under the same Apache License 2.0 as TriviaQA (for more detail, please refer to https://github.com/mandarjoshi90/triviaqa/blob/master/LICENSE).

For questions from the other 4 datasets, their question texts, answer strings, and audio recordings are licensed under the Creative Commons Attribution-ShareAlike 4.0 International license.
|
|
|
SLUE-SQA-5 also contains a subset of Spoken Wikipedia, including the audio files placed in “document” directories and their transcripts (the document_text and normalized_document_text columns in the .tsv files). Additionally, we provide the text-to-speech alignments (.txt files in “word2time” directories). These contents are licensed under the same Creative Commons (CC BY-SA 4.0) license as Spoken Wikipedia.
|
======================================================= |
|
SLUE-vp_nel Dataset |
|
|
|
SLUE-vp_nel includes word-level time stamps for dev and test splits of the SLUE-voxpopuli corpus. |
|
For the dev split, the dataset also contains named entity annotations and the corresponding time stamps in tsv format.
|
======================================================= |
|
|
|
""" |
|
|
|
_CITATION = """\ |
|
@inproceedings{shon2023slue_phase2, |
|
title={SLUE Phase-2: A Benchmark Suite of Diverse Spoken Language Understanding Tasks}, |
|
author={Shon, Suwon and Arora, Siddhant and Lin, Chyi-Jiunn and Pasad, Ankita and Wu, Felix and Sharma, Roshan and Wu, Wei-Lun and Lee, Hung-Yi and Livescu, Karen and Watanabe, Shinji}, |
|
booktitle={ACL}, |
|
year={2023}, |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
Spoken Language Understanding Evaluation (SLUE) benchmark Phase 2. |
|
""" |
|
|
|
def parse_qa_answer_spans(answer_spans): |
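    """Parse a serialized answer-span list from the TSV.

    `answer_spans` is a string such as "[('some answer', 1.2, 3.4)]"; each
    (answer, start, end) tuple becomes a dict with "answer", "start_second",
    and "end_second" keys.
    """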
|
answer_spans = ast.literal_eval(answer_spans) |
|
return [{"answer": a, "start_second": s, "end_second": e} for a, s, e in answer_spans] |
|
|
|
def load_word2time(word2time_file): |
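    """Load word-to-time alignments from a tab-separated word2time file.

    Each line holds "word<TAB>normalized_word<TAB>start_sec<TAB>end_sec";
    lines containing only a word (no alignment) get ""/-1.0 placeholder values.
    """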
|
word2time = [] |
|
with open(word2time_file, "r") as f: |
|
for line in f.readlines(): |
|
entity = line.strip().split('\t') |
|
            if len(entity) == 1:
|
word = entity[0] |
|
normalized_word, start_sec, end_sec = "", -1.0, -1.0 |
|
else: |
|
word, normalized_word, start_sec, end_sec = entity |
|
start_sec, end_sec = float(start_sec), float(end_sec) |
|
word2time.append( |
|
{ |
|
"word": word, |
|
"normalized_word": normalized_word, |
|
"start_second": start_sec, |
|
"end_second": end_sec, |
|
} |
|
) |
|
return word2time |
|
|
|
def parse_nel_time_spans(nel_timestamps): |
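    """Parse serialized named-entity time spans.

    `nel_timestamps` is a string holding either None or a list of
    (ne_label, start_char_idx, char_offset, start_sec, end_sec) tuples.
    """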
|
nel_timestamps = ast.literal_eval(nel_timestamps) |
|
if nel_timestamps is None: |
|
return [] |
|
return [ |
|
{ |
|
"ne_label": ne, |
|
"start_char_idx": start, |
|
"char_offset": off, |
|
"start_sec": t0, |
|
"end_sec": t1, |
|
} |
|
for ne, start, off, t0, t1 in nel_timestamps |
|
] |
|
|
|
def read_word_timestamps(word_alignments_fn): |
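    """Read word-level timestamps from a JSON file whose "timestamps" field
    is a list of [word, start_sec, end_sec] entries."""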
|
    with open(word_alignments_fn) as f:
        data = json.load(f)
|
return [ |
|
{"word": word, "start_sec": start, "end_sec": end} |
|
for word, start, end in data["timestamps"] |
|
] |
|
|
|
class SLUE2Config(datasets.BuilderConfig): |
|
"""BuilderConfig for SLUE.""" |
|
|
|
def __init__(self, **kwargs): |
|
""" |
|
Args: |
|
data_dir: `string`, the path to the folder containing the files in the |
|
            downloaded .zip
|
citation: `string`, citation for the data set |
|
url: `string`, url for information about the data set |
|
**kwargs: keyword arguments forwarded to super. |
|
""" |
|
super(SLUE2Config, self).__init__( |
|
version=datasets.Version("2.4.0", ""), **kwargs |
|
) |
|
|
|
|
|
class SLUE2(datasets.GeneratorBasedBuilder): |
|
"""Librispeech dataset.""" |
|
|
|
DEFAULT_WRITER_BATCH_SIZE = 256 |
|
DEFAULT_CONFIG_NAME = "hvb" |
|
BUILDER_CONFIGS = [ |
|
SLUE2Config( |
|
name="hvb", |
|
description="SLUE-HVB set.", |
|
), |
|
SLUE2Config( |
|
name="sqa5", |
|
description="SLUE-SQA-5 set which includes Spoken Question Answering task.", |
|
), |
|
SLUE2Config( |
|
name="vp_nel", |
|
description="SLUE-vp_nel set with named entity labels and time-stamps.", |
|
), |
|
] |
|
|
|
def _info(self): |
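        """Return the DatasetInfo with the feature schema for the selected config."""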
|
if self.config.name == "hvb": |
|
features = { |
|
"issue_id": datasets.Value("string"), |
|
"audio": datasets.Audio(sampling_rate=16_000), |
|
"speaker_id": datasets.Value("string"), |
|
"text": datasets.Value("string"), |
|
"utt_index": datasets.Value("int32"), |
|
"channel": datasets.Value("int32"), |
|
"role": datasets.Value("string"), |
|
"start_ms": datasets.Value("int32"), |
|
"duration_ms": datasets.Value("int32"), |
|
"intent": datasets.Value("string"), |
|
"dialog_acts": datasets.Sequence( |
|
datasets.Value("string"), |
|
), |
|
} |
|
elif self.config.name == "sqa5": |
|
features = { |
|
"question_id": datasets.Value("string"), |
|
"question_audio": datasets.Audio(sampling_rate=16_000), |
|
"question_speaker_id": datasets.Value("string"), |
|
"raw_question_text": datasets.Value("string"), |
|
"normalized_question_text": datasets.Value("string"), |
|
"document_id": datasets.Value("string"), |
|
"document_audio": datasets.Audio(sampling_rate=16_000), |
|
"document_speaker_id": datasets.Value("string"), |
|
"raw_document_text": datasets.Value("string"), |
|
"normalized_document_text": datasets.Value("string"), |
|
"word2time": datasets.Sequence( |
|
{ |
|
"word": datasets.Value("string"), |
|
"normalized_word": datasets.Value("string"), |
|
"start_second": datasets.Value("float64"), |
|
"end_second": datasets.Value("float64"), |
|
} |
|
), |
|
"answer_spans": datasets.Sequence( |
|
{ |
|
"answer": datasets.Value("string"), |
|
"start_second": datasets.Value("float64"), |
|
"end_second": datasets.Value("float64"), |
|
} |
|
), |
|
} |
|
elif self.config.name == "vp_nel": |
|
features = { |
|
"id": datasets.Value("string"), |
|
"audio": datasets.Audio(sampling_rate=16_000), |
|
"speaker_id": datasets.Value("string"), |
|
"text": datasets.Value("string"), |
|
"word_timestamps": datasets.Sequence( |
|
{ |
|
"word": datasets.Value("string"), |
|
"start_sec": datasets.Value("float64"), |
|
"end_sec": datasets.Value("float64"), |
|
} |
|
), |
|
"ne_timestamps": datasets.Sequence( |
|
{ |
|
"ne_label": datasets.Value("string"), |
|
"start_char_idx": datasets.Value("int32"), |
|
"char_offset": datasets.Value("int32"), |
|
"start_sec": datasets.Value("float64"), |
|
"end_sec": datasets.Value("float64"), |
|
} |
|
), |
|
} |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=datasets.Features(features), |
|
            supervised_keys=None,  # feature names differ per config, so no (input, target) pair is declared
|
homepage=_URL, |
|
citation=_CITATION, |
|
license=_LICENSE, |
|
) |
|
|
|
def _split_generators( |
|
self, dl_manager: datasets.DownloadManager |
|
) -> List[datasets.SplitGenerator]: |
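        """Download the blind archive for this config and declare its splits:
        fine-tune (train) for hvb/sqa5, dev (validation) and blind test for
        every config, plus a verified blind test split for sqa5."""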
|
|
|
config_name = f"slue-{self.config.name}" |
|
|
|
dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name]) |
|
data_dir = os.path.join(dl_dir, config_name) |
|
|
|
splits = [] |
|
if self.config.name in ["hvb", "sqa5"]: |
|
splits.append( |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
gen_kwargs={ |
|
"filepath": os.path.join( |
|
data_dir or "", f"{config_name}_fine-tune.tsv" |
|
), |
|
"data_dir": data_dir, |
|
}, |
|
) |
|
) |
|
if self.config.name in ["hvb", "sqa5", "vp_nel"]: |
|
splits.append( |
|
datasets.SplitGenerator( |
|
name=datasets.Split.VALIDATION, |
|
gen_kwargs={ |
|
"filepath": os.path.join( |
|
data_dir or "", f"{config_name}_dev.tsv" |
|
), |
|
"data_dir": data_dir, |
|
}, |
|
), |
|
) |
|
splits.append( |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, |
|
gen_kwargs={ |
|
"filepath": os.path.join( |
|
data_dir or "", f"{config_name}_test_blind.tsv" |
|
), |
|
"data_dir": data_dir, |
|
}, |
|
), |
|
) |
|
if self.config.name == "sqa5": |
|
splits.append( |
|
datasets.SplitGenerator( |
|
name="verified_test", |
|
gen_kwargs={ |
|
"filepath": os.path.join( |
|
data_dir or "", f"{config_name}_verified-test_blind.tsv" |
|
), |
|
"data_dir": data_dir, |
|
}, |
|
) |
|
) |
|
return splits |
|
|
|
def _generate_examples(self, filepath, data_dir): |
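        """Yield (key, example) pairs from one split's TSV, resolving audio and
        alignment file paths under `data_dir`."""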
|
logger.info(f"generating examples from = {filepath}") |
|
|
|
with open(filepath) as f: |
|
if self.config.name == "sqa5": |
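                # Disable quote handling for SQA-5 TSVs so quote characters in
                # the text columns are read verbatim.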
|
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE) |
|
else: |
|
reader = csv.DictReader(f, delimiter="\t") |
|
|
|
for idx, row in enumerate(reader): |
|
if self.config.name == "hvb": |
|
split = "test" if "test" in filepath else "dev" if "dev" in filepath else "fine-tune" |
|
audio_file = os.path.join( |
|
data_dir, split, |
|
f'{row["issue_id"]}_{row["start_ms"]}_{int(row["start_ms"]) + int(row["duration_ms"])}.wav' |
|
) |
|
example = { |
|
"issue_id": row["issue_id"], |
|
"audio": audio_file, |
|
"speaker_id": row["speaker_id"], |
|
"text": row["text"], |
|
"utt_index": int(row["utt_index"]), |
|
"channel": int(row["channel"]), |
|
"role": row["role"], |
|
"start_ms": int(row["start_ms"]), |
|
"duration_ms": int(row["duration_ms"]), |
|
"intent": row["intent"], |
|
"dialog_acts": eval(row.get("dialog_acts", "[]")), |
|
} |
|
elif self.config.name == "sqa5": |
|
question_audio_file = os.path.join( |
|
data_dir, row["split"], "question", row["question_id"] + ".wav" |
|
) |
|
document_audio_file = os.path.join( |
|
data_dir, row["split"], "document", row["document_id"] + ".wav" |
|
) |
|
word2time_file = os.path.join( |
|
data_dir, row["split"], "word2time", row["document_id"] + ".txt" |
|
) |
|
example = { |
|
"question_id": row["question_id"], |
|
"question_audio": question_audio_file, |
|
"question_speaker_id": row["question_speaker_id"], |
|
"raw_question_text": row["question_text"], |
|
"normalized_question_text": row["normalized_question_text"], |
|
"document_id": row["document_id"], |
|
"document_audio": document_audio_file, |
|
"document_speaker_id": row["document_speaker_id"], |
|
"raw_document_text": row["document_text"], |
|
"normalized_document_text": row["normalized_document_text"], |
|
"word2time": load_word2time(word2time_file), |
|
"answer_spans": parse_qa_answer_spans(row.get("answer_spans", "[]")), |
|
} |
|
elif self.config.name == "vp_nel": |
|
split = "test" if "test" in filepath else "dev" |
|
utt_id = row["id"] |
|
word_alignments_fn = os.path.join( |
|
data_dir, "word_timestamps", split, f"{utt_id}.json" |
|
) |
|
audio_file = os.path.join( |
|
data_dir, |
|
'audio', |
|
split, |
|
f"{utt_id}.ogg", |
|
) |
|
example = { |
|
"id": utt_id, |
|
"audio": audio_file, |
|
"speaker_id": row["speaker_id"], |
|
"text": row["normalized_text"], |
|
"ne_timestamps": parse_nel_time_spans( |
|
row.get("normalized_nel", "[]") |
|
), |
|
"word_timestamps": read_word_timestamps(word_alignments_fn), |
|
} |
|
yield idx, example |
|
|