"""TED TALKS IWSLT: Web Inventory of Transcribed and Translated Ted Talks in 109 languages.""" |
|
|
|
|
|
import io |
|
import xml.etree.ElementTree as ET |
|
import zipfile |
|
from collections import defaultdict |
|
|
|
import datasets |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{cettolo-etal-2012-wit3, |
|
title = "{WIT}3: Web Inventory of Transcribed and Translated Talks", |
|
author = "Cettolo, Mauro and |
|
Girardi, Christian and |
|
Federico, Marcello", |
|
booktitle = "Proceedings of the 16th Annual conference of the European Association for Machine Translation", |
|
month = may # " 28{--}30", |
|
year = "2012", |
|
address = "Trento, Italy", |
|
publisher = "European Association for Machine Translation", |
|
url = "https://www.aclweb.org/anthology/2012.eamt-1.60", |
|
pages = "261--268", |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
The core of WIT3 is the TED Talks corpus, which redistributes the original content published by the TED Conference website (http://www.ted.com). Since 2007,
the TED Conference, based in California, has been posting video recordings of all of its talks together with subtitles in English
and their translations in more than 80 languages. Aside from its cultural and social relevance, this content, which is published under the Creative Commons BY-NC-ND license, is also a valuable
language resource for the machine translation research community, thanks to its size, the variety of topics it covers, and the number of languages it spans.
WIT3 repurposes the original content in a form that is more convenient for machine translation researchers.
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://wit3.fbk.eu/" |
|
|
|
|
|
_LICENSE = "CC-BY-NC-4.0" |
|
|
|
|
|
|
|
|
|
|
|
_URL = "https://huggingface.co/datasets/ted_talks_iwslt/resolve/main/data/XML_releases.tgz" |
|
|
|
|
|
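# Language codes accepted in a language pair (the 109 languages covered by the WIT3 releases).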
_LANGUAGES = ( |
|
"mr", |
|
"eu", |
|
"hr", |
|
"rup", |
|
"szl", |
|
"lo", |
|
"ms", |
|
"ht", |
|
"hy", |
|
"mg", |
|
"arq", |
|
"uk", |
|
"ku", |
|
"ig", |
|
"sr", |
|
"ug", |
|
"ne", |
|
"pt-br", |
|
"sq", |
|
"af", |
|
"km", |
|
"en", |
|
"tt", |
|
"ja", |
|
"inh", |
|
"mn", |
|
"eo", |
|
"ka", |
|
"nb", |
|
"fil", |
|
"uz", |
|
"fi", |
|
"tl", |
|
"el", |
|
"tg", |
|
"bn", |
|
"si", |
|
"gu", |
|
"sk", |
|
"kn", |
|
"ar", |
|
"hup", |
|
"zh-tw", |
|
"sl", |
|
"be", |
|
"bo", |
|
"fr", |
|
"ps", |
|
"tr", |
|
"ltg", |
|
"la", |
|
"ko", |
|
"lv", |
|
"nl", |
|
"fa", |
|
"ru", |
|
"et", |
|
"vi", |
|
"pa", |
|
"my", |
|
"sw", |
|
"az", |
|
"sv", |
|
"ga", |
|
"sh", |
|
"it", |
|
"da", |
|
"lt", |
|
"kk", |
|
"mk", |
|
"tlh", |
|
"he", |
|
"ceb", |
|
"bg", |
|
"fr-ca", |
|
"ha", |
|
"ml", |
|
"mt", |
|
"as", |
|
"pt", |
|
"zh-cn", |
|
"cnh", |
|
"ro", |
|
"hi", |
|
"es", |
|
"id", |
|
"bs", |
|
"so", |
|
"cs", |
|
"te", |
|
"ky", |
|
"hu", |
|
"th", |
|
"pl", |
|
"nn", |
|
"ca", |
|
"is", |
|
"ta", |
|
"de", |
|
"srp", |
|
"ast", |
|
"bi", |
|
"lb", |
|
"art-x-bork", |
|
"am", |
|
"oc", |
|
"zh", |
|
"ur", |
|
"gl", |
|
) |
|
|
|
|
|
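# (source, target) pairs for which builder configs are predefined below.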
_LANGUAGE_PAIRS = [ |
|
("eu", "ca"), |
|
("nl", "en"), |
|
("nl", "hi"), |
|
("de", "ja"), |
|
("fr-ca", "hi"), |
|
] |
|
|
|
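# Human-readable language names emitted in the "source_lang" / "target_lang" fields.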
_LANGUAGE_TO_FULL_NAME = { |
|
"eu": "Basque", |
|
"de": "German", |
|
"nl": "Dutch", |
|
"hi": "Hindi", |
|
"ja": "Japanese", |
|
"fr-ca": "French", |
|
"ca": "Catalan" |
|
} |
|
|
|
|
|
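# File-name suffix of the per-language zip/XML files for each supported release year.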
_YEAR = {"2014": "-20140120", "2015": "-20150530", "2016": "-20160408"} |
|
|
|
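# Folder inside the downloaded XML_releases.tgz that holds the zips for each release year.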
_YEAR_FOLDER = { |
|
"2014": "XML_releases/xml-20140120", |
|
"2015": "XML_releases/xml-20150616", |
|
"2016": "XML_releases/xml", |
|
} |
|
|
|
|
|
class TedTalksIWSLTConfig(datasets.BuilderConfig): |
|
""" "Builder Config for the TedTalks IWSLT dataset""" |
|
|
|
def __init__(self, language_pair=(None, None), year=None, **kwargs): |
|
"""BuilderConfig for TedTalks IWSLT dataset. |
|
Args: |
|
for the `datasets.features.text.TextEncoder` used for the features feature. |
|
language_pair: pair of languages that will be used for translation. Should |
|
contain 2-letter coded strings. First will be used at source and second |
|
as target in supervised mode. For example: ("pl", "en"). |
|
**kwargs: keyword arguments forwarded to super. |
|
""" |
|
|
|
name = "%s_%s_%s" % (language_pair[0], language_pair[1], year) |
|
source, target = language_pair |
|
assert source in _LANGUAGES, f"Invalid source code in language pair: {source}" |
|
assert target in _LANGUAGES, f"Invalid target code in language pair: {target}" |
|
assert ( |
|
source != target |
|
), f"Source::{source} and Target::{target} language pairs cannot be the same!" |
|
        assert year in _YEAR, f"Invalid year: {year}. Expected one of: {list(_YEAR)}"
|
|
|
        description = (
            f"TED Talks (WIT3) translation dataset between {source} and {target}"
        )
|
super(TedTalksIWSLTConfig, self).__init__( |
|
name=name, |
|
description=description, |
|
**kwargs, |
|
) |
|
|
|
self.language_pair = language_pair |
|
self.year = year |
|
|
|
|
|
|
|
class TedTalksIWSLT(datasets.GeneratorBasedBuilder): |
|
"""TED TALKS IWSLT: Web Inventory of Transcribed and Translated Ted Talks in 109 languages.""" |
|
|
|
VERSION = datasets.Version("1.1.0") |
|
|
|
BUILDER_CONFIG_CLASS = TedTalksIWSLTConfig |
|
|
|
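    # One predefined config per (language_pair, year) combination; config names have the
    # form "<source>_<target>_<year>", e.g. "eu_ca_2014" or "nl_en_2016".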
BUILDER_CONFIGS = [ |
|
TedTalksIWSLTConfig( |
|
language_pair=language_pair, year=year, version=datasets.Version("1.1.0") |
|
) |
|
for language_pair in _LANGUAGE_PAIRS |
|
for year in _YEAR.keys() |
|
] |
|
|
|
def _info(self): |
|
features = datasets.Features( |
|
            {
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
                "source_lang": datasets.Value("string"),
                "target_lang": datasets.Value("string"),
            },
        )
|
|
|
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
|
|
def _split_generators(self, dl_manager): |
|
"""Returns SplitGenerators.""" |
|
data_dir = dl_manager.download(_URL) |
|
|
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
gen_kwargs={ |
|
"files": dl_manager.iter_archive(data_dir), |
|
}, |
|
), |
|
] |
|
|
|
def _generate_examples(self, files): |
|
"""Yields examples.""" |
|
|
|
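        # Helper: open one "ted_<lang><date>.zip" member of the release archive, parse the
        # single XML file inside it, and return (talks, talk_ids).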
def parse_zip_file(path, file): |
|
def et_to_dict(tree): |
|
"""This is used to convert the xml to a list of dicts""" |
|
|
|
dct = {tree.tag: {} if tree.attrib else None} |
|
children = list(tree) |
|
if children: |
|
dd = defaultdict(list) |
|
for dc in map(et_to_dict, children): |
|
for k, v in dc.items(): |
|
dd[k].append(v) |
|
dct = {tree.tag: dd} |
|
if tree.attrib: |
|
dct[tree.tag].update((k, v) for k, v in tree.attrib.items()) |
|
if tree.text: |
|
text = tree.text.strip() |
|
if children or tree.attrib: |
|
if text: |
|
dct[tree.tag]["text"] = text |
|
else: |
|
dct[tree.tag] = text |
|
return dct |
|
|
|
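            # The zip member is expected to contain exactly one XML file named after the
            # archive itself, e.g. "ted_en-20140120.zip" -> "ted_en-20140120.xml".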
with zipfile.ZipFile(io.BytesIO(file)) as zf: |
|
try: |
|
tree = ET.parse(zf.open(path.split("/")[-1][:-3] + "xml")) |
|
root = tree.getroot() |
|
talks = et_to_dict(root).get("xml").get("file") |
|
ids = [talk.get("head")[0].get("talkid") for talk in talks] |
|
except Exception as pe: |
|
logger.warning(f"ERROR: {pe}") |
|
logger.warning( |
|
"This likely means that you have a malformed XML file!" |
|
) |
|
                    ids = []
                    talks = []
|
return talks, ids |
|
|
|
language_pair = self.config.language_pair |
|
year = self.config.year |
|
|
|
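        # Paths of the two per-language zip members inside the downloaded XML_releases.tgz,
        # e.g. "XML_releases/xml-20140120/ted_nl-20140120.zip".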
source_file_path = ( |
|
_YEAR_FOLDER[year] + "/ted_" + language_pair[0] + _YEAR[year] + ".zip" |
|
) |
|
target_file_path = ( |
|
_YEAR_FOLDER[year] + "/ted_" + language_pair[1] + _YEAR[year] + ".zip" |
|
) |
|
|
|
source_talks, source_ids = None, None |
|
target_talks, target_ids = None, None |
|
for path, file in files: |
|
if source_ids is not None and target_ids is not None: |
|
break |
|
|
|
if source_ids is None and path.endswith(source_file_path): |
|
source_talks, source_ids = parse_zip_file(path, file.read()) |
|
elif target_ids is None and path.endswith(target_file_path): |
|
target_talks, target_ids = parse_zip_file(path, file.read()) |
|
|
|
if source_ids is None or target_ids is None: |
|
source_ids = list() |
|
target_ids = list() |
|
|
|
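        # Keep only the talk ids that are present in both the source and the target release.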
comm_talkids = [talkid for talkid in target_ids if talkid in source_ids] |
|
|
|
translation = list() |
|
|
|
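        # For every common talk, align three kinds of segments: the talk description
        # (example id "<talkid>_1"), the talk title ("<talkid>_2"), and the individual
        # subtitle cues from the transcription (keyed by their cue id).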
for talkid in comm_talkids: |
|
source = list( |
|
filter( |
|
lambda talk: talk.get("head")[0].get("talkid") == talkid, |
|
source_talks, |
|
) |
|
) |
|
target = list( |
|
filter( |
|
lambda talk: talk.get("head")[0].get("talkid") == talkid, |
|
target_talks, |
|
) |
|
) |
|
|
|
if len(source) == 0 or len(target) == 0: |
|
pass |
|
else: |
|
source = source[0] |
|
target = target[0] |
|
|
|
if source.get("head")[0].get("description") and target.get("head")[0].get( |
|
"description" |
|
): |
|
if ( |
|
source.get("head")[0].get("description")[0] |
|
and target.get("head")[0].get("description")[0] |
|
): |
|
temp_dict = dict() |
|
temp_dict["id"] = source.get("head")[0].get("talkid")[0] + "_1" |
|
temp_dict[language_pair[0]] = ( |
|
source.get("head")[0] |
|
.get("description")[0] |
|
.replace("TED Talk Subtitles and Transcript: ", "") |
|
) |
|
temp_dict[language_pair[1]] = ( |
|
target.get("head")[0] |
|
.get("description")[0] |
|
.replace("TED Talk Subtitles and Transcript: ", "") |
|
) |
|
translation.append(temp_dict) |
|
|
|
if source.get("head")[0].get("title") and target.get("head")[0].get( |
|
"title" |
|
): |
|
if ( |
|
source.get("head")[0].get("title")[0] |
|
and target.get("head")[0].get("title")[0] |
|
): |
|
temp_dict = dict() |
|
temp_dict["id"] = source.get("head")[0].get("talkid")[0] + "_2" |
|
temp_dict[language_pair[0]] = source.get("head")[0].get("title")[0] |
|
temp_dict[language_pair[1]] = target.get("head")[0].get("title")[0] |
|
translation.append(temp_dict) |
|
|
|
if source.get("head")[0].get("seekvideo") and target.get("head")[0].get( |
|
"seekvideo" |
|
): |
|
source_transc = ( |
|
source.get("head")[0].get("transcription")[0].get("seekvideo") |
|
) |
|
target_transc = ( |
|
target.get("head")[0].get("transcription")[0].get("seekvideo") |
|
) |
|
|
|
transc = zip(source_transc, target_transc) |
|
transcriptions = [ |
|
{ |
|
"id": s.get("id"), |
|
language_pair[0]: s.get("text"), |
|
language_pair[1]: t.get("text"), |
|
} |
|
for s, t in transc |
|
] |
|
translation.extend(transcriptions) |
|
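        # Emit every aligned segment collected for this config; the example key is the
        # segment id assembled above.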
for talk_segment in translation: |
|
result = { |
|
"source": talk_segment[language_pair[0]], |
|
"target": talk_segment[language_pair[1]], |
|
"source_lang": _LANGUAGE_TO_FULL_NAME[language_pair[0]], |
|
"target_lang": _LANGUAGE_TO_FULL_NAME[language_pair[1]], |
|
} |
|
yield talk_segment["id"], result |