import os

import datasets

_DESCRIPTION = """\
Corpus used for training AfriBERTa models.
"""

_CITATION = """\
@inproceedings{ogueji-etal-2021-small,
    title = "Small Data? No Problem! Exploring the Viability of Pretrained Multilingual Language Models for Low-resourced Languages",
    author = "Ogueji, Kelechi and
      Zhu, Yuxin and
      Lin, Jimmy",
    booktitle = "Proceedings of the 1st Workshop on Multilingual Representation Learning",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.mrl-1.11",
    pages = "116--126",
}
"""

_HOMEPAGE_URL = "https://github.com/keleog/afriberta"
_VERSION = "1.0.0"
_LANGUAGES = [
    "afaanoromoo",
    "amharic",
    "gahuza",
    "hausa",
    "igbo",
    "pidgin",
    "somali",
    "swahili",
    "tigrinya",
    "yoruba",
]

_DATASET_URLS = {
    language: {
        "train": f"https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/{language}/train.zip",
        "test": f"https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/{language}/eval.zip",
    }
    for language in _LANGUAGES
}
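# For example, the "hausa" config resolves to
# https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/hausa/train.zip;
# each archive is expected to contain a single train.txt or eval.txt file
# (see _split_generators below).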


class AfribertaCorpus(datasets.GeneratorBasedBuilder):
    """Loader for the AfriBERTa corpus, exposing one builder config per language."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version(_VERSION),
            name=language,
            description=f"AfriBERTa corpus for {language}.",
        )
        for language in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        language = self.config.name
        # download_and_extract maps each split key ("train"/"test") to the
        # directory the corresponding zip archive was extracted into.
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[language])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": os.path.join(downloaded_files["train"], "train.txt"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": os.path.join(downloaded_files["test"], "eval.txt"),
                },
            ),
        ]

    def _generate_examples(self, file_path):
        # Yield one example per line of the extracted text file.
        with open(file_path, encoding="utf-8") as f:
            for sentence_counter, line in enumerate(f):
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "text": line.rstrip("\n"),  # drop the trailing newline
                }
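
# Example usage (a sketch; assumes this script backs the
# castorini/afriberta-corpus dataset on the Hugging Face Hub):
#
#     from datasets import load_dataset
#     data = load_dataset("castorini/afriberta-corpus", "yoruba")
#     print(data["train"][0])  # {"id": "0", "text": "..."}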