# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Hugging Face `datasets` loading script for the AfriBERTa pretraining corpus."""

import os

import datasets

_DESCRIPTION = """\
Corpus used for training the AfriBERTa language models.
"""

_CITATION = """\
@inproceedings{ogueji-etal-2021-small,
    title = "Small Data? No Problem! Exploring the Viability of Pretrained Multilingual Language Models for Low-resourced Languages",
    author = "Ogueji, Kelechi and
        Zhu, Yuxin and
        Lin, Jimmy",
    booktitle = "Proceedings of the 1st Workshop on Multilingual Representation Learning",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.mrl-1.11",
    pages = "116--126",
}
"""

_HOMEPAGE_URL = "https://github.com/keleog/afriberta"
_VERSION = "1.0.0"

# One configuration per language. "gahuza" is the code-mixed
# Kinyarwanda/Kirundi variety and "pidgin" is Nigerian Pidgin.
_LANGUAGES = [
    "afaanoromoo",
    "amharic",
    "gahuza",
    "hausa",
    "igbo",
    "pidgin",
    "somali",
    "swahili",
    "tigrinya",
    "yoruba",
]

# Each language has a train and a test archive; the test archive is named
# "eval.zip" on the Hub and extracts to an "eval.txt" file.
_DATASET_URLS = {
    language: {
        "train": f"https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/{language}/train.zip",
        "test": f"https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/{language}/eval.zip",
    }
    for language in _LANGUAGES
}
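
# For illustration, substituting "hausa" into the pattern above resolves to:
#   https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/hausa/train.zip
#   https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/hausa/eval.zip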


class AfribertaCorpus(datasets.GeneratorBasedBuilder):
    """Builder for the AfriBERTa pretraining corpus, one config per language."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version(_VERSION),
            name=language,
            description=f"AfriBERTa corpus for {language}.",
        )
        for language in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        language = self.config.name
        # Each zip archive extracts to a directory holding a single text file.
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[language])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": os.path.join(downloaded_files["train"], "train.txt"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": os.path.join(downloaded_files["test"], "eval.txt"),
                },
            ),
        ]

    def _generate_examples(self, file_path):
        # One example per line; the 0-based line index doubles as the id.
        with open(file_path, encoding="utf-8") as f:
            for sentence_counter, line in enumerate(f):
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "text": line.rstrip("\n"),  # drop the trailing newline
                }
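

# A minimal usage sketch (an assumption for illustration: this script is hosted
# on the Hub as castorini/afriberta-corpus, matching the URLs above, and the
# `datasets` library is installed). Any name in _LANGUAGES is a valid config:
#
#   from datasets import load_dataset
#
#   corpus = load_dataset("castorini/afriberta-corpus", "hausa")
#   print(corpus["train"][0])  # {"id": "0", "text": "..."}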