# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""MasakhaNEWS: News Topic Classification for African languages"""

import datasets
import pandas as pd

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{shode-etal-2023-nollysenti,
    title = "{N}olly{S}enti: Leveraging Transfer Learning and Machine Translation for {N}igerian Movie Sentiment Classification",
    author = "Shode, Iyanuoluwa  and
      Adelani, David Ifeoluwa  and
      Peng, Jing  and
      Feldman, Anna",
    editor = "Rogers, Anna  and
      Boyd-Graber, Jordan  and
      Okazaki, Naoaki",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-short.85",
    doi = "10.18653/v1/2023.acl-short.85",
    pages = "986--998",
    abstract = "Africa has over 2000 indigenous languages but they are under-represented in NLP research due to lack of datasets. In recent years, there have been progress in developing labelled corpora for African languages. However, they are often available in a single domain and may not generalize to other domains. In this paper, we focus on the task of sentiment classification for cross-domain adaptation. We create a new dataset, Nollywood movie reviews for five languages widely spoken in Nigeria (English, Hausa, Igbo, Nigerian Pidgin, and Yoruba). We provide an extensive empirical evaluation using classical machine learning methods and pre-trained language models. By leveraging transfer learning, we compare the performance of cross-domain adaptation from Twitter domain, and cross-lingual adaptation from English language. Our evaluation shows that transfer from English in the same target domain leads to more than 5{\%} improvement in accuracy compared to transfer from Twitter in the same language. To further mitigate the domain difference, we leverage machine translation from English to other Nigerian languages, which leads to a further improvement of 7{\%} over cross-lingual evaluation. While machine translation to low-resource languages are often of low quality, our analysis shows that sentiment related words are often preserved.",
}
"""

_DESCRIPTION = """\
NollySenti is the first publicly available dataset for movie sentiment classification in five Nigerian languages.

The languages are:
- English (eng)
- Hausa (hau)
- Igbo (ibo)
- Nigerian Pidgin (pcm)
- Yorùbá (yor)

Train, validation, and test splits are available for all five languages.

For more details see https://aclanthology.org/2023.acl-short.85
"""
_URL = "https://github.com/IyanuSh/NollySenti/raw/main/data/"
_TRAINING_FILE = "train.tsv"
_DEV_FILE = "dev.tsv"
_TEST_FILE = "test.tsv"


class NollysentiConfig(datasets.BuilderConfig):
    """BuilderConfig for Nollysenti"""

    def __init__(self, **kwargs):
        """BuilderConfig for Nollysenti.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(NollysentiConfig, self).__init__(**kwargs)


class Nollysenti(datasets.GeneratorBasedBuilder):
    """Masakhanews dataset."""

    BUILDER_CONFIGS = [
        NollysentiConfig(name="en", version=datasets.Version("1.0.0"), description="Nollysenti English dataset"),
        NollysentiConfig(name="ha", version=datasets.Version("1.0.0"), description="Nollysenti Hausa dataset"),
        NollysentiConfig(name="ig", version=datasets.Version("1.0.0"), description="Nollysenti Igbo dataset"),
        NollysentiConfig(
            name="pcm", version=datasets.Version("1.0.0"), description="Nollysenti Nigerian-Pidgin dataset"
        ),
        NollysentiConfig(name="yo", version=datasets.Version("1.0.0"), description="Nollysenti Yoruba dataset"),
    ]
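
    # Note: each config name is used both as the data subdirectory under _URL
    # (see _split_generators) and as the prefix of the review column in the
    # TSV files (see _generate_examples), e.g. "yo" -> data/yo/ and "yo_review".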

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "label": datasets.features.ClassLabel(
                        names=["positive", "negative"]
                    ),
                    "review": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/IyanuSh/NollySenti",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{self.config.name}/{_TRAINING_FILE}",
            "dev": f"{_URL}{self.config.name}/{_DEV_FILE}",
            "test": f"{_URL}{self.config.name}/{_TEST_FILE}",
        }
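        # With the "pcm" config, for example, these resolve to
        #   https://github.com/IyanuSh/NollySenti/raw/main/data/pcm/train.tsv
        # and the matching dev/test files (assuming the repository keeps one
        # data subdirectory per config name, as the URL pattern implies).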
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
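        # The TSV files are expected to carry a "sentiment" column plus one
        # review column per language, named "<config>_review" (e.g. "yo_review"),
        # matching how the columns are accessed below; rows with missing values
        # are dropped before yielding {"label", "review"} examples.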
        df = pd.read_csv(filepath, sep='\t')
        df = df.dropna()
        N = df.shape[0]

        for id_ in range(N):
            yield id_, {
                "label": df['sentiment'].iloc[id_],
                "review": df[self.config.name+'_review'].iloc[id_],
            }
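

# Minimal usage sketch (illustrative only; assumes this script is saved locally
# as nollysenti.py -- adjust the path or Hub identifier to wherever it is hosted):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("nollysenti.py", "yo")   # Yoruba configuration
#     print(ds["train"][0])                      # {"label": ..., "review": "..."}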