Modalities: Text
Formats: parquet
Languages: Hausa
Libraries: Datasets, pandas
Commit 736ab22 committed by albertvillanova (HF staff)
1 parent: 7474a2e

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (991fa1bd0cd97744c93d10929507d98321c0be5e)
- Delete loading script (c693e0bbc89b3b5b255992150f01d5fd93df7b68)
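
After this change, the splits load straight from the Parquet files and no dataset script is downloaded or executed. A minimal loading sketch, assuming the canonical Hub id `hausa_voa_topics` for this repository:

    import datasets

    # Train/validation/test are resolved from the Parquet files declared in README.md.
    ds = datasets.load_dataset("hausa_voa_topics")
    print(ds)              # DatasetDict: train (2045), validation (290), test (582)
    print(ds["train"][0])  # {'news_title': ..., 'label': ...}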

README.md CHANGED
@@ -33,16 +33,25 @@ dataset_info:
           '4': World
   splits:
   - name: train
-    num_bytes: 144932
+    num_bytes: 144928
     num_examples: 2045
   - name: validation
-    num_bytes: 20565
+    num_bytes: 20561
     num_examples: 290
   - name: test
-    num_bytes: 41195
+    num_bytes: 41191
     num_examples: 582
-  download_size: 195824
-  dataset_size: 206692
+  download_size: 124578
+  dataset_size: 206680
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for Hausa VOA News Topic Classification dataset (hausa_voa_topics)
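
The new `configs` block is what replaces the deleted loading script: it tells the Hub and the `datasets` library which Parquet files belong to which split. A sketch of the equivalent explicit mapping with the generic parquet builder, assuming the files from this commit are available locally:

    import datasets

    # Mirrors the README's data_files patterns (data/train-*, data/validation-*, data/test-*).
    data_files = {
        "train": "data/train-00000-of-00001.parquet",
        "validation": "data/validation-00000-of-00001.parquet",
        "test": "data/test-00000-of-00001.parquet",
    }
    ds = datasets.load_dataset("parquet", data_files=data_files)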
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12dbe9637a3dac6b4616d4c2fc0ec9726be1cf32310bab7a912e621d481e20c1
+size 24908
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d45ff4b33bae859882a3b3470376beae9289654305eae9dfeca7e21a0842604
+size 85393
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8a0a4305322fbf33cfe484a09cdd9df582022a1a42864d2bb3c43e9b430393f
+size 14277
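
The three files above are Git LFS pointers, not the Parquet bytes themselves: the repository stores only the spec version, a sha256 oid, and the payload size, while the blobs live in LFS storage. A sketch of verifying a downloaded file against its pointer, using the oid and size copied from the data/test-* entry (the local path is an assumption):

    import hashlib
    from pathlib import Path

    def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
        """Check that a file's byte length and sha256 match an LFS pointer."""
        data = Path(path).read_bytes()
        return len(data) == size and hashlib.sha256(data).hexdigest() == oid

    print(matches_lfs_pointer(
        "data/test-00000-of-00001.parquet",
        "12dbe9637a3dac6b4616d4c2fc0ec9726be1cf32310bab7a912e621d481e20c1",
        24908,
    ))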
hausa_voa_topics.py DELETED
@@ -1,91 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Hausa VOA News Topic Classification dataset."""
-
-
-import csv
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_DESCRIPTION = """\
-A collection of news article headlines in Hausa from VOA Hausa.
-Each headline is labeled with one of the following classes: Nigeria,
-Africa, World, Health or Politics.
-
-The dataset was presented in the paper:
-Hedderich, Adelani, Zhu, Alabi, Markus, Klakow: Transfer Learning and
-Distant Supervision for Multilingual Transformer Models: A Study on
-African Languages (EMNLP 2020).
-"""
-
-_CITATION = """\
-@inproceedings{hedderich-etal-2020-transfer,
-    title = "Transfer Learning and Distant Supervision for Multilingual Transformer Models: A Study on African Languages",
-    author = "Hedderich, Michael A. and
-      Adelani, David and
-      Zhu, Dawei and
-      Alabi, Jesujoba and
-      Markus, Udia and
-      Klakow, Dietrich",
-    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
-    year = "2020",
-    publisher = "Association for Computational Linguistics",
-    url = "https://www.aclweb.org/anthology/2020.emnlp-main.204",
-    doi = "10.18653/v1/2020.emnlp-main.204",
-}
-"""
-
-_TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/uds-lsv/transfer-distant-transformer-african/master/data/hausa_newsclass/train_clean.tsv"
-_VALIDATION_DOWNLOAD_URL = "https://raw.githubusercontent.com/uds-lsv/transfer-distant-transformer-african/master/data/hausa_newsclass/dev.tsv"
-_TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/uds-lsv/transfer-distant-transformer-african/master/data/hausa_newsclass/test.tsv"
-
-
-class HausaVOATopics(datasets.GeneratorBasedBuilder):
-    """Hausa VOA News Topic Classification dataset."""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "news_title": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["Africa", "Health", "Nigeria", "Politics", "World"]),
-                }
-            ),
-            homepage="https://github.com/uds-lsv/transfer-distant-transformer-african",
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="news_title", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-        validation_path = dl_manager.download_and_extract(_VALIDATION_DOWNLOAD_URL)
-        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generate Hausa VOA News Topic examples."""
-        with open(filepath, encoding="utf-8") as csv_file:
-            csv_reader = csv.DictReader(csv_file, delimiter="\t")
-            for id_, row in enumerate(csv_reader):
-                yield id_, {"news_title": row["news_title"], "label": row["label"]}
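
Everything the script encoded (the news_title/label schema and the five-way ClassLabel) is preserved through the conversion in the README metadata and the Parquet files, so label ids still map to the same class names. A quick check, again assuming the `hausa_voa_topics` Hub id:

    import datasets

    ds = datasets.load_dataset("hausa_voa_topics", split="train")
    labels = ds.features["label"]          # ClassLabel with five names
    print(labels.names)                    # ['Africa', 'Health', 'Nigeria', 'Politics', 'World']
    print(labels.int2str(ds[0]["label"]))  # class name of the first headline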