ultimate_arabic_news / ultimate_arabic_news.py
import csv
import textwrap

import datasets

_DESCRIPTION = "TODO"

_HOMEPAGE = "TODO"

_LICENSE = "TODO"

_CITATION = "TODO"

# Use `resolve/` URLs so the download manager fetches the raw CSV files
# rather than the HTML blob pages.
_URL = {
    "UltimateArabic": "https://huggingface.co/datasets/khalidalt/ultimate_arabic_news/resolve/main/UltimateArabic.csv",
    "UltimateArabicPrePros": "https://huggingface.co/datasets/khalidalt/ultimate_arabic_news/resolve/main/UltimateArabicPrePros.csv",
}


class UAN_Config(datasets.BuilderConfig):
    """BuilderConfig for Ultimate Arabic News."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(UAN_Config, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class Ultimate_Arabic_News(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        UAN_Config(
            name="UltimateArabic",
            description=textwrap.dedent(
                """\
                UltimateArabic: A file containing more than 193,000 original Arabic news texts, without
                pre-processing. The texts contain words, numbers, and symbols that can be removed by
                pre-processing to increase accuracy when the dataset is used in Arabic natural language
                processing tasks such as text classification."""
            ),
        ),
        UAN_Config(
            name="UltimateArabicPrePros",
            description=textwrap.dedent(
                """\
                UltimateArabicPrePros: The same news texts after pre-processing, reduced to about
                188,000 text documents. Stop words, non-Arabic words, symbols, and numbers have been
                removed, so this file is ready for direct use in Arabic natural language processing
                tasks such as text classification."""
            ),
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://data.mendeley.com/datasets/jz56k5wxz7/1",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager. Given a single
        # URL, download_and_extract returns a single local path (not a dict),
        # so the path is passed to _generate_examples directly. Only the file
        # for the selected config is downloaded.
        downloaded = dl_manager.download_and_extract(_URL[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": downloaded},
            ),
        ]
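
    # The CSV files are expected to contain a header row with `text` and
    # `label` columns, which _generate_examples reads below. Illustrative
    # sketch of the expected layout (the label values shown are assumptions,
    # not taken from the actual files):
    #
    #     text,label
    #     "... Arabic news article ...",Sport
    #     "... Arabic news article ...",Economy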
    def _generate_examples(self, filepath):
        """Yields (key, example) tuples read from the CSV file."""
        with open(filepath, encoding="utf-8") as f:
            data = csv.DictReader(f)
            for row, item in enumerate(data):
                yield row, {"text": item["text"], "label": item["label"]}
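

# Usage sketch: a minimal example of loading this dataset through the
# `datasets` library, assuming this script lives in the
# khalidalt/ultimate_arabic_news repository on the Hugging Face Hub:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("khalidalt/ultimate_arabic_news", "UltimateArabic")
#     print(ds["train"][0])  # -> {"text": "...", "label": "..."}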