import csv
import datasets
import os
import textwrap

# Human-readable summary shown on the dataset hub page.
_DESCRIPTION = "  The Ultimate Arabic News Dataset is a collection of single-label modern Arabic texts that are used in news websites and press articles. Arabic news data was collected by web scraping techniques from many famous news sites such as Al-Arabiya, Al-Youm Al-Sabea (Youm7), the news published on the Google search engine and other various sources."

# Citation for the original Mendeley Data release.
_CITATION = "Al-Dulaimi, Ahmed Hashim (2022), “Ultimate Arabic News Dataset”, Mendeley Data, V1, doi: 10.17632/jz56k5wxz7.1"

# Landing page of the source dataset.
_HOMEPAGE = "https://data.mendeley.com/datasets/jz56k5wxz7/1"

_LICENSE = "CC BY 4.0 "

# Download URLs keyed by config name: raw texts ("UltimateArabic") and the
# pre-processed variant ("UltimateArabicPrePros").
_URL = {"UltimateArabic":"https://data.mendeley.com/public-files/datasets/jz56k5wxz7/files/b7ca9d26-ed76-4481-bc61-cca9c90178a0/file_downloaded","UltimateArabicPrePros":"https://huggingface.co/datasets/khalidalt/ultimate_arabic_news/resolve/main/UltimateArabicPrePros.csv"}
        
          
class UAN_Config(datasets.BuilderConfig):
    """BuilderConfig for the Ultimate Arabic News dataset."""

    def __init__(self, **kwargs):
        """Create a config pinned to dataset version 1.0.0.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``name`` and ``description``).
        """
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        

class Ultimate_Arabic_News(datasets.GeneratorBasedBuilder):
    """Builder for the Ultimate Arabic News text-classification dataset.

    Two configs are exposed: ``UltimateArabic`` (raw news texts) and
    ``UltimateArabicPrePros`` (the same data after pre-processing).
    Each example is a dict with ``text`` and ``label`` string fields.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        UAN_Config(
            name="UltimateArabic",
            description=textwrap.dedent(
                """\
      UltimateArabic: A file containing more than 193,000 original Arabic news texts, without pre-processing. The texts contain words, 
      numbers, and symbols that can be removed using pre-processing to increase accuracy when using the dataset in various Arabic natural 
      language processing tasks such as text classification."""
            ),
        ),
        UAN_Config(
            name="UltimateArabicPrePros",
            description=textwrap.dedent(
                """UltimateArabicPrePros: It is a file that contains the data mentioned in the first file, but after pre-processing, where 
            the number of data became about 188,000 text documents, where stop words, non-Arabic words, symbols and numbers have been 
            removed so that this file is ready for use directly in the various Arabic natural language processing tasks. Like text 
            classification.
      """
            ),
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing features and provenance."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Every example is a (text, label) pair of plain strings.
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                },
            ),
            # No canonical (input, target) tuple is declared; callers using
            # as_supervised=True would need one, so it stays None.
            supervised_keys=None,
            homepage="https://data.mendeley.com/datasets/jz56k5wxz7/1",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the CSV for the selected config and define the TRAIN split.

        Only the file matching ``self.config.name`` is downloaded (the
        original code fetched both variants unconditionally, doubling the
        download cost).

        Raises:
            ValueError: if the config name is not a key of ``_URL``.
        """
        if self.config.name not in _URL:
            raise ValueError("Unknown config name: {}".format(self.config.name))
        csv_file = dl_manager.download_and_extract(_URL[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"csv_file": csv_file},
            ),
        ]

    def _generate_examples(self, csv_file):
        """Yield (row_index, example) pairs from the downloaded CSV.

        Args:
            csv_file: local path to a CSV with at least ``text`` and
                ``label`` columns (header row required by DictReader).
        """
        with open(csv_file, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                yield idx, {"text": row["text"], "label": row["label"]}