BDas committed
Commit
4c4e15b
1 Parent(s): 070eacc

Create new file

Files changed (1)
  1. EnglishNLPDataset.py +90 -0
EnglishNLPDataset.py ADDED
@@ -0,0 +1,90 @@
+ """English review multi-classification dataset."""
+
+ import csv
+
+ import datasets
+ from datasets.tasks import TextClassification
+
+ _CITATION = """\
+ ----EnglishNLPDataset----
+ """
+
+ _DESCRIPTION = """\
+ The dataset, prepared in English, contains 80,000 training, 10,000 validation and 10,000 test examples.
+ The data consists of customer reviews collected from e-commerce sites.
+ """
+
+ _TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/BihterDass/EnglishTextClassificationDataset/main/train.csv"
+ _VALIDATION_DOWNLOAD_URL = "https://raw.githubusercontent.com/BihterDass/EnglishTextClassificationDataset/main/dev.csv"
+ _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/BihterDass/EnglishTextClassificationDataset/main/test.csv"
+
+
+ class EnglishNLPDatasetConfig(datasets.BuilderConfig):
+     """BuilderConfig for EnglishNLPDataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for EnglishNLPDataset.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(EnglishNLPDatasetConfig, self).__init__(**kwargs)
+
+
+ class EnglishNLPDataset(datasets.GeneratorBasedBuilder):
+     """EnglishNLPDataset classification dataset."""
+
+     BUILDER_CONFIGS = [
+         EnglishNLPDatasetConfig(
+             name="EnglishData",
+             version=datasets.Version("1.0.0"),
+             description="EnglishNLPDataset: a review classification dataset intended to support natural language processing work.",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["neg", "nor", "pos"]),
+                 }
+             ),
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/BihterDass/EnglishTextClassificationDataset",
+             citation=_CITATION,
+             task_templates=[TextClassification(text_column="text", label_column="label")],
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
+         validation_path = dl_manager.download_and_extract(_VALIDATION_DOWNLOAD_URL)
+         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples as (key, example) tuples."""
+         with open(filepath, encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(
+                 csv_file,
+                 delimiter=",",
+                 quoting=csv.QUOTE_ALL,
+                 skipinitialspace=True,
+             )
+             for id_, row in enumerate(csv_reader):
+                 text, label = row
+                 yield id_, {
+                     "text": text,
+                     # Labels map to the ClassLabel indices: 0 = "neg", 1 = "nor", 2 = "pos".
+                     "label": int(label),
+                 }
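
The `_generate_examples` reader above expects each row of train.csv, dev.csv and test.csv to hold exactly two comma-separated fields, the review text followed by an integer class index, with no header row. A small sketch of that layout with made-up rows (not taken from the actual files), parsed with the same reader settings as the script:

import csv
import io

# Hypothetical sample rows illustrating the expected layout:
# quoted text field, then an integer class index, no header line.
sample = io.StringIO(
    '"I love this phone, the battery lasts all day","2"\n'
    '"The package arrived late and the box was damaged","0"\n'
)
for text, label in csv.reader(sample, delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True):
    print(text, int(label))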
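
And a minimal usage sketch, assuming the script is saved locally as EnglishNLPDataset.py and a `datasets` 2.x release that still ships `datasets.tasks` and supports loading local scripts:

from datasets import load_dataset

# Downloads train.csv / dev.csv / test.csv from the GitHub URLs above
# and builds the three splits defined in _split_generators.
ds = load_dataset("EnglishNLPDataset.py")

print(ds)                                   # DatasetDict with train, validation and test splits
print(ds["train"].features["label"].names)  # ['neg', 'nor', 'pos']
print(ds["train"][0])                       # {'text': '...', 'label': 0, 1 or 2}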