davzoku committed on
Commit f910114
1 Parent(s): 9067824

Delete loading script

Files changed (1)
  1. ag_news.py +0 -94
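
With the loading script removed, the dataset is presumably served straight from data files hosted in the repository (for example auto-converted Parquet), so no custom builder code is needed on the consumer side. A minimal sketch, assuming the canonical `ag_news` Hub id; the actual repo id for this commit may differ:

```python
from datasets import load_dataset

# Sketch only: "ag_news" is the canonical Hub id, used here for illustration.
ds = load_dataset("ag_news")

print(ds)              # DatasetDict with "train" and "test" splits
print(ds["train"][0])  # {"text": "...", "label": ...}
```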
ag_news.py DELETED
@@ -1,94 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """AG News topic classification dataset."""
-
-
- import csv
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- _DESCRIPTION = """\
- AG is a collection of more than 1 million news articles. News articles have been
- gathered from more than 2000 news sources by ComeToMyHead in more than 1 year of
- activity. ComeToMyHead is an academic news search engine which has been running
- since July, 2004. The dataset is provided by the academic community for research
- purposes in data mining (clustering, classification, etc), information retrieval
- (ranking, search, etc), xml, data compression, data streaming, and any other
- non-commercial activity. For more information, please refer to the link
- http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html .
-
- The AG's news topic classification dataset is constructed by Xiang Zhang
- (xiang.zhang@nyu.edu) from the dataset above. It is used as a text
- classification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann
- LeCun. Character-level Convolutional Networks for Text Classification. Advances
- in Neural Information Processing Systems 28 (NIPS 2015).
- """
-
- _CITATION = """\
- @inproceedings{Zhang2015CharacterlevelCN,
-   title={Character-level Convolutional Networks for Text Classification},
-   author={Xiang Zhang and Junbo Jake Zhao and Yann LeCun},
-   booktitle={NIPS},
-   year={2015}
- }
- """
-
- _TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv"
- _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv"
-
-
- class AGNews(datasets.GeneratorBasedBuilder):
-     """AG News topic classification dataset."""
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "text": datasets.Value("string"),
-                     "label": datasets.features.ClassLabel(names=["World", "Sports", "Business", "Sci/Tech"]),
-                 }
-             ),
-             homepage="http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html",
-             citation=_CITATION,
-             task_templates=[TextClassification(text_column="text", label_column="label")],
-         )
-
-     def _split_generators(self, dl_manager):
-         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Generate AG News examples."""
-         with open(filepath, encoding="utf-8") as csv_file:
-             csv_reader = csv.reader(
-                 csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
-             )
-             for id_, row in enumerate(csv_reader):
-                 label, title, description = row
-                 # Original labels are [1, 2, 3, 4] ->
-                 # ['World', 'Sports', 'Business', 'Sci/Tech']
-                 # Re-map to [0, 1, 2, 3].
-                 label = int(label) - 1
-                 text = " ".join((title, description))
-                 yield id_, {"text": text, "label": label}
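
For reference, the behaviour of the deleted `_generate_examples` can be reproduced without a loading script. The sketch below is hypothetical and not part of this commit; it reuses the same download URLs, the same CSV dialect, and the same label re-mapping (1-4 to 0-3) and title/description join as the code above:

```python
import csv
import io
import urllib.request

TRAIN_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv"
TEST_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv"
LABEL_NAMES = ["World", "Sports", "Business", "Sci/Tech"]


def read_ag_news(url):
    """Yield {"text", "label"} dicts matching the deleted script's schema."""
    with urllib.request.urlopen(url) as response:
        lines = io.TextIOWrapper(response, encoding="utf-8")
        reader = csv.reader(
            lines, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
        )
        for label, title, description in reader:
            # Original labels are 1-4; re-map to 0-3 as in _generate_examples.
            yield {"text": " ".join((title, description)), "label": int(label) - 1}


if __name__ == "__main__":
    example = next(read_ag_news(TEST_URL))
    print(LABEL_NAMES[example["label"]], example["text"][:80])
```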