Commit eb185aa (1 parent: 68a83b6)
Committed by albertvillanova and davzoku

Convert dataset to Parquet (#5)


- Convert dataset to Parquet (90678249c4fd2c979ef3c747d3f11eedf269d05e)
- Delete loading script (f9101146636e792b6db57233ecb45a73071633f5)
- Delete legacy dataset_infos.json (76e5fcb2f38e3bdd843044c28d085bb0325a9844)


Co-authored-by: Walter <[email protected]>
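
After this change the dataset is served straight from the Parquet shards added below rather than through the deleted loading script. A minimal sketch of what loading looks like afterwards, assuming the repository id on the Hub is `ag_news`:

```python
# Minimal sketch: load the Parquet-backed dataset (assumes the Hub repo id "ag_news").
from datasets import load_dataset

ds = load_dataset("ag_news")  # no loading script needed; splits come from the Parquet files
print(ds)  # DatasetDict with "train" (120,000 rows) and "test" (7,600 rows)
```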

README.md CHANGED
@@ -33,13 +33,20 @@ dataset_info:
           '3': Sci/Tech
   splits:
   - name: train
-    num_bytes: 29817351
+    num_bytes: 29817303
     num_examples: 120000
   - name: test
-    num_bytes: 1879478
+    num_bytes: 1879474
     num_examples: 7600
-  download_size: 31327765
-  dataset_size: 31696829
+  download_size: 19820267
+  dataset_size: 31696777
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 train-eval-index:
 - config: default
   task: text-classification
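
The `configs`/`data_files` block added above is what tells `datasets` to build the default config directly from the Parquet shards. A small sketch for cross-checking the updated split metadata against what the builder reports, assuming the Hub repo id `ag_news`:

```python
# Sketch: compare the README split metadata with the builder's reported info.
from datasets import load_dataset_builder

builder = load_dataset_builder("ag_news")
for name, split in builder.info.splits.items():
    print(name, split.num_examples, split.num_bytes)
# Per the YAML above: train 120000 examples / 29817303 bytes, test 7600 examples / 1879474 bytes
```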
ag_news.py DELETED
@@ -1,94 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""AG News topic classification dataset."""
-
-
-import csv
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_DESCRIPTION = """\
-AG is a collection of more than 1 million news articles. News articles have been
-gathered from more than 2000 news sources by ComeToMyHead in more than 1 year of
-activity. ComeToMyHead is an academic news search engine which has been running
-since July, 2004. The dataset is provided by the academic comunity for research
-purposes in data mining (clustering, classification, etc), information retrieval
-(ranking, search, etc), xml, data compression, data streaming, and any other
-non-commercial activity. For more information, please refer to the link
-http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html .
-
-The AG's news topic classification dataset is constructed by Xiang Zhang
-([email protected]) from the dataset above. It is used as a text
-classification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann
-LeCun. Character-level Convolutional Networks for Text Classification. Advances
-in Neural Information Processing Systems 28 (NIPS 2015).
-"""
-
-_CITATION = """\
-@inproceedings{Zhang2015CharacterlevelCN,
-  title={Character-level Convolutional Networks for Text Classification},
-  author={Xiang Zhang and Junbo Jake Zhao and Yann LeCun},
-  booktitle={NIPS},
-  year={2015}
-}
-"""
-
-_TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv"
-_TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv"
-
-
-class AGNews(datasets.GeneratorBasedBuilder):
-    """AG News topic classification dataset."""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["World", "Sports", "Business", "Sci/Tech"]),
-                }
-            ),
-            homepage="http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html",
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generate AG News examples."""
-        with open(filepath, encoding="utf-8") as csv_file:
-            csv_reader = csv.reader(
-                csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
-            )
-            for id_, row in enumerate(csv_reader):
-                label, title, description = row
-                # Original labels are [1, 2, 3, 4] ->
-                # ['World', 'Sports', 'Business', 'Sci/Tech']
-                # Re-map to [0, 1, 2, 3].
-                label = int(label) - 1
-                text = " ".join((title, description))
-                yield id_, {"text": text, "label": label}
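
For reference, the row-level preprocessing the deleted script performed (join title and description into `text`, shift labels from 1-4 to 0-3) can still be reproduced against the original CSVs it pointed at. A minimal sketch using the test-split URL from the removed script:

```python
# Sketch: reproduce the deleted script's per-row preprocessing on the source CSV.
import csv
import urllib.request

_TEST_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv"

with urllib.request.urlopen(_TEST_URL) as resp:
    rows = csv.reader(
        resp.read().decode("utf-8").splitlines(),
        quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True,
    )
    for label, title, description in rows:
        example = {"text": " ".join((title, description)), "label": int(label) - 1}  # 1..4 -> 0..3
        print(example)
        break  # peek at the first row only
```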
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71de87ec66bc5737752a2502204dfa6d7fe9856ade3ea444dc6317789a4f13fb
+size 1234829
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc508d6d9868594e3da960a8cfeb63ab5a4746598b93428c224397080c1f52ee
+size 18585438
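
The two files added above are Git LFS pointers; the Parquet shards themselves hold the data that `load_dataset` now reads. A minimal sketch for inspecting the train shard in a local checkout, assuming the LFS objects have been pulled:

```python
# Sketch: inspect the new train shard directly (path assumes a local clone with LFS files fetched).
import pyarrow.parquet as pq

table = pq.read_table("data/train-00000-of-00001.parquet")
print(table.num_rows)  # expected: 120000
print(table.schema)    # "text" as string; "label" stored as an integer class id
```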