albertvillanova HF staff committed on
Commit
7a61fcd
1 Parent(s): 9ba0287

Delete loading script

Files changed (1)
  1. yelp_polarity.py +0 -162
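With the loading script removed, the dataset is expected to be served from data files hosted in the repository itself, so loading it no longer executes custom Python. A minimal sketch, assuming the repository id matches the deleted file name ("yelp_polarity") and the train/test splits defined by the old script:

from datasets import load_dataset

# Loads directly from the hosted data files; no dataset script is run.
ds = load_dataset("yelp_polarity")

print(ds)              # expected: DatasetDict with "train" and "test" splits
print(ds["train"][0])  # expected: {"text": "...", "label": 0 or 1}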
yelp_polarity.py DELETED
@@ -1,162 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- # Copyright 2019 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Yelp Polarity Reviews dataset."""
-
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- _DESCRIPTION = """\
- Large Yelp Review Dataset.
- This is a dataset for binary sentiment classification. \
- We provide a set of 560,000 highly polar yelp reviews for training, and 38,000 for testing. \
-
- ORIGIN
- The Yelp reviews dataset consists of reviews from Yelp. It is extracted
- from the Yelp Dataset Challenge 2015 data. For more information, please
- refer to http://www.yelp.com/dataset_challenge
-
- The Yelp reviews polarity dataset is constructed by
- Xiang Zhang ([email protected]) from the above dataset.
- It is first used as a text classification benchmark in the following paper:
- Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks
- for Text Classification. Advances in Neural Information Processing Systems 28
- (NIPS 2015).
-
-
- DESCRIPTION
-
- The Yelp reviews polarity dataset is constructed by considering stars 1 and 2
- negative, and 3 and 4 positive. For each polarity 280,000 training samples and
- 19,000 testing samples are taken randomly. In total there are 560,000 training
- samples and 38,000 testing samples. Negative polarity is class 1,
- and positive class 2.
-
- The files train.csv and test.csv contain all the training samples as
- comma-separated values. There are 2 columns in them, corresponding to class
- index (1 and 2) and review text. The review texts are escaped using double
- quotes ("), and any internal double quote is escaped by 2 double quotes ("").
- New lines are escaped by a backslash followed by an "n" character,
- that is "\n".
- """
-
- _CITATION = """\
- @article{zhangCharacterlevelConvolutionalNetworks2015,
-   archivePrefix = {arXiv},
-   eprinttype = {arxiv},
-   eprint = {1509.01626},
-   primaryClass = {cs},
-   title = {Character-Level {{Convolutional Networks}} for {{Text Classification}}},
-   abstract = {This article offers an empirical exploration on the use of character-level convolutional networks (ConvNets) for text classification. We constructed several large-scale datasets to show that character-level convolutional networks could achieve state-of-the-art or competitive results. Comparisons are offered against traditional models such as bag of words, n-grams and their TFIDF variants, and deep learning models such as word-based ConvNets and recurrent neural networks.},
-   journal = {arXiv:1509.01626 [cs]},
-   author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann},
-   month = sep,
-   year = {2015},
- }
-
- """
-
- _DOWNLOAD_URL = "https://s3.amazonaws.com/fast-ai-nlp/yelp_review_polarity_csv.tgz"
-
-
- class YelpPolarityReviewsConfig(datasets.BuilderConfig):
-     """BuilderConfig for YelpPolarityReviews."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for YelpPolarityReviews.
-
-         Args:
-
-         **kwargs: keyword arguments forwarded to super.
-         """
-         super(YelpPolarityReviewsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-
-
- class YelpPolarity(datasets.GeneratorBasedBuilder):
-     """Yelp Polarity reviews dataset."""
-
-     BUILDER_CONFIGS = [
-         YelpPolarityReviewsConfig(
-             name="plain_text",
-             description="Plain text",
-         )
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "text": datasets.Value("string"),
-                     "label": datasets.features.ClassLabel(names=["1", "2"]),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://course.fast.ai/datasets",
-             citation=_CITATION,
-             task_templates=[TextClassification(text_column="text", label_column="label")],
-         )
-
-     def _vocab_text_gen(self, train_file):
-         for _, ex in self._generate_examples(train_file):
-             yield ex["text"]
-
-     def _split_generators(self, dl_manager):
-         arch_path = dl_manager.download(_DOWNLOAD_URL)
-         train_file = "yelp_review_polarity_csv/train.csv"
-         test_file = "yelp_review_polarity_csv/test.csv"
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": train_file,
-                     "files": dl_manager.iter_archive(arch_path),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": test_file,
-                     "files": dl_manager.iter_archive(arch_path),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, files):
-         """Generate Yelp examples."""
-         for path, f in files:
-             if path == filepath:
-                 for line_id, line in enumerate(f):
-                     line = line.decode("utf-8")
-                     # The format of the line is:
-                     # "1", "The text of the review."
-                     yield line_id, {"text": line[5:-2].strip(), "label": line[1]}
-                 break
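For reference, the deleted _generate_examples streamed the archive members yielded by dl_manager.iter_archive and parsed each CSV line of the form "1","review text" by character slicing. A self-contained sketch of that parsing logic on a made-up sample line (the review text here is invented for illustration):

# Hypothetical sample line in the same format as the CSV rows described above.
sample = b'"1","Not a real review, just an illustration."\n'

line = sample.decode("utf-8")
label = line[1]            # class index character: "1" (negative) or "2" (positive)
text = line[5:-2].strip()  # review text without the surrounding double quotes

print(label, text)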