albertvillanova committed
Commit c848ab9
1 Parent(s): b512ea6

Delete loading script

Files changed (1)
  1. id_newspapers_2018.py +0 -115
id_newspapers_2018.py DELETED
@@ -1,115 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Indonesian Newspapers 2018"""
-
-
- import json
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @inproceedings{id_newspapers_2018,
-   author = {},
-   title = {Indonesian Newspapers 2018},
-   year = {2019},
-   url = {https://github.com/feryandi/Dataset-Artikel},
- }
- """
-
- _DESCRIPTION = """\
- The dataset contains around 500K articles (136M of words) from 7 Indonesian newspapers: Detik, Kompas, Tempo,
- CNN Indonesia, Sindo, Republika and Poskota. The articles are dated between 1st January 2018 and 20th August 2018
- (with few exceptions dated earlier). The size of uncompressed 500K json files (newspapers-json.tgz) is around 2.2GB,
- and the cleaned uncompressed in a big text file (newspapers.txt.gz) is about 1GB. The original source in Google Drive
- contains also a dataset in html format which include raw data (pictures, css, javascript, ...)
- from the online news website
- """
-
- _HOMEPAGE = "https://github.com/feryandi/Dataset-Artikel"
-
- _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International Public License"
-
- _URL = "http://cloud.uncool.ai/index.php/s/kF83dQHfGeS2LX2/download"
-
-
- class IdNewspapers2018Config(datasets.BuilderConfig):
-     """BuilderConfig for IdNewspapers2018"""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for IdNewspapers2018.
-         Args:
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(IdNewspapers2018Config, self).__init__(**kwargs)
-
-
- class IdNewspapers2018(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         IdNewspapers2018Config(
-             name="id_newspapers_2018",
-             version=VERSION,
-             description="IdNewspapers2018 dataset",
-         ),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "url": datasets.Value("string"),
-                 "date": datasets.Value("string"),
-                 "title": datasets.Value("string"),
-                 "content": datasets.Value("string"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         archive = dl_manager.download(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             )
-         ]
-
-     def _generate_examples(self, files):
-         id = 0
-         for path, f in files:
-             if path.startswith("newspapers") and path.endswith(".json"):
-                 data = json.loads(f.read().decode("utf-8"))
-                 yield id, {
-                     "id": str(id),
-                     "url": data["url"],
-                     "date": data["date"],
-                     "title": data["title"],
-                     "content": data["content"],
-                 }
-                 id += 1
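
For context, a minimal sketch of how the dataset is consumed once the script is gone. On the Hugging Face Hub, a loading script is typically deleted after the repository's data has been converted to plain data files (such as Parquet) that the datasets library can read directly; the dataset id and the single train split below come from the deleted script, while the presence of converted data files is an assumption not stated in this commit.

# Minimal usage sketch. Assumes the repository now ships data files
# (e.g. auto-converted Parquet), so load_dataset() reads them directly
# instead of executing id_newspapers_2018.py.
from datasets import load_dataset

ds = load_dataset("id_newspapers_2018", split="train")

# Schema defined by the deleted script's _info():
# id, url, date, title, content -- all string values.
print(ds.features)
print(ds[0]["title"])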