Commit 55a16f5 — tarekziade
Parent(s): a979f85

first version

Files changed:
- dataset_dict.json +1 -0
- test/data-00000-of-00001.arrow +3 -0
- test/dataset_info.json +24 -0
- test/state.json +13 -0
- train/data-00000-of-00001.arrow +3 -0
- train/dataset_info.json +24 -0
- train/state.json +13 -0
- wikiextract.py +194 -0
dataset_dict.json ADDED
@@ -0,0 +1 @@
{"splits": ["train", "test"]}
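dataset_dict.json is the top-level index written by DatasetDict.save_to_disk and lists the available splits. A minimal loading sketch, assuming the repository (or the topics.dataset directory produced by wikiextract.py, added below in this commit) is available locally:

    from datasets import load_from_disk

    # Load the saved DatasetDict; the "topics.dataset" path is an assumption,
    # point it at wherever this dataset was cloned or saved.
    ds = load_from_disk("topics.dataset")

    print(ds)              # DatasetDict with "train" and "test" splits
    print(ds["train"][0])  # {"title": ..., "id": ..., "summary": ..., "category": ...}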
test/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f91790985ba565bfe73a1b31db8c445be125ac7edfc655c1c98453ab619e370e
size 485808
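The .arrow files are tracked with Git LFS, so the repository only stores pointer files like the one above. As an illustration (not part of the commit), the recorded oid can be checked against the downloaded file once it has been pulled with git lfs:

    import hashlib

    # Hypothetical helper: compare a pulled LFS object against the sha256 oid
    # recorded in its pointer file.
    def verify_lfs_object(path, expected_sha256):
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest() == expected_sha256

    print(verify_lfs_object(
        "test/data-00000-of-00001.arrow",
        "f91790985ba565bfe73a1b31db8c445be125ac7edfc655c1c98453ab619e370e",
    ))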
test/dataset_info.json ADDED
@@ -0,0 +1,24 @@
{
  "citation": "",
  "description": "",
  "features": {
    "title": {
      "dtype": "string",
      "_type": "Value"
    },
    "id": {
      "dtype": "int64",
      "_type": "Value"
    },
    "summary": {
      "dtype": "string",
      "_type": "Value"
    },
    "category": {
      "dtype": "string",
      "_type": "Value"
    }
  },
  "homepage": "",
  "license": ""
}
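dataset_info.json records the schema of the split: four columns, three strings and one int64. For illustration, the same schema expressed directly with the datasets library would look like this:

    from datasets import Features, Value

    # In-code equivalent of the "features" block stored in dataset_info.json
    features = Features(
        {
            "title": Value("string"),
            "id": Value("int64"),
            "summary": Value("string"),
            "category": Value("string"),
        }
    )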
test/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "82a2f0e3fd5f277f",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": null
}
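state.json lists the Arrow shards and the fingerprint for one split. Each split directory is also loadable on its own; a minimal sketch, with the path again an assumption based on this layout:

    from datasets import load_from_disk

    # Load only the test split from its subdirectory
    test_ds = load_from_disk("topics.dataset/test")
    print(test_ds.num_rows, test_ds.column_names)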
train/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8bf8171fe054fb5be592a2f60e06d2cce893fdf6db09c9c5d16cc4c945e1850
size 4348768
train/dataset_info.json ADDED
@@ -0,0 +1,24 @@
{
  "citation": "",
  "description": "",
  "features": {
    "title": {
      "dtype": "string",
      "_type": "Value"
    },
    "id": {
      "dtype": "int64",
      "_type": "Value"
    },
    "summary": {
      "dtype": "string",
      "_type": "Value"
    },
    "category": {
      "dtype": "string",
      "_type": "Value"
    }
  },
  "homepage": "",
  "license": ""
}
train/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "5a2d02e6a2e569cd",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": null
}
wikiextract.py ADDED
@@ -0,0 +1,194 @@
"""
Creates a text/category dataset using Wikipedia.

Explores the 40 root categories and their sub-categories to collect pages,
assigning each page to a single root category only. The produced dataset
provides up to 200 pages per category.

Author: Tarek Ziadé / Mozilla
"""
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock

import wikipediaapi
from datasets import Dataset, DatasetDict
import nltk
from nltk.tokenize import sent_tokenize
import pandas as pd
from tqdm import tqdm


_LIMIT_PER_CAT = 200
_ROOT_CATS = [
    "Academic_disciplines",
    "Business",
    "Communication",
    "Concepts",
    "Culture",
    "Economy",
    "Education",
    "Energy",
    "Engineering",
    "Entertainment",
    "Entities",
    "Ethics",
    "Food_and_drink",
    "Geography",
    "Government",
    "Health",
    "History",
    "Human_behavior",
    "Humanities",
    "Information",
    "Internet",
    "Knowledge",
    "Language",
    "Law",
    "Life",
    "Lists",
    "Mass media",
    "Mathematics",
    "Military",
    "Nature",
    "People",
    "Philosophy",
    "Politics",
    "Religion",
    "Science",
    "Society",
    "Sports",
    "Technology",
    "Time",
    "Universe",
]


class WikiExtractor:
    def __init__(self):
        self.visited_page_ids = defaultdict(set)
        self.all_ids = set()
        self.client = wikipediaapi.Wikipedia("MediaWikiCat Project", "en", timeout=30)
        self.data_lock = Lock()
        self.pbar = None

    def fetch_pages_from_category(
        self,
        root_category_name,
        category_name,
        limit_per_category=_LIMIT_PER_CAT,
        depth=0,
        max_depth=10,
    ):
        if len(self.visited_page_ids[root_category_name]) >= limit_per_category:
            return []

        if depth > max_depth:  # Limit the recursion depth
            return []

        cat = self.client.page(category_name)
        pages = []

        # Fetch pages from the current category, skipping pages already
        # collected for any root category
        for c in cat.categorymembers.values():
            if c.ns == wikipediaapi.Namespace.MAIN and c.pageid not in self.all_ids:
                pages.append(c)

                with self.data_lock:  # Ensure thread-safe updates
                    self.visited_page_ids[root_category_name].add(c.pageid)
                    self.all_ids.add(c.pageid)

                if len(self.visited_page_ids[root_category_name]) >= limit_per_category:
                    break

        # Fetch pages from subcategories
        for subcat in cat.categorymembers.values():
            if subcat.ns == wikipediaapi.Namespace.CATEGORY:
                pages += self.fetch_pages_from_category(
                    root_category_name,
                    subcat.title,
                    limit_per_category,
                    depth + 1,
                    max_depth,
                )

        return pages

    def preprocess_content(self, text):
        # Keep only the first five sentences
        sentences = sent_tokenize(text)[:5]
        return " ".join(sentences)

    def process_page(self, page):
        # Prefer the page summary, falling back to the full text
        if page.summary:
            summary = self.preprocess_content(page.summary)
        else:
            summary = self.preprocess_content(page.text)

        return {
            "title": page.title,
            "id": page.pageid,
            "summary": summary,
        }

    def process_category(self, category):
        category_data = []
        category = f"Category:{category}"
        pages = self.fetch_pages_from_category(category, category)

        for page in pages:
            try:
                data = self.process_page(page)
            except Exception:
                # Skip pages that fail to fetch or parse instead of aborting
                continue

            data["category"] = category
            category_data.append(data)
            if self.pbar is not None:
                self.pbar.update(1)

        return category_data

    def __call__(self):
        with tqdm(
            total=len(_ROOT_CATS) * _LIMIT_PER_CAT, desc="Processing Categories"
        ) as pbar:
            self.pbar = pbar
            data = []
            with ThreadPoolExecutor(max_workers=15) as executor:
                future_to_category = {
                    executor.submit(self.process_category, category): category
                    for category in _ROOT_CATS
                }

                for future in as_completed(future_to_category):
                    category_data = future.result()
                    data.extend(category_data)

            return data


def main():
    nltk.download("punkt")
    extractor = WikiExtractor()
    data = extractor()
    dataset = Dataset.from_pandas(pd.DataFrame(data))

    train_test_split = dataset.train_test_split(test_size=0.1)
    dataset_dict = DatasetDict(
        {"train": train_test_split["train"], "test": train_test_split["test"]}
    )

    dataset_dict.save_to_disk("topics.dataset")


if __name__ == "__main__":
    main()
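A short usage sketch (not part of the commit): after running "python wikiextract.py", the per-category balance of the generated dataset can be inspected with pandas, since every record carries a category column. The "topics.dataset" path matches the save_to_disk() call above.

    import pandas as pd
    from datasets import load_from_disk

    # Count how many pages were collected for each root category
    ds = load_from_disk("topics.dataset")
    counts = pd.Series(ds["train"]["category"]).value_counts()
    print(counts.head(10))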