tarekziade committed on
Commit b3e60fd
1 Parent(s): 69d0f36

will be in main repo

Files changed (1)
  1. wikiextract.py +0 -190
wikiextract.py DELETED
@@ -1,190 +0,0 @@
-"""
-Creates a text/category dataset using Wikipedia.
-
-Explores the 40 root categories and their sub-categories to collect pages that are seen only on
-each root category. The produced dataset provides 200 pages per category.
-
-Author: Tarek Ziadé / Mozilla
-
-"""
-import os
-from collections import defaultdict
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from threading import Lock
-
-import wikipediaapi
-from datasets import Dataset, DatasetDict
-import nltk
-from nltk.tokenize import sent_tokenize
-import pandas as pd
-from tqdm import tqdm
-
-
-_LIMIT_PER_CAT = 200
-_ROOT_CATS = [
-    "Academic_disciplines",
-    "Business",
-    "Communication",
-    "Concepts",
-    "Culture",
-    "Economy",
-    "Education",
-    "Energy",
-    "Engineering",
-    "Entertainment",
-    "Entities",
-    "Ethics",
-    "Food_and_drink",
-    "Geography",
-    "Government",
-    "Health",
-    "History",
-    "Human_behavior",
-    "Humanities",
-    "Information",
-    "Internet",
-    "Knowledge",
-    "Language",
-    "Law",
-    "Life",
-    "Lists",
-    "Mass media",
-    "Mathematics",
-    "Military",
-    "Nature",
-    "People",
-    "Philosophy",
-    "Politics",
-    "Religion",
-    "Science",
-    "Society",
-    "Sports",
-    "Technology",
-    "Time",
-    "Universe",
-]
-
-
-class WikiExtractor:
-    def __init__(self):
-        self.visited_page_ids = defaultdict(set)
-        self.all_ids = set()
-        self.client = wikipediaapi.Wikipedia("MediaWikiCat Project", "en", timeout=30)
-        self.data_lock = Lock()
-        self.pbar = None
-
-    def fetch_pages_from_category(
-        self,
-        root_category_name,
-        category_name,
-        limit_per_category=_LIMIT_PER_CAT,
-        depth=0,
-        max_depth=10,
-    ):
-        if len(self.visited_page_ids[root_category_name]) >= limit_per_category:
-            return []
-
-        if depth > max_depth:  # Limit the recursion depth
-            return []
-
-        cat = self.client.page(category_name)
-        pages = []
-
-        # Fetch pages from the current category
-        for c in cat.categorymembers.values():
-            if (
-                c.ns == wikipediaapi.Namespace.MAIN
-                and c.pageid not in self.visited_page_ids
-            ):
-                if c.pageid in self.all_ids:
-                    continue
-                pages.append(c)
-
-                with self.data_lock:  # Ensure thread-safe updates
-                    self.visited_page_ids[root_category_name].add(c.pageid)
-                    self.all_ids.add(c.pageid)
-
-                if len(self.visited_page_ids[root_category_name]) >= limit_per_category:
-                    break
-
-        # Fetch pages from subcategories
-        for subcat in cat.categorymembers.values():
-            if subcat.ns == wikipediaapi.Namespace.CATEGORY:
-                pages += self.fetch_pages_from_category(
-                    root_category_name,
-                    subcat.title,
-                    limit_per_category,
-                    depth + 1,
-                    max_depth,
-                )
-
-        return pages
-
-    def preprocess_content(self, text):
-        sentences = sent_tokenize(text)[:5]
-        return " ".join(sentences)
-
-    def process_page(self, page):
-        if page.summary:
-            summary = self.preprocess_content(page.summary)
-        else:
-            summary = self.preprocess_content(page.text)
-
-        summary = self.preprocess_content(summary)
-        return {
-            "title": page.title,
-            "id": page.pageid,
-            "summary": summary,
-        }
-
-    def process_category(self, category):
-        category_data = []
-        category = f"Category:{category}"
-        pages = self.fetch_pages_from_category(category, category)
-
-        for page in pages:
-            data = self.process_page(page)
-            data["category"] = category.removeprefix("Category:")
-            category_data.append(data)
-            if self.pbar is not None:
-                self.pbar.update(1)
-
-        return category_data
-
-    def __call__(self):
-        with tqdm(
-            total=len(_ROOT_CATS) * _LIMIT_PER_CAT, desc="Processing Categories"
-        ) as pbar:
-            self.pbar = pbar
-            with ThreadPoolExecutor(max_workers=15) as executor:
-                future_to_category = {
-                    executor.submit(self.process_category, category): category
-                    for category in _ROOT_CATS
-                }
-
-                for future in as_completed(future_to_category):
-                    category_data = future.result()
-                    for item in category_data:
-                        yield item
-
-
-def main():
-    nltk.download("punkt")
-    extractor = WikiExtractor()
-    pages = list(extractor())
-
-    def gen():
-        for page in pages:
-            yield page
-
-    dataset = Dataset.from_generator(gen)
-    train_test_split = dataset.train_test_split(test_size=0.1)
-    dataset_dict = DatasetDict(
-        {"train": train_test_split["train"], "test": train_test_split["test"]}
-    )
-
-    dataset_dict.push_to_hub("tarekziade/wikipedia-topics")
-
-
-if __name__ == "__main__":
-    main()
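
Note: the removed script only built and published the dataset; consumers do not need it. A minimal sketch of how the published dataset can still be loaded, assuming the repo id passed to push_to_hub above ("tarekziade/wikipedia-topics") is unchanged (adjust the id if it moves with the main repo):

from datasets import load_dataset

# Load the train/test splits the removed script pushed to the Hub.
# Repo id taken from the push_to_hub call above; adjust if the dataset moved.
ds = load_dataset("tarekziade/wikipedia-topics")
print(ds["train"][0])  # expected fields: title, id, summary, category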