"""
    Creates a text/category dataset using Wikipedia.

    Explores the 40 root categories and their sub-categories, collecting pages so that
    each page is attributed to exactly one root category. The produced dataset provides
    up to 200 pages per root category.

    Author: Tarek Ziadé / Mozilla

"""
import os
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock

import wikipediaapi
from datasets import Dataset, DatasetDict
import nltk
from nltk.tokenize import sent_tokenize
from tqdm import tqdm
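
# Example usage (the script name below is only an assumption; adjust it to the
# actual file name):
#
#   python build_wikipedia_dataset.py
#
# The resulting train/test DatasetDict is written next to this script and can be
# reloaded later with datasets.load_from_disk().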


_LIMIT_PER_CAT = 200
_ROOT_CATS = [
    "Academic_disciplines",
    "Business",
    "Communication",
    "Concepts",
    "Culture",
    "Economy",
    "Education",
    "Energy",
    "Engineering",
    "Entertainment",
    "Entities",
    "Ethics",
    "Food_and_drink",
    "Geography",
    "Government",
    "Health",
    "History",
    "Human_behavior",
    "Humanities",
    "Information",
    "Internet",
    "Knowledge",
    "Language",
    "Law",
    "Life",
    "Lists",
    "Mass media",
    "Mathematics",
    "Military",
    "Nature",
    "People",
    "Philosophy",
    "Politics",
    "Religion",
    "Science",
    "Society",
    "Sports",
    "Technology",
    "Time",
    "Universe",
]
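
# Sanity check: the module docstring advertises 40 root categories.
assert len(_ROOT_CATS) == 40, "expected 40 root Wikipedia categories"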


class WikiExtractor:
    def __init__(self):
        self.visited_page_ids = defaultdict(set)
        self.all_ids = set()
        self.client = wikipediaapi.Wikipedia("MediaWikiCat Project", "en", timeout=30)
        self.data_lock = Lock()
        self.pbar = None

    def fetch_pages_from_category(
        self,
        root_category_name,
        category_name,
        limit_per_category=_LIMIT_PER_CAT,
        depth=0,
        max_depth=10,
    ):
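        """Recursively collect article pages under ``category_name``.

        Every collected page is attributed to ``root_category_name``; recursion
        stops once ``limit_per_category`` pages have been collected or
        ``max_depth`` is exceeded.
        """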
        if len(self.visited_page_ids[root_category_name]) >= limit_per_category:
            return []

        if depth > max_depth:  # Limit the recursion depth
            return []

        cat = self.client.page(category_name)
        pages = []

        # Fetch article pages from the current category
        for c in cat.categorymembers.values():
            if c.ns != wikipediaapi.Namespace.MAIN:
                continue

            with self.data_lock:  # Ensure thread-safe checks and updates
                # Skip pages already claimed by this or another root category
                if c.pageid in self.all_ids:
                    continue
                self.visited_page_ids[root_category_name].add(c.pageid)
                self.all_ids.add(c.pageid)

            pages.append(c)

            if len(self.visited_page_ids[root_category_name]) >= limit_per_category:
                break

        # Fetch pages from subcategories
        for subcat in cat.categorymembers.values():
            if subcat.ns == wikipediaapi.Namespace.CATEGORY:
                pages += self.fetch_pages_from_category(
                    root_category_name,
                    subcat.title,
                    limit_per_category,
                    depth + 1,
                    max_depth,
                )

        return pages

    def preprocess_content(self, text):
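        """Keep only the first five sentences of ``text``."""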
        sentences = sent_tokenize(text)[:5]
        return " ".join(sentences)

    def process_page(self, page):
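        """Return the title, page id, and truncated summary of a single page."""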
        if page.summary:
            summary = self.preprocess_content(page.summary)
        else:
            summary = self.preprocess_content(page.text)

        return {
            "title": page.title,
            "id": page.pageid,
            "summary": summary,
        }

    def process_category(self, category):
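        """Fetch, preprocess, and label all pages collected for one root category."""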
        category_data = []
        category = f"Category:{category}"
        pages = self.fetch_pages_from_category(category, category)

        for page in pages:
            try:
                data = self.process_page(page)
            except Exception as e:
                # Skip pages whose content cannot be fetched or tokenized
                tqdm.write(f"Skipping {page.title!r}: {e}")
                continue

            data["category"] = category
            category_data.append(data)
            if self.pbar is not None:
                self.pbar.update(1)

        return category_data

    def __call__(self):
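        """Yield page records for every root category, processed concurrently."""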
        with tqdm(
            total=len(_ROOT_CATS) * _LIMIT_PER_CAT, desc="Processing Categories"
        ) as pbar:
            self.pbar = pbar
            with ThreadPoolExecutor(max_workers=15) as executor:
                future_to_category = {
                    executor.submit(self.process_category, category): category
                    for category in _ROOT_CATS
                }

                for future in as_completed(future_to_category):
                    category_data = future.result()
                    for item in category_data:
                        yield item


def main():
    nltk.download("punkt")
    extractor = WikiExtractor()
    dataset = Dataset.from_generator(extractor)

    train_test_split = dataset.train_test_split(test_size=0.1)
    dataset_dict = DatasetDict(
        {"train": train_test_split["train"], "test": train_test_split["test"]}
    )

    dataset_dict.save_to_disk(os.path.dirname(os.path.abspath(__file__)))


if __name__ == "__main__":
    main()