"""Wikipedia-Utils: Preprocessed Wikipedia Texts for NLP""" |
|
|
|
|
|
import json |
|
from typing import Dict, Iterator, List, Tuple, Union |
|
|
|
import datasets |
|
|
|
|
|
_DESCRIPTION = "Preprocessed Wikipedia texts generated with scripts in singletongue/wikipedia-utils repo." |
|
|
|
_HOMEPAGE = "https://github.com/singletongue/wikipedia-utils" |
|
|
|
_LICENSE = "The content of Wikipedia is licensed under the CC-BY-SA 3.0 and GFDL licenses." |
|
|
|
_URL_BASE = "https://github.com/singletongue/wikipedia-utils/releases/download" |
|
_URLS = { |
|
"corpus-jawiki-20230403": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403.txt.gz", |
|
"corpus-jawiki-20230403-cirrus": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403-cirrus.txt.gz", |
|
"corpus-jawiki-20230403-filtered-large": f"{_URL_BASE}/2023-04-03/corpus-jawiki-20230403-filtered-large.txt.gz", |
|
"paragraphs-jawiki-20230403": f"{_URL_BASE}/2023-04-03/paragraphs-jawiki-20230403.json.gz", |
|
"passages-c300-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-c300-jawiki-20230403.json.gz", |
|
"passages-c400-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-c400-jawiki-20230403.json.gz", |
|
"passages-para-jawiki-20230403": f"{_URL_BASE}/2023-04-03/passages-para-jawiki-20230403.json.gz", |
|
} |
|
|
|
_VERSION = datasets.Version("1.0.0") |
|
|
|
|
|

class WikipediaUtils(datasets.GeneratorBasedBuilder):
    """Wikipedia-Utils dataset."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name=name, version=_VERSION) for name in _URLS.keys()]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.name.startswith("corpus"):
            features = datasets.Features({"text": datasets.Value("string")})
        elif self.config.name.startswith("paragraphs"):
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "pageid": datasets.Value("int64"),
                    "revid": datasets.Value("int64"),
                    "paragraph_index": datasets.Value("int64"),
                    "title": datasets.Value("string"),
                    "section": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "html_tag": datasets.Value("string"),
                }
            )
        elif self.config.name.startswith("passages"):
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "pageid": datasets.Value("int64"),
                    "revid": datasets.Value("int64"),
                    "title": datasets.Value("string"),
                    "section": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        else:
            raise ValueError("Invalid dataset config name is specified.")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        url = _URLS[self.config.name]
        filepath = dl_manager.download_and_extract(url)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath})]

    def _generate_examples(self, filepath: str) -> Iterator[Tuple[int, Dict[str, Union[int, str]]]]:
        if self.config.name.startswith("corpus"):
            # Corpus configs: one plain-text line per example, keyed by line number.
            with open(filepath) as f:
                for id_, line in enumerate(f):
                    line = line.rstrip("\n")
                    yield id_, {"text": line}
        elif self.config.name.startswith(("paragraphs", "passages")):
            # Paragraph and passage configs: one JSON object per line, keyed by its "id" field.
            with open(filepath) as f:
                for line in f:
                    item = json.loads(line)
                    yield item["id"], item
        else:
            raise ValueError("Invalid dataset config name is specified.")
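
# Usage sketch (not part of the loader itself): assuming this script is hosted on the
# Hugging Face Hub under "singletongue/wikipedia-utils" (an assumption; point
# load_dataset at wherever the script actually lives), any config name defined in
# _URLS above can be loaded like this:
#
#     from datasets import load_dataset
#
#     passages = load_dataset("singletongue/wikipedia-utils", "passages-c400-jawiki-20230403", split="train")
#     print(passages[0]["title"], passages[0]["text"])
#
#     corpus = load_dataset("singletongue/wikipedia-utils", "corpus-jawiki-20230403", split="train")
#     print(corpus[0]["text"])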