"""TODO: Add a description here.""" |
|
|
|
|
|
import pandas as pd |
|
import re |
|
import gzip |
|
import json |
|
import datasets |
|
from pathlib import Path |
|
|
def get_open_method(path):
    """Return the open function matching the file extension (.gz, .bz2, or plain text)."""
    path = Path(path)
    ext = path.suffix

    if ext == ".gz":
        open_func = gzip.open
    elif ext == ".bz2":
        import bz2
        open_func = bz2.open
    else:
        open_func = open
    return open_func
|
|
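# For instance, get_open_method("corpus.jsonl.gz") returns gzip.open, so
# read_file below decompresses gzip files transparently.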
def read_file(path):
    """Read a whole text file, decompressing it if needed."""
    open_func = get_open_method(path)
    with open_func(path, "rt", encoding="UTF-8") as f:
        return f.read()
|
|
_CITATION = "" |
|
_DESCRIPTION = """\ |
|
French Wikipedia dataset for Entity Linking |
|
""" |
|
_HOMEPAGE = "https://github.com/GaaH/frwiki_good_pages_el" |
|
_LICENSE = "" |
|
# Both configurations are generated from the same archive.
_URLs = {
    "frwiki": "data.tar.gz",
    "entities": "data.tar.gz",
}
|
# IOB labels used for token-level mention tagging.
_NER_CLASS_LABELS = [
    "B",
    "I",
    "O",
]
|
_ENTITY_TYPES = [
    "DATE",
    "PERSON",
    "GEOLOC",
    "ORG",
    "OTHER",
]
|
|
def text_to_el_features(doc_qid, doc_title, text, title2qid, title2wikipedia, title2wikidata):
    """Convert a page annotated with [E=Title]mention[/E] markup into
    token-level Entity Linking features."""
    res = {
        "title": doc_title.replace("_", " "),
        "qid": doc_qid,
    }
    text_dict = {
        "words": [],
        "labels": [],
        "qids": [],
        "titles": [],
        "wikipedia": [],
        "wikidata": [],
    }
    entity_pattern = r"\[E=(.+?)\](.+?)\[/E\]"

    i = 0
    for m in re.finditer(entity_pattern, text):
        mention_title = m.group(1)
        mention = m.group(2)

        mention_qid = title2qid.get(mention_title, "").replace("_", " ")
        mention_wikipedia = title2wikipedia.get(mention_title, "")
        mention_wikidata = title2wikidata.get(mention_title, "")

        # Descriptions may themselves contain entity markup; keep only the
        # surface forms.
        mention_wikipedia = re.sub(entity_pattern, r"\2", mention_wikipedia)
        mention_wikidata = re.sub(entity_pattern, r"\2", mention_wikidata)

        mention_words = mention.split()

        # Words between the previous mention and this one are outside any
        # entity, hence labelled "O".
        j = m.start(0)
        prev_text = text[i:j].split()
        len_prev_text = len(prev_text)
        text_dict["words"].extend(prev_text)
        text_dict["labels"].extend(["O"] * len_prev_text)
        text_dict["qids"].extend([None] * len_prev_text)
        text_dict["titles"].extend([None] * len_prev_text)
        text_dict["wikipedia"].extend([None] * len_prev_text)
        text_dict["wikidata"].extend([None] * len_prev_text)

        text_dict["words"].extend(mention_words)

        if mention_wikipedia == "":
            # Unknown entity: treat the mention as plain text.
            len_mention = len(mention_words)
            text_dict["labels"].extend(["O"] * len_mention)
            text_dict["qids"].extend([None] * len_mention)
            text_dict["titles"].extend([None] * len_mention)
            text_dict["wikipedia"].extend([None] * len_mention)
            text_dict["wikidata"].extend([None] * len_mention)
        else:
            # IOB scheme: the first token gets "B" and carries the entity
            # metadata, the remaining tokens get "I".
            len_mention_tail = len(mention_words) - 1
            text_dict["labels"].extend(["B"] + ["I"] * len_mention_tail)
            text_dict["qids"].extend([mention_qid] + [None] * len_mention_tail)
            text_dict["titles"].extend(
                [mention_title] + [None] * len_mention_tail)
            text_dict["wikipedia"].extend(
                [mention_wikipedia] + [None] * len_mention_tail)
            text_dict["wikidata"].extend(
                [mention_wikidata] + [None] * len_mention_tail)

        i = m.end(0)

    # Remaining words after the last mention.
    tail = text[i:].split()
    len_tail = len(tail)
    text_dict["words"].extend(tail)
    text_dict["labels"].extend(["O"] * len_tail)
    text_dict["qids"].extend([None] * len_tail)
    text_dict["titles"].extend([None] * len_tail)
    text_dict["wikipedia"].extend([None] * len_tail)
    text_dict["wikidata"].extend([None] * len_tail)
    res.update(text_dict)
    return res
|
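# A minimal sketch of what text_to_el_features produces; the lookup values
# below are made up for illustration:
#
#   text_to_el_features(
#       "Q90", "Paris", "Capitale : [E=Paris]Paris[/E] !",
#       {"Paris": "Q90"}, {"Paris": "Ville de France"}, {"Paris": "capitale"},
#   )
#   # -> {"title": "Paris", "qid": "Q90",
#   #     "words": ["Capitale", ":", "Paris", "!"],
#   #     "labels": ["O", "O", "B", "O"],
#   #     "qids": [None, None, "Q90", None], ...}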
|
|
class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
    """French Wikipedia dataset for Entity Linking."""

    VERSION = datasets.Version("0.1.0")
|
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="frwiki", version=VERSION,
                               description="The frwiki dataset for Entity Linking"),
        datasets.BuilderConfig(name="entities", version=VERSION,
                               description="Entities and their descriptions"),
    ]
|
DEFAULT_CONFIG_NAME = "frwiki" |
|
    def _info(self):
        if self.config.name == "frwiki":
            features = datasets.Features({
                "title": datasets.Value("string"),
                "qid": datasets.Value("string"),
                "words": [datasets.Value("string")],
                "wikipedia": [datasets.Value("string")],
                "wikidata": [datasets.Value("string")],
                "labels": [datasets.ClassLabel(names=_NER_CLASS_LABELS)],
                "titles": [datasets.Value("string")],
                "qids": [datasets.Value("string")],
            })
        elif self.config.name == "entities":
            features = datasets.Features({
                "qid": datasets.Value("string"),
                "title": datasets.Value("string"),
                "url": datasets.Value("string"),
                "label": datasets.Value("string"),
                "aliases": [datasets.Value("string")],
                "type": datasets.ClassLabel(names=_ENTITY_TYPES),
                "wikipedia": datasets.Value("string"),
                "wikidata": datasets.Value("string"),
            })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": Path(data_dir, "data"),
                    "split": "train"
                }
            )
        ]
|
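    # The extracted archive is expected to contain (inferred from the paths
    # used below):
    #   data/entities.jsonl.gz -- one JSON object per entity
    #   data/corpus.jsonl.gz   -- one JSON object per annotated page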
    def _generate_examples(self, data_dir, split):
        """Yields examples as (key, example) tuples."""
        entities_path = Path(data_dir, "entities.jsonl.gz")
        corpus_path = Path(data_dir, "corpus.jsonl.gz")

        def _identity(x):
            # Identity parser: keeps JSON numbers and constants as raw
            # strings so that nothing is coerced.
            return x

        if self.config.name == "frwiki":
            # First pass over the entities file to build the lookup tables
            # used when converting each page to token-level features.
            title2wikipedia = {}
            title2wikidata = {}
            title2qid = {}
            with gzip.open(entities_path, "rt", encoding="UTF-8") as ent_file:
                for line in ent_file:
                    item = json.loads(
                        line, parse_int=_identity, parse_float=_identity, parse_constant=_identity)
                    title = item["title"]
                    title2wikipedia[title] = item["wikipedia_description"]
                    title2wikidata[title] = item["wikidata_description"]
                    title2qid[title] = item["qid"]

            with gzip.open(corpus_path, "rt", encoding="UTF-8") as crps_file:
                for key, line in enumerate(crps_file):
                    item = json.loads(
                        line, parse_int=_identity, parse_float=_identity, parse_constant=_identity)
                    qid = item["qid"]
                    title = item["title"]
                    text = item["text"]

                    features = text_to_el_features(
                        qid, title, text, title2qid, title2wikipedia, title2wikidata)
                    yield key, features
        elif self.config.name == "entities":
            entity_pattern = r"\[E=(.+?)\](.+?)\[/E\]"
            with gzip.open(entities_path, "rt", encoding="UTF-8") as ent_file:
                for key, line in enumerate(ent_file):
                    item = json.loads(
                        line, parse_int=_identity, parse_float=_identity, parse_constant=_identity)
                    try:
                        qid = item["qid"]
                        # Strip entity markup from the description, keeping
                        # only the surface forms.
                        item["wikipedia"] = re.sub(
                            entity_pattern,
                            r"\2",
                            item.pop("wikipedia_description")
                        )
                        item["wikidata"] = item.pop("wikidata_description")
                        if qid is None or qid == "":
                            # No Wikidata entry: blank out Wikidata fields.
                            item["qid"] = ""
                            item["wikidata"] = ""
                            item["label"] = ""
                            item["aliases"] = []
                        if item["type"] not in _ENTITY_TYPES:
                            item["type"] = "OTHER"
                        yield key, item
                    except Exception:
                        print(item, file=sys.stderr)
                        return
|
|
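# Example usage (a minimal sketch; assumes a `datasets` version that supports
# local loading scripts and that this file sits next to `data.tar.gz`):
#
#   from datasets import load_dataset
#   ds = load_dataset("./frwiki_good_pages_el.py", "frwiki", split="train")
#   print(ds[0]["words"][:5], ds[0]["labels"][:5])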