"""DWIE is conceived as an entity-centric dataset that describes interactions and properties of conceptual entities on the level of the complete document.""" |
|
|
|
import datasets |
|
from datasets import DownloadManager |
|
import os |
|
import json |
|
import requests |
|
from typing import Optional, List, Union |
|
import argparse |
|
import hashlib |
|
from collections import OrderedDict |
|
from time import sleep |
|
|
|
|
|
|
|
|
|
|
|
_CITATION = """\
@article{ZAPOROJETS2021102563,
    title = {{DWIE}: An entity-centric dataset for multi-task document-level information extraction},
    journal = {Information Processing & Management},
    volume = {58},
    number = {4},
    pages = {102563},
    year = {2021},
    issn = {0306-4573},
    doi = {https://doi.org/10.1016/j.ipm.2021.102563},
    url = {https://www.sciencedirect.com/science/article/pii/S0306457321000662},
    author = {Klim Zaporojets and Johannes Deleu and Chris Develder and Thomas Demeester}
}
"""

_DESCRIPTION = """\
DWIE is conceived as an entity-centric dataset that describes interactions and properties of conceptual entities
on the level of the complete document. This contrasts with currently dominant mention-driven approaches that start
from the detection and classification of named entity mentions in individual sentences. Also, the dataset was
randomly sampled from a news platform (English online content from Deutsche Welle), and the annotation scheme
was generated to cover that content. This makes the setting more realistic than in datasets with pre-determined
annotation schemes and non-uniform sampling of content to obtain balanced annotations."""

_HOMEPAGE = "https://github.com/klimzaporojets/DWIE"

_LICENSE = ""

_URLS = {
    "Task_1": {
        "url": "https://github.com/klimzaporojets/DWIE/archive/refs/heads/master.zip"
    }
}

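# Layout note (based on the paths used in _split_generators below): the master-branch zip in
# _URLS extracts to a "DWIE-master/" folder whose "data/article_id_to_url.json" lists, for
# each article, its id, a dw.com API URL and a content hash, and whose "data/annos/<id>.json"
# files contain the annotations only. The split generator re-fetches each article's text from
# dw.com and merges it with those annotation files.
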
class DWIE(datasets.GeneratorBasedBuilder):
    """
    DWIE is conceived as an entity-centric dataset that describes interactions and properties of conceptual entities on the level of the complete document.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Task_1", version=VERSION,
                               description="Relation classification"),
    ]
    DEFAULT_CONFIG_NAME = "Task_1"

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "content": datasets.Value("string"),
                "tags": datasets.Value("string"),
                "mentions": [
                    {
                        "begin": datasets.Value("int32"),
                        "end": datasets.Value("int32"),
                        "text": datasets.Value("string"),
                        "concept": datasets.Value("int32"),
                        "candidates": datasets.Sequence(datasets.Value("string")),
                        "scores": datasets.Sequence(datasets.Value("float32")),
                    }
                ],
                "concepts": [
                    {
                        "concept": datasets.Value("int32"),
                        "text": datasets.Value("string"),
                        "keyword": datasets.Value("bool"),
                        "count": datasets.Value("int32"),
                        "link": datasets.Value("string"),
                        "tags": datasets.Sequence(datasets.Value("string")),
                    }
                ],
                "relations": [
                    {
                        "s": datasets.Value("int32"),
                        "p": datasets.Value("string"),
                        "o": datasets.Value("int32"),
                    }
                ],
                "frames": [
                    {
                        "type": datasets.Value("string"),
                        "slots": [{
                            "name": datasets.Value("string"),
                            "value": datasets.Value("int32"),
                        }],
                    }
                ],
                "iptc": datasets.Sequence(datasets.Value("string")),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        downloaded = dl_manager.download_and_extract(urls)
        dwie_data_dir = os.path.join(downloaded["url"], "DWIE-master", "data")
        with open(os.path.join(dwie_data_dir, "article_id_to_url.json")) as f:
            article_id_to_url_json = json.load(f)

        # Article ids whose dw.com URL has to be built with a different (updated) numeric id.
        ids_to_new_ids = {18525950: 19026607}

        should_tokenize = False

        # String replacements that bring the live dw.com text back in line with the content
        # the DWIE annotations were created on.
        content_to_new_content = {'DW_40663341': [('starting with Sunday\'s', 'starting Sunday\'s'),
                                                  ('$1 million (€840,000)', 'one million dollars (840,000 euros)'),
                                                  ('who kneel in protest during', 'to kneel in protest during')]}

        articles_done = 0
        total_articles = len(article_id_to_url_json)
        problematic_articles = set()
        problematic_hash_articles = set()
        all_annos = []

        for curr_article in article_id_to_url_json:
            article_id = curr_article['id']
            article_url = curr_article['url']
            article_id_nr = int(article_id[3:])
            if article_id_nr in ids_to_new_ids:
                article_url = article_url.replace(str(article_id_nr), str(ids_to_new_ids[article_id_nr]))
            article_hash = curr_article['hash']

            annos_only_art_path = os.path.join(dwie_data_dir, 'annos', curr_article['id'] + '.json')
            with open(annos_only_art_path) as f:
                annos_only_json = json.load(f)
            done = False
            attempts = 0
            while not done and attempts <= 3:
                # Fetch the article from the dw.com API and rebuild the document as
                # "<title>\n<teaser>\n<text>", which is the content the annotations refer to.
                a = requests.get(article_url, allow_redirects=True).json()
                if 'name' in a:
                    article_title = a['name']
                else:
                    print('WARNING: no name detected for ', article_id)
                    article_title = ''
                if 'teaser' in a:
                    article_teaser = a['teaser']
                else:
                    print('WARNING: no teaser detected for ', article_id)
                    article_teaser = ''
                if 'text' in a:
                    article_text = a['text']
                else:
                    print('WARNING: no text detected for ', article_id)
                    article_text = ''

                article_content_no_strip = '{}\n{}\n{}'.format(article_title, article_teaser, article_text)
                article_content = article_content_no_strip

                if article_id in content_to_new_content:
                    for str_dw, str_dwie in content_to_new_content[article_id]:
                        article_content = article_content.replace(str_dw, str_dwie)

                if 'mentions' in annos_only_json:
                    for idx_mention, curr_mention in enumerate(annos_only_json['mentions']):
                        # Normalise the mention text before comparing it to the article content.
                        curr_mention_text = curr_mention['text'].replace(' ', ' ')
                        curr_mention_text = curr_mention_text.replace('', '')
                        solved = False
                        # Fill in defaults for optional mention fields so every record matches
                        # the feature schema declared in _info.
                        if "begin" not in curr_mention:
                            curr_mention["begin"] = 0
                        if "end" not in curr_mention:
                            curr_mention["end"] = 0
                        if "text" not in curr_mention:
                            curr_mention["text"] = ""
                        if "concept" not in curr_mention:
                            curr_mention["concept"] = 0
                        if "candidates" not in curr_mention:
                            curr_mention["candidates"] = []
                        if "scores" not in curr_mention:
                            curr_mention["scores"] = []

                        if article_content[curr_mention['begin']:curr_mention['end']] != curr_mention_text:
                            curr_mention_begin = curr_mention['begin']
                            curr_mention_end = curr_mention['end']
                            offset = 0
                            # No re-alignment is attempted here (`solved` is never set to True),
                            # so a span mismatch is only reported and the article is flagged.
                            if not solved:
                                print('--------------------------------')
                                print('ERROR ALIGNMENT: texts don\'t match for {}: "{}" vs "{}", the textual content of '
                                      'the files won\'t be complete '
                                      .format(article_id, article_content[curr_mention['begin']:curr_mention['end']],
                                              curr_mention_text))
                                print('--------------------------------')
                                problematic_articles.add(article_id)
                            else:
                                curr_mention['begin'] = curr_mention_begin - offset
                                curr_mention['end'] = curr_mention_end - offset
                if 'concepts' in annos_only_json:
                    for idx_concept, curr_concept in enumerate(annos_only_json['concepts']):
                        # Same defaulting as for mentions, so concepts match the declared schema.
                        if "concept" not in curr_concept:
                            curr_concept["concept"] = 0
                        if "text" not in curr_concept:
                            curr_concept["text"] = ""
                        if "keyword" not in curr_concept:
                            # "keyword" is declared in _info; assumed default when the annotation omits it.
                            curr_concept["keyword"] = False
                        if "count" not in curr_concept:
                            curr_concept["count"] = 0
                        if "link" not in curr_concept:
                            curr_concept["link"] = ""
                        if "tags" not in curr_concept:
                            curr_concept["tags"] = []

                if not should_tokenize:
                    annos_json = {'id': annos_only_json['id'],
                                  'content': article_content,
                                  'tags': annos_only_json['tags'],
                                  'mentions': annos_only_json['mentions'],
                                  'concepts': annos_only_json['concepts'],
                                  'relations': annos_only_json['relations'],
                                  'frames': annos_only_json['frames'],
                                  'iptc': annos_only_json['iptc']}
                    all_annos.append(annos_json)
                else:
                    # Only reached when should_tokenize is set to True above; it relies on an
                    # external `tokenizer` object that this script does not define, and its
                    # 'tokenization' field is not part of the feature schema in _info.
                    tokenized = tokenizer.tokenize(article_content)
                    tokens = list()
                    begin = list()
                    end = list()
                    for curr_token in tokenized:
                        tokens.append(curr_token['token'])
                        begin.append(curr_token['offset'])
                        end.append(curr_token['offset'] + curr_token['length'])
                    annos_json = OrderedDict({'id': annos_only_json['id'],
                                              'content': article_content,
                                              'tokenization': OrderedDict({'tokens': tokens, 'begin': begin, 'end': end}),
                                              'tags': annos_only_json['tags'],
                                              'mentions': annos_only_json['mentions'],
                                              'concepts': annos_only_json['concepts'],
                                              'relations': annos_only_json['relations'],
                                              'frames': annos_only_json['frames'],
                                              'iptc': annos_only_json['iptc']})

                # Compare the rebuilt content against the hash recorded in the DWIE metadata;
                # a mismatch is reported and the article flagged, but `done` is set regardless,
                # so each article is fetched exactly once.
                hash_content = hashlib.sha1(article_content.encode("UTF-8")).hexdigest()
                if hash_content != article_hash:
                    print('!!ERROR - hash doesn\'t match for ', article_id)
                    problematic_hash_articles.add(article_id)
                attempts += 1

                # Short pause between consecutive requests to the dw.com API.
                sleep(.1)
                done = True
            if done:
                articles_done += 1

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "all_annos": all_annos,
                },
            )
        ]

    def _generate_examples(self, all_annos):
        for data in all_annos:
            yield data['id'], {
                "id": data['id'],
                "content": data['content'],
                "tags": data['tags'],
                "mentions": data['mentions'],
                "concepts": data['concepts'],
                "relations": data['relations'],
                "frames": data['frames'],
                "iptc": data['iptc'],
            }
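

# Minimal usage sketch, not part of the loading script itself: one way to load and inspect
# the dataset through this script. The file name "dwie.py" is an assumption (use whatever
# this script is saved as), and depending on the installed `datasets` version,
# `trust_remote_code=True` may also be required.
if __name__ == "__main__":
    dwie = datasets.load_dataset("dwie.py", name="Task_1", split="train")
    first = dwie[0]
    print(first["id"], "-", len(first["mentions"]), "mentions,", len(first["relations"]), "relations")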