"""TexPrax: Data collected during the project https://texprax.de/ """ |

import ast
import csv
import os

import datasets


_CITATION = """\
@inproceedings{stangier-etal-2022-texprax,
    title = "{T}ex{P}rax: A Messaging Application for Ethical, Real-time Data Collection and Annotation",
    author = {Stangier, Lorenz and
      Lee, Ji-Ung and
      Wang, Yuxi and
      M{\"u}ller, Marvin and
      Frick, Nicholas and
      Metternich, Joachim and
      Gurevych, Iryna},
    booktitle = "Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: System Demonstrations",
    month = nov,
    year = "2022",
    address = "Taipei, Taiwan",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.aacl-demo.2",
    pages = "9--16",
}
"""

_DESCRIPTION = """\
This dataset was collected in the [TexPrax](https://texprax.de/) project and contains named entities annotated by three researchers as well as annotated sentences (problem/P, cause/C, solution/S, and other/O).
"""

_HOMEPAGE = "https://texprax.de/"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0"

_SENTENCE_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-sentences.zip?sequence=8&isAllowed=y"
_ENTITY_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-ner.zip?sequence=9&isAllowed=y"
|

class TexPraxConfig(datasets.BuilderConfig):
    """BuilderConfig for TexPrax."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        super(TexPraxConfig, self).__init__(**kwargs)
        # Store the config-specific attributes instead of silently discarding them.
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url
        self.label_classes = label_classes
|

class TexPraxDataset(datasets.GeneratorBasedBuilder):
    """German dialogues that occurred between workers in a factory.

    This dataset contains token-level entity annotations as well as
    sentence-level problem, cause, and solution annotations.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="sentence_cl", version=VERSION, description="Sentence-level annotations of the TexPrax dataset."),
        datasets.BuilderConfig(name="ner", version=VERSION, description="BIO-tagged named entities of the TexPrax dataset."),
    ]

    DEFAULT_CONFIG_NAME = "sentence_cl"
|
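    # Note (illustrative, not from the original source): `ClassLabel` encodes
    # labels as integer ids in list order, so for ["P", "C", "S", "O"] the
    # string "P" is stored as 0, and `features["label"].int2str(0)`
    # recovers "P".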
    def _info(self):
        if self.config.name == "sentence_cl":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["P", "C", "S", "O"]),
                    "subsplit": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "entities": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-LOC",
                                "I-LOC",
                                "B-ED",
                                "B-ACT",
                                "I-ACT",
                                "B-PRE",
                                "I-PRE",
                                "B-AKT",
                                "I-AKT",
                                "B-PER",
                                "I-PER",
                                "B-A",
                                "B-G",
                                "B-I",
                                "I-I",
                                "B-OT",
                                "I-OT",
                                "B-M",
                                "I-M",
                                "B-P",
                                "I-P",
                                "B-PR",
                                "I-PR",
                                "B-PE",
                                "I-PE",
                                "O",
                            ]
                        )
                    ),
                    "subsplit": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
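    # `dl_manager.download_and_extract` caches the downloaded archive and
    # returns the path of the extracted directory, so repeated loads reuse
    # the local copy.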
    def _split_generators(self, dl_manager):
        # Each config ships as a separate zip archive with its own train/test CSVs.
        if self.config.name == "sentence_cl":
            data_dir = dl_manager.download_and_extract(_SENTENCE_URL)
            train_file, test_file = "sents_train.csv", "sents_test.csv"
        else:
            data_dir = dl_manager.download_and_extract(_ENTITY_URL)
            train_file, test_file = "entities_train.csv", "entities_test.csv"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, train_file),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, test_file),
                    "split": "test",
                },
            ),
        ]
|
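    # Expected CSV layouts, inferred from the column unpacking below (no
    # official schema is stated in this file):
    #   sentence_cl: dialog_id;turn_id;sentence_id;sentence;label;domain;batch
    #   ner:         id;tokens;labels;subsplit  -- tokens/labels are
    #                stringified Python lists, parsed with ast.literal_eval.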
    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            creader = csv.reader(f, delimiter=";", quotechar='"')
            next(creader)  # Skip the header row.
            for key, row in enumerate(creader):
                if self.config.name == "sentence_cl":
                    dialog_id, turn_id, sentence_id, sentence, label, domain, batch = row
                    idx = f"{dialog_id}_{turn_id}_{sentence_id}"
                    yield key, {
                        "id": idx,
                        "sentence": sentence,
                        "label": label,
                        "subsplit": batch,
                    }
                else:
                    # Use `subsplit` so the `split` argument is not shadowed.
                    idx, sentence, labels, subsplit = row
                    yield key, {
                        "id": idx,
                        "tokens": [t.strip() for t in ast.literal_eval(sentence)],
                        "entities": [l.strip() for l in ast.literal_eval(labels)],
                        "subsplit": subsplit,
                    }