TexPrax / TexPrax.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TexPrax: Data collected during the project https://texprax.de/ """
import csv
import os
import ast
import datasets
_CITATION = """\
@inproceedings{stangier-etal-2022-texprax,
title = "{T}ex{P}rax: A Messaging Application for Ethical, Real-time Data Collection and Annotation",
author = {Stangier, Lorenz and
Lee, Ji-Ung and
Wang, Yuxi and
M{\"u}ller, Marvin and
Frick, Nicholas and
Metternich, Joachim and
Gurevych, Iryna},
booktitle = "Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: System Demonstrations",
month = nov,
year = "2022",
address = "Taipei, Taiwan",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.aacl-demo.2",
pages = "9--16",
}
"""
_DESCRIPTION = """\
This dataset was collected in the [TexPrax](https://texprax.de/) project and contains named entities annotated by three researchers as well as annotated sentences (problem/P, cause/C, solution/S, and other/O).
"""
_HOMEPAGE = "https://texprax.de/"
_LICENSE = "Creative Commons Attribution-NonCommercial 4.0"
# Download URLs for the data archives hosted on TUdatalib.
_SENTENCE_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-sentences.zip?sequence=8&isAllowed=y"
_ENTITY_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-ner.zip?sequence=9&isAllowed=y"
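# The archives are expected to unpack to the CSV files consumed in
# _split_generators below: sents_train.csv / sents_test.csv for the sentence
# configuration and entities_train.csv / entities_test.csv for the NER one.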
class TexPraxConfig(datasets.BuilderConfig):
    """BuilderConfig for TexPrax."""
    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        super(TexPraxConfig, self).__init__(**kwargs)
        # Keep the configuration-specific values instead of silently discarding them.
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url
        self.label_classes = label_classes
class TexPraxDataset(datasets.GeneratorBasedBuilder):
"""German dialgues that ocurred between workers in a factory. This dataset contains token level entity annotation as well as sentence level problem, cause, solution annotations."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="sentence_cl", version=VERSION, description="Sentence level annotations of the TexPrax dataset."),
datasets.BuilderConfig(name="ner", version=VERSION, description="BIO-tagged named entites of the TexPrax dataset."),
]
DEFAULT_CONFIG_NAME = "sentence_cl" # It's not mandatory to have a default configuration. Just use one if it make sense.
def _info(self):
if self.config.name == "sentence_cl": # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
                    # Note: ID consists of <dialog-id>_<turn-id>_<sentence-id> (see _generate_examples)
"id": datasets.Value("string"),
"sentence": datasets.Value("string"),
"label": datasets.features.ClassLabel(
names=[
"P",
"C",
"S",
"O",
]),
"subsplit": datasets.Value("string"),
}
)
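            # A yielded example then looks roughly like this (all values are made
            # up for illustration):
            #   {"id": "1_2_0", "sentence": "Die Maschine steht still.",
            #    "label": "P", "subsplit": "batch1"}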
        else:  # "ner" configuration: token-level features with BIO entity tags
features = datasets.Features(
{
# Note: ID consists of <dialog-id_turn-id>
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"entities": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"B-LOC",
"I-LOC",
"B-ED",
"B-ACT",
"I-ACT",
"B-PRE",
"I-PRE",
"B-AKT",
"I-AKT",
"B-PER",
"I-PER",
"B-A",
"B-G",
"B-I",
"I-I",
"B-OT",
"I-OT",
"B-M",
"I-M",
"B-P",
"I-P",
"B-PR",
"I-PR",
"B-PE",
"I-PE",
"O",
]
)
),
"subsplit": datasets.Value("string"),
}
)
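            # A yielded example here looks roughly like this (tokens and tags are
            # made up for illustration):
            #   {"id": "1_2", "tokens": ["Die", "Maschine", "steht"],
            #    "entities": ["O", "B-M", "O"], "subsplit": "batch1"}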
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
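        # Note that ClassLabel features store labels as integer ids; the string
        # names can be recovered after loading, e.g. for the "sentence_cl"
        # configuration (illustrative):
        #   ds["train"].features["label"].int2str(0)  # -> "P"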
def _split_generators(self, dl_manager):
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
if self.config.name == "sentence_cl":
urls = _SENTENCE_URL
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "sents_train.csv"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "sents_test.csv"),
"split": "test"
},
),
]
else:
urls = _ENTITY_URL
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "entities_train.csv"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "entities_test.csv"),
"split": "test"
},
)
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
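        # Expected CSV layout (semicolon-separated, one header row), inferred from
        # the unpacking below; the actual header names in the files may differ:
        #   sentence files: dialog_id;turn_id;sentence_id;sentence;label;domain;batch
        #   entity files:   id;tokens;labels;subsplit (tokens/labels are list
        #                   literals parsed with ast.literal_eval)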
with open(filepath, encoding="utf-8") as f:
creader = csv.reader(f, delimiter=';', quotechar='"')
next(creader) # skip header
for key, row in enumerate(creader):
if self.config.name == "sentence_cl":
dialog_id, turn_id, sentence_id, sentence, label, domain, batch = row
idx = f"{dialog_id}_{turn_id}_{sentence_id}"
yield key, {
"id": idx,
"sentence": sentence,
"label": label,
"subsplit": batch,
#"domain": domain,
}
else:
                    idx, sentence, labels, subsplit = row
# Yields examples as (key, example) tuples
yield key, {
"id": idx,
"tokens": [t.strip() for t in ast.literal_eval(sentence)],
"entities": [l.strip() for l in ast.literal_eval(labels)],
"subsplit": split,
}
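if __name__ == "__main__":
    # Rough smoke test (not part of the loader itself): build both configurations
    # from this script. This assumes a `datasets` release that still supports
    # loading local, script-based datasets by path; newer versions may require
    # trust_remote_code=True.
    for config_name in ("sentence_cl", "ner"):
        ds = datasets.load_dataset(__file__, config_name)
        print(config_name, ds)
        print(ds["train"][0])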