# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
import re
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@Article{SETH2016,
Title= {SETH detects and normalizes genetic variants in text.},
Author= {Thomas, Philippe and Rockt{\"{a}}schel, Tim and Hakenberg, J{\"{o}}rg and Lichtblau, Yvonne and Leser, Ulf},
Journal= {Bioinformatics},
Year= {2016},
Month= {Jun},
Doi= {10.1093/bioinformatics/btw234},
Language = {eng},
Medline-pst = {aheadofprint},
Pmid = {27256315},
Url = {http://dx.doi.org/10.1093/bioinformatics/btw234}
}
"""
_DESCRIPTION = """\
This dataset is used for the Advanced Machine Learning and XAI course at DHBW CAS Heilbronn.
"""
class SethConfig(datasets.BuilderConfig):
"""BuilderConfig for Seth Dataset"""
def __init__(self, **kwargs):
"""BuilderConfig for Seth.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)
class Seth(datasets.GeneratorBasedBuilder):
"""Seth dataset."""
BUILDER_CONFIGS = [
SethConfig(name="Seth", version=datasets.Version("1.0.0"), description="Seth dataset"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"tokens": datasets.Sequence(datasets.Value("string")),
"labels": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-Gene",
"B-SNP",
"I-SNP",
"I-Gene",
"B-RS",
"I-RS"
]
)
)
}
),
supervised_keys=None,
homepage="https://rockt.github.io/SETH/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
#downloaded_file = dl_manager.download_and_extract(_URL)
data_files = {
"train": "./SETH-train.iob",
"test": "./SETH-test.iob",
}
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
]
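    # Shape of one example as yielded by _generate_examples below (values are
    # invented for illustration):
    #   (0, {"id": 786, "tokens": ["BRCA1", "mutation"], "labels": ["B-Gene", "O"]})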
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
document = {"id":None,
"tokens":[],
"labels":[]
}
documents = [] # Wird befüllt mit den Documented aus der Datei. Besteht aus einem Key "tokens" und "labels"
pattern = r"#\d+" # Reg Experassion um eine Documented ID zu detektieren
            for idx, line in enumerate(f):
                match = re.match(pattern, line)
                # Skip the first line, which is a header
                if idx == 0:
                    continue
                # A match means this line carries a document ID
                if match:
                    if document["id"] is not None:
                        # Save the previous document before starting a new one
                        documents.append(document)
                        yield guid, document
                        guid += 1
                    # Start a new document; store only the number, without the leading '#'
                    document = {"id": int(line[1:]),
                                "tokens": [],
                                "labels": []
                                }
                # Handle the special cases
                elif len(line.split(",")) > 2:
                    # Special case 1: ",,Label" -- the token itself is a comma
                    if line.split(",")[0] == "" and line.split(",")[1] == "":
                        document["tokens"].append(",")
                        document["labels"].append(line.split(",")[2].split("\n")[0])
                    # Special case 2: "Text,Text,Text,Label" -- the label is always the last field
                    else:
                        document["tokens"].append(",".join(line.split(",")[0:-1]))  # Rejoin the splits without the label
                        document["labels"].append(line.split(",")[-1].split("\n")[0])
                # Otherwise assume the standard format: one "word,tag" pair
                else:
                    word_tag = line.split(",")
                    # A " , " line marks the end of a sentence; blank or malformed
                    # lines are skipped defensively as well
                    if len(word_tag) < 2 or (word_tag[0] == " " and word_tag[1] == " \n"):
                        continue
                    document["tokens"].append(word_tag[0])
                    document["labels"].append(word_tag[1].split("\n")[0])
            # Emit the final document once the file is exhausted
            documents.append(document)
            yield guid, document
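

if __name__ == "__main__":
    # Minimal local usage sketch. Assumptions: SETH-train.iob and SETH-test.iob
    # sit next to this script, and the installed `datasets` version still
    # supports script-based loading (recent releases additionally require
    # trust_remote_code=True).
    from datasets import load_dataset

    seth = load_dataset(__file__, trust_remote_code=True)
    print(seth["train"][0])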