from __future__ import absolute_import, division, print_function

import json

import datasets

_BASE_URL = "https://huggingface.co/datasets/EMBO/sd-character-level-ner/resolve/main/"


class SourceDataNLP(datasets.GeneratorBasedBuilder):
    """SourceDataNLP provides datasets to train NLP tasks in cell and molecular biology."""

    _NER_LABEL_NAMES = [
        "O",
        "B-SMALL_MOLECULE",
        "I-SMALL_MOLECULE",
        "B-GENEPROD",
        "I-GENEPROD",
        "B-SUBCELLULAR",
        "I-SUBCELLULAR",
        "B-CELL",
        "I-CELL",
        "B-TISSUE",
        "I-TISSUE",
        "B-ORGANISM",
        "I-ORGANISM",
        "B-EXP_ASSAY",
        "I-EXP_ASSAY",
    ]
    _PANEL_START_NAMES = ["O", "B-PANEL_START", "I-PANEL_START"]

    _CITATION = """\
    @Unpublished{
        huggingface: dataset,
        title = {SourceData NLP},
        authors={Thomas Lemberger & Jorge Abreu-Vicente, EMBO},
        year={2021}
    }
    """

    _DESCRIPTION = """\
    This dataset is based on the SourceData database and is intended to facilitate training of NLP tasks in the cell and molecular biology domain.
    """

    _HOMEPAGE = "https://huggingface.co/datasets/EMBO/sd-character-level-ner"

    _LICENSE = "CC-BY 4.0"

    VERSION = datasets.Version("1.0.0")

    _URLS = {
        "NER": f"{_BASE_URL}sd_character_panels.zip",
        "PANELIZATION": f"{_BASE_URL}sd_character_panelization.zip",
    }
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="NER", version=VERSION, description="Dataset for entity recognition"),
        datasets.BuilderConfig(
            name="PANELIZATION",
            version=VERSION,
            description="Dataset for figure legend segmentation into panel-specific legends.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "NER"

    def _info(self):
        if self.config.name == "NER":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._NER_LABEL_NAMES),
                            names=self._NER_LABEL_NAMES,
                        )
                    ),
                }
            )
        elif self.config.name == "PANELIZATION":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._PANEL_START_NAMES),
                            names=self._PANEL_START_NAMES,
                        )
                    ),
                }
            )

        return datasets.DatasetInfo(
            description=self._DESCRIPTION,
            features=features,
            supervised_keys=("text", "labels"),
            homepage=self._HOMEPAGE,
            license=self._LICENSE,
            citation=self._CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators.
        Downloads and extracts the archive for the selected configuration from its official URL."""

        if self.config.name == "NER":
            url = self._URLS["NER"]
            data_dir = dl_manager.download_and_extract(url)
            data_dir += "/sd_character_panels"
        elif self.config.name == "PANELIZATION":
            url = self._URLS[self.config.name]
            data_dir = dl_manager.download_and_extract(url)
            data_dir += "/sd_character_panelization"
        else:
            raise ValueError(f"unknown config name: {self.config.name}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir + "/train.jsonl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir + "/test.jsonl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir + "/eval.jsonl"},
            ),
        ]
|
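    # Note (illustrative assumption, inferred from the field accesses in _generate_examples below):
    # each line of train.jsonl / test.jsonl / eval.jsonl is expected to be a JSON object roughly like
    #   {"text": "Figure 1 A. GFP-tagged ...",
    #    "label_ids": {"entity_types": ["O", "O", "B-GENEPROD", ...],
    #                  "panel_start": ["B-PANEL_START", "O", "O", ...]}}
    # with one tag per character of "text" (character-level tagging, per the dataset name). The string
    # tags yielded below are encoded to integer ids by the ClassLabel features declared in _info().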
    def _generate_examples(self, filepath):
        """Yields examples. This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
        It is in charge of opening the given file and yielding (key, example) tuples from the dataset.
        The key is not important, it's here for legacy reasons (legacy from tfds)."""

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "NER":
                    labels = data["label_ids"]["entity_types"]
                    # Flags non-"O" (entity) positions; computed here but not part of the yielded features.
                    tag_mask = [0 if tag == "O" else 1 for tag in labels]
                    yield id_, {
                        "text": data["text"],
                        "labels": labels,
                    }
                elif self.config.name == "PANELIZATION":
                    labels = data["label_ids"]["panel_start"]
                    # Flags panel-start positions; computed here but not part of the yielded features.
                    tag_mask = [1 if t == "B-PANEL_START" else 0 for t in labels]
                    yield id_, {
                        "text": data["text"],
                        "labels": labels,
                    }
|
|
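# Usage sketch (illustrative, not part of the loader itself): one way this builder could be
# exercised locally with `datasets.load_dataset`. The file name "sd-character-level-ner.py" and
# the choice of the "NER" config are assumptions; depending on the installed `datasets` version,
# loading a local script may additionally require `trust_remote_code=True`.
if __name__ == "__main__":
    ds = datasets.load_dataset("./sd-character-level-ner.py", name="NER")
    print(ds["train"][0]["text"][:100])   # raw figure-legend text
    print(ds["train"][0]["labels"][:20])  # integer ClassLabel ids for the first 20 characters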