# keyword_pubmed/keyword_pubmed.py
# NOTE(review): the lines below were Hugging Face Hub page chrome accidentally
# pasted into the file (author "enoriega", commit 3a4dbfa, "raw / history /
# blame", 5.97 kB); they are commented out here so the module parses.
""" Loading script for the Keyword PubMed dataset."""
import os
from pathlib import Path
import re
import datasets
class KeywordPubmedDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Keyword PubMed dataset.

    Two configurations are offered:

    * ``"sentence"``: yields only the sentences that contain at least one
      keyword.
    * ``"document"``: yields every sentence of each document (the dataset is
      assumed to contain documents with at least one keyword; sentences
      without a keyword are yielded with ``keyword_rank == -1``).

    Both configurations share the same schema: the sentence text, the PMC id
    of the source document, the rank of the highest-ranked (lowest number)
    keyword found, and the word indices of every keyword occurrence.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="sentence", version=VERSION, description="Comprises sentences that contain a keyword"),
        datasets.BuilderConfig(name="document", version=VERSION, description="Contains all the sentences in a document that contains at least a keyword"),
    ]

    DEFAULT_CONFIG_NAME = "document"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    # Collapses any run of whitespace into a single space. Compiled once at
    # class-definition time (raw string: "\s" is an invalid escape in a plain
    # string literal as of Python 3.12) and reused for every sentence.
    _WHITESPACE_RE = re.compile(r"\s+")

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing the schema.

        The two configurations produce records with identical columns, so a
        single ``Features`` definition serves both (the original duplicated
        it verbatim in each branch).
        """
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "pmcid": datasets.Value("string"),
                "keyword_rank": datasets.Value("int32"),
                "keyword_indices": datasets.Sequence(datasets.Value("int32")),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="Dataset for MLM comprising sentences that contain a keyword relevant to the domain",
            # This defines the different columns of the dataset and their types.
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            # homepage=_HOMEPAGE,
            # License for the dataset if available
            # license=_LICENSE,
            # Citation for the dataset
            # citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/locate the data and define the train/validation splits.

        Reads ``keywords.txt`` (one ``keyword:...`` entry per line; the line
        number is the keyword's rank, lower = more relevant) and passes the
        keyword set and rank map to :meth:`_generate_examples`.
        """
        if self.config.data_dir:
            data_dir = self.config.data_dir
        else:
            data_dir = dl_manager.download_and_extract('data_files.tar.gz')

        # Load the keywords from the file. Keys are lowercased; if a keyword
        # appears twice, the later (higher) rank silently wins.
        with open(os.path.join(data_dir, 'keywords.txt'), 'r') as f:
            keyword_ranks = {line.strip().split(":")[0].lower(): rank for rank, line in enumerate(f)}

        keywords = set(keyword_ranks.keys())

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "dirpath": os.path.join(data_dir, "train"),
                    "keywords": keywords,
                    "ranks": keyword_ranks,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "dirpath": os.path.join(data_dir, "dev"),
                    "keywords": keywords,
                    "ranks": keyword_ranks,
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, dirpath, keywords, ranks):
        """Yield ``(key, example)`` pairs from the ``*.txt`` files in *dirpath*.

        Each file is named ``<pmcid>.*`` and holds one sentence per line.
        In the ``"sentence"`` configuration only keyword-bearing sentences
        are yielded; in the ``"document"`` configuration every non-blank
        sentence is yielded.
        """
        item_ix = 0
        for filepath in Path(dirpath).iterdir():
            filepath = Path(filepath)
            if filepath.suffix == ".txt":
                pmcid = filepath.name.split(".")[0]
                with filepath.open(encoding="utf-8") as f:
                    for sentence in f:
                        sentence = sentence.strip()
                        if sentence:  # Ignore blanks
                            # Normalize internal whitespace to single spaces.
                            sentence = self._WHITESPACE_RE.sub(" ", sentence)
                            kw_indices, rank = self._keyword_indices(sentence, keywords, ranks)
                            has_keyword = rank > -1
                            if self.config.name == "sentence":
                                # Yields examples as (key, example) tuples
                                if has_keyword:
                                    yield item_ix, {
                                        "sentence": sentence,
                                        "keyword_rank": rank,
                                        "pmcid": pmcid,
                                        "keyword_indices": kw_indices,
                                    }
                                    item_ix += 1
                            else:  # Else document
                                yield item_ix, {
                                    "sentence": sentence,
                                    "keyword_rank": rank,
                                    "pmcid": pmcid,
                                    "keyword_indices": kw_indices,
                                }
                                item_ix += 1

    def _keyword_indices(self, sentence, keywords, ranks):
        """Locate every keyword occurrence in *sentence*.

        Returns ``(indices, top_rank)`` where *indices* are the positions
        (0-based, whitespace-split) of all keyword words, and *top_rank* is
        the smallest (best) rank among them, or ``-1`` if none was found.
        """
        # Lowercase and split the sentence for case-insensitive matching.
        words = sentence.lower().split()
        indices = list()
        top_rank = -1
        # Scan every word, collecting all keyword positions (not just the first).
        for w_ix, word in enumerate(words):
            if word in keywords:
                indices.append(w_ix)
                rank = ranks[word]
                if rank < top_rank or top_rank == -1:
                    top_rank = rank
        return indices, top_rank
if __name__ == "__main__":
    # Manual smoke-test: build the dataset locally when run as a script.
    builder = KeywordPubmedDataset()
    builder.download_and_prepare()