wikimedqa / wikimedqa.py_
sileod's picture
Rename wikimedqa.py to wikimedqa.py_
a656b4a verified
raw
history blame
4.28 kB
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import ast
import csv
import os
import textwrap

import datasets
import numpy as np
import pandas as pd
_CITATION = """
@article{sileo2023generating,
title={Generating multiple-choice questions for medical question answering with distractors and cue-masking},
author={Sileo, Damien and Uma, Kanimozhi and Moens, Marie-Francine},
journal={arXiv preprint arXiv:2303.07069},
year={2023}
}
"""
_DESCRIPTION = """\
Anonymous submission
"""
URL = 'https://sileod.s3.eu-west-3.amazonaws.com/wikimedqa/'
class WikiMedQAConfig(datasets.BuilderConfig):
    """BuilderConfig for WikiMedQA.

    Args:
        data_dir: Name of the sub-dataset; also the basename of the remote
            CSV file (``{URL}{data_dir}.csv``).
        label_classes: Optional list of label class names. Defaults to the
            eight option indices ``'0'`` .. ``'7'``.
        process_label: Callable applied to raw label values (identity by
            default).
        **kwargs: Forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(
        self,
        data_dir,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.0.5", ""), **kwargs)
        # One question text column plus eight answer options (option_0 .. option_7).
        self.text_features = {k: k for k in ["text"] + [f"option_{i}" for i in range(8)]}
        self.label_column = "label"
        # Bug fix: the original unconditionally overwrote this attribute,
        # silently ignoring the `label_classes` argument.
        self.label_classes = label_classes if label_classes is not None else list("01234567")
        self.data_url = URL
        self.url = URL
        self.data_dir = data_dir
        self.citation = _CITATION
        self.process_label = process_label
class WikiMedQA(datasets.GeneratorBasedBuilder):
    """Multiple-choice medical QA built from wiki resources (8 options per question)."""

    BUILDER_CONFIGS = [
        WikiMedQAConfig(name="medwiki", data_dir="medwiki"),
        WikiMedQAConfig(name="wikem", data_dir="wikem"),
        WikiMedQAConfig(name="wikidoc", data_dir="wikidoc"),
    ]

    def _info(self):
        """Declare the schema: question text, eight string options, label, idx."""
        features = {feat: datasets.Value("string") for feat in self.config.text_features}
        features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            # Bug fix: the citation was previously emitted twice
            # (`config.citation + "\n" + _CITATION`, and config.citation
            # is already _CITATION).
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Download the dataset CSV once and declare the three splits.

        Bug fix: the original looped over the three split names but built
        the exact same URL each iteration, downloading the identical file
        three times (and printing debug output). All splits are carved out
        of the single file in `_generate_examples`.
        """
        data_file = dl_manager.download(f"{URL}{self.config.data_dir}.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": data_file, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": data_file, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": data_file, "split": "test"},
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield ``(idx, example)`` pairs for the requested split.

        The CSV holds the whole dataset; it is shuffled with a fixed seed
        and split 90/5/5 into train/dev/test, so every split is derived
        deterministically from the same file.

        Args:
            data_file: Local path of the downloaded CSV.
            split: One of ``"train"``, ``"dev"``, ``"test"``.
        """
        df = pd.read_csv(data_file)[["text", "options", "label"]]
        shuffled = df.sample(frac=1, random_state=42)
        train, dev, test = np.split(
            shuffled, [int(0.9 * len(shuffled)), int(0.95 * len(shuffled))]
        )
        # Explicit mapping instead of the original `eval(split)` trick of
        # looking up a local variable by name.
        part = {"train": train, "dev": dev, "test": test}[split].copy()
        # `options` is stored as a stringified Python list; parse it with
        # ast.literal_eval rather than eval (no arbitrary code execution).
        part["options"] = part["options"].map(ast.literal_eval)
        for i in range(8):
            # Bind i as a default to avoid late-binding surprises.
            part[f"option_{i}"] = part["options"].map(lambda opts, i=i: opts[i])
        del part["options"]
        part["idx"] = part.index
        for idx, example in part.iterrows():
            yield idx, dict(example)