|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import csv |
|
import os |
|
import textwrap |
|
import numpy as np |
|
import datasets |
|
import pandas as pd |
|
|
|
|
|
# BibTeX-style citation for the dataset (anonymized for blind review).
_CITATION = """\

Anonymous submission

"""


# Dataset-card description (anonymized for blind review).
_DESCRIPTION = """\

Anonymous submission

"""


# Base S3 URL; each builder config fetches '{URL}{data_dir}.csv' from here.
URL = 'https://sileod.s3.eu-west-3.amazonaws.com/wikimedqa/'
|
|
|
|
|
class WikiMedQAConfig(datasets.BuilderConfig):
    """BuilderConfig for WikiMedQA.

    Args:
        data_dir: Name of the subset; doubles as the remote CSV file stem
            (``'{URL}{data_dir}.csv'``).
        label_classes: Optional iterable of label names. Defaults to the
            eight answer-option indices ``'0'`` .. ``'7'``.
        process_label: Callable applied to raw label values (identity by
            default).
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (e.g. ``name``).
    """

    def __init__(
        self,
        data_dir,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super(WikiMedQAConfig, self).__init__(version=datasets.Version("1.0.5", ""), **kwargs)
        # One question text column plus eight answer options.
        self.text_features = {k: k for k in ['text'] + [f'option_{i}' for i in range(8)]}
        self.label_column = 'label'
        # Honor an explicit label_classes argument; it was previously accepted
        # but silently overwritten with the default.
        self.label_classes = list(label_classes) if label_classes is not None else list('01234567')
        self.data_url = URL
        self.url = URL
        self.data_dir = data_dir
        self.citation = _CITATION
        self.process_label = process_label
|
|
|
|
|
class WikiMedQA(datasets.GeneratorBasedBuilder):
    """Multiple-choice medical QA built from wiki sources (8 options per item)."""

    BUILDER_CONFIGS = [
        WikiMedQAConfig(name="medwiki", data_dir="medwiki"),
        WikiMedQAConfig(name="wikem", data_dir="wikem"),
        WikiMedQAConfig(name="wikidoc", data_dir="wikidoc"),
    ]

    def _info(self):
        """Build the DatasetInfo: string features for the question text and
        the eight options, a ClassLabel for the answer, and an int32 idx."""
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features}
        features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single per-config CSV and declare the three splits.

        All splits come from the same remote file (the split is carved out
        later in ``_generate_examples``), so download it once instead of
        fetching the identical URL three times as before.
        """
        data_file = dl_manager.download(f'{URL}{self.config.data_dir}.csv')
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": data_file, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": data_file, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": data_file, "split": "test"},
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield ``(idx, example)`` pairs for one split.

        The CSV holds the whole dataset; it is shuffled deterministically
        (random_state=42) and partitioned 90/5/5 into train/dev/test, so the
        same rows always land in the same split.

        Args:
            data_file: Local path of the downloaded CSV.
            split: One of ``'train'``, ``'dev'``, ``'test'``.
        """
        import ast  # local import: only needed for parsing the options column

        df = pd.read_csv(data_file)
        df = df[['text', 'options', 'label']]
        train, dev, test = np.split(df.sample(frac=1, random_state=42),
                                    [int(.9 * len(df)), int(.95 * len(df))])
        # Explicit lookup instead of eval(split); raises KeyError on an
        # unexpected split name rather than evaluating arbitrary text.
        # .copy() so the column assignments below don't write into a view
        # of the shuffled frame (SettingWithCopyWarning).
        df = {'train': train, 'dev': dev, 'test': test}[split].copy()
        # 'options' is a stringified Python list of 8 answers; parse it with
        # literal_eval rather than eval (data comes from a remote CSV).
        df['options'] = df['options'].map(ast.literal_eval)
        for i in range(8):
            df[f'option_{i}'] = df.options.map(lambda opts, i=i: opts[i])
        del df['options']
        df['idx'] = df.index
        for idx, example in df.iterrows():
            yield idx, dict(example)
|
|