# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import datasets
import pandas as pd

_CITATION = """\
@inproceedings{sileo-moens-2023-probing,
title = "Probing neural language models for understanding of words of estimative probability",
author = "Sileo, Damien and
Moens, Marie-francine",
booktitle = "Proceedings of the 12th Joint Conference on Lexical and Computational Semantics (*SEM 2023)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.starsem-1.41",
doi = "10.18653/v1/2023.starsem-1.41",
pages = "469--476",
}
"""
_DESCRIPTION = """\
Probing neural language models for understanding of words of estimative probability
"""
URL = "https://huggingface.co/datasets/sileod/probability_words_nli/resolve/main/"


class WepProbeConfig(datasets.BuilderConfig):
    """BuilderConfig for WepProbe."""

    def __init__(
        self,
        data_dir,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.0.5", ""), **kwargs)
        self.text_features = {
            k: k for k in [
                "context", "hypothesis", "valid_hypothesis", "invalid_hypothesis",
                "problog", "probability_word", "distractor", "hypothesis_assertion",
            ]
        }
        self.label_column = "label"
        # Fall back to the binary labels when no explicit classes are given;
        # the original assignment silently ignored the label_classes argument.
        self.label_classes = label_classes or ["valid", "invalid"]
        self.data_url = URL
        self.url = URL
        self.data_dir = data_dir
        self.citation = _CITATION
        self.process_label = process_label
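
# Note: the identity text_features mapping (together with label_column and
# process_label) appears to mirror the GLUE-style loader-config pattern,
# where source columns can be renamed before being exposed as features.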

class WepProbe(datasets.GeneratorBasedBuilder):
    """Evaluation of understanding of words of estimative probability."""

    BUILDER_CONFIGS = [
        WepProbeConfig(name="reasoning_1hop", data_dir="reasoning_1hop"),
        WepProbeConfig(name="reasoning_2hop", data_dir="reasoning_2hop"),
        WepProbeConfig(name="usnli", data_dir="usnli"),
    ]
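
    # Each data_dir doubles as a file prefix: _split_generators below fetches
    # {data_dir}_train.csv, {data_dir}_validation.csv, and {data_dir}_test.csv
    # from URL.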
    def _info(self):
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
        if self.config.name == "usnli":
            # usnli examples carry no ProbLog program.
            del features["problog"]
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        else:
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        features["probability"] = datasets.Value("float32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            # self.config.citation is already _CITATION; concatenating the two
            # duplicated the BibTeX entry, so cite it once.
            citation=self.config.citation,
        )
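
    # Resulting schema, as derived from _info above: all three configs label
    # examples with a ClassLabel over {'valid', 'invalid'} and expose a
    # float32 'probability' column alongside the integer 'idx'; the usnli
    # config additionally drops the 'problog' column.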
    def _split_generators(self, dl_manager):
        data_dirs = []
        for split in ["train", "validation", "test"]:
            url = f"{URL}{self.config.data_dir}_{split}.csv"
            data_dirs += [dl_manager.download(url)]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": data_dirs[0],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": data_dirs[1],
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": data_dirs[2],
                    "split": "test",
                },
            ),
        ]
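
    # dl_manager.download returns paths to locally cached copies, so
    # _generate_examples below reads on-disk CSV files, not remote URLs.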
    def _generate_examples(self, data_file, split):
        # Drop bookkeeping columns that may or may not be present in the CSV.
        df = pd.read_csv(data_file).drop(["rnd", "split", "_"], axis=1, errors="ignore")
        df["idx"] = df.index
        for idx, example in df.iterrows():
            yield idx, dict(example)
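
# A minimal local smoke test, not part of the original script (assumption:
# a datasets version that accepts config_name; older releases used name=):
if __name__ == "__main__":
    builder = WepProbe(config_name="reasoning_1hop")
    builder.download_and_prepare()
    dataset = builder.as_dataset(split="train")
    print(dataset[0])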