relentless/experiments/flan_ul2_additional_analysis.py
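# Compare flan-ul2 zero-shot and 1-shot (seed 0) perplexity predictions against the gold
# scores and ranks of the cardiffnlp/relentless test set, writing one comparison CSV per relation type.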
import os
import json
from glob import glob
import pandas as pd
from datasets import load_dataset
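
# Output directory for the per-relation comparison tables.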
os.makedirs("results/flan_ul2_additional_analysis", exist_ok=True)
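
# Gold test split of cardiffnlp/relentless, indexed by relation type.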
data = load_dataset("cardiffnlp/relentless", split="test")
data = {i['relation_type']: i for i in data}
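
# Zero-shot flan-ul2 perplexities: one JSONL file per relation type, one score per candidate pair.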
pred_zero = {}
for i in glob("results/lm_qa_zeroshot/flan-ul2/*.jsonl"):
    # Recover the relation-type name from the prediction file name.
    r = os.path.basename(i).replace("__", "/").replace("_", " ").replace("ppl.", "").replace("is ", "").replace(".jsonl", "")
    with open(i) as f:
        # Skip empty lines so a trailing newline does not break json.loads.
        pred_zero[r] = [json.loads(l)['perplexity'] for l in f.read().split("\n") if l]
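
# 1-shot (seed 0) perplexities in the same format.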
pred_few = {}
for i in glob("results/lm_qa_1shots_0seed/flan-ul2/*.jsonl"):
    r = os.path.basename(i).replace("__", "/").replace("_", " ").replace("ppl.", "").replace("is ", "").replace(".jsonl", "")
    with open(i) as f:
        pred_few[r] = [json.loads(l)['perplexity'] for l in f.read().split("\n") if l]
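
# Map scores to their ascending rank (0 = smallest score).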
def get_rank(score):
    s2r = {s: n for n, s in enumerate(sorted(score))}
    return [s2r[s] for s in score]
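
# For each relation type, align candidate pairs with model and gold scores/ranks and write
# a CSV named after the first four characters of the relation type.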
for k, v in data.items():
    df = pd.DataFrame({
        "pairs": v['pairs'],
        "score_fewshot": pred_few[k],
        "score_zeroshot": pred_zero[k],
        "score_true": v["scores_mean"],
        "rank_fewshot": get_rank(pred_few[k]),
        "rank_zeroshot": get_rank(pred_zero[k]),
        "rank_true": v["ranks"],
    })
    df.to_csv(f"results/flan_ul2_additional_analysis/{k[:4]}.csv", index=False)