relentless/experiments/analysis/get_qualitative.py
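# Qualitative analysis: compare perplexity-based rankings from each baseline LM
# against the gold ranks in cardiffnlp/relentless_full, collect entity pairs on
# which model and gold strongly disagree, and write CSV and LaTeX-ready tables.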
import os
import json
import pandas as pd
from datasets import load_dataset
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
os.makedirs("experiments/analysis/qualitative", exist_ok=True)
# baseline LMs and their LaTeX display names
target = {
    "flan-t5-xxl": r"Flan-T5\textsubscript{XXL}",
    "opt-13b": r"OPT\textsubscript{13B}",
    "davinci": r"GPT-3\textsubscript{davinci}"
}
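# short display labels for each relation type, used in the printed tables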
pretty_name = {
    'average': "Avg",
    'is competitor/rival of': "Rival",
    'is friend/ally of': "Ally",
    'is influenced by': "Inf",
    'is known for': "Know",
    'is similar to': "Sim"
}
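# percentage cutoff for the top/bottom bands (re-assigned per relation inside the loop below)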
p = 30
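# gold data: entity pairs with human-annotated ranks for each relation type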
data = load_dataset("cardiffnlp/relentless_full", split="test")
for prompt in ['qa', 'lc']:
    output = []
    for d in data:
        for i in target.keys():
            # perplexity of each candidate pair under this model/prompt
            with open(f"experiments/results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
                ppl = [json.loads(x)['perplexity'] for x in f.read().split("\n") if len(x) > 0]
            # rank the pairs by perplexity (rank 1 = lowest perplexity = most plausible)
            rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
            prediction = [rank_map[p] for p in ppl]
            # indices of the top / middle / bottom bands of the gold ranking
            total_n = len(d['ranks'])
            p = int(total_n / 3)  # band width as a percentage (overrides the initial p above)
            top_n = [0, int(total_n * p / 100) + 1]
            top_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            bottom_n = [total_n - int(total_n * p / 100), total_n]
            bottom_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            mid_n = [top_n[1], bottom_n[0]]
            mid_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
            # overlap between the predicted and gold top band
            top_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            top_acc = len(set(top_pred).intersection(set(top_label))) / len(top_label) * 100
            # middle band
            mid_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
            mid_acc = len(set(mid_pred).intersection(set(mid_label))) / len(mid_label) * 100
            # bottom band
            bottom_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            bottom_acc = len(set(bottom_pred).intersection(set(bottom_label))) / len(bottom_label) * 100
            # pairs on which model and gold strongly disagree: predicted in the
            # top band but gold-ranked in the bottom band, and vice versa
            output.append({
                "relation_type": d['relation_type'],
                "model": i,
                "top_pred_and_bottom_gold": [" : ".join(d['pairs'][x]) for x in set(top_pred).intersection(bottom_label)],
                "bottom_pred_and_top_gold": [" : ".join(d['pairs'][x]) for x in set(bottom_pred).intersection(top_label)],
            })
    df = pd.DataFrame(output)
    df.to_csv(f"experiments/analysis/qualitative/{prompt}.{p}.csv", index=False)
    df['relation_type'] = [pretty_name[i] for i in df['relation_type']]
    print(df)
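    # reshape: one disagreement pair per output row, at most four examples per model/relation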
    new_df = []
    for _, row in df.iterrows():
        top_pred_and_bottom_gold = row['top_pred_and_bottom_gold'][:4]
        bottom_pred_and_top_gold = row['bottom_pred_and_top_gold'][:4]
        # pad the shorter list with empty strings so both columns align
        for x in range(max(len(bottom_pred_and_top_gold), len(top_pred_and_bottom_gold))):
            t = top_pred_and_bottom_gold[x] if x < len(top_pred_and_bottom_gold) else ""
            b = bottom_pred_and_top_gold[x] if x < len(bottom_pred_and_top_gold) else ""
            new_df.append({"relation_type": row['relation_type'], "model": row['model'], "top": t, "bottom": b})
    df_new = pd.DataFrame(new_df)
    df_new['model'] = [target[i] for i in df_new['model']]
    df_new = df_new[['model', 'relation_type', 'top', 'bottom']]
    df_new = df_new.sort_values(by=['model', 'relation_type'])
    df_new.to_csv(f"experiments/analysis/qualitative/{prompt}.{p}.format.csv", index=False)
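    # keep only the LaTeX table body, between \midrule and \bottomrule, for copy-pasting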
    with pd.option_context("max_colwidth", 1000):
        table = df_new.to_latex(index=False, escape=False)
        table = table.split(r"\midrule")[1].split(r"\bottomrule")[0]
        print(table)
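
# Usage sketch (assumes the per-model perplexity files produced by earlier
# experiment runs already exist under experiments/results/lm_{qa,lc}/<model>/):
#   python experiments/analysis/get_qualitative.py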