"""Collect qualitative examples where the ranking by model perplexity disagrees
with the human ranking on cardiffnlp/relentless, and print them as LaTeX tables."""
import json

import pandas as pd
from datasets import load_dataset

# Show full DataFrames when printing.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# Validation split: the source of the example pairs reported in the tables.
data_valid = load_dataset("cardiffnlp/relentless", split="validation")
lc_valid = pd.read_csv("results/lm_lc/lm.csv", index_col=0)
qa_valid = pd.read_csv("results/lm_qa/lm.csv", index_col=0)

# Test split: drives the loop below and supplies the score thresholds.
data_test = load_dataset("cardiffnlp/relentless", split="test")
# Aggregate result CSVs, loaded for reference (not used below).
lc = pd.read_csv("results/lm_lc/lm.csv", index_col=0)
qa = pd.read_csv("results/lm_qa/lm.csv", index_col=0)
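
# A minimal sketch of the record layout this script assumes for each dataset
# row; the field names come from the accesses below, while the example values
# are hypothetical, not taken from the actual dataset.
_example_record = {
    "relation_type": "friend/ally of",   # one of the five relation types
    "pairs": [["France", "Germany"]],    # list of entity pairs
    "scores_mean": [0.72],               # mean annotator score per pair
}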

# Pretty model names for the LaTeX tables; raw strings keep "\textsubscript"
# from being read as a tab escape followed by "extsubscript".
target = {
    "flan-t5-xxl": r"Flan-T5\textsubscript{XXL}",
    "flan-ul2": "Flan-UL2",
    "opt-13b": r"OPT\textsubscript{13B}",
    "davinci": r"GPT-3\textsubscript{davinci}"
}
# Short relation labels for the tables.
pretty_name = {
    'competitor/rival of': "Rival",
    'friend/ally of': "Ally",
    'influenced by': "Inf",
    'known for': "Know",
    'similar to': "Sim"
}
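
# The per-relation result files are named after the relation type with spaces
# and slashes made filesystem-safe. A hypothetical helper making that mapping
# explicit (the loop below inlines the same two .replace calls):
def relation_to_filename(relation_type: str) -> str:
    """Return the stem of the ppl.*.jsonl file for a relation type."""
    return relation_type.replace(' ', '_').replace('/', '__')

assert relation_to_filename('friend/ally of') == 'friend__ally_of'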
p = 30  # percentile used for the top/bottom cut-offs
table = []
for prompt in ['qa', 'lc']:
    for i in target.keys():
        # Each model was evaluated with one prompt type only.
        if i in ['flan-t5-xxl', 'flan-ul2'] and prompt == 'lc':
            continue
        if i in ['opt-13b', 'davinci'] and prompt == 'qa':
            continue

        for d in data_test:
            relation_file = d['relation_type'].replace(' ', '_').replace('/', '__')
            # Negated perplexities, so a higher value means the model prefers the pair.
            with open(f"results/lm_{prompt}/{i}/ppl.{relation_file}.jsonl") as f:
                negative_ppl = [json.loads(x)['perplexity'] * -1 for x in f.read().split("\n") if len(x) > 0]

            # Model-side cut-offs: the values p% from either end of the descending ranking.
            ranked_ppl = sorted(negative_ppl, reverse=True)
            top_pred = ranked_ppl[int(len(ranked_ppl) * p / 100)]
            bottom_pred = ranked_ppl[-int(len(ranked_ppl) * p / 100)]
            # Human-side cut-offs from the mean annotator scores.
            scores = sorted(d['scores_mean'], reverse=True)
            top = scores[int(len(scores) * p / 100)]
            bottom = scores[-int(len(scores) * p / 100)]

            # Pairs where model and human rankings disagree: rated high by the model
            # but low by the annotators ("top"), and the reverse ("bottom").
            _d = [x for x in data_valid if x['relation_type'] == d['relation_type']][0]
            scores_val = _d['scores_mean']
            false_top = ", ".join([":".join(_d['pairs'][n]) for n, (s, nppl) in enumerate(zip(scores_val, negative_ppl)) if s <= bottom and nppl >= top_pred])
            false_bottom = ", ".join([":".join(_d['pairs'][n]) for n, (s, nppl) in enumerate(zip(scores_val, negative_ppl)) if s >= top and nppl <= bottom_pred])

            table.append({
                "model": target[i], "relation": pretty_name[d['relation_type']], "top": false_top, "bottom": false_bottom
            })

table = pd.DataFrame(table)
table.to_csv("results/qualitative.csv", index=False)
# Print one LaTeX table per disagreement direction, dropping empty rows;
# escape=False preserves the \textsubscript commands in the model names.
with pd.option_context("display.max_colwidth", 1000):
    for col in ['top', 'bottom']:
        _table = table[['model', 'relation', col]]
        _table = _table[_table[col].str.len() > 0]
        print(_table.to_latex(index=False, escape=False))
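
# For reference, a self-contained sketch of the threshold rule used above:
# with p = 30 the cut-offs sit 30% from either end of the descending ranking,
# so a "false top" scores above the model's 30th-percentile value while the
# annotators placed it in the bottom 30%. The toy values here are made up.
def percentile_cutoffs(values, pct):
    """Return (top, bottom) cut-off values pct% from each end of the ranking."""
    ranked = sorted(values, reverse=True)
    k = int(len(ranked) * pct / 100)
    return ranked[k], ranked[-k]

assert percentile_cutoffs([10, 9, 8, 7, 6, 5, 4, 3, 2, 1], 30) == (7, 3)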