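"""Evaluate chat-based OpenAI models on the RelEntLess graded relation ranking
task: for each relation type, the model is asked to sort candidate entity
pairs by how well they instantiate the relation, and the predicted order is
scored against the gold ranks with Spearman's correlation. Results are
printed as a LaTeX table. Requires the legacy openai-python SDK (< 1.0) and
the OPENAI_API_KEY environment variable."""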
import ast
import os
from time import sleep

import pandas as pd
import openai
from datasets import load_dataset

data = load_dataset("cardiffnlp/relentless", split="test")
openai.api_key = os.getenv("OPENAI_API_KEY", None)

# Short labels for the rows/columns of the final LaTeX table.
pretty_name = {"competitor/rival of": "Rival", "friend/ally of": "Ally", "influenced by": "Inf", "known for": "Know", "similar to": "Sim"}
pretty_model = {"gpt-3.5-turbo": "GPT-3.5", "gpt-4": "GPT-4"}


def get_reply(model, text):
    """Send a single user message to the chat model and return the reply text,
    retrying every 10 seconds on any API error (rate limit, timeout, etc.)."""
    while True:
        try:
            reply = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": text}])
            break
        except Exception as e:
            print(f'API error ({e}). Waiting for 10 seconds before retrying.')
            sleep(10)
    return reply['choices'][0]['message']['content']
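
# Note: openai.ChatCompletion is the legacy (< 1.0) openai-python interface.
# On the v1 SDK the equivalent would be, roughly:
#     client = openai.OpenAI()
#     reply = client.chat.completions.create(model=model, messages=[...])
#     text = reply.choices[0].message.content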


# Human-readable description of each relation type, spliced into the prompt.
prompt_dict = {
    "friend/ally of": "entities that are friends or allies",
    "competitor/rival of": "entities that are competitors or rivals",
    "known for": "what entities are known for",
    "influenced by": "what has influenced different entities",
    "similar to": "entities that are similar"
}


def get_prompt(_data):
    """Build the ranking prompt: a reference list of prototypical pairs for the
    relation, followed by the candidate pairs the model should sort."""
    ref = "\n".join([str(_i) for _i in _data["prototypical_examples"]])
    prefix = f'Consider the following reference list of {prompt_dict[_data["relation_type"]]}, \n{ref}\n' \
             f'Now sort the entity pairs from the following list based on the extent to which they also represent ' \
             f'{prompt_dict[_data["relation_type"]]} in descending order. Do not include the pairs from the reference list. ' \
             f'The output should contain all the entity pairs from the following list and no duplicates:\n'
    x = "\n".join([str(_i) for _i in _data["pairs"]])
    return f'{prefix}\n\n{x}'
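
# Example of the resulting prompt shape (hypothetical pairs, for illustration
# only; the real reference and candidate pairs come from the dataset):
#     Consider the following reference list of entities that are similar,
#     ['dog', 'wolf']
#     Now sort the entity pairs from the following list ... :
#
#     ['cat', 'lion']
#     ['car', 'boat']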


if __name__ == '__main__':
    os.makedirs('results/chat', exist_ok=True)

    full_result = []  # Spearman correlation per (model, relation type)
    valid_count = []  # % of candidate pairs returned in a parseable form
    for target_model in ['gpt-3.5-turbo', 'gpt-4']:

        for d in data:
            # Query the model once per relation type and cache the raw reply.
            output_file = f"results/chat/{target_model}.{d['relation_type'].replace(' ', '_').replace('/', '-')}.json"
            if not os.path.exists(output_file):
                print(target_model, d['relation_type'])
                i = get_prompt(d)
                out = get_reply(target_model, i)
                with open(output_file, 'w') as f:
                    f.write(out)

            string_pairs = [str(_i) for _i in d["pairs"]]
            with open(output_file) as f:
                out = [i for i in f.read().split("\n") if len(i) > 0]

            # Normalise the free-text reply: keep the first "[...]" span on
            # each line (e.g. "3. ['A', 'B']," -> "['A', 'B']"), parse it as a
            # Python literal, and drop duplicates and unparseable lines.
            new_out = []
            for i in out:
                try:
                    i = "[" + i.replace("],", "]").split("[")[1]
                    i = i.split("]")[0] + "]"
                    i = str(ast.literal_eval(i))  # safe alternative to eval()
                    if i not in new_out:
                        new_out.append(i)
                except Exception:
                    continue

            # Candidate pairs missing from the parsed model output.
            ex = [i for i in string_pairs if i not in new_out]
            valid_n = len(d['pairs']) - len(ex)

            valid_count.append({"model": target_model, "relation_type": d['relation_type'], "valid": 100 * valid_n / len(d['pairs'])})
            # Unreturned pairs are appended at the end, i.e. given the worst ranks.
            new_out = new_out + ex
            maps = {x: n + 1 for n, x in enumerate(new_out)}
            prediction = [maps[i] for i in string_pairs]

            # Spearman correlation between gold ranks and the predicted order.
            true_rank = d['ranks']
            tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
            cor = tmp.corr("spearman").values[0, 1]
            full_result.append({"model": target_model, "relation_type": d['relation_type'], "correlation": cor})

    # Pivot to one row per model and one column per relation type.
    df = pd.DataFrame(full_result)
    df = df.pivot(columns="relation_type", index="model", values="correlation")
    df['Avg'] = df.mean(1)
    df = (df * 100).round(1)

    df_cnt = pd.DataFrame(valid_count)
    df_cnt = df_cnt.pivot(index='model', columns='relation_type', values='valid')
    df_cnt['Avg'] = df_cnt.mean(1)
    df_cnt = df_cnt.round(1)

    # Render each cell as "<Spearman rho x 100> (<parseable pairs %>)".
    df = pd.DataFrame(df.astype(str).values + " (" + df_cnt.astype(str).values + "%)", columns=[pretty_name[c] if c in pretty_name else c for c in df.columns], index=df.index)
    df.index = [pretty_model[m] for m in df.index]
    df = df.T
    print(df.to_latex())
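
# Note: replies are cached under results/chat/, so re-running the script
# regenerates the table without new API calls once all replies are saved.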