asahi417 committed
Commit 90d2c06
1 Parent(s): 5005827
experiments/baseline_lm_lc.py CHANGED
@@ -57,7 +57,7 @@ for lm, ppl_class, batch, pretty_name in [
     else:
         scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
 
-    content = "\n".join([f'* ["{a}", "{b}"]' for a, b in d['positive_examples']])
+    content = "\n".join([f'* ["{a}", "{b}"]' for a, b in d['prototypical_examples']])
     prompt_input = f"{prompt_dict[d['relation_type']]}:\n{content}"
     if ppl_class is LM:
         prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
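For context, the renamed field feeds the few-shot prompt built in this hunk. A minimal sketch of the resulting prompt, using a hypothetical record d and verbalizer prompt_dict (neither is taken from the repository; only the field names come from this diff):

# Sketch of the prompt constructed in baseline_lm_lc.py after this change.
# `d` and `prompt_dict` are hypothetical stand-ins for a dataset record and
# the relation verbalizer.
prompt_dict = {"friend/ally": "Complete the list with pairs of friends or allies"}
d = {
    "relation_type": "friend/ally",
    "prototypical_examples": [["UK", "USA"], ["France", "Germany"]],
    "pairs": [["Russia", "China"]],
}
content = "\n".join([f'* ["{a}", "{b}"]' for a, b in d['prototypical_examples']])
prompt_input = f"{prompt_dict[d['relation_type']]}:\n{content}"
prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
print(prompt_input[0])
# Complete the list with pairs of friends or allies:
# * ["UK", "USA"]
# * ["France", "Germany"]
# * ["Russia", "China"]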
experiments/baseline_lm_qa.py CHANGED
@@ -57,7 +57,7 @@ for lm, ppl_class, batch, pretty_name in [
     else:
         scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
 
-    proto = ",".join([f'["{a}", "{b}"]' for a, b in d['positive_examples']])
+    proto = ",".join([f'["{a}", "{b}"]' for a, b in d['prototypical_examples']])
     prefix = f"Answer the question by yes or no. We know that {proto} are examples of {prompt_dict[d['relation_type']]}."
     if ppl_class is LM or ppl_class is OpenAI:
         prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?\n yes' for x, y in d['pairs']]
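The QA baseline turns the same field into a yes/no question. A sketch of the prompt after this change, with the same hypothetical d and a hypothetical prompt_dict:

# Sketch of the yes/no prompt built in baseline_lm_qa.py after this change;
# `d` and `prompt_dict` are hypothetical stand-ins as above.
prompt_dict = {"friend/ally": "friends or allies"}
d = {
    "relation_type": "friend/ally",
    "prototypical_examples": [["UK", "USA"], ["France", "Germany"]],
    "pairs": [["Russia", "China"]],
}
proto = ",".join([f'["{a}", "{b}"]' for a, b in d['prototypical_examples']])
prefix = f"Answer the question by yes or no. We know that {proto} are examples of {prompt_dict[d['relation_type']]}."
prompt_input = [f'{prefix} Are ["{x}", "{y}"] {prompt_dict[d["relation_type"]]} as well?\n yes' for x, y in d['pairs']]
print(prompt_input[0])
# Answer the question by yes or no. We know that ["UK", "USA"],["France", "Germany"]
# are examples of friends or allies. Are ["Russia", "China"] friends or allies as well?
#  yes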
experiments/baseline_relbert.py CHANGED
@@ -13,7 +13,7 @@ def cosine_similarity(a, b):
 
 
 # load dataset
-data = load_dataset("cardiffnlp/relentless_full", split="test")
+data = load_dataset("cardiffnlp/relentless", split="test")
 full_result = []
 
 for lm in ['base', 'large']:
@@ -57,8 +57,6 @@ for lm in ['base', 'large']:
     cor_min = tmp.corr("spearman").values[0, 2]
     cor_mean = tmp.corr("spearman").values[0, 3]
     full_result.append({"model": f"RelBERT\textsubscript{'{'}{lm.upper()}{'}'}", "relation_type": d['relation_type'], "correlation": cor_max})
-    # full_result.append({"model": f"relbert-roberta-{lm} (min)", "relation_type": d['relation_type'], "correlation": cor_min})
-    # full_result.append({"model": f"relbert-roberta-{lm} (mean)", "relation_type": d['relation_type'], "correlation": cor_mean})
 
 df = pd.DataFrame(full_result)
 df = df.pivot(columns="relation_type", index="model", values="correlation")
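Since this file only swaps the dataset ID, a quick sanity check that the renamed hub repo and field line up. This sketch assumes the `datasets` library and that cardiffnlp/relentless loads without an explicit config name; the three field names are the ones visible in this commit:

# Sanity-check sketch, not part of the commit. Assumes `pip install datasets`
# and that the dataset loads with its default configuration.
from datasets import load_dataset

data = load_dataset("cardiffnlp/relentless", split="test")
record = data[0]
print(record["relation_type"])
print(record["prototypical_examples"][:2])  # renamed from 'positive_examples'
print(record["pairs"][:2])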