init
experiments/baseline_gpt4.py
CHANGED

@@ -4,9 +4,9 @@ import pandas as pd
 import openai
 from datasets import load_dataset

-data = load_dataset("cardiffnlp/…
+data = load_dataset("cardiffnlp/relentless", split="test")
 openai.api_key = os.getenv("OPENAI_API_KEY", None)
-pretty_name = {"…
+pretty_name = {"competitor/rival of": "Rival", "friend/ally of": "Ally", "influenced by": "Inf", "known for": "Know", "similar to": "Sim"}
 pretty_model = {"gpt-3.5-turbo": "GPT-3.5", "gpt-4": "GPT-4"}


@@ -22,16 +22,16 @@ def get_reply(model, text):


 prompt_dict = {
-    "…
-    "…
-    "…
-    "…
-    "…
+    "friend/ally of": "entities that are friends or allies",
+    "competitor/rival of": "entities that are competitors or rivals",
+    "known for": "what entities are known for",
+    "influenced by": "what has influenced different entities",
+    "similar to": "entities that are similar"
 }


 def get_prompt(_data):
-    ref = "\n".join([str(_i) for _i in _data["…
+    ref = "\n".join([str(_i) for _i in _data["prototypical_examples"]])
     prefix = f'Consider the following reference list of {prompt_dict[_data["relation_type"]]}, \n{ref}\n' \
              f'Now sort the entity pairs from the following list based on the extent to which they also represent ' \
              f'{prompt_dict[_data["relation_type"]]} in descending order. Do not include the pairs from the reference list. ' \

@@ -41,14 +41,14 @@ def get_prompt(_data):


 if __name__ == '__main__':
-    os.makedirs('…
+    os.makedirs('results/chat', exist_ok=True)

     full_result = []
     valid_count = []
     for target_model in ['gpt-3.5-turbo', 'gpt-4']:

         for d in data:
-            output_file = f"…
+            output_file = f"results/chat/{target_model}.{d['relation_type'].replace(' ', '_').replace('/', '-')}.json"
             if not os.path.exists(output_file):
                 print(target_model, d['relation_type'])
                 i = get_prompt(d)

@@ -96,5 +96,5 @@ if __name__ == '__main__':
     df.index = [pretty_model[m] for m in df.index]
     print(df.to_latex())
     df = df.T
-    # df.to_csv("…
+    # df.to_csv("results/chat/chat.csv")

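To make the change easier to follow, here is a minimal sketch of how the pieces touched in this diff fit together. The dataset name and the record fields `relation_type` and `prototypical_examples` come straight from the diff; the helper name `get_prompt_prefix` and variable `relation_slug` are illustrative, and the chat call plus the tail of the prompt (cut off in the view above) are omitted rather than guessed.

import os
from datasets import load_dataset

data = load_dataset("cardiffnlp/relentless", split="test")

prompt_dict = {
    "friend/ally of": "entities that are friends or allies",
    "competitor/rival of": "entities that are competitors or rivals",
    "known for": "what entities are known for",
    "influenced by": "what has influenced different entities",
    "similar to": "entities that are similar",
}


def get_prompt_prefix(record):
    # Reference list built from the relation's prototypical examples.
    ref = "\n".join(str(pair) for pair in record["prototypical_examples"])
    return (
        f'Consider the following reference list of {prompt_dict[record["relation_type"]]}, \n{ref}\n'
        f'Now sort the entity pairs from the following list based on the extent to which they also represent '
        f'{prompt_dict[record["relation_type"]]} in descending order. Do not include the pairs from the reference list. '
    )


os.makedirs("results/chat", exist_ok=True)
for target_model in ["gpt-3.5-turbo", "gpt-4"]:
    for d in data:
        # Output path scheme from the diff, e.g. results/chat/gpt-4.competitor-rival_of.json
        relation_slug = d["relation_type"].replace(" ", "_").replace("/", "-")
        output_file = f"results/chat/{target_model}.{relation_slug}.json"
        if not os.path.exists(output_file):
            print(target_model, d["relation_type"])
            print(get_prompt_prefix(d)[:120])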
experiments/baseline_lm_lc.py
CHANGED

@@ -10,11 +10,11 @@ from lmppl import EncoderDecoderLM, LM, OpenAI
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)

 prompt_dict = {
-    "…
-    "…
-    "…
-    "…
-    "…
+    "friend/ally of": "Complete the following list with examples of entities that are friends or allies",
+    "competitor/rival of": "Complete the following list with examples of entities that are competitors or rivals",
+    "known for": "Complete the following list with examples of what entities are known for",
+    "influenced by": "Complete the following list with examples of what has influenced different entities",
+    "similar to": "Complete the following list with examples of entities that are similar"
 }
 data = load_dataset("cardiffnlp/relentless", split="test")
 full_result = []
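As context for the new list-completion prompts, a minimal sketch of how a prefix from this prompt_dict could be combined with a relation's prototypical examples and scored with lmppl perplexity. The candidate pairs, the small Flan-T5 checkpoint, and the exact prompt/continuation split are illustrative assumptions, not the code in this file.

from datasets import load_dataset
import lmppl

prompt_dict = {  # same mapping as in the diff above, abridged to one entry
    "competitor/rival of": "Complete the following list with examples of entities that are competitors or rivals",
}

data = load_dataset("cardiffnlp/relentless", split="test")
d = next(r for r in data if r["relation_type"] == "competitor/rival of")

ref = "\n".join(str(pair) for pair in d["prototypical_examples"])
prompt = f'{prompt_dict[d["relation_type"]]}:\n{ref}\n'

# Hypothetical candidate pairs; a lower-perplexity continuation ~ a stronger relation.
candidates = [["Nike", "Adidas"], ["Saturn", "Rachel Bilson"]]
scorer = lmppl.EncoderDecoderLM("google/flan-t5-small")  # small model for a quick check
ppl = scorer.get_perplexity(
    input_texts=[prompt] * len(candidates),
    output_texts=[str(c) for c in candidates],
)
for score, pair in sorted(zip(ppl, candidates), key=lambda s: s[0]):
    print(round(score, 2), pair)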
experiments/baseline_lm_qa.py
CHANGED

@@ -10,39 +10,39 @@ from lmppl import EncoderDecoderLM, LM, OpenAI
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)

 prompt_dict = {
-    "…
-    "…
-    "…
-    "…
-    "…
+    "friend/ally of": "entities that are friends or allies",
+    "competitor/rival of": "entities that are competitors or rivals",
+    "known for": "examples of what entities are known for",
+    "influenced by": "what has influenced different entities",
+    "similar to": "examples of entities that are similar"
 }
 data = load_dataset("cardiffnlp/relentless", split="test")
 full_result = []
 for lm, ppl_class, batch, pretty_name in [
-    ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
-    ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
-    ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
-    ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
-    ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
-    ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
-    ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
-    ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
-    ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
-    ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
-    ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
-    # ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
-    ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
-    ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
-    ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
-    ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
-    ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
-    ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
-    ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
-    ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
-    ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
-    ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
-    ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
-
+    # ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
+    # ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
+    # ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
+    # ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
+    # ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
+    # ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
+    # ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
+    # ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
+    # ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
+    # ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
+    # ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
+    # # ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
+    # ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
+    # ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
+    # ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
+    # ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
+    # ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
+    # ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
+    # ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
+    # ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
+    # ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
+    # ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
+    # ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
+    ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
 ]:
     os.makedirs(f"results/lm_qa/{os.path.basename(lm)}", exist_ok=True)
     scorer = None
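After this change the only active entry is the OpenAI davinci scorer; every Hugging Face model is commented out. A rough sketch of how the loop variables are likely consumed follows: the results/lm_qa directory layout is taken directly from the diff, while the lmppl constructor arguments (API key and model name for OpenAI, model name for local checkpoints) are assumptions, not verified signatures.

import os
from lmppl import EncoderDecoderLM, LM, OpenAI

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)

for lm, ppl_class, batch, pretty_name in [
    ("davinci", OpenAI, None, "GPT-3 davinci"),  # pretty_name simplified for this sketch
]:
    # Per-model result directory, as in the diff:
    # basename("davinci") == "davinci", basename("facebook/opt-30b") == "opt-30b".
    os.makedirs(f"results/lm_qa/{os.path.basename(lm)}", exist_ok=True)

    if ppl_class is OpenAI:
        # Assumption: lmppl.OpenAI is built from an API key and a model name.
        scorer = OpenAI(OPENAI_API_KEY, model=lm)
    else:
        # Local checkpoints (Flan-T5, T5, OPT) would be loaded by name instead.
        scorer = ppl_class(lm)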
experiments/results/chat/gpt-3.5-turbo.competitor-rival_of.json
ADDED

@@ -0,0 +1,19 @@
+1. Liverpool FC - Manchester United
+2. Apple - Microsoft
+3. Microsoft - Google
+4. Netflix - Disney Plus
+5. PyTorch - TensorFlow
+6. Razer - Dell
+7. Spotify - Apple
+8. Manchester United - Arsenal
+9. Saudi Arabia - Israel
+10. Coca-Cola Company - Pepsi
+11. Twitter - Facebook
+12. Arsenal - Tottenham Hotspur
+13. Nintendo - Xbox
+14. Liverpool FC - Manchester City
+15. Nike - Adidas
+16. Manchester City - Manchester United
+17. Amazon - Ebay
+18. McDonald's - Burger King
+19. Sprite - 7 Up
experiments/results/chat/gpt-3.5-turbo.friend-ally_of.json
ADDED

@@ -0,0 +1,79 @@
+1. ['Australia', 'New Zealand']
+2. ['Aznar', 'Bush']
+3. ['Extinction Rebellion', 'Greta Thunberg']
+4. ['Elsa', 'Anna']
+5. ['CIA', 'MI6']
+6. ['UK', 'Commonwealth']
+7. ['Singapore', 'Israel']
+8. ['India', 'US']
+9. ['US', 'Canada']
+10. ['UN', 'NATO']
+11. ['Germany', 'France']
+12. ['Spain', 'Portugal']
+13. ['France', 'Belgium']
+14. ['Malaysia', 'Singapore']
+15. ['Islamic State', 'Denys Prokopenko']
+16. ['China', 'North Korea']
+17. ['Armenia', 'Azerbaijan']
+18. ['Russia', 'Georgia']
+19. ['Japan', 'Taiwan']
+20. ['South Korea', 'Japan']
+21. ['UK', 'Ireland']
+22. ['Turkey', 'All Nippon Airways']
+23. ['Pedro Sánchez', 'Pablo Iglesias']
+24. ['Hillary Clinton', 'Barack Obama']
+25. ['Rishi Sunak', 'Leo Varadkar']
+26. ['Boris Johnson', 'Emmanuel Macron']
+27. ['Di Maio', 'Salvini']
+28. ['Brazil', 'India']
+29. ['Kylo Ren', 'Rey']
+30. ['Keir Starmer', 'Jeremy Corbyn']
+31. ['Margaret Thatcher', 'Ronald Reagan']
+32. ['Singapore', 'Malaysia'] (Removing duplicate)
+33. ['UK', 'Australia'] (Removing duplicate)
+34. ['Cersei Lannister', 'Euron Greyjoy']
+35. ['FTX', 'Alameda Research']
+36. ['Sophia Loren', 'Marlon Brando']
+37. ['Paul Rudd', 'Memento']
+38. ['Jean-Michel Basquiat', 'Andy Warhol']
+39. ['India', 'Brazil']
+40. ['Nikon', 'Tokina']
+41. ['Google', 'Samsung']
+42. ['IMF', 'The World Bank']
+43. ['Instagram', 'WhatsApp']
+44. ['Windows', 'Xbox']
+45. ['Johnny Cash', 'Waylon Jennings']
+46. ['Oman', 'Iran']
+47. ['China', 'Huawei']
+48. ['Amazon', 'Royal Mail']
+49. ['Red Bull', 'GoPro']
+50. ['HSBC', 'BlackRock']
+51. ['Tata Motors', 'Jaguar']
+52. ['KGB', 'CIA']
+53. ['JP Morgan', 'Morgan Stanley']
+54. ['Eva Perón', 'Interpol']
+55. ['Eastern Orthodoxy', 'Oriental Orthodoxy']
+56. ['Darth Vader', 'Emperor Palpatine']
+57. ['Doja Cat', 'Anthony Albanese']
+58. ['Thomas Jefferson', 'Kid Cudi']
+59. ['Liam Gallagher', 'Noel Gallagher']
+60. ['Quentin Tarantino', 'Edgar Wright']
+61. ['Rishi Sunak', 'Joe Biden']
+62. ['Macbeth', 'Banquo']
+63. ['Ron Weasley', 'Neville Longbottom']
+64. ['Bob Marley', 'Abu Bakr']
+65. ['Noah Schnapp', 'Galatasaray S.K.']
+66. ['Kendall Jenner', 'Bergen']
+67. ['Porter Wagoner', 'Dolly Parton']
+68. ['Stephen Hawking', 'Brian Cox']
+69. ['Johnny Knoxville', 'Catherine Zeta-Jones']
+70. ['Mark Drakeford', 'Rishi Sunak']
+71. ['J.R.R. Tolkien', 'C.S. Lewis']
+72. ['Beatles', 'Rolling Stones']
+73. ['Benedict Cumberbatch', 'Hanukkah']
+74. ['United States', 'United Kingdom']
+75. ['Linus Sebastian', 'Marques Brownlee']
+76. ['Saturn', 'Rachel Bilson']
+77. ['Huawei', 'China']
+78. ['Achilles', 'Jonathan Bailey']
+79. ['The Beatles', 'Queen']
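Both result files store the model's raw ranking as a numbered list (plain "A - B" pairs in the competitor/rival file, bracketed pairs with occasional asides such as "(Removing duplicate)" in the friend/ally file). A hypothetical post-processing step, not necessarily what this repository does, would strip the numbering before mapping the reply back onto the candidate pairs:

import re


def parse_ranking(reply: str) -> list[str]:
    """Extract ranked items from a numbered reply, dropping the '1.'-style prefixes."""
    ranked = []
    for line in reply.splitlines():
        m = re.match(r"\s*\d+\.\s*(.+?)\s*$", line)
        if m:
            ranked.append(m.group(1))
    return ranked


print(parse_ranking("1. Nike - Adidas\n2. Sprite - 7 Up"))
# -> ['Nike - Adidas', 'Sprite - 7 Up']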