daman1209arora committed on
Commit
8ce189e
1 Parent(s): 1cabc3e

Upload 3 files

Files changed (3)
  1. compute_metrics.py +213 -0
  2. data.zip +3 -0
  3. inference.py +179 -0
compute_metrics.py ADDED
@@ -0,0 +1,213 @@
+ import numpy as np
+ import json
+ import pandas as pd
+
+ QUES_TYPES = ['MCQ', 'MCQ(multiple)', 'Integer', 'Numeric']
+
+ models = [
+     "Random",
+     "GPT3_normal",
+     "GPT3.5_normal",
+     "GPT4_normal",
+     "GPT4_CoT",
+     "GPT4_CoT_self_refine",
+     "GPT4_CoT+OneShot",
+     "GPT4_CoT+SC@8"
+ ]
+
+ def get_aggregate(answers, question_type, single_threshold=None, multiple_threshold=None):
+     # Pass optional \tau_{single} and \tau_{multiple} parameters if needed for evaluation under risk.
+     if question_type == 'MCQ(multiple)' or question_type == 'MCQ':
+         letter_to_idx = {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'None': 4}
+         idx_to_letter = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'None'}
+         abcd = [0, 0, 0, 0, 0]
+         for ans in answers:
+             if ans == 'None':
+                 abcd[letter_to_idx[ans]] += 1
+             else:
+                 for c in ans:
+                     abcd[letter_to_idx[c]] += 1
+         if question_type == 'MCQ':
+             abcd = abcd[:-1]
+             answer = idx_to_letter[np.argmax(abcd)]
+             if single_threshold is not None:
+                 answer = answer if abcd[np.argmax(abcd)] / len(answers) >= single_threshold else "None"
+         else:
+             if multiple_threshold is not None:
+                 options_selected = [idx_to_letter[x] for x in range(len(abcd)) if abcd[x] >= len(answers) * multiple_threshold and idx_to_letter[x] != 'None']
+             else:
+                 options_selected = [idx_to_letter[x] for x in range(len(abcd)) if abcd[x] >= len(answers) / 2 and idx_to_letter[x] != 'None']
+             if len(options_selected) == 0:
+                 answer = "None"
+             else:
+                 answer = ''.join(sorted(options_selected))
+     else:  # For Integer and Numeric answers, choose the most common response (other than None)
+         while "None" in answers:
+             answers.remove("None")
+         if len(answers) == 0:
+             answers = ["None"]
+         unique, counts = np.unique(answers, return_counts=True)
+         answer = unique[np.argmax(counts)]
+     return answer
+
+
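# Illustration (hypothetical inputs): majority voting over eight sampled SC@8
# extracts for an MCQ(multiple) question. Options voted for by at least half of
# the samples are kept, so the aggregate here is 'AB':
#     votes = ['AB', 'AB', 'A', 'ABD', 'AB', 'None', 'AB', 'B']
#     get_aggregate(votes, 'MCQ(multiple)')  # -> 'AB'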
+ def compute_score(gold, resp, question_type, year):
+     assert question_type in QUES_TYPES
+     if question_type == 'MCQ(multiple)':
+         gold = set([c for c in ['A', 'B', 'C', 'D'] if c in gold])
+         resp = set([c for c in ['A', 'B', 'C', 'D'] if c in resp])
+         if resp == gold:
+             return 1.0
+         else:
+             if len(resp - gold) == 0:
+                 return 0.25 * len(resp)
+             return 0.0  # If the response contains something not in the gold set, give 0
+     elif question_type == 'MCQ':
+         gold = set([c for c in ['A', 'B', 'C', 'D'] if c in gold])
+         resp = set([c for c in ['A', 'B', 'C', 'D'] if c in resp])
+         return int(gold == resp)
+     else:
+         if resp == "None":
+             return 0.0
+         g, r = float(gold), float(resp)
+         return int(abs(g - r) <= 0.01)
+
+
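# Illustration (hypothetical inputs) of the MCQ(multiple) partial-marking rule above
# (the year argument is currently unused):
#     compute_score('ABD', 'ABD', 'MCQ(multiple)', 2023)  # exact match           -> 1.0
#     compute_score('ABD', 'AD',  'MCQ(multiple)', 2023)  # subset, 2 correct     -> 0.5
#     compute_score('ABD', 'AC',  'MCQ(multiple)', 2023)  # contains a wrong one  -> 0.0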
+ def construct_responses_table():
+     responses = {}
+     for model in models:
+         if "SC@" in model:
+             pass
+         elif "Random" == model:
+             pass
+         else:
+             responses[model] = json.load(open(f"data/responses/{model}_responses/responses.json"))
+     dataset = json.load(open('data/dataset.json'))
+     extracts = {
+         "Type": [],
+         "Index": [],
+         "Description": [],
+         "Subject": [],
+         "Gold": [],
+     }
+     for model in models:
+         if "Random" == model:
+             continue
+         else:
+             extracts[f'{model}'] = []
+
+     for i, q in enumerate(dataset):
+         extracts['Type'].append(q['type'])
+         extracts['Index'].append(q['index'])
+         extracts['Description'].append(q['description'])
+         extracts['Subject'].append(q['subject'])
+         extracts['Gold'].append(q['gold'])
+
+         for model in models:
+             if "SC@" in model:
+                 continue
+             elif "Random" == model:
+                 continue
+             else:
+                 # The dataset and each response file must be aligned question-by-question.
+                 assert q['question'] == responses[model][i]['question'], \
+                     f"Question mismatch for {model} at index {i}"
+                 extracts[f'{model}'].append(responses[model][i]['extract'])
+
+     # The SC@k column is filled separately by majority-voting over the k sampled extracts.
+     for model in models:
+         if "GPT4_CoT+SC" in model:
+             num_responses = int(model.split("@")[1])
+             sc_responses = json.load(open('data/responses/GPT4_CoT+SC_responses/responses.json'))
+             for i, q in enumerate(dataset):
+                 resp = sc_responses[i]
+                 answers = [resp['GPT4_CoT+SC_response']['choices'][k]['extract'] for k in range(num_responses)]
+                 answer = get_aggregate(answers, resp['type'])
+                 extracts[f'{model}'].append(answer)
+
+     pd.DataFrame(extracts).to_csv('results/extracts.csv', index=False)
+
+     return pd.read_csv('results/extracts.csv', dtype=str)
+
+
+ responses = construct_responses_table()
+ output = []
+ for i, response in responses.iterrows():
+     out = {}
+     out["Type"] = response["Type"]
+     out["Index"] = response["Index"]
+     out["Description"] = response["Description"]
+     out["Subject"] = response["Subject"]
+     gold = response["Gold"]
+     out["Gold"] = gold
+     # "Random" baseline: expected score of a uniform random guess for this question type.
+     if response["Type"] == "MCQ":
+         out["Random"] = 0.25
+     elif response["Type"] == "MCQ(multiple)":
+         num_ans = len(gold)
+         if num_ans == 1:
+             out["Random"] = 0.0625
+         elif num_ans == 2:
+             out["Random"] = 0.09375
+         elif num_ans == 3:
+             out["Random"] = 0.203125
+         elif num_ans == 4:
+             out["Random"] = 0.5
+     else:
+         out["Random"] = 0
+
+     for model in models:
+         if model == "Random":
+             continue
+         resp = response[f"{model}"]
+         if not isinstance(resp, str):  # missing extracts are read back as NaN
+             resp = "None"
+         out[f"{model}"] = compute_score(gold, resp, out["Type"], out["Description"])
+     out['Max'] = 1
+     output.append(out)
+
+ df = pd.DataFrame()
+ df['Type'] = [x['Type'] for x in output]
+ df['Index'] = [x['Index'] for x in output]
+ df['Description'] = [x['Description'] for x in output]
+ df['Subject'] = [x['Subject'] for x in output]
+ df['Gold'] = [x['Gold'] for x in output]
+ df['Random'] = [x['Random'] for x in output]
+ for model in models:
+     df[f"{model}"] = [x.get(f"{model}", 0) for x in output]
+
+ df.to_csv("results/scores.csv", index=False)
+
+ modes = ['overall', 'type_wise', 'subject_wise']
+ for mode in modes:
+     col_dict = {}
+     for model in models:
+         col_dict[f'{model}'] = ['mean']
+
+     if mode != 'overall':
+         col_dict[f'{models[0]}'].insert(0, 'count')
+
+     if mode == 'overall':
+         grouped_multiple = df.agg(col_dict)
+     elif mode == 'type_wise':
+         grouped_multiple = df.groupby(['Type']).agg(col_dict)
+     elif mode == 'subject_wise':
+         grouped_multiple = df.groupby(['Subject']).agg(col_dict)
+
+     if mode != 'overall':
+         grouped_multiple.columns = ['count'] + models
+         grouped_multiple = grouped_multiple.reset_index()
+     grouped_multiple = grouped_multiple.round(3)
+     grouped_multiple.to_csv(f"results/aggregated_scores_{mode}.csv", index=False)
+ print("Done!")
data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9db3aa734a3e5b7e02cead16c17b6687ff363d8a4d1015395f04f28ace33a07a
+ size 8069293
inference.py ADDED
@@ -0,0 +1,179 @@
+ import os
+ from tqdm import tqdm
+ import json
+ import openai
+ import argparse
+ import multiprocessing
+ from copy import deepcopy
+ from functools import partial
+
+ prompt_library = {
+     "MCQ": "In this problem, only one option will be correct. Give a detailed solution and end the solution with the final answer.",
+     "MCQ(multiple)": "In this problem, multiple options can be correct. Give a detailed solution and end the solution with the final answer.",
+     "Integer": "In this problem, the final answer will be a non-negative integer. Give a detailed solution and end the solution with the final answer.",
+     "Numeric": "In this problem, the final answer will be a numeric value. Give the numerical answer correct up to the 2nd decimal digit. Give a detailed solution and end the solution with the final answer.",
+ }
+
+ few_shot_examples = json.load(open('data/few_shot_examples.json'))
+
+
+ def write_in_file(response_file, response_dict, question, mode, model_nickname):
+     if os.path.exists(response_file):
+         with open(response_file, 'r') as infile:
+             responses = json.load(infile)
+     else:
+         responses = []
+
+     found = False
+     for i, old_resp in enumerate(responses):
+         if old_resp['description'] == question['description'] and old_resp['index'] == question['index']:
+             responses[i][f"{model_nickname}_{mode}_response"] = response_dict[f"{model_nickname}_{mode}_response"]
+             found = True
+             break
+
+     if not found:
+         responses.append(response_dict)
+
+     json.dump(sorted(responses, key=lambda elem: (elem['description'], elem['index'])), open(response_file, 'w'), indent=4)
+     print(f"####UPDATED {response_file}, Current size : {len(responses)}####")
+
+
+ def get_response(question, model, model_nickname, mode, response_file, lock):
+
+     response_dict = deepcopy(question)
+     prefix_prompt = prompt_library[question['type']]
+     suffix_prompt = ""
+
+     if mode in ['CoT', 'CoT+SC', 'CoT+Exam']:
+         suffix_prompt = "Let's think step by step.\n"
+
+     ques = question["question"]
+     stripped_ques = ques.replace("\n\n", "\n").strip()
+     if mode in ['CoT+OneShot', 'CoT', 'CoT+SC', 'CoT+Exam']:
+         if mode == 'CoT+Exam':
+             if response_dict['type'] in ['MCQ', 'MCQ(multiple)']:
+                 if response_dict['type'] == 'MCQ':
+                     exam_prompt = "If the answer is wrong, you'll be given -1 marks. If the answer is correct, you'll be given +3 marks. If you're unsure of the answer, you can skip the question, and you'll be given 0 marks."
+                 else:
+                     exam_prompt = "If any of the options in the final answer is wrong, you'll be given -2 marks. If all the options are correct, you'll be given +4 marks. If some of the options are correct, you'll be given +1 for each correct option. If you're unsure of the answer, you can skip the question, and you'll be given 0 marks."
+                 prompt = prefix_prompt + " " + exam_prompt + "\n\n" + "Problem: " + stripped_ques + "\nSolution: " + suffix_prompt
+             else:
+                 print("No point doing this for Numeric/Integer questions since there is no negative marking...")
+                 breakpoint()
+         else:
+             if mode == 'CoT+OneShot':
+                 ex = few_shot_examples[question['subject']][question['type']]
+                 prompt = prefix_prompt + "\n\n" + "Problem: " + ex['problem'] + "\nSolution: " + ex['solution'] + "\n\n" + "Problem: " + stripped_ques + "\nSolution: "
+             else:
+                 prompt = prefix_prompt + "\n\n" + "Problem: " + stripped_ques + "\nSolution: " + suffix_prompt
+     else:
+         prompt = prefix_prompt + "\n\n" + "Problem: " + stripped_ques + suffix_prompt
+     prompt = prompt.strip()
+     response_dict["prompt"] = prompt
+     num_retries = 0
+     print(f'Question: {question["description"]}, Index: {question["index"]}, Model: {model_nickname}, Mode: {mode}, query begins')
+
+     while True:
+         try:
+             if model in ["text-davinci-003", "text-davinci-002", "davinci-002"]:
+                 response = openai.Completion.create(
+                     model=model,
+                     prompt=prompt,
+                     max_tokens=2048,
+                     temperature=0 if mode in ['CoT', 'normal', 'CoT+Exam'] else 0.5,
+                     n=1 if mode in ['CoT', 'normal', 'CoT+Exam'] else 3
+                 )
+             else:
+                 response = openai.ChatCompletion.create(
+                     model=model,
+                     messages=[
+                         {"role": "system", "content": ""},
+                         {"role": "user", "content": prompt}
+                     ],
+                     max_tokens=2048,
+                     temperature=0 if mode in ['CoT+OneShot', 'CoT', 'normal', 'CoT+Exam'] else 0.5,
+                     n=1 if mode in ['CoT+OneShot', 'CoT', 'normal', 'CoT+Exam'] else 8
+                 )
+
+             lock.acquire()
+             response_dict[f"{model_nickname}_{mode}_response"] = response
+             write_in_file(response_file, response_dict, question, mode, model_nickname)
+             lock.release()
+             break
+
+         except Exception as e:
+             num_retries += 1
+             print("Failure!", e)
+             return  # give up on this question after the first failed attempt
+
111
+ def main():
112
+ '''
113
+ The code can restart from the already done questions in case there is a failure midpoint.
114
+ '''
115
+ args = argparse.ArgumentParser()
116
+ args.add_argument('--model', default='gpt-3.5-turbo')
117
+ args.add_argument('--data', default='data/dataset.json')
118
+ args.add_argument('--mode', default='normal')
119
+ args.add_argument('--num_procs', default=1, type=int)
120
+ args.add_argument('--max_questions', default=1, type=int)
121
+ args = args.parse_args()
122
+
123
+ openai.organization = os.getenv("OPENAI_ORG")
124
+ openai.api_key = os.getenv("OPENAI_API_KEY")
125
+
126
+ model_nickname = {
127
+ "davinci-002": "davinci-002",
128
+ "text-davinci-003": "GPT3",
129
+ "gpt-3.5-turbo": "GPT3.5",
130
+ "gpt-4-0613": "GPT4_0613",
131
+ "gpt-4-0314": "GPT4"
132
+ }
133
+ assert args.model in model_nickname.keys()
134
+ assert args.mode in ['normal', 'CoT', 'CoT+OneShot', 'CoT+Exam', 'CoT+SC']
135
+
136
+ out_file_dir = f'responses/{model_nickname[args.model]}_{args.mode}_responses'
137
+ out_file = os.path.join(out_file_dir, 'responses.json')
138
+ questions = json.load(open(args.data))
139
+
140
+ rem_ques = []
141
+
142
+ if os.path.exists(out_file):
143
+
144
+ for question in tqdm(questions[:args.max_questions]):
145
+ if os.path.exists(out_file):
146
+ with open(out_file, 'r') as infile:
147
+ responses = json.load(infile)
148
+ found = False
149
+
150
+ for i, old_resp in enumerate(responses):
151
+ if question['type'] in ['Numeric', 'Integer'] and args.mode == 'CoT+Exam':
152
+ found = True
153
+ if old_resp['description'] == question['description'] and old_resp['index'] == question['index']:
154
+
155
+ found = all([old_resp.get(
156
+ f"{model_nickname[args.model]}_{args.mode}_response", False) for model in [args.model]])
157
+ if found:
158
+ print("This question has already been done")
159
+ else:
160
+ rem_ques.append(question)
161
+ else:
162
+ os.makedirs(out_file_dir, exist_ok=True)
163
+ if args.mode == 'CoT+Exam':
164
+ rem_ques = []
165
+ for q in questions:
166
+ if q['type'] in ['MCQ', 'MCQ(multiple)']:
167
+ rem_ques.append(q)
168
+ else:
169
+ rem_ques = questions[:args.max_questions]
170
+ print(f"There are {len(rem_ques)} problems remaining")
171
+
172
+ manager = multiprocessing.Manager()
173
+ lock = manager.Lock()
174
+ pool = multiprocessing.Pool(args.num_procs)
175
+ f = partial(get_response, model=args.model, model_nickname=model_nickname[args.model], mode=args.mode, response_file=out_file, lock=lock)
176
+ pool.map(f, rem_ques)
177
+
178
+ if __name__ == '__main__':
179
+ main()
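For reference, with the arguments defined in main() a run might look like the following (assuming data/dataset.json and data/few_shot_examples.json are in place and OPENAI_ORG / OPENAI_API_KEY are exported):

python inference.py --model gpt-4-0314 --mode CoT --num_procs 8 --max_questions 10000

Responses are written to responses/GPT4_CoT_responses/responses.json, while compute_metrics.py reads from data/responses/{model}_responses/responses.json, so the output directory may need to be moved under data/ (or the paths aligned) before scoring.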