Upload 12 files
Browse files- Figure_3.png +3 -0
- Figure_4.png +3 -0
- Figure_5.png +3 -0
- Figure_6.png +3 -0
- LICENSE +21 -0
- create_dataset_files.py +21 -0
- create_exp1_outputs.py +55 -0
- create_exp1_results.py +32 -0
- create_exp2_outputs.py +75 -0
- create_exp2_results.py +32 -0
- exp_lib.py +70 -0
- requirements.txt +3 -0
Figure_3.png
ADDED
Git LFS Details
|
Figure_4.png
ADDED
Git LFS Details
|
Figure_5.png
ADDED
Git LFS Details
|
Figure_6.png
ADDED
Git LFS Details
|
LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) 2024 Nancy Otero
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE.
|
create_dataset_files.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# assume we are starting with data.json
# then we create several files per example:
# name format as {mae}_{example_num}_question|incorrect_answer|correct_answer

import pandas as pd
from tqdm import tqdm

df = pd.read_json('data/data.json')

# total= lets tqdm show a real progress bar (iterrows alone has no length).
for i, row in tqdm(df.iterrows(), total=len(df)):
    mae = row['Misconception ID']
    num = row['Example Number']
    correct_answer = row['Correct Answer']
    incorrect_answer = row['Incorrect Answer']
    question = row['Question']
    # Open in 'w' (not 'a'): each (misconception, example) pair maps to
    # exactly one file, and append mode would duplicate the text every
    # time this script is re-run.
    with open(f'data/txt_files/{mae}_{num}_correct_answer.txt', 'w') as file:
        file.write(correct_answer)
    with open(f'data/txt_files/{mae}_{num}_incorrect_answer.txt', 'w') as file:
        file.write(incorrect_answer)
    with open(f'data/txt_files/{mae}_{num}_question.txt', 'w') as file:
        file.write(question)
create_exp1_outputs.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
import pandas as pd
|
3 |
+
import pandas as pd
|
4 |
+
import json
|
5 |
+
import urllib
|
6 |
+
import math
|
7 |
+
import time
|
8 |
+
import random
|
9 |
+
import re
|
10 |
+
from tqdm import tqdm
|
11 |
+
from io import StringIO
|
12 |
+
import exp_lib
|
def experiment_1_trial(data_df, model_name):
    """Run a single trial of Experiment 1.

    Shuffles the dataset, keeps one example per misconception as the
    in-context "train" set and one per misconception as the "test" set,
    asks the model to diagnose every test item in one batch, and returns
    a DataFrame of predictions.

    Returns columns: Misconception ID, Example Number, Topic,
    Predicted Diagnosis, Model.

    Raises ValueError if the model response does not contain exactly one
    diagnosis per test example.
    """
    shuffled = data_df.sample(frac=1)
    train_df = shuffled.drop_duplicates('Misconception ID')
    # Walk the shuffle backwards so the test split keeps a *different*
    # example per misconception than the train split (when one exists).
    test_df = shuffled.iloc[::-1].drop_duplicates('Misconception ID')
    # Fresh RangeIndex so the positional predictions below align row-for-row.
    test_df = test_df.reset_index()
    prompt = exp_lib.generate_prompt_test_batch(
        train_df.to_dict(orient='records'),
        test_df.to_dict(orient='records'),
    )
    response = exp_lib.get_gpt4_diagnosis(model_name, prompt)
    response_df = pd.read_csv(StringIO(response), header=None,
                              names=["test_example", "diagnosis"])
    # Guard against a malformed/truncated model response: assigning a
    # shorter series would otherwise silently leave NaN predictions.
    if len(response_df) != len(test_df):
        raise ValueError(
            f"Model returned {len(response_df)} diagnoses for "
            f"{len(test_df)} test examples"
        )
    test_df["Predicted Diagnosis"] = response_df["diagnosis"].str.strip()
    test_df["Model"] = model_name
    return test_df[['Misconception ID', 'Example Number', 'Topic',
                    'Predicted Diagnosis', 'Model']]
def experiment_1(input_file_path, model_name, num_iterations, output_file_path):
    """Run Experiment 1 for `num_iterations` trials and write a CSV.

    Each trial's predictions are tagged with their trial number. Failed
    trials (API errors, malformed responses) are logged and skipped so a
    single failure does not abort the whole experiment.

    Raises RuntimeError if every trial failed.
    """
    data_df = pd.read_json(input_file_path)
    experiment_1_results_list = []
    for i in tqdm(range(num_iterations)):
        try:
            trial_result = experiment_1_trial(data_df, model_name)
            trial_result['Trial'] = i
            experiment_1_results_list.append(trial_result)
        except Exception as e:
            # Best-effort: log and continue with the remaining trials.
            print(e)
    if not experiment_1_results_list:
        # Without this guard pd.concat raises an opaque
        # "No objects to concatenate" error.
        raise RuntimeError('All trials failed; no results to save')
    experiment_1_results_df = pd.concat(experiment_1_results_list)
    experiment_1_results_df['Correct'] = (
        experiment_1_results_df['Misconception ID']
        == experiment_1_results_df['Predicted Diagnosis']
    )
    experiment_1_results_df.to_csv(output_file_path)
if __name__ == '__main__':
    # Run configuration -- edit these constants to change the experiment.
    experiment_name = 'experiment_1'
    input_file_path = 'data/data.json'
    model_name = 'gpt-4-turbo'
    num_iterations = 100
    output_file_path = (
        f'outputs/{experiment_name}_{model_name}_{num_iterations}iters.csv'
    )

    experiment_1(input_file_path, model_name, num_iterations, output_file_path)
create_exp1_results.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import pandas as pd

# Load the per-trial outputs produced by create_exp1_outputs.py.
output_file_path = 'outputs/experiment_1_gpt-4-turbo_100iters.csv'
outputs_df = pd.read_csv(output_file_path)

# Quick sanity peek at the loaded data.
print(outputs_df.head())
def calculate_precision_recall(df):
    """Return (precision, recall) for one slice of experiment outputs.

    Precision: fraction of rows marked Correct.
    Recall: correct rows divided by the number of distinct misconceptions
    present in the slice. Either metric is 0 when its denominator is 0.
    """
    hits = df['Correct'].sum()
    n_predictions = len(df)
    n_misconceptions = df['Misconception ID'].nunique()

    precision = hits / n_predictions if n_predictions else 0
    recall = hits / n_misconceptions if n_misconceptions else 0

    return precision, recall
# Overall precision and recall across every trial and topic.
overall_precision, overall_recall = calculate_precision_recall(outputs_df)

# Per-topic breakdown: groupby/apply yields (precision, recall) tuples,
# which .apply(pd.Series) expands into two columns.
topic_precision_recall = (
    outputs_df
    .groupby('Topic')
    .apply(calculate_precision_recall)
    .apply(pd.Series)
)
topic_precision_recall.columns = ['Precision', 'Recall']

# Display results
print(f"Overall Precision: {overall_precision:.3f}")
print(f"Overall Recall: {overall_recall:.3f}")
print("\nPrecision and Recall per Topic:")
print(topic_precision_recall)
create_exp2_outputs.py
ADDED
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
import pandas as pd
|
3 |
+
import pandas as pd
|
4 |
+
import json
|
5 |
+
import urllib
|
6 |
+
import math
|
7 |
+
import time
|
8 |
+
import random
|
9 |
+
import re
|
10 |
+
from tqdm import tqdm
|
11 |
+
from io import StringIO
|
12 |
+
import exp_lib
|
def experiment_2_trial(data_df, model_name):
    """Run a single trial of Experiment 2 (per-topic prompting).

    Like Experiment 1, but the model is prompted once per topic with only
    that topic's train/test examples in context; per-topic predictions
    are concatenated into one result DataFrame.

    Returns columns: Misconception ID, Example Number, Topic,
    Predicted Diagnosis, Model.

    Raises ValueError if a topic's response does not contain exactly one
    diagnosis per test example.
    """
    shuffled = data_df.sample(frac=1)
    train_df = shuffled.drop_duplicates('Misconception ID')
    # Walk the shuffle backwards so the test split keeps a *different*
    # example per misconception than the train split (when one exists).
    test_df = shuffled.iloc[::-1].drop_duplicates('Misconception ID')
    test_df = test_df.reset_index()
    topics = [
        'Ratios and proportional reasoning',
        'Number Operations',
        'Patterns, relationships, and functions',
        'Number sense',
        'Algebraic representations',
        'Variables, expressions, and operations',
        'Equations and inequalities',
        'Properties of number and operations'
    ]
    # now, iterate by topic and slice each topic data out of train_df, test_df
    topic_test_dfs = []
    for topic in topics:
        topic_test_df = test_df[test_df['Topic'] == topic].copy()
        # Fresh RangeIndex so the positional predictions below align
        # row-for-row with the parsed response.
        topic_test_df = topic_test_df.reset_index()
        topic_train_df = train_df[train_df['Topic'] == topic].copy()
        if topic_test_df.empty:
            # No test items for this topic in this shuffle -- skip the
            # (costly) API call instead of sending an empty batch.
            continue
        prompt = exp_lib.generate_prompt_test_batch(
            topic_train_df.to_dict(orient='records'),
            topic_test_df.to_dict(orient='records'),
        )
        response = exp_lib.get_gpt4_diagnosis(model_name, prompt)
        response_df = pd.read_csv(StringIO(response), header=None,
                                  names=["test_example", "diagnosis"])
        # Guard against a malformed/truncated model response, which
        # would otherwise silently leave NaN predictions.
        if len(response_df) != len(topic_test_df):
            raise ValueError(
                f"Model returned {len(response_df)} diagnoses for "
                f"{len(topic_test_df)} test examples in topic {topic!r}"
            )
        topic_test_df["Predicted Diagnosis"] = response_df["diagnosis"].str.strip()
        topic_test_df["Model"] = model_name
        topic_test_dfs.append(topic_test_df)

    topic_test_df2 = pd.concat(topic_test_dfs)
    return topic_test_df2[['Misconception ID', 'Example Number', 'Topic',
                           'Predicted Diagnosis', 'Model']]
def experiment_2(input_file_path, model_name, num_iterations, output_file_path):
    """Run Experiment 2 for `num_iterations` trials and write a CSV.

    Each trial's predictions are tagged with their trial number. Failed
    trials (API errors, malformed responses) are logged and skipped so a
    single failure does not abort the whole experiment.

    Raises RuntimeError if every trial failed.
    """
    data_df = pd.read_json(input_file_path)
    experiment_2_results_list = []
    for i in tqdm(range(num_iterations)):
        try:
            trial_result = experiment_2_trial(data_df, model_name)
            trial_result['Trial'] = i
            experiment_2_results_list.append(trial_result)
        except Exception as e:
            # Best-effort: log and continue with the remaining trials.
            print(e)
    if not experiment_2_results_list:
        # Without this guard pd.concat raises an opaque
        # "No objects to concatenate" error.
        raise RuntimeError('All trials failed; no results to save')
    experiment_2_results_df = pd.concat(experiment_2_results_list)
    experiment_2_results_df['Correct'] = (
        experiment_2_results_df['Misconception ID']
        == experiment_2_results_df['Predicted Diagnosis']
    )
    experiment_2_results_df.to_csv(output_file_path)
if __name__ == '__main__':
    # Run configuration -- edit these constants to change the experiment.
    experiment_name = 'experiment_2'
    input_file_path = 'data/data.json'
    model_name = 'gpt-4-turbo'
    num_iterations = 100
    output_file_path = (
        f'outputs/{experiment_name}_{model_name}_{num_iterations}iters.csv'
    )

    experiment_2(input_file_path, model_name, num_iterations, output_file_path)
create_exp2_results.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import pandas as pd

# Load the per-trial outputs produced by create_exp2_outputs.py.
output_file_path = 'outputs/experiment_2_gpt-4-turbo_100iters.csv'
outputs_df = pd.read_csv(output_file_path)

# Quick sanity peek at the loaded data.
print(outputs_df.head())
def calculate_precision_recall(df):
    """Return (precision, recall) for one slice of experiment outputs.

    Precision: fraction of rows marked Correct.
    Recall: correct rows divided by the number of distinct misconceptions
    present in the slice. Either metric is 0 when its denominator is 0.
    """
    hits = df['Correct'].sum()
    n_predictions = len(df)
    n_misconceptions = df['Misconception ID'].nunique()

    precision = hits / n_predictions if n_predictions else 0
    recall = hits / n_misconceptions if n_misconceptions else 0

    return precision, recall
# Overall precision and recall across every trial and topic.
overall_precision, overall_recall = calculate_precision_recall(outputs_df)

# Per-topic breakdown: groupby/apply yields (precision, recall) tuples,
# which .apply(pd.Series) expands into two columns.
topic_precision_recall = (
    outputs_df
    .groupby('Topic')
    .apply(calculate_precision_recall)
    .apply(pd.Series)
)
topic_precision_recall.columns = ['Precision', 'Recall']

# Display results
print(f"Overall Precision: {overall_precision:.3f}")
print(f"Overall Recall: {overall_recall:.3f}")
print("\nPrecision and Recall per Topic:")
print(topic_precision_recall)
exp_lib.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
import pandas as pd
|
3 |
+
import pandas as pd
|
4 |
+
import json
|
5 |
+
import urllib
|
6 |
+
import math
|
7 |
+
import time
|
8 |
+
import random
|
9 |
+
import re
|
10 |
+
from tqdm import tqdm
|
11 |
+
from io import StringIO
|
12 |
+
|
13 |
+
from tqdm import tqdm
|
14 |
+
|
# In-context learning prompt template
def generate_prompt_test_batch(train_examples, test_examples):
    """Build the few-shot diagnosis prompt sent to the model.

    train_examples: list of dicts with keys 'Question', 'Incorrect Answer',
        'Misconception ID', 'Misconception', 'Topic' -- rendered as labeled
        examples.
    test_examples: list of dicts with (at least) 'Question' and
        'Incorrect Answer' -- rendered unlabeled; the prompt instructs the
        model to answer with one "$Test_Example_Number, $Misconception_ID"
        line per item (parsed downstream as CSV).

    Returns the complete prompt string. The blank lines and unindented
    text inside the triple-quoted blocks are intentional formatting of
    the prompt itself -- do not re-indent them.
    """
    prompt = (
        "You are an expert tutor on middle school math with years of experience understanding students' most common math mistakes. "
        "You have identified a set of common mistakes called Misconceptions, and you use them to diagnose student's answers to math questions. "
        "You have also developed a labeled dataset of question items, and diagnosed them with the appropriate misconception ID.\n"
        "Using the set of misconceptions and the labeled dataset, your task today is to take some items of unlabeled data and provide a diagnosis for each unlabeled item.\n\n"
        "Here is the list of misconceptions together with a brief description:\n"
    )
    # Add training examples
    for i, example in enumerate(train_examples):
        # NOTE: labeled examples show the *incorrect* answer under
        # "Answer:" -- the task is diagnosing wrong answers.
        prompt += f"""
Train Example {i+1}
Question:
{example['Question']}
Answer:
{example['Incorrect Answer']}
Diagnosis: {example['Misconception ID']}
Misconception Description: {example['Misconception']}
Topic of Misconception: {example['Topic']}

"""


    # Output-format instructions: one CSV line per test example.
    prompt += """
Below are the unlabeled Test Examples. For each Test Example, provide only the most likely Misconception ID for the Test Answer from the provided list.
Don't write anything else but a sequence of lines of the format $Test_Example_Number, $Misconception_ID

"""

    # Append the unlabeled test examples.
    for i, example in enumerate(test_examples):
        prompt += f"""
Test Example {i+1}:
Question:
{example['Question']}
Test Answer:
{example['Incorrect Answer']}

"""

    return prompt
# GPT-4 API call
def get_gpt4_diagnosis(model, prompt):
    """Send `prompt` to the chat-completions endpoint and return the reply text.

    Uses a low temperature (0.2) so diagnoses are close to deterministic.
    Relies on the legacy openai==0.28 `ChatCompletion` interface pinned in
    requirements.txt.
    """
    messages = [
        {"role": "system", "content": "You are a math expert specialized in diagnosing student misconceptions."},
        {"role": "user", "content": prompt},
    ]
    completion = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.2,
        max_tokens=2000,
        frequency_penalty=0.0,
    )
    return completion.choices[0].message['content'].strip()
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
openai==0.28
|
2 |
+
pandas
|
3 |
+
tqdm
|