import json
import argparse

import requests
from openai import OpenAI

from utils import make_lean_repl, send_tactic, send_command_icanon, send_command_zsh, get_errs


def get_tactics_interactive(goal, prev_file):
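    """Interactive tactic source for manual debugging: show the current goal and
    file context, then read the next tactic from stdin. Returns a single
    (tactic, score) pair so it is interchangeable with the model-backed sources."""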
    print(f'output:<{goal}>')
    print(f'file context: <{prev_file}>')
    return [(input('give the next tactic to execute:'), 0)]


def get_tactics_llmstep(goal, prev_file):
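    """Get tactic suggestions from a locally running llmstep server (assumed to
    be listening on localhost:6000). Returns a list of (tactic, score) pairs in
    the format consumed by benchmark_nextstep."""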
    def suggest(host, tactic_state, prefix, context):
        data = {'tactic_state': tactic_state, 'prefix': prefix, 'context': context}
        response = json.loads(requests.post(host, json=data).content)
        return response['suggestions']

    HOST = 'localhost'
    PORT = '6000'
    default_host = f'http://{HOST}:{PORT}'

    return suggest(default_host, goal, '', prev_file)


def send_prop_defn(lean_repl, pwd, prop_name, mathlib_out, mathlib_env, send_command, repl_type):
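    """Send each definition line of prop_name (its dependencies followed by the
    theorem statement ending in `sorry`) to the Lean REPL. `import Mathlib`
    lines are answered from the cached (mathlib_out, mathlib_env) instead of
    being re-run; if the REPL stops responding, it is restarted and the whole
    definition is replayed from scratch. Returns the (possibly restarted) REPL,
    the Mathlib cache, the final output and env, the env just before the final
    line (penult_env), and the list of lines sent."""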
    print(prop_name)
    successful_def = False
    penult_env = None
    while not successful_def:
        successful_def = True
        env = None
        all_lines = []
        for _loc, line in pwd[prop_name]:
            penult_env = env
            if line.strip() == 'import Mathlib':
                # reuse the cached environment rather than re-importing Mathlib
                outp, env = mathlib_out, mathlib_env
            else:
                outp, env = send_command(lean_repl, line, env=env)
            if outp is None:
                # the REPL died (e.g. timed out); restart it and replay the definition
                print('restarting repl')
                successful_def = False
                lean_repl.close()
                lean_repl = make_lean_repl(repl_type=repl_type)
                mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
                break
            all_lines.append(line)
    return lean_repl, mathlib_out, mathlib_env, outp, env, penult_env, all_lines


def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, search_width=10, repl_type='zsh', logfile=None):
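    """Next-step benchmark: breadth-limited search over proof states. At each of
    `search_depth` levels, every open proof state is expanded with the top
    `search_width` tactic candidates from `get_tactics`. Results are written to
    both stdout and `logfile`."""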
    assert logfile is not None, 'pass in a file object to write results to'

    def printl(*args, **kwargs):
        # print to stdout and to the logfile
        print(*args, **kwargs)
        print(*args, **kwargs, file=logfile)

    lean_repl = make_lean_repl(repl_type=repl_type)

    # importing Mathlib is slow, so do it once and cache the resulting environment
    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True, timeout=30)

    num_proved = 0
    num_attempted = 0
    for prop_name in pwd:
        num_attempted += 1

        lean_repl, mathlib_out, mathlib_env, outp, env, penult_env, all_lines = send_prop_defn(
            lean_repl, pwd, prop_name, mathlib_out, mathlib_env, send_command, repl_type)

        assert len(get_errs(outp)) == 0, str(outp.get('messages', []))
        proofState = int(outp['sorries'][0]['proofState'])
        goal = outp['sorries'][0]['goal']
        prev_lines = '\n'.join(all_lines)
        prev_lines = prev_lines.replace(':= by sorry', ':= by\n')

        solution_tac_seq = None
        old_ps = [(goal, proofState, [])]  # (goal text, proof state id, tactics so far)
        new_ps = []
        found_proof = False
        for search_lvl in range(search_depth):
            if search_lvl > 0:
                printl(f'search at level {search_lvl}')
            for (curr_goal, ps, tac_seq) in old_ps:
                next_tactics = get_tactics(curr_goal, prev_lines + '\n'.join(tac_seq))
                for next_tactic, _scr in sorted(next_tactics, key=lambda p: -p[1])[:search_width]:
                    if prop_name in next_tactic:
                        # disallow circular proofs that invoke the theorem being proven
                        continue

                    outp, new_proofState = send_tactic(lean_repl, next_tactic, ps)
                    if outp is None:
                        # the tactic crashed or timed out; skip it
                        continue

                    error_msgs = get_errs(outp)
                    if len(error_msgs) > 0:
                        continue

                    if len(outp['goals']) == 0:
                        # no goals remain: the proof is complete
                        found_proof = True
                        solution_tac_seq = tac_seq + [next_tactic]
                        break
                    new_ps.append(('\n'.join(outp['goals']), new_proofState, tac_seq + [next_tactic]))

                if found_proof:
                    break
            if found_proof:
                break
            old_ps = new_ps
            new_ps = []

        if found_proof:
            num_proved += 1
            nl = '\n'
            printl(f'prop {prop_name} with goal <{goal}> solved by: <\n {nl.join([str(s) for s in solution_tac_seq])}\n>')
        else:
            printl(f'failed to prove {prop_name}')

    printl(f'proved {num_proved}/{num_attempted}')


def get_proof_gpt(theorem_defn, goal, context, num_gen=4):
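    """Ask an OpenAI chat model for `num_gen` candidate proofs of a theorem,
    prompting with three few-shot examples. Relies on the module-level
    `gpt_model` set in __main__ and on OPENAI_API_KEY being set in the
    environment; `goal` is accepted for interface compatibility but unused."""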
    client = OpenAI()

    encoded = f'<context>\n{context}\n</context>\n<theorem>\n{theorem_defn}\n</theorem>\n'

    ret = client.chat.completions.create(
        model=gpt_model,
        n=num_gen,
        messages=[
            {"role": "system", "content": "You are a Lean 4 expert tasked with completing proofs of program properties. You will be shown the relevant programs and definitions in <context>...</context> tags, the theorem to be proven in <theorem>...</theorem>. Please output your proof containing only Lean 4 proof code between <proof>...</proof> tags. The generated proof should never contain the word `sorry`. Here are some examples:"},
            {"role": "user", "content": """<context>
import Mathlib
inductive MyTree (α: Type) where
| leaf : MyTree α
| node : MyTree α → α → MyTree α → MyTree α

def tree_size : MyTree α → ℕ
| .leaf => 1
| .node l _x r => 1 + (tree_size l) + (tree_size r)

def balanced : MyTree α → Prop
| .leaf => true
| .node l _x r => ((tree_size l) = (tree_size r)) ∧ (balanced l) ∧ (balanced r)
</context>
<theorem>
theorem balanced_tree_size_odd (t: MyTree α) (hb: balanced t): Odd (tree_size t) := by
</theorem>"""},
            {"role": "assistant", "content": """<proof>
cases t with
| leaf => simp [tree_size]
| node p x q =>
  unfold tree_size
  unfold balanced at hb
  simp [hb.1]
</proof>"""},
            {"role": "user", "content": """<context>
import Mathlib
inductive MyTree (α: Type) where
| leaf : MyTree α
| node : MyTree α → α → MyTree α → MyTree α

def balanced : MyTree α → Prop
| .leaf => true
| .node l _x r => ((tree_size l) = (tree_size r)) ∧ (balanced l) ∧ (balanced r)

def swap_branches : MyTree α → MyTree α
| MyTree.leaf => MyTree.leaf
| MyTree.node p x q => MyTree.node q x p
</context>
<theorem>
theorem swap_preserves_balance (t: MyTree α) (hb: balanced t): balanced (swap_branches t) := by
</theorem>"""},
            {"role": "assistant", "content": """<proof>
cases t with
| leaf => simp [swap_branches]
| node p x q =>
  simp [swap_branches, balanced] at hb ⊢
  split
  { simp [← hb.1] }
  { split; assumption }
</proof>"""},
            {"role": "user", "content": """<context>
import Mathlib
inductive PairList where
| empty : PairList
| node : Nat → Nat → PairList → PairList

def len_pairlist : PairList → Nat
| .empty => 0
| .node _n1 _n2 l => len_pairlist l + 2

lemma even_plus_two (x: Nat) (h: Even x): Even (x + 2) := by
  unfold Even at h
  rcases h with ⟨y, hy⟩
  use y + 1
  linarith [hy]
</context>
<theorem>
theorem len_pairlist_even (l: PairList): Even (len_pairlist l) := by
</theorem>"""},
            {"role": "assistant", "content": """<proof>
generalize hl: len_pairlist l = pl
induction pl using Nat.strong_induction_on generalizing l with
| h n ih => cases l with
  | empty => simp [len_pairlist] at hl; simp [←hl];
  | node n1 n2 l2 =>
    unfold len_pairlist at hl
    simp [←hl]
    apply even_plus_two
    exact ih (len_pairlist l2) (by linarith [hl]) l2 (by rfl)
</proof>"""},
            {"role": "user", "content": encoded},
        ],
    )
    return [m.message.content.replace('<proof>', '').replace('</proof>', '').strip() for m in ret.choices]


def benchmark_full_proofgen(pwd, get_proof, send_command, num_gen=8, repl_type='icanon', logfile=None):
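    """Full-proof benchmark: for each property, request `num_gen` candidate
    proofs from `get_proof` and check each in the Lean REPL (in the environment
    just before the theorem statement) until one succeeds. Results are written
    to both stdout and `logfile`."""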
    assert logfile is not None, 'pass in a file object to write results to'

    def printl(*args, **kwargs):
        # print to stdout and to the logfile
        print(*args, **kwargs)
        print(*args, **kwargs, file=logfile)

    lean_repl = make_lean_repl(repl_type=repl_type)

    # importing Mathlib is slow, so do it once and cache the resulting environment
    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)

    num_proved = 0
    num_attempted = 0
    for prop_name in pwd:
        num_attempted += 1

        lean_repl, mathlib_out, mathlib_env, outp, env, penult_env, all_lines = send_prop_defn(
            lean_repl, pwd, prop_name, mathlib_out, mathlib_env, send_command, repl_type)

        assert len(get_errs(outp)) == 0, str(outp.get('messages', []))
        context = '\n\n'.join([line for _loc, line in pwd[prop_name][:-1]])
        theorem_defn = pwd[prop_name][-1][1].replace('by sorry', 'by\n')
        goal = outp['sorries'][0]['goal']
        found_proof = False
        sugg_proofs = get_proof(theorem_defn, goal, context, num_gen=num_gen)
        for gen_i, suggested_proof in enumerate(sugg_proofs):
            printl(f'generated proof {gen_i}')
            if prop_name in suggested_proof:
                printl('suggested proof used prop name, skipping')
                continue

            if 'sorry' in suggested_proof or 'admit' in suggested_proof:
                printl('suggested proof uses sorry/admit, skipping')
                continue

            # check the candidate in the environment preceding the theorem statement
            full_thm = theorem_defn + suggested_proof
            printl('suggested proof: ' + full_thm)
            outp, _result_env = send_command(lean_repl, full_thm, env=penult_env)
            if len(get_errs(outp)) == 0:
                num_proved += 1
                found_proof = True
                printl('successful proof!')
                printl(f'prop {prop_name} with goal <{goal}> solved by: <\n {suggested_proof}\n>')
                break
            else:
                printl('errors:', get_errs(outp))
        if not found_proof:
            printl(f'failed to prove {prop_name}')

    printl(f'proved {num_proved}/{num_attempted}')


def parse_benchmark_output(fname, pwd, loc2comm):
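    """Tally proved/failed properties from a benchmark logfile, grouped by the
    difficulty score (1-5) and 'custom' tag recorded in source comments. Note:
    expects each entry of `pwd` to carry a source location whose first element
    is a 'file:line' string (a richer format than parse_benchmark_input
    produces); `loc2comm` maps zero-based line numbers to the comment on that
    line."""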
    with open(fname, 'r') as f:
        lines = f.readlines()

    # any prop named in a 'failed to prove' line counts as a failure
    failures = set()
    for line in lines:
        if 'failed to prove' in line:
            failures.add(line.strip().split(' ')[-1])

    by_score = {i: [0, 0] for i in range(1, 6)}  # score -> [num proved, num failed]
    by_custom = [0, 0]  # [num proved, num failed] among 'custom' props
    custom_proved = []
    all_proved = []
    results = {}
    for i in range(1, 87):  # props are named prop_01 .. prop_86
        key = f'prop_{i}' if i >= 10 else f'prop_0{i}'
        if key not in pwd:
            continue
        # locate the line that states this prop and look up its annotation comment
        loc = [loc[0] for loc, line in pwd[key] if key in line][0]
        line_num = int(loc.strip().split(':')[1])
        comm = loc2comm[line_num - 1]
        print(comm)
        score = int(comm.split(':')[1].strip().split('/')[0].strip())
        is_custom = 'custom' in comm
        results[key] = {'score': score, 'result': key not in failures, 'custom': is_custom}
        if key in failures:
            by_score[score][1] += 1
            if is_custom:
                by_custom[1] += 1
            print(f'could not prove {key}')
        else:
            by_score[score][0] += 1
            if is_custom:
                by_custom[0] += 1
                custom_proved.append(key)
            all_proved.append((score, key))
            print(f'proved {key}')

    print('by score', by_score)
    print('by custom', by_custom)
    print('custom proved', custom_proved)
    print('all proved 5', [name for score, name in all_proved if score == 5])
    print(f'total: {len(all_proved)}/{len(pwd)}')
    return results, by_score


def parse_benchmark_input(fname):
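    """Load a JSONL benchmark file into {prop_name: [(idx, line), ...]}, where
    the lines are the prop's dependencies (split on blank lines) followed by the
    prop's own definition."""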
    with open(fname, 'r') as f:
        lines = f.readlines()

    jl = [json.loads(line.strip()) for line in lines if len(line.strip()) > 0]

    return {dct['full_name']: list(enumerate(dct['deps'].split('\n\n') + [dct['prop_defn']])) for dct in jl}


if __name__ == '__main__':
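    # Usage: python <this script> [fullproof|nextstep] [gpt_model] [bench_file]
    # All three positionals are optional; defaults are 'fullproof',
    # 'gpt-4-turbo', and 'codeprops_bench_ps.jsonl'.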
    # choose between the two REPL wrappers provided by utils
    use_icanon = True

    parser = argparse.ArgumentParser()
    # nargs='?' makes these positionals optional so their defaults actually apply
    parser.add_argument('bench_type', type=str, nargs='?', default='fullproof')
    parser.add_argument('gpt_model', type=str, nargs='?', default='gpt-4-turbo')
    parser.add_argument('bench_file', type=str, nargs='?', default='codeprops_bench_ps.jsonl')

    args = parser.parse_args()
    assert args.bench_type in ['fullproof', 'nextstep']

    bench_type = args.bench_type
    gpt_model = args.gpt_model  # read as a module-level global by get_proof_gpt

    if use_icanon:
        send_command = send_command_icanon
        repl_type = 'icanon'
    else:
        send_command = send_command_zsh
        repl_type = 'zsh'

    pwd = parse_benchmark_input(args.bench_file)

    if bench_type == 'nextstep':
        with open('logfile_nextstep.txt', 'w') as logf:
            benchmark_nextstep(pwd, get_tactics_llmstep, send_command, repl_type=repl_type, logfile=logf)
    elif bench_type == 'fullproof':
        with open(f'logfile_{gpt_model}.txt', 'w') as logf:
            benchmark_full_proofgen(pwd, get_proof_gpt, send_command, repl_type=repl_type, logfile=logf)