evanlohn committed
Commit • cf82b1c • 1 Parent(s): 980b096
README updates for reproducibility
Browse files
- README.md +22 -1
- baseline.py +80 -62
README.md CHANGED

```diff
@@ -1,6 +1,7 @@
 ---
 license: apache-2.0
 ---
+# Getting Started
 First install [Lean 4](https://leanprover-community.github.io/get_started.html). Then clone this repo:
 
 `git clone --recurse-submodules https://huggingface.co/datasets/elohn/miniCodeProps`
@@ -13,6 +14,26 @@ After cloning the repo, you will need to install [Lean REPL](https://github.com/
 The `extract.py` script is used only to create the json-formatted benchmark.
 
 The `baseline.py` script contains the code we used to get our baseline results. It shows how to interact with Lean REPL programmatically, although some interactions are still somewhat buggy, in that the REPL will send e.g. an extra newline or a weirdly formatted message that requires our script to restart the REPL.
-Regardless, if you would like to use our setup, We ran our baselines using [LLMStep](https://github.com/wellecks/llmstep). However, our code also includes a natural place to write your own function to generate tactics given the goal and file context (see `get_tactics_llmstep` in `baseline.py`). We modified the LLMStep server to return average suggestion log-probabilities per suggestion to implement best-first search
+Regardless, if you would like to use our setup: we ran our baselines using [LLMStep](https://github.com/wellecks/llmstep). However, our code also includes a natural place to write your own function to generate tactics given the goal and file context (see `get_tactics_llmstep` in `baseline.py`). We [modified the LLMStep server](https://github.com/evanlohn/llmstep) to return average suggestion log-probabilities per suggestion to implement best-first search.
 
+# Reproducing Baselines
 
+First, ensure that you have installed Lean and Lean REPL as detailed above. Before running `baseline.py` with any arguments, check that your OS has been set at the top of `utils.py`. At the moment we support interacting with Lean on macOS and Ubuntu (20.04).
+
+## Next-Step Baselines
+Our experiments were run on an A100 GPU. Smaller GPUs may not be able to run Llemma7B, but will likely work with Pythia and ntp-context.
+
+Clone [our fork of LLMStep](https://github.com/evanlohn/llmstep). After following the LLMStep setup instructions:
+- For Pythia2.8B, run `python3 python/server_vllm.py` (or, if CPU-bound, run `python3 python/server.py`)
+- For Llemma7B, run `python3 python/server_llemma.py`
+- For ntp-context-1.3B, run `python3 python/server_context.py`
+
+In another terminal, run `python baseline.py --bench_type nextstep`.
+
+## Full-Proof Baseline
+Run `export OPENAI_API_KEY=<your key here>`.
+Then, simply run
+`python3 baseline.py`
+You can also specify which OpenAI LLM to use for proof generation via
+`python3 baseline.py --gpt_model <your model name>`,
+although our tests only used gpt-4-turbo.
```
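If you want to benchmark your own next-step model instead of LLMStep, the function you plug in needs the same interface as `get_tactics_llmstep`: it receives the current goal and the preceding file contents and returns scored tactic suggestions. The sketch below is only an illustration; the endpoint, request fields, and the `(tactic, score)` response shape are assumptions, not part of this commit, so match them to whatever your `baseline.py` checkout actually consumes.

```python
# Hypothetical drop-in replacement for get_tactics_llmstep in baseline.py (illustrative only).
# Assumes the search consumes a list of (tactic_string, avg_logprob) pairs and that your
# model sits behind a simple HTTP endpoint; neither is guaranteed by this repo.
import requests

def get_tactics_my_model(goal, prev_file, host='http://localhost:5000'):
    """Ask a custom suggestion server for next tactics given the goal and file context."""
    resp = requests.post(f'{host}/suggest', json={
        'goal': goal,          # pretty-printed proof state from Lean REPL
        'context': prev_file,  # file contents preceding the open goal
    })
    resp.raise_for_status()
    # assumed response shape: {"suggestions": [["simp", -0.12], ["rfl", -0.40], ...]}
    return [(tac, float(score)) for tac, score in resp.json()['suggestions']]
```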
baseline.py CHANGED

```diff
@@ -3,6 +3,7 @@ import requests
 import time
 import os
 from openai import OpenAI
+import argparse
 
 from utils import make_lean_repl, send_tactic, send_command_icanon, send_command_zsh, get_errs
 
@@ -27,9 +28,38 @@ def get_tactics_llmstep(goal, prev_file):
     suggestions = suggest(default_host, goal, '', prev_file) # trying to match what the tactic sends
     return suggestions
 
+def send_prop_defn(lean_repl, pwd, prop_name, mathlib_out, mathlib_env):
+    print(prop_name)
+    successful_def = False
+    penult_env = None
+    while not successful_def:
+        successful_def = True
+        env = None
+        all_lines = []
+        for _loc, line in pwd[prop_name]:
+            penult_env = env
+            if line.strip() == 'import Mathlib':
+                outp, env = mathlib_out, mathlib_env
+            else:
+                outp, env = send_command(lean_repl, line, env=env)
+            if outp is None:
+                print('restarting repl')
+                successful_def = False
+                lean_repl.close()
+                lean_repl = make_lean_repl(repl_type=repl_type)
+                mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
+                break
+            all_lines.append(line)
+    return lean_repl, mathlib_out, mathlib_env, outp, env, penult_env, all_lines
+
 # for benchmarking 'get_tactics' functions that suggest several next possible steps for a given
 # proofstate + optionally file context.
-def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, search_width=10, repl_type='zsh'):
+def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, search_width=10, repl_type='zsh', logfile=None):
+    assert logfile is not None
+    def printl(*args, **kwargs):
+        print(*args, **kwargs)
+        print(*args, **kwargs, file=logfile)
+
     lean_repl = make_lean_repl(repl_type=repl_type)
 
     # get the first command out of the way which has a weird "expect" behavior using icanon mode
@@ -38,29 +68,11 @@ def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, search_wi
     num_proved = 0
     num_attempted = 0
     for prop_name in pwd:
-        print(prop_name)
         #time.sleep(5)
         num_attempted += 1
         #if num_attempted < 115:
         # continue
-
-        while not successful_def:
-            successful_def = True
-            env = None
-            all_lines = []
-            for _loc, line in pwd[prop_name]:
-                if line.strip() == 'import Mathlib':
-                    outp, env = mathlib_out, mathlib_env
-                else:
-                    outp, env = send_command(lean_repl, line, env=env)
-                if outp is None:
-                    print('restarting repl')
-                    successful_def = False
-                    lean_repl.close()
-                    lean_repl = make_lean_repl(repl_type=repl_type)
-                    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True, timeout=30)
-                    break
-                all_lines.append(line)
+        lean_repl, mathlib_out, mathlib_env, outp, env, penult_env, all_lines = send_prop_defn(lean_repl, pwd, prop_name, mathlib_out, mathlib_env)
 
         assert len(get_errs(outp)) == 0, str(outp.get('messages', []))
         proofState = int(outp['sorries'][0]['proofState'])
@@ -116,7 +128,7 @@ def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, search_wi
     print(f'proved {num_proved}/{num_attempted}')
     #exit()
 
-def get_proof_gpt(theorem_defn, goal, context):
+def get_proof_gpt(theorem_defn, goal, context, num_gen=4):
    #openai_api_key = os.environ['OPENAI_API_KEY']
    client = OpenAI()
 
@@ -124,8 +136,9 @@ def get_proof_gpt(theorem_defn, goal, context):
    # and context are given. Future work can confirm or invalidate this.
    encoded = f'<context>\n{context}\n</context>\n<theorem>\n{theorem_defn}\n</theorem>\n'
 
-
+    ret = client.chat.completions.create(
        model=gpt_model, # see main block
+        n=num_gen,
        messages=[{"role": "system", "content": "You are a Lean 4 expert tasked with completing proofs of program properties. You will be shown the relevant programs and definitions in <context>...</context> tags, the theorem to be proven in <theorem>...</theorem>. Please output your proof containing only Lean 4 proof code between <proof>...</proof> tags. The generated proof should never contain the word `sorry`. Here are some examples:"},
                  {"role": "user", "content": """<context>
 import Mathlib
@@ -209,11 +222,16 @@ induction pl using Nat.strong_induction_on generalizing l with
 exact ih (len_pairlist l2) (by linarith [hl]) l2 (by rfl)
 </proof>"""},
                  {"role": "user", "content": encoded}]
-    )
+    )#.choices[0].message.content.replace('<proof>','').replace('</proof>', '').strip()
+    return [m.message.content.replace('<proof>','').replace('</proof>', '').strip() for m in ret.choices]
 
 # for benchmarking full proof generation methods, where input is
 # file context, theorem definition, and initial proof state, and output is a full proof of the theorem.
-def benchmark_full_proofgen(pwd, get_proof, send_command, num_gen=5, repl_type='
+def benchmark_full_proofgen(pwd, get_proof, send_command, num_gen=8, repl_type='icanon', logfile=None):
+    assert logfile is not None, 'pass in a file object to write results to'
+    def printl(*args, **kwargs):
+        print(*args, **kwargs)
+        print(*args, **kwargs, file=logfile)
    lean_repl = make_lean_repl(repl_type=repl_type)
    # get the first command out of the way which has a weird "expect" behavior using icanon mode
    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
@@ -221,53 +239,44 @@ def benchmark_full_proofgen(pwd, get_proof, send_command, num_gen=5, repl_type='
    num_proved = 0
    num_attempted = 0
    for prop_name in pwd:
-        print(prop_name)
-        #time.sleep(5)
        num_attempted += 1
+        #time.sleep(5)
        #if num_attempted < 30:
        # continue
-
-        penult_env = None
-        while not successful_def:
-            successful_def = True
-            env = None
-            all_lines = []
-            for _loc, line in pwd[prop_name]:
-                penult_env = env
-                if line.strip() == 'import Mathlib':
-                    outp, env = mathlib_out, mathlib_env
-                else:
-                    outp, env = send_command(lean_repl, line, env=env)
-                if outp is None:
-                    print('restarting repl')
-                    successful_def = False
-                    lean_repl.close()
-                    lean_repl = make_lean_repl(repl_type=repl_type)
-                    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
-                    break
-                all_lines.append(line)
+        lean_repl, mathlib_out, mathlib_env, outp, env, penult_env, all_lines = send_prop_defn(lean_repl, pwd, prop_name, mathlib_out, mathlib_env)
 
        assert len(get_errs(outp)) == 0, str(outp.get('messages', []))
        context = '\n\n'.join([line for _loc, line in pwd[prop_name][:-1]])
        theorem_defn = pwd[prop_name][-1][1].replace('by sorry', 'by\n') # give the llm a clean place to begin generating
        goal = outp['sorries'][0]['goal']
        found_proof = False
-
-
-
+        sugg_proofs = get_proof(theorem_defn, goal, context, num_gen=num_gen)
+        for gen_i, suggested_proof in enumerate(sugg_proofs):
+            printl(f'generated proof {gen_i}')
+            if prop_name in suggested_proof:
+                printl('suggested proof used proof name, skipping')
+                continue # although this in theory Can be correct, LEAN DOES NOT CORRECTLY THROW ERRORS when the theorem name is used in a proof.
+                # in fact, Lean REPL will return a proofstate with empty goals and no errors! This creates false positives, so we skip these proofs.
+            if 'sorry' in suggested_proof or 'admit' in suggested_proof:
+                printl('suggested proof uses sorry/admit, skipping')
+                continue # this also isn't perfect, as I'm throwing out proofs with 'sorry' in a comment, for example.
+                # but, it's better than having false positives.
+                # although I explicitly warn against sorry in the prompt, they still pop up sometimes.
            full_thm = theorem_defn + suggested_proof
-
+            printl('suggested proof: ' + full_thm)
            outp, _result_env = send_command(lean_repl, full_thm, env=penult_env)
            if len(get_errs(outp)) == 0:
                num_proved += 1
                found_proof = True
-
-
+                printl('successful proof!')
+                printl(f'prop {prop_name} with goal <{goal}> solved by: <\n {suggested_proof}\n>')
                break
+            else:
+                printl('errors:', get_errs(outp))
        if not found_proof:
-
+            printl(f'failed to prove {prop_name}')
 
-
+    printl(f'proved {num_proved}/{num_attempted}')
 
 
 def parse_benchmark_output(fname, pwd, loc2comm):
@@ -326,12 +335,20 @@ def parse_benchmark_input(fname):
 
 if __name__ == '__main__':
    # if any single command is >1024 characters, use_icanon=True is necessary.
-    # unfortunately there
+    # unfortunately there may still be some bugs where a theorem is actually proven,
    # but the messages from Lean REPL indicate an error when using this mode.
    use_icanon = True
-
-
-
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('bench_type', type=str, default='fullproof')
+    parser.add_argument('gpt_model', type=str, default='gpt-4-turbo')
+    parser.add_argument('bench_file', type=str, default='codeprops_bench_ps.jsonl')
+
+    args = parser.parse_args()
+    assert args.bench_type in ['fullproof', 'nextstep']
+
+    bench_type = args.bench_type
+    gpt_model = args.gpt_model
 
    if use_icanon:
        send_command = send_command_icanon
@@ -340,13 +357,14 @@ if __name__ == '__main__':
        send_command = send_command_zsh
        repl_type = 'zsh'
 
-
    #benchmark_nextstep(pwd, get_tactics_interactive, send_command, repl_type=repl_type) # get_tactics_interactive for testing
 
-    pwd = parse_benchmark_input(
+    pwd = parse_benchmark_input(args.bench_file)
 
    if bench_type == 'nextstep':
-
+        with open(f'logfile_nextstep.txt', 'w') as logf:
+            benchmark_nextstep(pwd, get_tactics_llmstep, send_command, repl_type=repl_type, logfile=logf) # get_tactics_llmstep for benchmarking
    elif bench_type == 'fullproof':
-
+        with open(f'logfile_{gpt_model}.txt', 'w') as logf:
+            benchmark_full_proofgen(pwd, get_proof_gpt, send_command, repl_type=repl_type, logfile=logf)
 
```
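The search loop inside `benchmark_nextstep` is not part of this diff. As a rough illustration of the best-first search it performs over suggestions scored by average log-probability (the quantity the modified LLMStep server returns), the sketch below uses a priority queue. `get_tactics` and `apply_tactic` are stand-ins for `get_tactics_llmstep` and the `send_tactic` helper from `utils.py`; the actual bookkeeping in `baseline.py` differs in detail.

```python
# Illustrative best-first search over tactic suggestions (not the code from this commit).
import heapq
import itertools

def best_first_search(initial_state, get_tactics, apply_tactic, search_depth=3, search_width=10):
    tie = itertools.count()  # tie-breaker so the heap never compares proof states directly
    # max-heap on cumulative average log-probability, implemented by storing negated scores
    frontier = [(0.0, next(tie), initial_state, 0)]
    while frontier:
        neg_score, _, state, depth = heapq.heappop(frontier)
        if depth >= search_depth:
            continue
        for tactic, avg_logprob in get_tactics(state)[:search_width]:
            new_state, goals_closed = apply_tactic(state, tactic)
            if new_state is None:      # tactic failed to apply
                continue
            if goals_closed:           # no goals remain: proof found
                return new_state
            heapq.heappush(frontier, (neg_score - avg_logprob, next(tie), new_state, depth + 1))
    return None                        # search exhausted without closing the goal
```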
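For readers unfamiliar with how `make_lean_repl` and `send_command` drive Lean REPL, the sketch below shows the basic idea: commands go in as JSON objects on stdin and replies come back as JSON. This is a minimal sketch under stated assumptions (the REPL is started with `lake exe repl` from a project where Mathlib is available, and messages are terminated by a blank line as described in the Lean REPL README); `utils.py` additionally handles the icanon/zsh modes, timeouts, and the restarts mentioned above, which this sketch omits.

```python
# Minimal, illustrative way to drive Lean REPL (leanprover-community/repl) over stdin/stdout.
# Assumptions: `lake exe repl` runs in a project with Mathlib available, and the REPL speaks
# the JSON protocol from its README (one object per command, ended by a blank line).
import json
import subprocess

repl = subprocess.Popen(['lake', 'exe', 'repl'],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)

def send(cmd):
    """Send one JSON command and read back the JSON reply (terminated by a blank line)."""
    repl.stdin.write(json.dumps(cmd) + '\n\n')
    repl.stdin.flush()
    lines = []
    while (line := repl.stdout.readline()).strip():
        lines.append(line)
    return json.loads(''.join(lines))

env = send({'cmd': 'import Mathlib'})['env']                      # reusable environment id
out = send({'cmd': 'theorem foo : 1 + 1 = 2 := by sorry', 'env': env})
proof_state = out['sorries'][0]['proofState']                     # proof state at the sorry
print(send({'tactic': 'simp', 'proofState': proof_state}))        # apply one tactic step
```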