import json
import os
import pickle
import re
from collections import defaultdict
from enum import Enum

from tqdm import tqdm

from utils import make_lean_repl, send_tactic, send_command_icanon, send_command_zsh, BASE_PATH


class ParseState(Enum):
    """Parser states for the line-oriented Lean source scanner."""
    defn = 0
    inductive = 1
    block_comment = 2
    prop = 3
    mutual = 4


def loc_tag(fname, line_ind):
    """Return a 'filename:line' location tag."""
    return f'{fname}:{line_ind}'


def parse_file(fname):
    """Split a Lean file into imports, definitions, props, and comments.

    Returns four lists; each defns/props entry is a list of (loc_tag, line)
    pairs covering the lines of one declaration.
    """
    with open(fname, 'r') as f:
        lines = f.readlines()
    imports = []
    defns = []
    props = []
    comments = []

    prev_state = None
    state = None
    data = []

    def change_state(new_state, line_ind, line):
        # Flush the lines accumulated for the current declaration into the
        # list matching its parse state, then start accumulating a new one.
        nonlocal data, state, prev_state
        route_map = {ParseState.defn: defns, ParseState.inductive: defns,
                     ParseState.prop: props, ParseState.block_comment: comments,
                     ParseState.mutual: defns}
        if state in route_map:
            route_map[state].append(data)
            data = []
        if new_state in route_map:
            data = [(loc_tag(fname, line_ind), line)]
        prev_state = state
        state = new_state

    for line_ind, line in enumerate(lines):
        line_str = line.strip()
        if state == ParseState.block_comment:
            # Assumes the block comment closer appears at the end of a line.
            if line_str.endswith('-/'):
                state = prev_state
                prev_state = None
            continue
        elif line_str.startswith('--'):
            comments.append((loc_tag(fname, line_ind), line))
            continue
        elif line_str.startswith('/-'):
            change_state(ParseState.block_comment, line_ind, line)
            continue
        elif line_str.startswith('mutual'):
            change_state(ParseState.mutual, line_ind, line)
            continue
        elif line_str.startswith('end') and state == ParseState.mutual:
            # 'end' closes the mutual block; keep it with the block's lines.
            data.append((loc_tag(fname, line_ind), line))
            change_state(None, line_ind, line)
            continue
        elif state == ParseState.mutual:
            data.append((loc_tag(fname, line_ind), line))
            continue
        elif line.startswith('import'):
            assert state is None
            imports.append(line)
            continue
        elif line_str.startswith('def prop'):
            change_state(ParseState.prop, line_ind, line)
        elif line_str.startswith('def') or line_str.startswith('lemma') or line_str.startswith('theorem'):
            change_state(ParseState.defn, line_ind, line)
        elif line_str.startswith('inductive'):
            change_state(ParseState.inductive, line_ind, line)
        elif len(line_str) == 0:
            change_state(None, line_ind, line)
        else:
            data.append((loc_tag(fname, line_ind), line))

    # Flush whatever declaration was still open at end of file.
    change_state(None, -1, '')

    return imports, defns, props, comments
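
# Illustrative sketch (not part of the pipeline): given a hypothetical file
# Foo.lean containing
#
#   import Mathlib
#   -- a comment
#   def add1 (n : Nat) : Nat := n + 1
#   def prop_add1 := forall n, add1 n = n + 1
#
# parse_file('Foo.lean') would return roughly:
#   imports  == ['import Mathlib\n']
#   defns    == [[('Foo.lean:2', 'def add1 (n : Nat) : Nat := n + 1\n')]]
#   props    == [[('Foo.lean:3', 'def prop_add1 := forall n, add1 n = n + 1\n')]]
#   comments == [('Foo.lean:1', '-- a comment\n')]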


def process_defns(defns):
    """Turn raw (loc, line) groups into ((first_loc, last_loc), name, text) triples."""
    new_defns = []
    for defn in defns:
        inds, lines = zip(*defn)
        prop_text = ''.join(lines)
        # Lines arrive in file order, so take the first/last tags directly
        # (min()/max() on 'file:line' strings would compare lexicographically).
        ind = inds[0]
        max_ind = inds[-1]

        if lines[0].strip().startswith('mutual'):
            # A mutual block defines several names; record each of them.
            names = []
            for line in lines:
                if line.strip().startswith('def'):
                    inner_name = [s for s in line.strip().split(' ') if len(s) > 0][1]
                    names.append(inner_name)
        else:
            names = [[s for s in prop_text.split(' ') if len(s) > 0][1].strip()]

        for name in names:
            if name.endswith(':'):
                name = name[:-1]
            new_defns.append(((ind, max_ind), name, prop_text))
    return new_defns
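
# Sketch: a two-function mutual block yields one entry per defined name, all
# sharing the same text and location span, e.g. (hypothetical names)
#   [(('Foo.lean:10', 'Foo.lean:14'), 'isEven', '<whole mutual block>'),
#    (('Foo.lean:10', 'Foo.lean:14'), 'isOdd',  '<whole mutual block>')]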


def process_prop(prop, default_proof=':= by sorry'):
    """Rewrite a 'def prop... := <statement>' into a sorried theorem."""
    inds, lines = zip(*prop)
    prop_text = ''.join(lines)
    ind = inds[0]
    max_ind = inds[-1]
    name = prop_text.split(' ')[1]
    assert prop_text[:3] == 'def'
    prop2 = 'theorem' + prop_text[3:]
    # Assumes the only ':=' in the text introduces the statement body; it is
    # turned into ':' so the statement becomes the theorem's type.
    prop2 = prop2.strip().replace(':=', ':') + f'{default_proof}'
    return ((ind, max_ind), name, prop2)
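
# For example (hypothetical prop), process_prop maps
#   def prop_add1 := forall n, add1 n = n + 1
# to
#   theorem prop_add1 : forall n, add1 n = n + 1:= by sorry
# (the current string surgery inserts no space before ':=').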


def collect_import_defns(import_names):
    """Recursively gather definitions from project-local (LeanSrc) imports."""
    import_names = import_names[:]
    defns = []
    seen = set()
    while len(import_names) > 0:
        imp = import_names.pop()
        if imp in seen:
            continue
        seen.add(imp)
        i, d, p, c = parse_file(f'{BASE_PATH}/{imp}.lean')
        # Follow transitive project imports; Mathlib imports are ignored here.
        import_names += [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
        defns += d
    return defns


def match_error(err_str):
    """Extract the identifier that a Lean error message complains about."""
    m1 = re.search(r'expected at\n\s+(.+)\nterm has', err_str)
    if m1 is not None:
        return m1.group(1)
    m2 = re.search(r"unknown (identifier|constant) '(.+)'", err_str)
    if m2 is not None:
        return m2.group(2)
    if 'invalid dotted identifier notation' in err_str:
        return err_str.strip().split(' ')[-1]
    print(f'ERROR: err string <<{err_str}>> is not a recognized error pattern')
    exit()
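
# For instance, match_error("unknown identifier 'add1'") returns 'add1', and a
# dotted-notation error whose message ends in 'Foo.bar' returns 'Foo.bar'
# (hypothetical identifiers).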


# Error messages that do not indicate a missing dependency; they are skipped
# while resolving the definitions a prop depends on.
ignore_errs = ['equality or iff proof expected', 'invalid occurrence of universe level',
               'function is not recursive', 'failed to prove termination', 'unsolved goals',
               'invalid field notation', 'result is not type correct',
               'invalid argument, variable is not a proposition', 'tactic failed. Possible reasons']


def collect_relevant_defns(prop, defns, lean_repl, env, import_order, send_command):
    """
    Collect the functions and type definitions used in a prop, from a dict of defns
    sourced from the current file and its project imports (mathlib is not handled yet).
    This relies on names being unique, so don't shadow names across imported files.

    Strategy: repeatedly run the accumulated dependencies plus the prop through the
    REPL; for every unknown-identifier style error, pull in the named definition,
    until everything elaborates cleanly.
    """
    seen = set()
    seen_locs = set()
    all_deps = []

    while True:
        env2 = env
        all_deps = order_deps(all_deps, import_order)
        errors = []
        seen_err = set()
        for defn in all_deps:
            outp, env2 = send_command(lean_repl, defn[1], env=env2)
            tmp = [(m, defn[1]) for m in outp.get('messages', []) if m['severity'] == 'error' and m['data'] not in seen_err]
            errors += tmp
            for m, _ in tmp:
                seen_err.add(m['data'])

        if len(errors) == 0:
            outp, env2 = send_command(lean_repl, prop, env=env2)
            errors = [(m, prop) for m in outp.get('messages', []) if m['severity'] == 'error']

        if len(errors) == 0:
            break

        while len(errors) > 0:
            err, err_cause = errors.pop()
            if any([uerr in err['data'] for uerr in ignore_errs]):
                continue
            if 'invalid pattern variable, must be atomic' in err['data']:
                # A known defn is pattern-matched before its inductive type is
                # loaded; requeue it as an unknown-identifier error to resolve it.
                found_ind = False
                defn_line = err_cause.split('\n')[0]
                for ident in defn_line.strip().split(' '):
                    if ident in defns and ident not in seen:
                        found_ind = True
                        cp = err.copy()
                        cp['data'] = f"unknown identifier '{ident}'"
                        errors.append((cp, err_cause))
                if not found_ind:
                    print('ERROR: failed to resolve inductive type pattern var problem')
                    exit()
                continue

            ident_str = match_error(err['data'])
            ident_str = ident_str.replace('_root_.', '')

            if ident_str not in defns:
                print(f'ERROR: couldnt find identifier {ident_str}')
                print(err)
                exit()
            if ident_str in seen:
                continue

            seen.add(ident_str)

            # Several names can share one declaration (e.g. a mutual block);
            # only add the underlying text once.
            if defns[ident_str][0] in seen_locs:
                continue
            seen_locs.add(defns[ident_str][0])
            all_deps.append(defns[ident_str])

    return all_deps
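
# Resolution sketch (hypothetical names): if the prop fails with
# "unknown identifier 'sortAux'", the 'sortAux' defn is appended; re-running may
# then fail on the inductive type 'MyList' it matches over, which is appended in
# turn, until deps + prop elaborate with no errors.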


def order_deps(defns, import_order):
    """Sort defns by (position of their file in import order, line number)."""
    if len(defns) == 0:
        return defns
    order_map = {fname: i for i, fname in enumerate(import_order)}
    line_nums = [int(defn[0][0].split(':')[-1]) for defn in defns]
    max_line_num = max(line_nums)

    def import_rank(defn):
        fpath, line_ind = defn[0][0].split(':')
        fname = re.search(BASE_PATH + r'/(\S+)\.lean', fpath).group(1)
        # Weight by file rank (strictly larger than any line number) so every
        # line in an earlier file sorts before any line in a later one.
        return order_map[fname] * (max_line_num + 1) + int(line_ind)

    return sorted(defns, key=import_rank)
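
# E.g. with import_order ['Defs', 'Sorts'] (hypothetical), a defn at Defs:40
# ranks before one at Sorts:3, regardless of the raw line numbers.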


def extract_file_props(fname, full_path, send_command, default_proof=':= by sorry', repl_type='zsh'):
    """For each prop/lemma in a file, collect the defns it depends on and verify
    that deps + statement elaborate in a fresh Mathlib REPL environment."""
    i, d, p, c = parse_file(full_path)

    imp_names = [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
    imp_d = collect_import_defns(imp_names)

    all_d = imp_d + d
    import_order = imp_names + [fname]
    all_d = process_defns(all_d)
    defns_by_name = {name: (ind, defn) for ind, name, defn in all_d}

    props = [process_prop(prop, default_proof=default_proof) for prop in p]

    # Turn each 'lemma ...' statement line into a sorried theorem as well.
    lemma_props = [(ind, name, defn.split('\n')[0].strip().replace('lemma', 'theorem').replace(':= by', default_proof))
                   for ind, name, defn in all_d if defn.strip().startswith('lemma')]
    # NOTE: as written, only the lemma-derived props are kept; the 'def prop...'
    # statements computed just above are discarded.
    props = lemma_props

    lean_repl = make_lean_repl(repl_type=repl_type)

    props_with_deps = {}

    outp, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
    for prop_loc, prop_name, prop in tqdm(props, desc='analyzing and loading lean code + properties'):
        env = mathlib_env

        all_deps = collect_relevant_defns(prop, defns_by_name, lean_repl, env, import_order, send_command)

        # Replay the dependencies, then the prop itself, failing loudly on errors.
        for defn in all_deps:
            print(defn[-1])
            outp, env = send_command(lean_repl, defn[-1], env=env)
        print('final output of deps', outp)

        outp, env = send_command(lean_repl, prop, env=env)
        for message in outp['messages']:
            if message['severity'] == 'error':
                print(f'error at prop {prop_name}')
                print(message)
                print()
                exit()
        props_with_deps[prop_name] = all_deps + [(prop_loc, prop)]

    lean_repl.close()

    return props_with_deps, c
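
# Return-shape sketch (hypothetical entries): props_with_deps maps each prop name
# to its ordered dependency list plus the sorried statement itself, e.g.
#   {'foo_spec': [(('Foo.lean:2', 'Foo.lean:2'), 'def foo ...'),
#                 (('Foo.lean:8', 'Foo.lean:8'), 'theorem foo_spec ...:= by sorry')]}
# The second element of the returned pair is the parsed comment list.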


def output_prop_with_deps(prop, prop_name, folder='LeanSrc/benchmark'):
    """Write a prop and its dependencies to a standalone .lean file."""
    lines = '\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in prop])
    with open(os.path.join(folder, prop_name + '.lean'), 'w') as f:
        f.write(lines)


def convert_file_props(fname, new_fname):
    """Inline a file's project-import defns and rewrite its props as sorried theorems."""
    i, d, p, c = parse_file(f'{BASE_PATH}/{fname}.lean')
    imp_names = [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
    imp_d = collect_import_defns(imp_names)

    all_d = process_defns(imp_d + d)

    props = [process_prop(prop) for prop in p]

    with open(new_fname, 'w') as f:
        defn_lines = '\n'.join([defn for _, _, defn in all_d])
        f.write(defn_lines + '\n')

        prop_lines = '\n'.join([prop for _, _, prop in props])
        f.write(prop_lines + '\n')


def format_llema_input(pwd, lean_url, lean_sha):
    """Dump props (minus proofs) to JSONL in the llemma-style evaluation format."""
    dcts = []
    for prop_name in pwd:
        lines = '\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in pwd[prop_name]])
        lines = lines.replace(':= by sorry', '')
        loc, _ = pwd[prop_name][-1]
        # loc is a (first_tag, last_tag) pair of 'file:line' strings.
        fpath = loc[0].split(':')[0]
        dct = {'full_name': prop_name,
               'statement': lines,
               'url': lean_url,
               'commit': lean_sha,
               'file_path': fpath,
               'split': 'valid'}
        dcts.append(json.dumps(dct) + '\n')
    with open('leancb_lemma_inp.jsonl', 'w') as f:
        f.writelines(dcts)
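
# One emitted JSONL record looks roughly like (values hypothetical):
#   {"full_name": "foo_spec", "statement": "import Mathlib\n...", "url": "...",
#    "commit": "...", "file_path": "LeanSrc/Sorts.lean", "split": "valid"}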


def pwd_to_json(pwd, send_command, loc2comm, repl_type='zsh'):
    """Replay each prop and its deps in a fresh REPL, grab the sorry goal state,
    and dump everything to codeprops_bench.jsonl."""
    lean_repl = make_lean_repl(repl_type=repl_type)

    outp, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
    assert len([m for m in outp.get('messages', []) if m['severity'] == 'error']) == 0, str(outp)

    dcts = []
    for prop_name in pwd:
        deps = '\n\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in pwd[prop_name][:-1]])
        prop_loc, prop_defn = pwd[prop_name][-1]
        cline = int(prop_loc[0].split(':')[1]) - 1  # the comment line directly above the prop
        score = 5
        if cline in loc2comm:
            comm = loc2comm[cline]
            # 'core: ' is a substring of both 'score: ' and 'Score: ', so this
            # matches the difficulty annotation regardless of capitalization.
            if 'core: ' in comm:
                score = int(comm.split('core:')[1].strip().split('/')[0].strip())

        env = mathlib_env
        for _loc, code_lines in pwd[prop_name]:
            outp, env = send_command(lean_repl, code_lines, env=env)
        # The last command is the sorried theorem, so its goal is the proof state.
        ps = outp['sorries'][0]['goal']

        # Record, per source file, the last line any dependency came from.
        locs = [loc for loc, _code_lines in pwd[prop_name]]
        fname2line = defaultdict(lambda: 0)
        for loc in locs:
            fpath, line_num = loc[1].split(':')
            fname2line[fpath] = max(fname2line[fpath], int(line_num))

        dct = {'full_name': prop_name,
               'prop_defn': prop_defn,
               'prop_loc': prop_loc[0],
               'score': score,
               'deps': deps,
               'proof_state': ps,
               'file_locs': [(fpath, fname2line[fpath]) for fpath in fname2line]}

        dcts.append(json.dumps(dct) + '\n')
    with open('codeprops_bench.jsonl', 'w') as f:
        f.writelines(dcts)
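
# Example record shape (values hypothetical):
#   {"full_name": "foo_spec", "prop_defn": "theorem foo_spec ...:= by sorry",
#    "prop_loc": "LeanSrc/Sorts.lean:12", "score": 5, "deps": "import Mathlib\n\n...",
#    "proof_state": "⊢ ∀ (n : ℕ), foo n = n + 1",
#    "file_locs": [["LeanSrc/Sorts.lean", 14]]}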


if __name__ == '__main__':
    main_fname = 'Sorts'

    main_full_path = f'{BASE_PATH}/{main_fname}.lean'

    use_icanon = True

    if use_icanon:
        send_command = send_command_icanon
        repl_type = 'icanon'
    else:
        send_command = send_command_zsh
        repl_type = 'zsh'

    # Alternative output: write each prop and its deps to a standalone file.
    # pwd, _comments = extract_file_props(main_fname, main_full_path, send_command, repl_type=repl_type)
    # for prop_name in pwd:
    #     output_prop_with_deps(pwd[prop_name], prop_name, folder='LeanSrc/benchmark')

    # Extract props once and cache them to disk; set rerun=False to reuse the pickles.
    rerun = True
    if rerun:
        pwd, comments = extract_file_props(main_fname, main_full_path, send_command, repl_type=repl_type)
        with open(f'comm_{main_fname}.pkl', 'wb') as f:
            pickle.dump(comments, f)
        with open(f'pwd_{main_fname}.pkl', 'wb') as f:
            pickle.dump(pwd, f)
    else:
        with open(f'pwd_{main_fname}.pkl', 'rb') as f:
            pwd = pickle.load(f)
        with open(f'comm_{main_fname}.pkl', 'rb') as f:
            comments = pickle.load(f)

    # Map comment line numbers (main file only) to their text, for score parsing.
    loc2comm = {}
    for loc, comm in comments:
        fname, line_str = loc.strip().split(':')
        if fname != main_full_path:
            continue
        loc2comm[int(line_str.strip())] = comm

    pwd_to_json(pwd, send_command, loc2comm, repl_type=repl_type)