Languages: English
Size: n<1K
evanlohn committed
Commit 65e48a9
1 Parent(s): 5db2b5c

benchmark and baseline code

Files changed (4)
  1. baseline.py +348 -0
  2. codeprops_bench_ps.jsonl +0 -0
  3. extract.py +469 -0
  4. utils.py +186 -0
baseline.py ADDED
@@ -0,0 +1,348 @@
+ import json
+ import requests
+ import time
+ import os
+ from openai import OpenAI
+
+ from utils import make_lean_repl, send_tactic, send_command_icanon, send_command_zsh, get_errs
+
+
+ def get_tactics_interactive(goal, prev_file):
+     print(f'output:<{goal}>')
+     print(f'file context: <{prev_file}>')
+     return [(input('give the next tactic to execute:'), 0)]
+
+ # the goal is to directly call the llmstep server.py
+ def get_tactics_llmstep(goal, prev_file):
+     # this is the function lean calls to interact with the server
+     def suggest(host, tactic_state, prefix, context):
+         data = {'tactic_state': tactic_state, 'prefix': prefix, 'context': context}
+         response = json.loads(requests.post(host, json=data).content)
+         return response['suggestions']  # modified to directly return the suggestion list
+
+     HOST = 'localhost'
+     PORT = '6000'
+     default_host = f'http://{HOST}:{PORT}'
+
+     suggestions = suggest(default_host, goal, '', prev_file)  # trying to match what the tactic sends
+     return suggestions
+
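+ # NOTE: benchmark_nextstep below sorts candidate tactics by the score at index 1, so each
+ # suggestion is expected to be a (tactic, score) pair; get_tactics_interactive attaches a
+ # dummy score of 0 for this reason, and the llmstep server is assumed to return the same shape.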
+ # for benchmarking 'get_tactics' functions that suggest several next possible steps for a given
+ # proof state + optionally file context.
+ def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, repl_type='zsh'):
+     lean_repl = make_lean_repl(repl_type=repl_type)
+
+     # get the first command out of the way, which has a weird "expect" behavior using icanon mode
+     mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
+
+     num_proved = 0
+     num_attempted = 0
+     for prop_name in pwd:
+         print(prop_name)
+         #time.sleep(5)
+         num_attempted += 1
+         #if num_attempted < 30:
+         #    continue
+         successful_def = False
+         while not successful_def:
+             successful_def = True
+             env = None
+             all_lines = []
+             for _loc, line in pwd[prop_name]:
+                 if line.strip() == 'import Mathlib':
+                     outp, env = mathlib_out, mathlib_env
+                 else:
+                     outp, env = send_command(lean_repl, line, env=env)
+                 if outp is None:
+                     print('restarting repl')
+                     successful_def = False
+                     lean_repl.close()
+                     lean_repl = make_lean_repl(repl_type=repl_type)
+                     mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
+                     break
+                 all_lines.append(line)
+
+         assert len(get_errs(outp)) == 0, str(outp.get('messages', []))
+         proofState = int(outp['sorries'][0]['proofState'])
+         goal = outp['sorries'][0]['goal']
+         prev_lines = '\n'.join(all_lines)
+         prev_lines = prev_lines.replace(':= by sorry', ':= by\n')
+
+         solution_tac_seq = None
+         old_ps = [(goal, proofState, [])]
+         new_ps = []
+         found_proof = False
+         for search_lvl in range(search_depth):
+             if search_lvl > 0:
+                 print(f'search at level {search_lvl}')
+             for (curr_goal, ps, tac_seq) in old_ps:
+                 next_tactics = get_tactics(curr_goal, prev_lines)
+                 for next_tactic, _scr in sorted(next_tactics, key=lambda p: -p[1])[:3]:
+                     print('\n'.join(tac_seq + [next_tactic]))
+                     outp, new_proofState = send_tactic(lean_repl, next_tactic, ps)
+                     if outp is None:
+                         continue  # i.e. timeout/error on tactic sending
+                     #print(outp)
+                     error_msgs = get_errs(outp)
+                     if len(error_msgs) > 0:
+                         # invalid next proof step. sometimes there are invalid intermediate
+                         # states that lead to a successful proof, but for efficiency we skip them.
+                         continue
+                     if len(outp['goals']) == 0 and len(error_msgs) == 0:
+                         #print(outp)
+                         found_proof = True
+                         solution_tac_seq = tac_seq + [next_tactic]
+                         break
+                     new_ps.append(('\n'.join(outp['goals']), new_proofState, tac_seq + [next_tactic]))
+
+                 #print(f'final output: {outp}')
+                 if found_proof:
+                     break
+             if found_proof:
+                 break
+             old_ps = new_ps
+             new_ps = []
+
+
+         if found_proof:
+             num_proved += 1
+             nl = '\n'
+             print(f'prop {prop_name} with goal <{goal}> solved by: <\n {nl.join([str(s) for s in solution_tac_seq])}\n>')
+         else:
+             print(f'failed to prove {prop_name}')
+
+         print(f'proved {num_proved}/{num_attempted}')
+         #exit()
+
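+ # Note on the search above: it is breadth-limited rather than exhaustive. At each of up to
+ # search_depth levels, every open proof state is expanded with only its top 3 scored
+ # suggestions, and the first tactic sequence that closes all goals is reported.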
+ def get_proof_gpt(theorem_defn, goal, context):
+     #openai_api_key = os.environ['OPENAI_API_KEY']
+     client = OpenAI()
+
+     # decided I don't need the goal: it doesn't look very useful in most cases when the theorem statement
+     # and context are given. Future work can confirm or invalidate this.
+     encoded = f'<context>\n{context}\n</context>\n<theorem>\n{theorem_defn}\n</theorem>\n'
+
+     return client.chat.completions.create(
+         model=gpt_model,  # see main block
+         messages=[{"role": "system", "content": "You are a Lean 4 expert tasked with completing proofs of program properties. You will be shown the relevant programs and definitions in <context>...</context> tags, and the theorem to be proven in <theorem>...</theorem> tags. Please output your proof containing only Lean 4 proof code between <proof>...</proof> tags. The generated proof should never contain the word `sorry`. Here are some examples:"},
+                   {"role": "user", "content": """<context>
+ import Mathlib
+ inductive MyTree (α: Type) where
+ | leaf : MyTree α
+ | node : MyTree α → α → MyTree α → MyTree α
+
+ def tree_size : MyTree α → ℕ
+ | .leaf => 1
+ | .node l _x r => 1 + (tree_size l) + (tree_size r)
+
+ def balanced : MyTree α → Prop
+ | .leaf => true
+ | .node l _x r => ((tree_size l) = (tree_size r)) ∧ (balanced l) ∧ (balanced r)
+ </context>
+ <theorem>
+ theorem balanced_tree_size_odd (t: MyTree α) (hb: balanced t): Odd (tree_size t) := by
+ </theorem>"""},
+                   {"role": "assistant", "content": """<proof>
+ cases t with
+ | leaf => simp [tree_size]
+ | node p x q =>
+   unfold tree_size
+   unfold balanced at hb
+   simp [hb.1]
+ </proof>"""},
+                   {"role": "user", "content": """<context>
+ import Mathlib
+ inductive MyTree (α: Type) where
+ | leaf : MyTree α
+ | node : MyTree α → α → MyTree α → MyTree α
+
+ def balanced : MyTree α → Prop
+ | .leaf => true
+ | .node l _x r => ((tree_size l) = (tree_size r)) ∧ (balanced l) ∧ (balanced r)
+
+ def swap_branches : MyTree α → MyTree α
+ | MyTree.leaf => MyTree.leaf
+ | MyTree.node p x q => MyTree.node q x p
+ </context>
+ <theorem>
+ theorem swap_preserves_balance (t: MyTree α) (hb: balanced t): balanced (swap_branches t) := by
+ </theorem>"""},
+                   {"role": "assistant", "content": """<proof>
+ cases t with
+ | leaf => simp [swap_branches]
+ | node p x q =>
+   simp [swap_branches, balanced] at hb ⊢
+   split
+   { simp [← hb.1] }
+   { split; assumption }
+ </proof>"""},
+                   {"role": "user", "content": """<context>
+ import Mathlib
+ inductive PairList where
+ | empty : PairList
+ | node : Nat → Nat → PairList → PairList
+
+ def len_pairlist : PairList → Nat
+ | .empty => 0
+ | .node _n1 _n2 l => len_pairlist l + 2
+
+ lemma even_plus_two (x: Nat) (h: Even x): Even (x + 2) := by
+   unfold Even at h
+   rcases h with ⟨y, hy⟩
+   use y + 1
+   linarith [hy]
+ </context>
+ <theorem>
+ theorem len_pairlist_even (l: PairList): Even (len_pairlist l) := by
+ </theorem>"""},
+                   {"role": "assistant", "content": """<proof>
+ generalize hl: len_pairlist l = pl
+ induction pl using Nat.strong_induction_on generalizing l with
+ | h n ih => cases l with
+   | empty => simp [len_pairlist] at hl; simp [←hl];
+   | node n1 n2 l2 =>
+     unfold len_pairlist at hl
+     simp [←hl]
+     apply even_plus_two
+     exact ih (len_pairlist l2) (by linarith [hl]) l2 (by rfl)
+ </proof>"""},
+                   {"role": "user", "content": encoded}]
+     ).choices[0].message.content.replace('<proof>', '').replace('</proof>', '').strip()
+
+ # for benchmarking full proof generation methods, where the input is
+ # file context, theorem definition, and initial proof state, and the output is a full proof of the theorem.
+ def benchmark_full_proofgen(pwd, get_proof, send_command, num_gen=5, repl_type='icanon'):
+     lean_repl = make_lean_repl(repl_type=repl_type)
+     # get the first command out of the way, which has a weird "expect" behavior using icanon mode
+     mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
+
+     num_proved = 0
+     num_attempted = 0
+     for prop_name in pwd:
+         print(prop_name)
+         #time.sleep(5)
+         num_attempted += 1
+         #if num_attempted < 30:
+         #    continue
+         successful_def = False
+         penult_env = None
+         while not successful_def:
+             successful_def = True
+             env = None
+             all_lines = []
+             for _loc, line in pwd[prop_name]:
+                 penult_env = env
+                 if line.strip() == 'import Mathlib':
+                     outp, env = mathlib_out, mathlib_env
+                 else:
+                     outp, env = send_command(lean_repl, line, env=env)
+                 if outp is None:
+                     print('restarting repl')
+                     successful_def = False
+                     lean_repl.close()
+                     lean_repl = make_lean_repl(repl_type=repl_type)
+                     mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
+                     break
+                 all_lines.append(line)
+
+         assert len(get_errs(outp)) == 0, str(outp.get('messages', []))
+         context = '\n\n'.join([line for _loc, line in pwd[prop_name][:-1]])
+         theorem_defn = pwd[prop_name][-1][1].replace('by sorry', 'by\n')  # give the llm a clean place to begin generating
+         goal = outp['sorries'][0]['goal']
+         found_proof = False
+         for gen_i in range(num_gen):
+             print(f'generating proof {gen_i}')
+             suggested_proof = get_proof(theorem_defn, goal, context)
+             full_thm = theorem_defn + suggested_proof
+             print('suggested proof: ' + full_thm)
+             outp, _result_env = send_command(lean_repl, full_thm, env=penult_env)
+             if len(get_errs(outp)) == 0:
+                 num_proved += 1
+                 found_proof = True
+                 print('successful proof!')
+                 print(f'prop {prop_name} with goal <{goal}> solved by: <\n {suggested_proof}\n>')
+                 break
+         if not found_proof:
+             print(f'failed to prove {prop_name}')
+
+         print(f'proved {num_proved}/{num_attempted}')
+
+
+ def parse_benchmark_output(fname, pwd, loc2comm):
+     with open(fname, 'r') as f:
+         lines = f.readlines()
+
+     failures = set()
+     for line in lines:
+         if 'failed to prove' in line:
+             failures.add(line.strip().split(' ')[-1])
+
+     by_score = {i: [0, 0] for i in range(1, 6)}
+     by_custom = [0, 0]
+     custom_proved = []
+     all_proved = []
+     results = {}
+     for i in range(1, 87):
+         key = f'prop_{i}' if i >= 10 else f'prop_0{i}'
+         if key not in pwd:
+             continue
+         loc = [loc[0] for loc, line in pwd[key] if key in line][0]
+         line_str = int(loc.strip().split(':')[1])
+         comm = loc2comm[line_str - 1]
+         print(comm)
+         score = int(comm.split(':')[1].strip().split('/')[0].strip())
+         is_custom = 'custom' in comm
+         results[key] = {'score': score, 'result': key not in failures, 'custom': is_custom}
+         if key in failures:
+             by_score[score][1] += 1
+             if is_custom:
+                 by_custom[1] += 1
+             print(f'could not prove {key}')
+         else:
+             by_score[score][0] += 1
+             if is_custom:
+                 by_custom[0] += 1
+                 custom_proved.append(key)
+             all_proved.append((score, key))
+             print(f'proved {key}')
+
+     print('by score', by_score)
+     print('by custom', by_custom)
+     print('custom proved', custom_proved)
+     print('all proved 5', [name for score, name in all_proved if score == 5])
+     print(f'total: {len(all_proved)}/{len(pwd)}')
+     return results, by_score
+
+ def parse_benchmark_input(fname):
+     with open(fname, 'r') as f:
+         lines = f.readlines()
+
+     jl = [json.loads(line.strip()) for line in lines if len(line.strip()) > 0]
+     # dummy locations via enumerate, since they're unused during baseline calculation
+     return {dct['full_name']: list(enumerate(dct['deps'].split('\n\n') + [dct['prop_defn']])) for dct in jl}
+
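+ # Illustrative shape of one input record (values here are hypothetical; pwd_to_json in
+ # extract.py shows the fields actually written):
+ #   {"full_name": "prop_01", "prop_defn": "theorem prop_01 ... := by sorry",
+ #    "deps": "import Mathlib\n\n<supporting definitions>", "proof_state": "...",
+ #    "score": 5, "prop_loc": "...", "file_locs": [...]}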
+ if __name__ == '__main__':
+     # if any single command is >1024 characters, use_icanon=True is necessary.
+     # unfortunately there are still some bugs where a theorem is actually proven,
+     # but the messages from Lean REPL indicate an error when using this mode.
+     use_icanon = True
+     #bench_type = 'fullproof'
+     bench_type = 'nextstep'
+     gpt_model = 'gpt-4-turbo'
+
+     if use_icanon:
+         send_command = send_command_icanon
+         repl_type = 'icanon'
+     else:
+         send_command = send_command_zsh
+         repl_type = 'zsh'
+
+
+     #benchmark_nextstep(pwd, get_tactics_interactive, send_command, repl_type=repl_type)  # get_tactics_interactive for testing
+
+     pwd = parse_benchmark_input('codeprops_bench_lemmas.jsonl')
+
+     if bench_type == 'nextstep':
+         benchmark_nextstep(pwd, get_tactics_llmstep, send_command, repl_type=repl_type)  # get_tactics_llmstep for benchmarking
+     elif bench_type == 'fullproof':
+         benchmark_full_proofgen(pwd, get_proof_gpt, send_command, repl_type=repl_type)
codeprops_bench_ps.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
extract.py ADDED
@@ -0,0 +1,469 @@
+ import os
+ from enum import Enum
+ import pexpect
+ import json
+ import re
+ from tqdm import tqdm
+ import git
+ import pickle
+ import requests
+ from collections import defaultdict
+
+ from utils import make_lean_repl, send_tactic, send_command_icanon, send_command_zsh, BASE_PATH
+
+ class ParseState(Enum):
+     defn = 0
+     inductive = 1
+     block_comment = 2
+     prop = 3
+     mutual = 4
+
+ def loc_tag(fname, line_ind):
+     return f'{fname}:{line_ind}'
+
+ def parse_file(fname):
+     lines = None
+     with open(fname, 'r') as f:
+         lines = f.readlines()
+     imports = []
+     defns = []
+     props = []
+     comments = []
+
+     prev_state = None
+     state = None
+     data = []
+
+     def change_state(new_state, line_ind, line):
+         nonlocal data, state, prev_state
+         route_map = {ParseState.defn: defns, ParseState.inductive: defns,
+                      ParseState.prop: props, ParseState.block_comment: comments,
+                      ParseState.mutual: defns}
+         if state in route_map:
+             route_map[state].append(data)
+             data = []
+         if new_state in route_map:
+             data = [(loc_tag(fname, line_ind), line)]
+         prev_state = state
+         state = new_state
+
+     for line_ind, line in enumerate(lines):
+         line_str = line.strip()
+         if state == ParseState.block_comment:  # end of block comment: reset state
+             if line_str.endswith('-/'):
+                 state = prev_state
+                 prev_state = None
+             continue
+         elif line_str.startswith('--'):  # inline comment: maintain state
+             comments.append((loc_tag(fname, line_ind), line))
+             continue
+         elif line_str.startswith('/-'):  # start block comment
+             change_state(ParseState.block_comment, line_ind, line)
+             continue
+         elif line_str.startswith('mutual'):
+             change_state(ParseState.mutual, line_ind, line)
+             continue
+         elif line_str.startswith('end') and state == ParseState.mutual:
+             # manually handle mutual stuff; it's pretty annoying
+             data.append((loc_tag(fname, line_ind), line))
+             change_state(None, line_ind, line)
+             continue
+         elif state == ParseState.mutual:
+             data.append((loc_tag(fname, line_ind), line))
+             continue
+         elif line.startswith('import'):
+             assert state is None
+             imports.append(line)
+             continue
+         elif line_str.startswith('def prop'):  # one of the propositions to prove
+             change_state(ParseState.prop, line_ind, line)
+         elif line_str.startswith('def') or line_str.startswith('lemma') or line_str.startswith('theorem'):  # a function definition
+             change_state(ParseState.defn, line_ind, line)
+         elif line_str.startswith('inductive'):
+             change_state(ParseState.inductive, line_ind, line)
+         elif len(line_str) == 0:
+             change_state(None, line_ind, line)
+         else:
+             data.append((loc_tag(fname, line_ind), line))
+
+     change_state(None, -1, '')  # handle EOF
+
+
+     return imports, defns, props, comments
+
+
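+ # Illustrative example (hypothetical file contents): parsing a file consisting of
+ #   import Mathlib
+ #   (blank line)
+ #   def add_one (n: Nat) : Nat := n + 1
+ #   -- helper
+ #   def prop_01 ...
+ # yields imports == ['import Mathlib\n'], one entry in defns for add_one, the inline
+ # comment in comments, and one entry in props for prop_01; each defn/prop entry is a
+ # list of (location_tag, line) pairs.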
+ def process_defns(defns):
+     new_defns = []
+     for defn in defns:
+         inds, lines = zip(*defn)
+         prop_text = ''.join(lines)
+         ind = min(inds)
+         max_ind = max(inds)
+
+         if lines[0].strip().startswith('mutual'):
+             # manually process mutual defns
+             names = []
+             for line in lines:
+                 if line.strip().startswith('def'):
+                     inner_name = [s for s in line.strip().split(' ') if len(s) > 0][1]
+                     names.append(inner_name)
+                     #names.append(f'_root_.{inner_name}')
+         else:
+             names = [[s for s in prop_text.split(' ') if len(s) > 0][1].strip()]
+
+         for name in names:
+             if name.endswith(':'):
+                 name = name[:-1]
+             new_defns.append(((ind, max_ind), name, prop_text))
+     return new_defns
+
+ # take in a raw parsed prop (a list of lines), and output the corresponding lemma for a theorem prover to prove
+ def process_prop(prop, default_proof=':= by sorry'):
+     inds, lines = zip(*prop)
+     prop_text = ''.join(lines)
+     ind = min(inds)
+     max_ind = max(inds)
+     name = prop_text.split(' ')[1]
+     assert prop_text[:3] == 'def'
+     prop2 = 'theorem' + prop_text[3:]
+     # TBD what the default proof should be; different setups might want different things, e.g. tactic mode just wants
+     # a 'by', proof term generation wants nothing, and a proof rewriter that expects an initial valid state might want
+     # 'by sorry'.
+     prop2 = prop2.strip().replace(':=', ':') + f'{default_proof}'
+     return ((ind, max_ind), name, prop2)
+
+ #NOTE: if I eventually choose to handle chained dependencies, I'll want this to return a dictionary representing
+ # the dependency graph of imports. As of now I have the code for adding in other LeanSrc import definitions, but
+ # I'm not dealing with the import order in my later sort.
+ #
+ # collect all the (location_tag, definition_text) definitions from other files in LeanSrc specified
+ # by import_names
+ def collect_import_defns(import_names):
+     import_names = import_names[:]  # don't modify original list
+     defns = []
+     seen = set()
+     while len(import_names) > 0:
+         imp = import_names.pop()
+         if imp in seen:
+             continue
+         seen.add(imp)
+         i, d, p, c = parse_file(f'{BASE_PATH}/{imp}.lean')
+         import_names += [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
+         defns += d
+     return defns
+
+ # errors I have come across:
+ #   "function expected at\n <ident>\nterm has type"
+ #   "unknown identifier '<ident>'"
+ def match_error(err_str):
+     m1 = re.search(r'expected at\n\s+(.+)\nterm has', err_str)
+     if m1 is not None:
+         return m1.group(1)
+     m2 = re.search(r"unknown (identifier|constant) '(.+)'", err_str)
+     if m2 is not None:
+         return m2.group(2)
+     if 'invalid dotted identifier notation' in err_str:
+         return err_str.strip().split(' ')[-1]
+     print(f'ERROR: err string <<{err_str}>> is not a recognized error pattern')
+     exit()
+     return None
+
+ # these are a result of other things not being defined, and don't contain semantic information about what definition to add.
+ # note that I AM NOT IGNORING THESE ERRORS IN THE FINAL PROP WITH DEPS, i.e. I only output the prop with deps once I have
+ # no errors at all. These are just ignored for the purpose of finding new dependencies.
+ ignore_errs = ['equality or iff proof expected', 'invalid occurrence of universe level',
+                'function is not recursive', 'failed to prove termination', 'unsolved goals',
+                'invalid field notation', 'result is not type correct',
+                'invalid argument, variable is not a proposition', 'tactic failed. Possible reasons']
+ def collect_relevant_defns(prop, defns, lean_repl, env, import_order):
+     """
+     Collect the functions and type definitions used in a prop from a list of defns sourced from the
+     current file and potentially from other libraries, although for now I'm not handling mathlib.
+     This relies on names being unique, so please don't shadow any names in the files you're importing.
+     """
+     # use _env because we want all definition dependence checks to be based on the original env
+     outp, _env = send_command(lean_repl, prop, env=env)  # ignore the resulting env; we just want to see the errors
+     errors = [m for m in outp['messages'] if m['severity'] == 'error']
+     #print(errors)
+     seen = set()
+     seen_locs = set()
+     all_deps = []
+
+     while True:
+         # reset to the original environment
+         env2 = env
+         all_deps = order_deps(all_deps, import_order)
+         errors = []
+         seen_err = set()
+         for defn in all_deps:
+             #print()
+             #print(defn[1])
+             outp, env2 = send_command(lean_repl, defn[1], env=env2)
+             tmp = [(m, defn[1]) for m in outp.get('messages', []) if m['severity'] == 'error' and m['data'] not in seen_err]
+             errors += tmp
+             for m, _ in tmp:
+                 seen_err.add(m['data'])
+         # env2 is the environment after all dependencies have been added.
+         #print('new iteration outp:', outp)
+         #print('new iteration errs:', errors)
+         #errors = [m for m in outp.get('messages', []) if m['severity'] == 'error']
+
+         # if the dependencies are added without error, also add in the prop.
+         if len(errors) == 0:
+             outp, env2 = send_command(lean_repl, prop, env=env2)
+             errors = [(m, prop) for m in outp.get('messages', []) if m['severity'] == 'error']
+
+         if len(errors) == 0:  # all dependencies plus the prop statement produce no errors
+             break
+
+         while len(errors) > 0:
+             err, err_cause = errors.pop()
+             if any([uerr in err['data'] for uerr in ignore_errs]):
+                 continue
+             if 'invalid pattern variable, must be atomic' in err['data']:
+                 found_ind = False
+                 defn_line = err_cause.split('\n')[0]
+                 for ident in defn_line.strip().split(' '):
+                     if ident in defns and ident not in seen:
+                         found_ind = True
+                         cp = err.copy()
+                         cp['data'] = f"unknown identifier '{ident}'"  # spoof a better error message
+                         #print('FOUND INDUCTIVE:', cp['data'])
+                         errors.append((cp, err_cause))
+                 if not found_ind:
+                     print('ERROR: failed to resolve inductive type pattern var problem')
+                     exit()
+                 continue
+
+             ident_str = match_error(err['data'])
+             ident_str = ident_str.replace('_root_.', '')
+             #print(ident_str, ident_str in defns)
+             if ident_str not in defns:
+                 print(f"ERROR: couldn't find identifier {ident_str}")
+                 print(err)
+                 exit()
+                 continue
+             if ident_str in seen:
+                 continue
+             # don't add the same defn twice
+             #print(f'ERROR: circular dependency: {ident_str}')
+             seen.add(ident_str)
+
+             if defns[ident_str][0] in seen_locs:
+                 continue
+             seen_locs.add(defns[ident_str][0])
+             all_deps.append(defns[ident_str])
+
+     return all_deps
+
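+ # order_deps sorts collected definitions so that definitions from files earlier in the
+ # import order come first, and definitions within a file keep their original line order;
+ # import_rank flattens the (file, line) pair into a single integer key.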
+ def order_deps(defns, import_order):
+     if len(defns) == 0:
+         return defns
+     order_map = {fname: i for i, fname in enumerate(import_order)}
+     line_nums = [int(defn[0][0].split(':')[-1]) for defn in defns]
+     max_line_num = max(line_nums)
+     def import_rank(defn):
+         fpath, line_ind = defn[0][0].split(':')
+         fname = re.search(BASE_PATH + r'/(\S+)\.lean', fpath).group(1)
+         # the +1 ensures the last line of one file cannot collide with the first line of the next
+         return order_map[fname] * (max_line_num + 1) + int(line_ind)
+     return sorted(defns, key=import_rank)
+
+ def extract_file_props(fname, full_path, send_command, default_proof=':= by sorry', repl_type='zsh'):
+     # imports, definitions (code and types), propositions, and comments.
+     # d, p, and c are lists of lists; each sublist contains the original lines of the file that comprise
+     # the definition, proposition, or comment.
+     i, d, p, c = parse_file(full_path)
+
+     imp_names = [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
+     imp_d = collect_import_defns(imp_names)
+
+     all_d = imp_d + d
+     import_order = imp_names + [fname]  # imports go first
+     all_d = process_defns(all_d)
+     defns_by_name = {name: (ind, defn) for ind, name, defn in all_d}
+
+
+     props = [process_prop(prop, default_proof=default_proof) for prop in p]
+     #TODO
+     lemma_props = [(ind, name, defn.split('\n')[0].strip().replace('lemma', 'theorem').replace(':= by', default_proof))
+                    for ind, name, defn in all_d if defn.strip().startswith('lemma')]
+     props = lemma_props  # + props
+     #props_by_name = {name: (ind, defn) for ind, name, defn in props}
+     lean_repl = make_lean_repl(repl_type=repl_type)
+
+     props_with_deps = {}
+
+     outp, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
+     ct = 0
+     for prop_loc, prop_name, prop in tqdm(props, desc='analyzing and loading lean code + properties'):
+         ct += 1
+         env = mathlib_env
+
+         all_deps = collect_relevant_defns(prop, defns_by_name, lean_repl, env, import_order)
+
+         for defn in all_deps:
+             print(defn[-1])
+             outp, env = send_command(lean_repl, defn[-1], env=env)
+         print('final output of deps', outp)
+
+         outp, env = send_command(lean_repl, prop, env=env)
+         for message in outp['messages']:
+             if message['severity'] == 'error':
+                 print(f'error at prop {prop_name}')
+                 print(message)
+                 print()
+                 exit()
+         props_with_deps[prop_name] = all_deps + [(prop_loc, prop)]
+
+     lean_repl.close()
+     #print(lean_repl.exitstatus, lean_repl.signalstatus)
+     return props_with_deps, c
+
+ def output_prop_with_deps(prop, prop_name, folder='LeanSrc/benchmark'):
+     lines = '\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in prop])
+     with open(os.path.join(folder, prop_name + '.lean'), 'w') as f:
+         f.write(lines)
+
+ def convert_file_props(fname, new_fname):
+     i, d, p, c = parse_file(f'{BASE_PATH}/{fname}.lean')
+     imp_names = [imp_str.split('.')[-1].strip() for imp_str in i if 'LeanSrc' in imp_str]
+     imp_d = collect_import_defns(imp_names)
+
+     all_d = imp_d + d
+     import_order = imp_names + [fname]  # imports go first
+     all_d = process_defns(all_d)
+     defns_by_name = {name: (ind, defn) for ind, name, defn in all_d}
+
+     props = [process_prop(prop) for prop in p]
+
+     with open(new_fname, 'w') as f:
+         defn_lines = '\n'.join([defn for _, _, defn in all_d])
+         f.write(defn_lines + '\n')
+
+         prop_lines = '\n'.join([prop for _, _, prop in props])
+         f.write(prop_lines + '\n')
+
+ def format_llema_input(pwd, lean_url, lean_sha):
+     dcts = []
+     for prop_name in pwd:
+         lines = '\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in pwd[prop_name]])
+         lines = lines.replace(':= by sorry', '')
+         loc, _ = pwd[prop_name][-1]  # the last line comes from the prop of interest
+         fpath = loc.split(':')[0]
+         dct = {'full_name': prop_name,
+                'statement': lines,
+                'url': lean_url,
+                'commit': lean_sha,
+                'file_path': fpath,
+                'split': 'valid'}
+
+         dcts.append(json.dumps(dct) + '\n')
+     with open('leancb_lemma_inp.jsonl', 'w') as f:
+         f.writelines(dcts)
+
+ def pwd_to_json(pwd, send_command, loc2comm, repl_type='zsh'):
+
+     lean_repl = make_lean_repl(repl_type=repl_type)
+
+     outp, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
+     assert len([m for m in outp.get('messages', []) if m['severity'] == 'error']) == 0, str(outp)
+
+     dcts = []
+     for prop_name in pwd:
+         deps = '\n\n'.join(['import Mathlib'] + [code_lines for _loc, code_lines in pwd[prop_name][:-1]])
+         prop_loc, prop_defn = pwd[prop_name][-1]  # the last line comes from the prop of interest
+         fpath = prop_loc[0].split(':')[0]
+         cline = int(prop_loc[0].split(':')[1]) - 1
+         score = 5
+         if cline in loc2comm:
+             comm = loc2comm[cline]
+             if 'core: ' in comm:  # allow for (S/s)core
+                 score = int(comm.split('core:')[1].strip().split('/')[0].strip())
+
+         env = mathlib_env
+         for _loc, code_lines in pwd[prop_name]:
+             outp, env = send_command(lean_repl, code_lines, env=env)
+         ps = outp['sorries'][0]['goal']
+
+         locs = [loc for loc, _code_lines in pwd[prop_name]]
+         fname2line = defaultdict(lambda: 0)
+         for loc in locs:
+             fpath, line_num = loc[1].split(':')
+             fname2line[fpath] = max(fname2line[fpath], int(line_num))
+
+         dct = {'full_name': prop_name,
+                'prop_defn': prop_defn,
+                'prop_loc': prop_loc[0],
+                'score': score,
+                'deps': deps,
+                'proof_state': ps,
+                'file_locs': [(fpath, fname2line[fpath]) for fpath in fname2line]}
+
+         dcts.append(json.dumps(dct) + '\n')
+     with open('codeprops_bench.jsonl', 'w') as f:
+         f.writelines(dcts)
+
+
+ if __name__ == '__main__':
+     #main_fname = 'Properties'
+     main_fname = 'Sorts'
+
+     #convert_file_props(main_fname, os.path.join(folder, 'all_props.lean'))
+
+     main_full_path = f'{BASE_PATH}/{main_fname}.lean'
+     """
+     pwd = extract_file_props(main_fname, main_full_path)  # props with deps
+     for prop_name in pwd:
+         output_prop_with_deps(pwd[prop_name], prop_name, folder=folder)
+     """
+
+     use_icanon = True
+
+     if use_icanon:
+         send_command = send_command_icanon
+         repl_type = 'icanon'
+     else:
+         send_command = send_command_zsh
+         repl_type = 'zsh'
+
+
+     rerun = True
+     if rerun:
+         pwd, comments = extract_file_props(main_fname, main_full_path, send_command, repl_type=repl_type)  # props with deps
+         with open(f'comm_{main_fname}.pkl', 'wb') as f:
+             pickle.dump(comments, f)
+         with open(f'pwd_{main_fname}.pkl', 'wb') as f:
+             pickle.dump(pwd, f)
+     else:
+         with open(f'pwd_{main_fname}.pkl', 'rb') as f:
+             pwd = pickle.load(f)
+         with open(f'comm_{main_fname}.pkl', 'rb') as f:
+             comments = pickle.load(f)
+
+     loc2comm = {}
+     for loc, comm in comments:
+         fname, line_str = loc.strip().split(':')
+         if fname != main_full_path:
+             continue
+         loc2comm[int(line_str.strip())] = comm
+
+     # use to test specific props
+     #pwd_spec = {}
+     #test_pname = 'prop_29'
+     #pwd_spec[test_pname] = pwd[test_pname]
+     #pwd = pwd_spec
+
+     pwd_to_json(pwd, send_command, loc2comm, repl_type=repl_type)
+
+     #data, by_score = parse_benchmark_output('bench_out_pythia.txt', pwd, loc2comm)
+
+     #sorries = outp['sorries']
+     #for sorry in sorries:
+     #    ps = sorry['proofState']
+     #    # also has 'pos', 'endPos'
+     #    goal = sorry['goal']
+     #    send_tactic
utils.py ADDED
@@ -0,0 +1,186 @@
+ import pexpect
+ import json_stream
+ import json
+
+ LEAN_REPL_CMD = 'lake env ../repl/.lake/build/bin/repl'
+ SHELL_CMD = '/bin/sh'  # zsh doesn't play well with non-canonical input: https://github.com/samapriya/geeup/issues/41
+ BASE_PATH = 'LeanSrc/LeanSrc'
+
+ def make_lean_repl(repl_type='zsh'):
+     # there are maximum line lengths that can be sent over a tty; this would cause some of what we send to
+     # the lean repl to be cut off at e.g. 1024 characters on MacOS unless we use this workaround.
+     # See https://pexpect.readthedocs.io/en/stable/api/pexpect.html#pexpect.spawn.send
+     print('making repl')
+     if repl_type == 'icanon':
+         lean_repl = pexpect.spawn(SHELL_CMD, cwd='LeanSrc', maxread=5000, timeout=20, echo=False)
+         lean_repl.sendline('stty -icanon')  # this is the magic that allows a bunch of chars to be sent
+         lean_repl.sendline(LEAN_REPL_CMD)
+         print(lean_repl.readline())
+         print(lean_repl.readline())
+     elif repl_type == 'zsh':
+         lean_repl = pexpect.spawn(LEAN_REPL_CMD, cwd='LeanSrc', maxread=1025, timeout=5)
+
+     return lean_repl
+
+ # To reliably read multiple json objects from a single byte string where the objects are
+ # separated by newlines but the content also contains newlines, we treat the byte string
+ # as a stream using the json-stream python library, and consume from the stream repeatedly until
+ # we reach the end of the byte array.
+ #
+ # The simplest thing to do is just feed json-stream the byte array one byte at a time, but this
+ # produces errors due to multi-byte unicode characters being split. So, we solve this in another
+ # simple way: produce chunks up to and including the next '}' byte. This makes sure weird byte sequences
+ # get sent together and thus decoded correctly.
+ def json_stream_itr(bstr):
+     start = 0
+     for end in range(len(bstr)):
+         #print(bstr[end:end+1], b'}')
+         if bstr[end:end+1] == b'}':
+             yield bstr[start: end + 1]
+             start = end + 1
+
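+ # For example, json_stream_itr(b'{"a": 1}\n{"b": 2}') yields b'{"a": 1}' and then
+ # b'\n{"b": 2}'; the leading newline is harmless whitespace to the json parser.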
+ def load_mult_json(jsons):
+     #NOTE: when using the non-canonical input mode and /bin/sh to spawn lean repl,
+     # sometimes multiple json lines get output! I collect them all into a single
+     # dictionary to return.
+     itr = json_stream_itr(jsons)
+     ps_out = {'messages': []}
+     while True:
+         try:
+             data = json_stream.load(itr, persistent=True)
+         except StopIteration:
+             break
+         data.read_all()
+         data = json_stream.to_standard_types(data)
+         for k in data:
+             # special case: for some reason when there is a single message the key is not plural, and
+             # is just a string, which is a problem for later when I try to look at the error severity.
+             if k == 'message':
+                 if data[k].strip().startswith('Lean error:'):
+                     dct = {'severity': 'error', 'data': data[k].replace('Lean error:', '')}
+                 else:
+                     dct = {'severity': 'warning', 'data': data[k]}
+                     print('got unexpected non-error message', dct)  #TODO: if these don't occur in practice we can delete this
+                     exit()
+                 ps_out['messages'].append(dct)
+             elif isinstance(data[k], list) and k in ps_out:
+                 ps_out[k].extend(data[k])
+             else:
+                 assert k not in ps_out, k + ',' + str(ps_out[k])
+                 ps_out[k] = data[k]
+     #ps_out = json_stream.to_standard_types(ps_out)
+     #print('yo', ps_out)
+
+     #ps_out = json.loads(output)
+     assert ps_out is not None, 'parsing failed: ' + jsons.decode()
+     print('parsed output:', ps_out)
+     return ps_out
+
+ def make_repl_command(def_str, env=None):
+     jsn = {'cmd': def_str}
+     if env is not None:
+         jsn['env'] = env
+     return json.dumps(jsn)
+
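+ # For example, make_repl_command('import Mathlib') gives '{"cmd": "import Mathlib"}' and
+ # make_repl_command('def f := 1', env=0) gives '{"cmd": "def f := 1", "env": 0}', which is
+ # the command format the Lean REPL reads from stdin.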
+ # This is the standard way to send commands to lean repl using most normal shells in canonical input mode.
+ # unfortunately, in canonical input mode there is a maximum number of characters that can be sent over the terminal:
+ # see https://pexpect.readthedocs.io/en/stable/api/pexpect.html#pexpect.spawn.send for more details.
+ # unfortunately this can't be solved by just chunking up the message to send; we're not sure why, but
+ # lean repl stubbornly insists it has only received the first 1024 (on Mac) characters.
+ #
+ # you can use send_command_icanon to handle sending longer strings, although we have found that the messages
+ # lean repl sends change for some reason, causing some inconsistencies in how we parse successful or failed proofs.
+ def send_command_zsh(repl, command, env=None, timeout=5, first=False):
+     rpl_comm = make_repl_command(command, env=env)
+     #print(rpl_comm)
+     """
+     num_splits = len(rpl_comm)//1024 + 1
+     for split in range(num_splits):
+         #print(rpl_comm)  # NOTE: uncomment to see everything being sent to lean repl
+         spl_comm = rpl_comm[split*1024:(split+1)*1024]
+         print(spl_comm)
+
+         #print('sent and expecting:')
+         #print(len(rpl_comm), rpl_comm)
+         if split < num_splits - 1:
+             repl.sendline(spl_comm)
+         else:
+             repl.sendline(spl_comm)
+             repl.expect_exact(rpl_comm+'\r\n')
+     """
+     #print(repl.readline())
+     #repl.sendline()
+     #repl.expect_exact('\r\n')
+     #print('sup', repl.readline())
+     repl.sendline(rpl_comm)
+     repl.expect_exact(rpl_comm + '\r\n')
+
+     repl.sendline()
+     repl.expect_exact('\r\n')
+
+     _index = repl.expect(r'env": \d+\}', timeout=timeout)
+     env_str = repl.match.group().decode()
+     new_env = int(env_str.split(':')[1].strip()[:-1])
+     output = repl.before + repl.match.group()
+     #print('js outp', output)
+     return load_mult_json(output), new_env
+
+
+ def send_command_icanon(repl, command, env=None, timeout=20, first=False):
+     print('sending command:', command)
+     rpl_comm = make_repl_command(command, env=env)
+
+     repl.sendline(rpl_comm + '\n')
+     if first:
+         repl.expect_exact('~>')
+     else:
+         try:
+             repl.expect_exact('\r\n\r\n')
+         except pexpect.exceptions.TIMEOUT:
+             print('did not find newlines')
+     try:
+         _index = repl.expect(r'env": \d+\}', timeout=timeout)
+     except pexpect.exceptions.TIMEOUT:
+         print('did not find env in output')
+         return None, None
+
+     env_str = repl.match.group().decode()
+     new_env = int(env_str.split(':')[1].strip()[:-1])
+     output = repl.before + repl.match.group()
+     return load_mult_json(output), new_env
+
+ def make_repl_tactic(tac, proofState):
+     return json.dumps({'tactic': tac, 'proofState': proofState})
+
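+ # For example, make_repl_tactic('simp', 3) gives '{"tactic": "simp", "proofState": 3}',
+ # the tactic-mode request format of the Lean REPL.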
+ def send_tactic(repl, tactic, proofState):
+     rpl_comm = make_repl_tactic(tactic, proofState)
+
+     #repl.sendline(rpl_comm)
+     #repl.expect_exact(rpl_comm + "\r\n")
+     #repl.sendline()
+     #repl.expect_exact("\r\n")
+     repl.sendline(rpl_comm + '\n')
+
+     # the icanon mode sometimes sends a single dict with just one field: 'message'.
+     # when we get that with a lean error, the tactic failed, so we return None.
+     # callers treat a None output as a timeout/error, so timeouts are caught here too.
+     try:
+         _index = repl.expect(r'(]})|("}\r\n\r\n)', timeout=10)
+     except pexpect.exceptions.TIMEOUT:
+         print("FAILED DUE TO TIMEOUT")
+         return None, None
+
+     #ps_str = repl.match.group().decode()
+     #new_ps = int(ps_str.split(':')[1].strip()[:-1])
+     output = repl.before + repl.match.group()
+     #print('js outp 2', output)
+     ps_out = load_mult_json(output)
+     print('ps out', ps_out)
+     if 'proofState' not in ps_out:
+         return None, None
+     return ps_out, ps_out['proofState']
+
+ def get_errs(outp):
+     return [m for m in outp.get('messages', []) if m.get('severity', 'error') == 'error']