acecalisto3 committed on
Commit d3ab0a0 · verified · 1 Parent(s): 84e193d

Update app.py

Files changed (1)
  1. app.py +73 -81
app.py CHANGED
@@ -1,7 +1,9 @@
 import os
 import subprocess
-import torch
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+
+import streamlit as st
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+
 from agent.prompts import (
     ACTION_PROMPT,
     ADD_PROMPT,
@@ -16,33 +18,33 @@ from agent.prompts import (
 )
 from agent.utils import parse_action, parse_file_content, read_python_module_structure
 
-# Initialize Hugging Face model and tokenizer
-TOKENIZER = AutoTokenizer.from_pretrained("typefully/rag-tokenbert-3B")
-MODEL = AutoModelForSeq2SeqLM.from_pretrained("typefully/rag-tokenbert-3B")
-PIPELINE = pipeline('text-generation', model=MODEL, tokenizer=TOKENIZER, device=-1)
+# Hugging Face model and tokenizer setup
+model_name = "gpt2"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
 
 VERBOSE = False
 MAX_HISTORY = 100
 
-def hf_run_gpt(prompt_template, stop_tokens, max_length, module_summary, purpose, **prompt_kwargs):
-    content = PREFIX.format(module_summary=module_summary, purpose=purpose) + prompt_template.format(**prompt_kwargs)
+def run_gpt(prompt_template, stop_tokens, max_tokens, module_summary, purpose, **prompt_kwargs):
+    content = PREFIX.format(
+        module_summary=module_summary,
+        purpose=purpose,
+    ) + prompt_template.format(**prompt_kwargs)
     if VERBOSE:
-        print(LOG_PROMPT.format(content))
-
-    input_seq = TOKENIZER(content, return_tensors='pt', truncation=True, padding='longest')['input_ids']
-    output_sequences = PIPELINE(input_seq, max_length=max_length, num_return_sequences=1, do_sample=False)
-    resp = TOKENIZER.decode(output_sequences[0]['generated_text'], skip_special_tokens=True)
-
+        st.write(LOG_PROMPT.format(content))
+    resp = generator(content, max_length=max_tokens, stop=stop_tokens)[0]["generated_text"]
     if VERBOSE:
-        print(LOG_RESPONSE.format(resp))
+        st.write(LOG_RESPONSE.format(resp))
     return resp
 
 def compress_history(purpose, task, history, directory):
     module_summary, _, _ = read_python_module_structure(directory)
-    resp = hf_run_gpt(
+    resp = run_gpt(
         COMPRESS_HISTORY_PROMPT,
         stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_length=512,
+        max_tokens=512,
         module_summary=module_summary,
         purpose=purpose,
         task=task,
@@ -53,17 +55,19 @@ def compress_history(purpose, task, history, directory):
 
 def call_main(purpose, task, history, directory, action_input):
     module_summary, _, _ = read_python_module_structure(directory)
-    resp = hf_run_gpt(
+    resp = run_gpt(
         ACTION_PROMPT,
         stop_tokens=["observation:", "task:"],
-        max_length=256,
+        max_tokens=256,
         module_summary=module_summary,
         purpose=purpose,
         task=task,
         history=history,
     )
-    lines = resp.strip().split("\n")
+    lines = resp.strip().strip("\n").split("\n")
     for line in lines:
+        if line == "":
+            continue
         if line.startswith("thought: "):
             history += "{}\n".format(line)
         elif line.startswith("action: "):
@@ -81,7 +85,9 @@ def call_test(purpose, task, history, directory, action_input):
         text=True,
     )
     if result.returncode != 0:
-        history += "observation: there are no tests! Test should be written in a test folder under {}\n".format(directory)
+        history += "observation: there are no tests! Test should be written in a test folder under {}\n".format(
+            directory
+        )
         return "MAIN", None, history, task
     result = subprocess.run(
         ["python", "-m", "pytest", directory], capture_output=True, text=True
@@ -90,10 +96,10 @@ def call_test(purpose, task, history, directory, action_input):
         history += "observation: tests pass\n"
         return "MAIN", None, history, task
     module_summary, content, _ = read_python_module_structure(directory)
-    resp = hf_run_gpt(
+    resp = run_gpt(
         UNDERSTAND_TEST_RESULTS_PROMPT,
         stop_tokens=[],
-        max_length=256,
+        max_tokens=256,
         module_summary=module_summary,
         purpose=purpose,
         task=task,
@@ -106,10 +112,10 @@ def call_test(purpose, task, history, directory, action_input):
 
 def call_set_task(purpose, task, history, directory, action_input):
     module_summary, content, _ = read_python_module_structure(directory)
-    task = hf_run_gpt(
+    task = run_gpt(
         TASK_PROMPT,
         stop_tokens=[],
-        max_length=64,
+        max_tokens=64,
         module_summary=module_summary,
         purpose=purpose,
         task=task,
@@ -123,11 +129,13 @@ def call_read(purpose, task, history, directory, action_input):
         history += "observation: file does not exist\n"
         return "MAIN", None, history, task
     module_summary, content, _ = read_python_module_structure(directory)
-    f_content = content.get(action_input, "< document is empty >")
-    resp = hf_run_gpt(
+    f_content = (
+        content[action_input] if content[action_input] else "< document is empty >"
+    )
+    resp = run_gpt(
         READ_PROMPT,
         stop_tokens=[],
-        max_length=256,
+        max_tokens=256,
         module_summary=module_summary,
         purpose=purpose,
         task=task,
@@ -142,12 +150,18 @@ def call_modify(purpose, task, history, directory, action_input):
     if not os.path.exists(action_input):
         history += "observation: file does not exist\n"
         return "MAIN", None, history, task
-    module_summary, content, _ = read_python_module_structure(directory)
-    f_content = content.get(action_input, "< document is empty >")
-    resp = hf_run_gpt(
+    (
+        module_summary,
+        content,
+        _,
+    ) = read_python_module_structure(directory)
+    f_content = (
+        content[action_input] if content[action_input] else "< document is empty >"
+    )
+    resp = run_gpt(
         MODIFY_PROMPT,
         stop_tokens=["action:", "thought:", "observation:"],
-        max_length=2048,
+        max_tokens=2048,
         module_summary=module_summary,
         purpose=purpose,
         task=task,
@@ -178,10 +192,10 @@ def call_add(purpose, task, history, directory, action_input):
         os.makedirs(d)
     if not os.path.exists(action_input):
         module_summary, _, _ = read_python_module_structure(directory)
-        resp = hf_run_gpt(
+        resp = run_gpt(
             ADD_PROMPT,
             stop_tokens=["action:", "thought:", "observation:"],
-            max_length=2048,
+            max_tokens=2048,
             module_summary=module_summary,
             purpose=purpose,
             task=task,
@@ -202,53 +216,31 @@ def call_add(purpose, task, history, directory, action_input):
         history += "observation: file already exists\n"
     return "MAIN", None, history, task
 
-NAME_TO_FUNC = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "MODIFY-FILE": call_modify,
-    "READ-FILE": call_read,
-    "ADD-FILE": call_add,
-    "TEST": call_test,
-}
-
-def run_action(purpose, task, history, directory, action_name, action_input):
-    if action_name == "COMPLETE":
-        exit(0)
-
-    # compress the history when it is long
-    if len(history.split("\n")) > MAX_HISTORY:
-        if VERBOSE:
-            print("COMPRESSING HISTORY")
-        history = compress_history(purpose, task, history, directory)
+# Streamlit UI
+st.title("AI Powered Code Assistant")
 
-    assert action_name in NAME_TO_FUNC
+with st.sidebar:
+    st.header("Task Configuration")
+    purpose = st.text_input("Purpose")
+    task = st.text_input("Task")
+    directory = st.text_input("Directory")
+    action_input = st.text_input("Action Input")
+    action = st.selectbox("Action", ["main", "test", "set_task", "read", "modify", "add"])
 
-    print("RUN: ", action_name, action_input)
-    return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
-
-def run(purpose, directory, task=None):
+if st.button("Run Action"):
     history = ""
-    action_name = "UPDATE-TASK" if task is None else "MAIN"
-    action_input = None
-    while True:
-        print("")
-        print("")
-        print("---")
-        print("purpose:", purpose)
-        print("task:", task)
-        print("---")
-        print(history)
-        print("---")
-
-        action_name, action_input, history, task = run_action(
-            purpose,
-            task,
-            history,
-            directory,
-            action_name,
-            action_input,
-        )
-
-if __name__ == "__main__":
-    # Example usage
-    run("Your purpose here", "path/to/your/directory")
+    if action == "main":
+        action_name, action_input, history, task = call_main(purpose, task, history, directory, action_input)
+    elif action == "test":
+        action_name, action_input, history, task = call_test(purpose, task, history, directory, action_input)
+    elif action == "set_task":
+        action_name, action_input, history, task = call_set_task(purpose, task, history, directory, action_input)
+    elif action == "read":
+        action_name, action_input, history, task = call_read(purpose, task, history, directory, action_input)
+    elif action == "modify":
+        action_name, action_input, history, task = call_modify(purpose, task, history, directory, action_input)
+    elif action == "add":
+        action_name, action_input, history, task = call_add(purpose, task, history, directory, action_input)
+
+    st.subheader("History")
+    st.write(history)
 
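A related budget note: the MODIFY-FILE and ADD-FILE paths request max_tokens=2048, but the stock gpt2 checkpoint has a 1024-token context window, so prompt plus continuation cannot exceed 1024 tokens. A small sketch of clamping the request to whatever room the prompt leaves (assumes the model and tokenizer objects defined in this file; clamped_max_new_tokens is an illustrative name):

def clamped_max_new_tokens(prompt, requested):
    # gpt2's context window is model.config.n_positions (1024 tokens);
    # leave at least one token of room for generation.
    context_window = model.config.n_positions
    prompt_len = len(tokenizer(prompt)["input_ids"])
    return max(1, min(requested, context_window - prompt_len))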
 
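On the f_content change in call_read and call_modify: content[action_input] raises KeyError when the path is missing from the parsed module structure, whereas the previous content.get(...) returned the placeholder. A sketch that keeps the old fallback while preserving the new empty-document placeholder (file_text_or_placeholder is an illustrative helper):

def file_text_or_placeholder(content, path):
    # .get() avoids a KeyError for unknown paths; `or` keeps the placeholder
    # for files that exist in the structure but have empty contents.
    return content.get(path) or "< document is empty >"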
 
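The commit drops the old NAME_TO_FUNC table in favour of an if/elif chain in the button handler. The same dispatch can be expressed as a dict keyed by the sidebar's action names; this sketch mirrors the commit's branches and is an illustration, not part of the change:

ACTION_HANDLERS = {
    "main": call_main,
    "test": call_test,
    "set_task": call_set_task,
    "read": call_read,
    "modify": call_modify,
    "add": call_add,
}

if st.button("Run Action"):
    history = ""
    handler = ACTION_HANDLERS[action]
    action_name, action_input, history, task = handler(
        purpose, task, history, directory, action_input
    )
    st.subheader("History")
    st.write(history)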
 
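Finally, a behavioural difference: the deleted run_action compressed the history once it grew past MAX_HISTORY lines, while the new handler resets history to an empty string on every click, so compress_history is never reached. If a persistent history is reintroduced (for example via st.session_state), the old check could be restored along these lines (a sketch using names already defined in this file; the st.session_state wiring is an assumption):

# Sketch: keep history across reruns and compress it when it gets long.
history = st.session_state.get("history", "")
if len(history.split("\n")) > MAX_HISTORY:
    if VERBOSE:
        st.write("COMPRESSING HISTORY")
    history = compress_history(purpose, task, history, directory)
st.session_state["history"] = history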