Update app.py
app.py
CHANGED
```diff
@@ -21,12 +21,11 @@ from agent import (
 )
 from utils import parse_action, parse_file_content, read_python_module_structure
 from datetime import datetime
+
 now = datetime.now()
 date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
 
-client = InferenceClient(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1",
-)
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 ############################################
 
```
```diff
@@ -34,20 +33,14 @@ VERBOSE = True
 MAX_HISTORY = 125
 
 def format_prompt(message, history):
-
-
-
-
-
-
-
-def run_gpt(
-    prompt_template,
-    stop_tokens,
-    max_tokens,
-    purpose,
-    **prompt_kwargs,
-):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+def run_gpt(prompt_template, stop_tokens, max_tokens, purpose, **prompt_kwargs):
     seed = random.randint(1, 1111111111111111)
     print(seed)
     generate_kwargs = dict(
```
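For reference, the `format_prompt` body introduced here hand-builds the Mixtral-8x7B-Instruct chat template: each user turn is wrapped in `[INST] ... [/INST]` and each bot turn is closed with `</s>`. A minimal standalone sketch of what it produces (the two-turn history below is invented for illustration):

```python
# Standalone copy of the new format_prompt, shown with a made-up history.
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("Build a to-do app", "Sure, which framework?")]
print(format_prompt("Use Gradio", history))
# <s>[INST] Build a to-do app [/INST] Sure, which framework?</s> [INST] Use Gradio [/INST]
```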
```diff
@@ -64,12 +57,10 @@ def run_gpt(
         purpose=purpose,
         safe_search=safe_search,
     ) + prompt_template.format(**prompt_kwargs)
+
     if VERBOSE:
         print(LOG_PROMPT.format(content))
 
-    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    #formatted_prompt = format_prompt(f'{content}', history)
-
     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
     resp = ""
     for response in stream:
@@ -77,6 +68,7 @@ def run_gpt(
 
     if VERBOSE:
         print(LOG_RESPONSE.format(resp))
+
     return resp
 
 def compress_history(purpose, task, history, directory):
@@ -101,24 +93,25 @@ def call_search(purpose, task, history, directory, action_input):
             action_input = action_input.strip(">")
 
             response = i_s(action_input)
-            #response = google(search_return)
             print(response)
             history += "observation: search result is: {}\n".format(response)
         else:
             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
     except Exception as e:
         history += "observation: {}'\n".format(e)
+
     return "MAIN", None, history, task
 
 def call_main(purpose, task, history, directory, action_input):
     resp = run_gpt(
         ACTION_PROMPT,
-        stop_tokens=["observation:", "task:", "action:","
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
         max_tokens=5096,
         purpose=purpose,
         task=task,
         history=history,
     )
+
     lines = resp.strip().strip("\n").split("\n")
     for line in lines:
         if line == "":
```
```diff
@@ -137,9 +130,7 @@ def call_main(purpose, task, history, directory, action_input):
             return action_name, action_input, history, task
         else:
             history += "{}\n".format(line)
-
-            #return action_name, action_input, history, task
-            #assert False, "unknown action: {}".format(line)
+
     return "MAIN", None, history, task
 
 def call_set_task(purpose, task, history, directory, action_input):
@@ -151,6 +142,7 @@ def call_set_task(purpose, task, history, directory, action_input):
         task=task,
         history=history,
     ).strip("\n")
+
     history += "observation: task has been updated to: {}\n".format(task)
     return "MAIN", None, history, task
 
@@ -178,10 +170,12 @@ def run_action(purpose, task, history, directory, action_name, action_input):
         if VERBOSE:
             print("COMPRESSING HISTORY")
         history = compress_history(purpose, task, history, directory)
+
     if not action_name in NAME_TO_FUNC:
         action_name = "MAIN"
     if action_name == "" or action_name is None:
         action_name = "MAIN"
+
     assert action_name in NAME_TO_FUNC
 
     print("RUN: ", action_name, action_input)
@@ -218,22 +212,14 @@ def run(purpose, history):
             action_name,
             action_input,
         )
+
         yield (history)
-
+
         if task == "END":
             return (history)
-            #return ("", [(purpose,history)])
 
 ################################################
 
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-
 agents = [
     "WEB_DEV",
     "AI_SYSTEM_PROMPT",
```
```diff
@@ -244,14 +230,15 @@ def generate(
     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
 ):
     seed = random.randint(1, 1111111111111111)
-
     agent = prompts.WEB_DEV
+
     if agent_name == "WEB_DEV":
         agent = prompts.WEB_DEV
-
+    elif agent_name == "AI_SYSTEM_PROMPT":
         agent = prompts.AI_SYSTEM_PROMPT
-
+    elif agent_name == "PYTHON_CODE_DEV":
         agent = prompts.PYTHON_CODE_DEV
+
     system_prompt = agent
     temperature = float(temperature)
     if temperature < 1e-2:
```
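The hunk above turns the previously unconditional overwrites into a proper `if`/`elif` chain, so only the selected agent's prompt survives. An equivalent, more compact pattern (a sketch, not part of this commit) would be a dict lookup with a default:

```python
# Hypothetical alternative to the if/elif chain; not in the commit.
AGENT_PROMPTS = {
    "WEB_DEV": prompts.WEB_DEV,
    "AI_SYSTEM_PROMPT": prompts.AI_SYSTEM_PROMPT,
    "PYTHON_CODE_DEV": prompts.PYTHON_CODE_DEV,
}
agent = AGENT_PROMPTS.get(agent_name, prompts.WEB_DEV)  # default to WEB_DEV
```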
```diff
@@ -274,6 +261,7 @@ def generate(
     for response in stream:
         output += response.token.text
         yield output
+
     return output
 
 additional_inputs = [
```
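Both `run_gpt` and `generate` rely on the same streaming call pattern from `huggingface_hub`. A minimal self-contained sketch of that pattern (parameter values are illustrative, and `stop_sequences` is shown on the assumption that the `generate_kwargs` dict built elsewhere in app.py passes the stop tokens under that name):

```python
# Minimal sketch of the streaming pattern used in run_gpt/generate.
# Assumes huggingface_hub is installed and the model is reachable
# through the Hugging Face Inference API.
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

generate_kwargs = dict(
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.7,
    do_sample=True,
    seed=42,
    stop_sequences=["observation:", "task:", "action:", "thought:"],
)

output = ""
stream = client.text_generation(
    "[INST] Say hello [/INST]",
    **generate_kwargs,
    stream=True,
    details=True,          # each streamed item carries token metadata
    return_full_text=False,
)
for response in stream:
    output += response.token.text  # accumulate the text chunk by chunk
print(output)
```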
```diff
@@ -297,7 +285,6 @@ additional_inputs = [
         interactive=True,
         info="Higher values produce more diverse outputs",
     ),
-
     gr.Slider(
         label="Max new tokens",
         value=1048 * 10,
@@ -328,13 +315,13 @@ additional_inputs = [
 ]
 
 examples = [
-    ["Based on previous interactions, generate an interactive preview of the user's requested application.", None, None, None, None, None
-    ["Utilize the relevant code snippets and components from previous interactions.", None, None, None, None, None
-    ["Assemble a working demo that showcases the core functionality of the application.", None, None, None, None, None
-    ["Present the demo in an interactive environment within the Gradio interface.", None, None, None, None, None
-    ["Allow the user to explore and interact with the demo to test its features.", None, None, None, None, None
-    ["Gather feedback from the user about the demo and potential improvements.", None, None, None, None, None
-    ["If the user approves of the app's running state
+    ["Based on previous interactions, generate an interactive preview of the user's requested application.", None, None, None, None, None],
+    ["Utilize the relevant code snippets and components from previous interactions.", None, None, None, None, None],
+    ["Assemble a working demo that showcases the core functionality of the application.", None, None, None, None, None],
+    ["Present the demo in an interactive environment within the Gradio interface.", None, None, None, None, None],
+    ["Allow the user to explore and interact with the demo to test its features.", None, None, None, None, None],
+    ["Gather feedback from the user about the demo and potential improvements.", None, None, None, None, None],
+    ["If the user approves of the app's running state, provide a bash script that will automate all aspects of a local run and a docker image for ease-of-launch in addition to the huggingface-ready app.py with all functions and GUI, and the requirements.txt file comprised of all required libraries and packages the application is dependent on, avoiding OpenAI API at all points since we only use Hugging Face transformers, models, agents, libraries, and API.", None, None, None, None, None],
 ]
 
 def create_interface():
```
```diff
@@ -366,7 +353,9 @@ with iface:
     top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05)
     repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05)
 
-    msg.submit(chat_interface,
+    msg.submit(chat_interface,
+               [msg, chatbot, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty],
+               [chatbot, msg])
     clear.click(lambda: None, None, chatbot, queue=False)
 
     gr.Examples(examples, [msg, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty])
```
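The rewired `msg.submit` now passes eight inputs and expects two outputs (`[chatbot, msg]`), so the handler must return the updated chat history plus a value that clears the textbox. `chat_interface` itself is outside this diff; a hypothetical handler compatible with that wiring might look like:

```python
# Hypothetical chat_interface matching the new msg.submit wiring; the real
# implementation lives elsewhere in app.py and is not shown in this diff.
def chat_interface(message, chat_history, agent_name, sys_prompt,
                   temperature, max_new_tokens, top_p, repetition_penalty):
    response = ""
    # generate() is the streaming generator defined above; keep its final value.
    for chunk in generate(message, chat_history, agent_name, sys_prompt,
                          temperature, max_new_tokens, top_p, repetition_penalty):
        response = chunk
    chat_history = chat_history + [(message, response)]
    return chat_history, ""  # second output clears the msg textbox
```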