acecalisto3 committed on
Commit
72af2bb
·
verified ·
1 Parent(s): 760245f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -332
app.py CHANGED
@@ -1,25 +1,7 @@
1
  import gradio as gr
2
  import os
3
- import subprocess
4
  import random
5
  from huggingface_hub import InferenceClient
6
- from safe_search import safe_search
7
- from i_search import google
8
- from i_search import i_search as i_s
9
- from agent import (
10
- ACTION_PROMPT,
11
- ADD_PROMPT,
12
- COMPRESS_HISTORY_PROMPT,
13
- LOG_PROMPT,
14
- LOG_RESPONSE,
15
- MODIFY_PROMPT,
16
- PREFIX,
17
- SEARCH_QUERY,
18
- READ_PROMPT,
19
- TASK_PROMPT,
20
- UNDERSTAND_TEST_RESULTS_PROMPT,
21
- )
22
- from utils import parse_action, parse_file_content, read_python_module_structure
23
  from datetime import datetime
24
  import yaml
25
  import logging
@@ -140,8 +122,6 @@ date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
140
 
141
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
142
 
143
- ############################################
144
-
145
  VERBOSE = True
146
  MAX_HISTORY = 125
147
 
@@ -153,209 +133,24 @@ def format_prompt(message, history):
153
  prompt += f"[INST] {message} [/INST]"
154
  return prompt
155
 
156
- def run_gpt(prompt_template, stop_tokens, max_tokens, purpose, **prompt_kwargs):
157
- seed = random.randint(1, 1111111111111111)
158
- print(seed)
159
- generate_kwargs = dict(
160
- temperature=1.0,
161
- max_new_tokens=2096,
162
- top_p=0.99,
163
- repetition_penalty=1.7,
164
- do_sample=True,
165
- seed=seed,
166
- )
167
-
168
- content = PREFIX.format(
169
- date_time_str=date_time_str,
170
- purpose=purpose,
171
- safe_search=safe_search,
172
- ) + prompt_template.format(**prompt_kwargs)
173
-
174
- if VERBOSE:
175
- print(LOG_PROMPT.format(content))
176
-
177
- stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
178
- resp = ""
179
- for response in stream:
180
- resp += response.token.text
181
-
182
- if VERBOSE:
183
- print(LOG_RESPONSE.format(resp))
184
-
185
- return resp
186
-
187
- def compress_history(purpose, task, history, directory):
188
- resp = run_gpt(
189
- COMPRESS_HISTORY_PROMPT,
190
- stop_tokens=["observation:", "task:", "action:", "thought:"],
191
- max_tokens=5096,
192
- purpose=purpose,
193
- task=task,
194
- history=history,
195
- )
196
- history = "observation: {}\n".format(resp)
197
- return history
198
-
199
- def call_search(purpose, task, history, directory, action_input):
200
- print("CALLING SEARCH")
201
- try:
202
- if "http" in action_input:
203
- if "<" in action_input:
204
- action_input = action_input.strip("<")
205
- if ">" in action_input:
206
- action_input = action_input.strip(">")
207
-
208
- response = i_s(action_input)
209
- print(response)
210
- history += "observation: search result is: {}\n".format(response)
211
- else:
212
- history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
213
- except Exception as e:
214
- history += "observation: {}'\n".format(e)
215
-
216
- return "MAIN", None, history, task
217
-
218
- def call_main(purpose, task, history, directory, action_input):
219
- resp = run_gpt(
220
- ACTION_PROMPT,
221
- stop_tokens=["observation:", "task:", "action:", "thought:"],
222
- max_tokens=5096,
223
- purpose=purpose,
224
- task=task,
225
- history=history,
226
- )
227
-
228
- lines = resp.strip().strip("\n").split("\n")
229
- for line in lines:
230
- if line == "":
231
- continue
232
- if line.startswith("thought: "):
233
- history += "{}\n".format(line)
234
- elif line.startswith("action: "):
235
- action_name, action_input = parse_action(line)
236
- print(f'ACTION_NAME :: {action_name}')
237
- print(f'ACTION_INPUT :: {action_input}')
238
- history += "{}\n".format(line)
239
- if "COMPLETE" in action_name or "COMPLETE" in action_input:
240
- task = "END"
241
- return action_name, action_input, history, task
242
- else:
243
- return action_name, action_input, history, task
244
- else:
245
- history += "{}\n".format(line)
246
-
247
- return "MAIN", None, history, task
248
-
249
- def call_set_task(purpose, task, history, directory, action_input):
250
- task = run_gpt(
251
- TASK_PROMPT,
252
- stop_tokens=[],
253
- max_tokens=2048,
254
- purpose=purpose,
255
- task=task,
256
- history=history,
257
- ).strip("\n")
258
-
259
- history += "observation: task has been updated to: {}\n".format(task)
260
- return "MAIN", None, history, task
261
-
262
- def end_fn(purpose, task, history, directory, action_input):
263
- task = "END"
264
- return "COMPLETE", "COMPLETE", history, task
265
-
266
- NAME_TO_FUNC = {
267
- "MAIN": call_main,
268
- "UPDATE-TASK": call_set_task,
269
- "SEARCH": call_search,
270
- "COMPLETE": end_fn,
271
- }
272
-
273
- def run_action(purpose, task, history, directory, action_name, action_input):
274
- print(f'action_name::{action_name}')
275
- try:
276
- if "RESPONSE" in action_name or "COMPLETE" in action_name:
277
- action_name = "COMPLETE"
278
- task = "END"
279
- return action_name, "COMPLETE", history, task
280
-
281
- # compress the history when it is long
282
- if len(history.split("\n")) > MAX_HISTORY:
283
- if VERBOSE:
284
- print("COMPRESSING HISTORY")
285
- history = compress_history(purpose, task, history, directory)
286
-
287
- if action_name not in NAME_TO_FUNC:
288
- action_name = "MAIN"
289
- if action_name == "" or action_name is None:
290
- action_name = "MAIN"
291
-
292
- assert action_name in NAME_TO_FUNC
293
-
294
- print("RUN: ", action_name, action_input)
295
- return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
296
- except Exception as e:
297
- history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
298
- return "MAIN", None, history, task
299
-
300
- def run(purpose, history):
301
- task = None
302
- directory = "./"
303
- if history:
304
- history = str(history).strip("[]")
305
- if not history:
306
- history = ""
307
-
308
- action_name = "UPDATE-TASK" if task is None else "MAIN"
309
- action_input = None
310
- while True:
311
- print("")
312
- print("")
313
- print("---")
314
- print("purpose:", purpose)
315
- print("task:", task)
316
- print("---")
317
- print(history)
318
- print("---")
319
-
320
- action_name, action_input, history, task = run_action(
321
- purpose,
322
- task,
323
- history,
324
- directory,
325
- action_name,
326
- action_input,
327
- )
328
-
329
- yield history
330
-
331
- if task == "END":
332
- return history
333
-
334
- ################################################
335
-
336
  agents = [
337
  "WEB_DEV",
338
  "AI_SYSTEM_PROMPT",
339
- "PYTHON_CODE_DEV"
 
 
 
 
340
  ]
341
 
342
  def generate(
343
  prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
344
  ):
345
  seed = random.randint(1, 1111111111111111)
346
- agent = custom_prompts["WEB_DEV"]
347
-
348
- if agent_name == "WEB_DEV":
349
- agent = custom_prompts["WEB_DEV"]
350
- elif agent_name == "AI_SYSTEM_PROMPT":
351
- agent = custom_prompts["AI_SYSTEM_PROMPT"]
352
- elif agent_name == "PYTHON_CODE_DEV":
353
- agent = custom_prompts["PYTHON_CODE_DEV"]
354
 
355
- system_prompt = agent
356
- temperature = float(temperature)
357
- if temperature < 1e-2:
358
- temperature = 1e-2
359
  top_p = float(top_p)
360
 
361
  generate_kwargs = dict(
@@ -367,135 +162,45 @@ def generate(
367
  seed=seed,
368
  )
369
 
370
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
371
- output = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, return_full_text=True)
372
 
373
  return output
374
 
375
- # Define input and output components
376
- with gr.Blocks() as iface:
377
- # Input components
378
- input_text = gr.Textbox(label="Input Text")
379
- # Other input components...
380
-
381
- # Output components
382
- output_text = gr.Textbox(label="Output Text")
383
- # Other output components...
384
-
385
- # Specify inputs and events
386
- inputs = [input_text, agents] # List of input components
387
- events = [output_text, agents] # List of output components
388
-
389
- iface.load(inputs, events)
390
- def log_messages(inputs, outputs):
391
- logger.info(f'Input: {inputs}, Output: {outputs}')
392
-
393
- @gr.Interface.load(inputs, agents)
394
- def log_messages(inputs, outputs):
395
- logger.info(f'Input: {inputs}, Output: {outputs}')
396
-
397
- @gr.Interface.load(inputs, events)
398
- def log_messages(inputs, outputs):
399
- logger.info(f'Input: {inputs}, Output: {outputs}')
400
-
401
  def update_sys_prompt(agent):
402
- global SYSTEM_PROMPT
403
- SYSTEM_PROMPT = globals()[agent]
404
 
405
  def get_helpful_tip(agent):
406
- if agent == 'WEB_DEV':
407
- return "Provide information related to Web Development tasks."
408
- elif agent == 'AI_SYSTEM_PROMPT':
409
- return "Update the system instructions for the assistant here."
410
- elif agent == 'PYTHON_CODE_DEV':
411
- return "Describe what you want me to help you with regarding Python coding tasks."
412
- elif agent == 'CODE_GENERATION':
413
- return "Provide requirements for the code you want me to generate."
414
- elif agent == 'CODE_INTERPRETATION':
415
- return "Share the code you want me to analyze and explain."
416
- elif agent == 'CODE_TRANSLATION':
417
- return "Specify the source and target programming languages, and provide the code you want me to translate."
418
- elif agent == 'CODE_IMPLEMENTATION':
419
- return "Provide the code or requirements you want me to implement in a production-ready environment."
420
 
421
  def chat_interface(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
422
  generated_text = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
423
- chatbot_response = [(prompt, generated_text)]
424
- return chatbot_response, "" # Return the chatbot response and an empty string for the message textbox
425
-
426
- additional_inputs = [
427
- gr.Dropdown(
428
- label="Agents",
429
- choices=[s for s in agents],
430
- value=agents[0],
431
- interactive=True,
432
- ),
433
- gr.Textbox(
434
- label="System Prompt",
435
- max_lines=1,
436
- interactive=True,
437
- ),
438
- gr.Slider(
439
- label="Temperature",
440
- value=0.9,
441
- minimum=0.0,
442
- maximum=1.0,
443
- step=0.05,
444
- interactive=True,
445
- info="Higher values produce more diverse outputs",
446
- ),
447
- gr.Slider(
448
- label="Max new tokens",
449
- value=1048 * 10,
450
- minimum=0,
451
- maximum=1048 * 10,
452
- step=64,
453
- interactive=True,
454
- info="The maximum numbers of new tokens",
455
- ),
456
- gr.Slider(
457
- label="Top-p (nucleus sampling)",
458
- value=0.90,
459
- minimum=0.0,
460
- maximum=1,
461
- step=0.05,
462
- interactive=True,
463
- info="Higher values sample more low-probability tokens",
464
- ),
465
- gr.Slider(
466
- label="Repetition penalty",
467
- value=1.2,
468
- minimum=1.0,
469
- maximum=2.0,
470
- step=0.05,
471
- interactive=True,
472
- info="Penalize repeated tokens",
473
- ),
474
- ]
475
 
476
  examples = [
477
- ["Based on previous interactions, generate an interactive preview of the user's requested application.", None, None, None, None, None],
478
- ["Utilize the relevant code snippets and components from previous interactions.", None, None, None, None, None],
479
- ["Assemble a working demo that showcases the core functionality of the application.", None, None, None, None, None],
480
- ["Present the demo in an interactive environment within the Gradio interface.", None, None, None, None, None],
481
- ["Allow the user to explore and interact with the demo to test its features.", None, None, None, None, None],
482
- ["Gather feedback from the user about the demo and potential improvements.", None, None, None, None, None],
483
- ["If the user approves of the app's running state, provide a bash script that will automate all aspects of a local run and a docker image for ease-of-launch in addition to the huggingface-ready app.py with all functions and GUI, and the requirements.txt file comprised of all required libraries and packages the application is dependent on, avoiding OpenAI API at all points since we only use Hugging Face transformers, models, agents, libraries, and API.", None, None, None, None, None],
484
  ]
485
 
486
- def create_interface():
487
- with gr.Blocks() as iface:
488
- gr.ChatInterface(
489
- fn=generate,
490
- title="Fragmixt\nAgents With Agents,\nSurf With a Purpose",
491
- examples=examples,
492
- additional_inputs=additional_inputs,
493
- )
494
- return iface
495
-
496
- iface = gr.Blocks()
497
-
498
- with iface:
499
  gr.Markdown("# Fragmixt\nAgents With Agents,\nSurf With a Purpose")
500
 
501
  chatbot = gr.Chatbot()
@@ -505,10 +210,12 @@ with iface:
505
  agent_dropdown = gr.Dropdown(label="Agents", choices=agents, value=agents[0])
506
  sys_prompt = gr.Textbox(label="System Prompt", max_lines=1)
507
  temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05)
508
- max_new_tokens = gr.Slider(label="Max new tokens", value=1048 * 10, minimum=0, maximum=1048 * 10, step=64)
509
  top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05)
510
  repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05)
511
 
 
 
512
  msg.submit(chat_interface,
513
  [msg, chatbot, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty],
514
  [chatbot, msg])
@@ -516,7 +223,11 @@ with iface:
516
 
517
  gr.Examples(examples, [msg, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty])
518
 
519
- agent_dropdown.change(fn=get_helpful_tip, inputs=agent_dropdown, outputs=gr.components.Markdown())
520
  agent_dropdown.change(fn=update_sys_prompt, inputs=agent_dropdown, outputs=sys_prompt)
521
 
522
- iface.launch()
 
 
 
 
 
1
  import gradio as gr
2
  import os
 
3
  import random
4
  from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  from datetime import datetime
6
  import yaml
7
  import logging
 
122
 
123
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
124
 
 
 
125
  VERBOSE = True
126
  MAX_HISTORY = 125
127
 
 
133
  prompt += f"[INST] {message} [/INST]"
134
  return prompt
135
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  agents = [
137
  "WEB_DEV",
138
  "AI_SYSTEM_PROMPT",
139
+ "PYTHON_CODE_DEV",
140
+ "CODE_GENERATION",
141
+ "CODE_INTERPRETATION",
142
+ "CODE_TRANSLATION",
143
+ "CODE_IMPLEMENTATION"
144
  ]
145
 
146
  def generate(
147
  prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
148
  ):
149
  seed = random.randint(1, 1111111111111111)
150
+ agent = custom_prompts[agent_name]
 
 
 
 
 
 
 
151
 
152
+ system_prompt = agent if sys_prompt == "" else sys_prompt
153
+ temperature = max(float(temperature), 1e-2)
 
 
154
  top_p = float(top_p)
155
 
156
  generate_kwargs = dict(
 
162
  seed=seed,
163
  )
164
 
165
+ formatted_prompt = format_prompt(f"{system_prompt}\n\n{prompt}", history)
166
+ output = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, return_full_text=False)
167
 
168
  return output
169
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
def update_sys_prompt(agent):
    """Dropdown callback: return the stored system prompt for *agent*.

    The returned string is written into the System Prompt textbox
    (wired via agent_dropdown.change below). Falls back to an empty
    string for unknown agent names so the UI callback can never raise
    KeyError.
    """
    # custom_prompts is a module-level mapping defined earlier in the file.
    return custom_prompts.get(agent, "")
 
172
 
173
def get_helpful_tip(agent):
    """Return a one-line usage hint for the selected agent.

    Unknown agent names fall back to a generic getting-started message.
    """
    _AGENT_TIPS = dict(
        WEB_DEV="Provide information related to Web Development tasks.",
        AI_SYSTEM_PROMPT="Update the system instructions for the assistant here.",
        PYTHON_CODE_DEV="Describe what you want me to help you with regarding Python coding tasks.",
        CODE_GENERATION="Provide requirements for the code you want me to generate.",
        CODE_INTERPRETATION="Share the code you want me to analyze and explain.",
        CODE_TRANSLATION="Specify the source and target programming languages, and provide the code you want me to translate.",
        CODE_IMPLEMENTATION="Provide the code or requirements you want me to implement in a production-ready environment.",
    )
    try:
        return _AGENT_TIPS[agent]
    except KeyError:
        return "Select an agent to get started."
 
 
 
 
184
 
185
def chat_interface(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
    """Submit callback: generate a reply and append the exchange to the chat.

    Returns the updated chat history (fed back to the Chatbot component)
    and an empty string that clears the message textbox.
    """
    generated_text = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
    # Gradio hands the callback the Chatbot state, which may be None on the
    # first turn; normalise to a list so .append below cannot fail.
    history = list(history) if history else []
    history.append((prompt, generated_text))
    return history, ""
189
+
190
def log_messages(*args):
    """Log the raw values of changed UI components (wired to .change below)."""
    # Lazy %-formatting: the message string is only built when INFO is enabled.
    # Output is identical to the previous f-string (the args tuple's repr).
    logger.info("Input: %s", args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
192
 
193
def _example(prompt, agent, max_tokens=1024):
    # Build one gr.Examples row matching the component order used below:
    # [msg, agent_dropdown, sys_prompt, temperature, max_new_tokens,
    #  top_p, repetition_penalty].
    return [prompt, agent, "", 0.9, max_tokens, 0.95, 1.2]

# Canned prompts demonstrating each agent persona.
examples = [
    _example("Based on previous interactions, generate an interactive preview of the user's requested application.", "WEB_DEV"),
    _example("Utilize the relevant code snippets and components from previous interactions.", "PYTHON_CODE_DEV"),
    _example("Assemble a working demo that showcases the core functionality of the application.", "CODE_IMPLEMENTATION"),
    _example("Present the demo in an interactive environment within the Gradio interface.", "WEB_DEV"),
    _example("Allow the user to explore and interact with the demo to test its features.", "CODE_GENERATION"),
    _example("Gather feedback from the user about the demo and potential improvements.", "AI_SYSTEM_PROMPT"),
    _example("If the user approves of the app's running state, provide a bash script that will automate all aspects of a local run and a docker image for ease-of-launch in addition to the huggingface-ready app.py with all functions and GUI, and the requirements.txt file comprised of all required libraries and packages the application is dependent on, avoiding OpenAI API at all points since we only use Hugging Face transformers, models, agents, libraries, and API.", "CODE_IMPLEMENTATION", 2048),
]
202
 
203
+ with gr.Blocks() as iface:
 
 
 
 
 
 
 
 
 
 
 
 
204
  gr.Markdown("# Fragmixt\nAgents With Agents,\nSurf With a Purpose")
205
 
206
  chatbot = gr.Chatbot()
 
210
  agent_dropdown = gr.Dropdown(label="Agents", choices=agents, value=agents[0])
211
  sys_prompt = gr.Textbox(label="System Prompt", max_lines=1)
212
  temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05)
213
+ max_new_tokens = gr.Slider(label="Max new tokens", value=1048, minimum=0, maximum=2048, step=64)
214
  top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05)
215
  repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05)
216
 
217
+ helpful_tip = gr.Markdown()
218
+
219
  msg.submit(chat_interface,
220
  [msg, chatbot, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty],
221
  [chatbot, msg])
 
223
 
224
  gr.Examples(examples, [msg, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty])
225
 
226
+ agent_dropdown.change(fn=get_helpful_tip, inputs=agent_dropdown, outputs=helpful_tip)
227
  agent_dropdown.change(fn=update_sys_prompt, inputs=agent_dropdown, outputs=sys_prompt)
228
 
229
+ for component in [msg, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty]:
230
+ component.change(fn=log_messages, inputs=[component])
231
+
232
# Only start the Gradio server when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    iface.launch()