acecalisto3 committed on
Commit 66677ed · verified · 1 Parent(s): a9d16d7

Update app.py

Files changed (1):
  1. app.py +290 -466
app.py CHANGED
@@ -1,480 +1,304 @@
  import os
- import sys
  import subprocess
- import base64
- import json
- from io import StringIO
- from typing import Dict, List
- import streamlit as st
- from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
- from pylint import lint
  from huggingface_hub import InferenceClient
  import gradio as gr
- import random
- import prompts
- client = InferenceClient(
-     "mistralai/Mixtral-8x7B-v0.1"
  )

- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
- # Replace st.secrets with os.environ
- hf_token = os.environ.get("huggingface_token")
-
- if not hf_token:
-     st.error("Hugging Face API key not found. Please set the HUGGINGFACE_API_KEY environment variable.")
-     st.stop()
-
- # Global state to manage communication between Tool Box and Workspace Chat App
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
- if "terminal_history" not in st.session_state:
-     st.session_state.terminal_history = []
- if "workspace_projects" not in st.session_state:
-     st.session_state.workspace_projects = {}
-
- # Load pre-trained RAG retriever
- rag_retriever = pipeline("text-generation", model="mistralai/Mixtral-8x7B-v0.1")
-
- # Load pre-trained chat model
- chat_model = AutoModelForSeq2SeqLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
-
- # Load tokenizer
- tokenizer = AutoTokenizer.from_pretrained("gpt2")
-
- def process_input(user_input: str) -> str:
-     # Input pipeline: Tokenize and preprocess user input
-     input_ids = tokenizer(user_input, return_tensors="pt").input_ids
-     attention_mask = tokenizer(user_input, return_tensors="pt").attention_mask
-
-     # RAG model: Generate response
-     with torch.no_grad():
-         output = rag_retriever(input_ids, attention_mask=attention_mask)
-         response = output.generator_outputs[0].sequences[0]
-
-     # Chat model: Refine response
-     chat_input = tokenizer(response, return_tensors="pt")
-     chat_input["input_ids"] = chat_input["input_ids"].unsqueeze(0)
-     chat_input["attention_mask"] = chat_input["attention_mask"].unsqueeze(0)
-     with torch.no_grad():
-         chat_output = chat_model(**chat_input)
-         refined_response = chat_output.sequences[0]
-
-     # Output pipeline: Return final response
-     return refined_response
-
- class AIAgent:
-     def __init__(self, name: str, description: str, skills: List[str], hf_api=None):
-         self.name = name
-         self.description = description
-         self.skills = skills
-         self._hf_api = hf_api
-         self._hf_token = hf_token
-
-     @property
-     def hf_api(self):
-         if not self._hf_api and self.has_valid_hf_token():
-             self._hf_api = HfApi(token=self._hf_token)
-         return self._hf_api
-
-     def has_valid_hf_token(self):
-         return bool(self._hf_token)
-
-     async def autonomous_build(self, chat_history: List[str], workspace_projects: Dict[str, str], project_name: str, selected_model: str):
-         # Continuation of previous methods
-         summary = "Chat History:\n" + "\n".join(chat_history)
-         summary += "\n\nWorkspace Projects:\n" + "\n".join(workspace_projects.items())
-
-         # Analyze chat history and workspace projects to suggest actions
-         # Example:
-         # - Check if the user has requested to create a new file
-         # - Check if the user has requested to install a package
-         # - Check if the user has requested to run a command
-         # - Check if the user has requested to generate code
-         # - Check if the user has requested to translate code
-         # - Check if the user has requested to summarize text
-
-         # Generate a response based on the analysis
-         next_step = "Based on the current state, the next logical step is to implement the main application logic."

-         # Ensure project folder exists
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         if not os.path.exists(project_path):
-             os.makedirs(project_path)

-         # Create requirements.txt if it doesn't exist
-         requirements_file = os.path.join(project_path, "requirements.txt")
-         if not os.path.exists(requirements_file):
-             with open(requirements_file, "w") as f:
-                 f.write("# Add your project's dependencies here\n")

-         # Create app.py if it doesn't exist
-         app_file = os.path.join(project_path, "app.py")
-         if not os.path.exists(app_file):
-             with open(app_file, "w") as f:
-                 f.write("# Your project's main application logic goes here\n")
-
-         # Generate GUI code for app.py if requested
-         if "create a gui" in summary.lower():
-             gui_code = generate_code(
-                 "Create a simple GUI for this application", selected_model)
-             with open(app_file, "a") as f:
-                 f.write(gui_code)
-
-         # Run the default build process
-         build_command = "pip install -r requirements.txt && python app.py"
-         try:
-             result = subprocess.run(
-                 build_command, shell=True, capture_output=True, text=True, cwd=project_path)
-             st.write(f"Build Output:\n{result.stdout}")
-             if result.stderr:
-                 st.error(f"Build Errors:\n{result.stderr}")
-         except Exception as e:
-             st.error(f"Build Error: {e}")
-
-         return summary, next_step
-
- def get_built_space_files() -> Dict[str, str]:
-     # Replace with your logic to gather the files you want to deploy
-     return {
-         "app.py": "# Your Streamlit app code here",
-         "requirements.txt": "streamlit\ntransformers"
-         # Add other files as needed
-     }
-
- def save_agent_to_file(agent: AIAgent):
-     """Saves the agent's prompt to a file."""
-     if not os.path.exists(AGENT_DIRECTORY):
-         os.makedirs(AGENT_DIRECTORY)
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
-     with open(file_path, "w") as file:
-         file.write(agent.create_agent_prompt())
-     st.session_state.available_agents.append(agent.name)
-
- def load_agent_prompt(agent_name: str) -> str:
-     """Loads an agent prompt from a file."""
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
-     if os.path.exists(file_path):
-         with open(file_path, "r") as file:
-             agent_prompt = file.read()
-         return agent_prompt
-     else:
-         return None
-
- def create_agent_from_text(name: str, text: str) -> str:
-     skills = text.split("\n")
-     agent = AIAgent(name, "AI agent created from text input.", skills)
-     save_agent_to_file(agent)
-     return agent.create_agent_prompt()
-
- def chat_interface_with_agent(input_text: str, agent_name: str) -> str:
-     agent_prompt = load_agent_prompt(agent_name)
-     if agent_prompt is None:
-         return f"Agent {agent_name} not found."
-
-     model_name = ""
      try:
-         generator = pipeline("text-generation", model=model_name)
-         generator.tokenizer.pad_token = generator.tokenizer.eos_token
-         generated_response = generator(
-             f"{agent_prompt}\n\nUser: {input_text}\nAgent:", max_length=100, do_sample=True, top_k=50)[0]["generated_text"]
-         return generated_response
      except Exception as e:
-         return f"Error loading model: {e}"
-
- def terminal_interface(command: str, project_name: str = None) -> str:
-     if project_name:
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         if not os.path.exists(project_path):
-             return f"Project {project_name} does not exist."
-         result = subprocess.run(
-             command, shell=True, capture_output=True, text=True, cwd=project_path)
-     else:
-         result = subprocess.run(command, shell=True, capture_output=True, text=True)
-     return result.stdout
-
- def code_editor_interface(code: str) -> str:
      try:
-         formatted_code = black.format_str(code, mode=black.FileMode())
-     except black.NothingChanged:
-         formatted_code = code
-
-     result = StringIO()
-     sys.stdout = result
-     sys.stderr = result
-
-     (pylint_stdout, pylint_stderr) = lint.py_run(code, return_std=True)
-     sys.stdout = sys.__stdout__
-     sys.stderr = sys.__stderr__
-
-     lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
-
-     return formatted_code, lint_message
-
- def summarize_text(text: str) -> str:
-     summarizer = pipeline("summarization")
-     summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
-     return summary[0]['summary_text']
-
-
- def translate_code(code: str, source_language: str, target_language: str) -> str:
-     # Use a Hugging Face translation model instead of OpenAI
-     # Example: English to Spanish
-     translator = pipeline(
-         "translation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")
-     translated_code = translator(code, target_lang=target_language)[0]['translation_text']
-     return translated_code
-
- def generate_code(code_idea: str, model_name: str) -> str:
-     """Generates code using the selected model."""
-     try:
-         generator = pipeline('text-generation', model=model_name)
-         generated_code = generator(code_idea, max_length=1000, num_return_sequences=1)[0]['generated_text']
-         return generated_code
      except Exception as e:
-         return f"Error generating code: {e}"
-
- def chat_interface(input_text: str) -> str:
-     """Handles general chat interactions with the user."""
-     # Use a Hugging Face chatbot model or your own logic
-     chatbot = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")
-     response = chatbot(input_text, max_length=50, num_return_sequences=1)[0]['generated_text']
-     return response
-
- def workspace_interface(project_name: str) -> str:
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(project_path):
-         os.makedirs(project_path)
-         st.session_state.workspace_projects[project_name] = {'files': []}
-         return f"Project '{project_name}' created successfully."
-     else:
-         return f"Project '{project_name}' already exists."
-
- def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(project_path):
-         return f"Project '{project_name}' does not exist."
-
-     file_path = os.path.join(project_path, file_name)
-     with open(file_path, "w") as file:
-         file.write(code)
-     st.session_state.workspace_projects[project_name]['files'].append(file_name)
-     return f"Code added to '{file_name}' in project '{project_name}'."
-
- def create_space_on_hugging_face(api, name, description, public, files, entrypoint="launch.py"):
-     url = f"{hf_hub_url()}spaces/{name}/prepare-repo"
-     headers = {"Authorization": f"Bearer {api.access_token}"}
-     payload = {
-         "public": public,
-         "gitignore_template": "web",
-         "default_branch": "main",
-         "archived": False,
-         "files": []
-     }
-     for filename, contents in files.items():
-         data = {
-             "content": contents,
-             "path": filename,
-             "encoding": "utf-8",
-             "mode": "overwrite"
-         }
-         payload["files"].append(data)
-     response = requests.post(url, json=payload, headers=headers)
-     response.raise_for_status()
-     location = response.headers.get("Location")
-     # wait_for_processing(location, api)  # You might need to implement this if it's not already defined
-
-     return Repository(name=name, api=api)
-
- # Streamlit App
- st.title("AI Agent Creator")
-
- # Sidebar navigation
- st.sidebar.title("Navigation")
- app_mode = st.sidebar.selectbox(
-     "Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
-
- if app_mode == "AI Agent Creator":
-     # AI Agent Creator
-     st.header("Create an AI Agent from Text")
-
-     st.subheader("From Text")
-     agent_name = st.text_input("Enter agent name:")
-     text_input = st.text_area("Enter skills (one per line):")
-     if st.button("Create Agent"):
-         agent_prompt = create_agent_from_text(agent_name, text_input)
-         st.success(f"Agent '{agent_name}' created and saved successfully.")
-         st.session_state.available_agents.append(agent_name)
-
- elif app_mode == "Tool Box":
-     # Tool Box
-     st.header("AI-Powered Tools")
-
-     # Chat Interface
-     st.subheader("Chat with CodeCraft")
-     chat_input = st.text_area("Enter your message:")
-     if st.button("Send"):
-         chat_response = chat_interface(chat_input)
-         st.session_state.chat_history.append((chat_input, chat_response))
-         st.write(f"CodeCraft: {chat_response}")
-
-     # Terminal Interface
-     st.subheader("Terminal")
-     terminal_input = st.text_input("Enter a command:")
-     if st.button("Run"):
-         terminal_output = terminal_interface(terminal_input)
-         st.session_state.terminal_history.append(
-             (terminal_input, terminal_output))
-         st.code(terminal_output, language="bash")
-
-     # Code Editor Interface
-     st.subheader("Code Editor")
-     code_editor = st.text_area("Write your code:", height=300)
-     if st.button("Format & Lint"):
-         formatted_code, lint_message = code_editor_interface(code_editor)
-         st.code(formatted_code, language="python")
-         st.info(lint_message)
-
-     # Text Summarization Tool
-     st.subheader("Summarize Text")
-     text_to_summarize = st.text_area("Enter text to summarize:")
-     if st.button("Summarize"):
-         summary = summarize_text(text_to_summarize)
-         st.write(f"Summary: {summary}")

-     # Text Translation Tool (Code Translation)
-     st.subheader("Translate Code")
-     code_to_translate = st.text_area("Enter code to translate:")
-     source_language = st.text_input("Enter source language (e.g., 'Python'):")
-     target_language = st.text_input(
-         "Enter target language (e.g., 'JavaScript'):")
-     if st.button("Translate Code"):
-         translated_code = translate_code(
-             code_to_translate, source_language, target_language)
-         st.code(translated_code, language=target_language.lower())
-
-     # Code Generation
-     st.subheader("Code Generation")
-     code_idea = st.text_input("Enter your code idea:")
-     if st.button("Generate Code"):
-         generated_code = generate_code(code_idea)
-         st.code(generated_code, language="python")
-
- elif app_mode == "Workspace Chat App":
-     # Workspace Chat App
-     st.header("Workspace Chat App")
-
-     # Project Workspace Creation
-     st.subheader("Create a New Project")
-     project_name = st.text_input("Enter project name:")
-     if st.button("Create Project"):
-         workspace_status = workspace_interface(project_name)
-         st.success(workspace_status)
-
-         # Automatically create requirements.txt and app.py
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         requirements_file = os.path.join(project_path, "requirements.txt")
-         if not os.path.exists(requirements_file):
-             with open(requirements_file, "w") as f:
-                 f.write("# Add your project's dependencies here\n")
-
-         app_file = os.path.join(project_path, "app.py")
-         if not os.path.exists(app_file):
-             with open(app_file, "w") as f:
-                 f.write("# Your project's main application logic goes here\n")
-
-     # Add Code to Workspace
-     st.subheader("Add Code to Workspace")
-     code_to_add = st.text_area("Enter code to add to workspace:")
-     file_name = st.text_input("Enter file name (e.g., 'app.py'):")
-     if st.button("Add Code"):
-         add_code_status = add_code_to_workspace(
-             project_name, code_to_add, file_name)
-         st.session_state.terminal_history.append(
-             (f"Add Code: {code_to_add}", add_code_status))
-         st.success(add_code_status)
-
-     # Terminal Interface with Project Context
-     st.subheader("Terminal (Workspace Context)")
-     terminal_input = st.text_input("Enter a command within the workspace:")
-     if st.button("Run Command"):
-         terminal_output = terminal_interface(terminal_input, project_name)
-         st.session_state.terminal_history.append(
-             (terminal_input, terminal_output))
-         st.code(terminal_output, language="bash")
-
-     # Chat Interface for Guidance
-     st.subheader("Chat with CodeCraft for Guidance")
-     chat_input = st.text_area("Enter your message for guidance:")
-     if st.button("Get Guidance"):
-         chat_response = chat_interface(chat_input)
-         st.session_state.chat_history.append((chat_input, chat_response))
-         st.write(f"CodeCraft: {chat_response}")
-
-     # Display Chat History
-     st.subheader("Chat History")
-     for user_input, response in st.session_state.chat_history:
-         st.write(f"User: {user_input}")
-         st.write(f"CodeCraft: {response}")
-
-     # Display Terminal History
-     st.subheader("Terminal History")
-     for command, output in st.session_state.terminal_history:
-         st.write(f"Command: {command}")
-         st.code(output, language="bash")
-
-     # Display Projects and Files
-     st.subheader("Workspace Projects")
-     for project, details in st.session_state.workspace_projects.items():
-         st.write(f"Project: {project}")
-         for file in details['files']:
-             st.write(f" - {file}")
-
-     # Chat with AI Agents
-     st.subheader("Chat with AI Agents")
-     selected_agent = st.selectbox(
-         "Select an AI agent", st.session_state.available_agents)
-     agent_chat_input = st.text_area("Enter your message for the agent:")
-     if st.button("Send to Agent"):
-         agent_chat_response = chat_interface_with_agent(
-             agent_chat_input, selected_agent)
-         st.session_state.chat_history.append(
-             (agent_chat_input, agent_chat_response))
-         st.write(f"{selected_agent}: {agent_chat_response}")
-
-     # Code Generation
-     st.subheader("Code Generation")
-     code_idea = st.text_input("Enter your code idea:")
-
-     # Model Selection Menu
-     selected_model = st.selectbox(
-         "Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
-
-     if st.button("Generate Code"):
-         generated_code = generate_code(code_idea, selected_model)
-         st.code(generated_code, language="python")
-
-     # Automate Build Process
-     st.subheader("Automate Build Process")
-     if st.button("Automate"):
-         # Load the agent without skills for now
-         agent = AIAgent(selected_agent, "", [])
-         summary, next_step = agent.autonomous_build(
-             st.session_state.chat_history, st.session_state.workspace_projects, project_name, selected_model)
-         st.write("Autonomous Build Summary:")
-         st.write(summary)
-         st.write("Next Step:")
-         st.write(next_step)
-
-         # If everything went well, proceed to deploy the Space
-         if agent._hf_api and agent.has_valid_hf_token():
-             agent.deploy_built_space_to_hf()
-             # Use the hf_token to interact with the Hugging Face API
-             api = HfApi(token="hf_token")  # Function to create a Space on Hugging Face
-             create_space_on_hugging_face(api, agent.name, agent.description, True, get_built_space_files())

  import os
  import subprocess
+ import random
  from huggingface_hub import InferenceClient
  import gradio as gr
+ from safe_search import safe_search
+ from i_search import google
+ from i_search import i_search as i_s
+ import prompts  # required by generate() below (prompts.WEB_DEV etc.); assumed available as in the previous version
+ from agent import (
+     ACTION_PROMPT,
+     ADD_PROMPT,
+     COMPRESS_HISTORY_PROMPT,
+     LOG_PROMPT,
+     LOG_RESPONSE,
+     MODIFY_PROMPT,
+     PREFIX,
+     SEARCH_QUERY,
+     READ_PROMPT,
+     TASK_PROMPT,
+     UNDERSTAND_TEST_RESULTS_PROMPT,
  )
+ from utils import parse_action, parse_file_content, read_python_module_structure
+ from datetime import datetime

+ now = datetime.now()
+ date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

+ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

+ VERBOSE = True
+ MAX_HISTORY = 100

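+ # Build a Mixtral-style instruct prompt: each (user, bot) turn becomes
+ # "[INST] user [/INST] bot</s>", followed by the new message.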
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
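+ # One model call: prepend PREFIX (date, purpose, safe_search), fill the prompt
+ # template, then stream tokens from the InferenceClient into a single string.
+ # NOTE: stop_tokens and max_tokens are accepted but not forwarded to the client.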
+ def run_gpt(prompt_template, stop_tokens, max_tokens, purpose, **prompt_kwargs):
+     seed = random.randint(1, 1111111111111111)
+     print(seed)
+     generate_kwargs = dict(
+         temperature=1.0,
+         max_new_tokens=2096,
+         top_p=0.99,
+         repetition_penalty=1.0,
+         do_sample=True,
+         seed=seed,
+     )
+
+     content = PREFIX.format(
+         date_time_str=date_time_str,
+         purpose=purpose,
+         safe_search=safe_search,
+     ) + prompt_template.format(**prompt_kwargs)
+     if VERBOSE:
+         print(LOG_PROMPT.format(content))
+
+     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     resp = ""
+     for response in stream:
+         resp += response.token.text
+
+     if VERBOSE:
+         print(LOG_RESPONSE.format(resp))
+     return resp
+
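+ # Compress a long history into a single "observation:" line to keep context small.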
+ def compress_history(purpose, task, history, directory):
+     resp = run_gpt(
+         COMPRESS_HISTORY_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         max_tokens=512,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     history = "observation: {}\n".format(resp)
+     return history
+
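+ # Handle "action: SEARCH action_input=https://URL": strip angle brackets from the
+ # URL, run the search, and append the result to history as an observation.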
+ def call_search(purpose, task, history, directory, action_input):
+     print("CALLING SEARCH")
      try:
+         if "http" in action_input:
+             if "<" in action_input:
+                 action_input = action_input.strip("<")
+             if ">" in action_input:
+                 action_input = action_input.strip(">")
+             response = i_s(action_input)
+             print(response)
+             history += "observation: search result is: {}\n".format(response)
+         else:
+             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
      except Exception as e:
+         history += "observation: {}'\n".format(e)
+     return "MAIN", None, history, task
+
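+ # Core step: ask the model for thought/action lines and route on the first action.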
+ def call_main(purpose, task, history, directory, action_input):
+     resp = run_gpt(
+         ACTION_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         max_tokens=2096,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     lines = resp.strip().strip("\n").split("\n")
+     for line in lines:
+         if line == "":
+             continue
+         if line.startswith("thought: "):
+             history += "{}\n".format(line)
+         elif line.startswith("action: "):
+             action_name, action_input = parse_action(line)
+             print(f'ACTION_NAME :: {action_name}')
+             print(f'ACTION_INPUT :: {action_input}')
+             history += "{}\n".format(line)
+             if "COMPLETE" in action_name or "COMPLETE" in action_input:
+                 task = "END"
+                 return action_name, action_input, history, task
+             else:
+                 return action_name, action_input, history, task
+         else:
+             history += "{}\n".format(line)
+     return "MAIN", None, history, task
+
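+ # Ask the model to restate the current task (max 64 new tokens).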
+ def call_set_task(purpose, task, history, directory, action_input):
+     task = run_gpt(
+         TASK_PROMPT,
+         stop_tokens=[],
+         max_tokens=64,
+         purpose=purpose,
+         task=task,
+         history=history,
+     ).strip("\n")
+     history += "observation: task has been updated to: {}\n".format(task)
+     return "MAIN", None, history, task
+
+ def end_fn(purpose, task, history, directory, action_input):
+     task = "END"
+     return "COMPLETE", "COMPLETE", history, task
+
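+ # Dispatch table: action names emitted by the model -> handler functions.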
+ NAME_TO_FUNC = {
+     "MAIN": call_main,
+     "UPDATE-TASK": call_set_task,
+     "SEARCH": call_search,
+     "COMPLETE": end_fn,
+ }
+
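+ # Normalize the action name, compress history once it exceeds MAX_HISTORY lines,
+ # then dispatch to the matching handler.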
+ def run_action(purpose, task, history, directory, action_name, action_input):
+     print(f'action_name::{action_name}')
      try:
+         if "RESPONSE" in action_name or "COMPLETE" in action_name:
+             action_name = "COMPLETE"
+             task = "END"
+             return action_name, "COMPLETE", history, task
+
+         if len(history.split("\n")) > MAX_HISTORY:
+             if VERBOSE:
+                 print("COMPRESSING HISTORY")
+             history = compress_history(purpose, task, history, directory)
+         if action_name not in NAME_TO_FUNC:
+             action_name = "MAIN"
+         if action_name == "" or action_name is None:
+             action_name = "MAIN"
+         assert action_name in NAME_TO_FUNC
+
+         print("RUN: ", action_name, action_input)
+         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
      except Exception as e:
+         history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
+         return "MAIN", None, history, task
+
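+ # Main agent loop: start with UPDATE-TASK, then keep executing actions and
+ # yielding the growing history until a handler sets task to "END".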
+ def run(purpose, history):
+     task = None
+     directory = "./"
+     if history:
+         history = str(history).strip("[]")
+     if not history:
+         history = ""
+
+     action_name = "UPDATE-TASK" if task is None else "MAIN"
+     action_input = None
+     while True:
+         print("")
+         print("")
+         print("---")
+         print("purpose:", purpose)
+         print("task:", task)
+         print("---")
+         print(history)
+         print("---")
+
+         action_name, action_input, history, task = run_action(
+             purpose,
+             task,
+             history,
+             directory,
+             action_name,
+             action_input,
+         )
+         yield (history)
+         if task == "END":
+             return (history)

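+ # NOTE: duplicate definition; this shadows the format_prompt defined above.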
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
+ agents = [
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV"
+ ]
+
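+ # Gradio-facing generator: select the system prompt for the chosen agent and
+ # stream a completion using the user-supplied sampling settings.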
+ def generate(
+     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+ ):
+     seed = random.randint(1, 1111111111111111)
+
+     agent = prompts.WEB_DEV
+     if agent_name == "WEB_DEV":
+         agent = prompts.WEB_DEV
+     if agent_name == "AI_SYSTEM_PROMPT":
+         agent = prompts.AI_SYSTEM_PROMPT
+     if agent_name == "PYTHON_CODE_DEV":
+         agent = prompts.PYTHON_CODE_DEV
+     system_prompt = agent
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=seed,
+     )
+
+     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
+
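+ # Extra Gradio inputs rendered alongside the chat box: agent picker and sampling sliders.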
+ additional_inputs = [
+     gr.Dropdown(
+         label="Agents",
+         choices=[s for s in agents],
+         value=agents[0],
+         interactive=True,
+     ),
+     gr.Textbox(
+         label="System Prompt",
+         max_lines=1,
+         interactive=True,
+     ),
+     gr.Slider(
+         label="Temperature",
+         value=0.9,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values produce more diverse outputs",
+     ),
+     gr.Slider(
+         label="Max new tokens",
+         value=1048 * 10,
+         minimum=0,
+         maximum=1048 * 10,
+         step=64,
+         interactive=True,
+         info="The maximum number of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p (nucleus sampling)",
+         value=0.90,
+         minimum=0.0,
+         maximum=1,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.2,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,