acecalisto3 commited on
Commit
d903c55
·
verified ·
1 Parent(s): 9863d7b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +403 -445
app.py CHANGED
@@ -1,452 +1,410 @@
1
- from huggingface_hub import InferenceClient, hf_hub_url
2
- import gradio as gr
3
- import random
4
- import os
5
  import subprocess
6
- import threading
7
- import time
8
- import shutil
9
- from typing import Dict, Tuple, List
10
- import json
11
- from rich import print as rprint
12
- from rich.panel import Panel
13
- from rich.progress import track
14
- from rich.table import Table
15
- from rich.prompt import Prompt, Confirm
16
- from rich.markdown import Markdown
17
- from rich.traceback import install
18
- install() # Enable rich tracebacks for easier debugging
19
-
20
- # --- Constants ---
21
-
22
- API_URL = "https://api-inference.huggingface.co/models/"
23
- MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1" # Replace with your desired model
24
-
25
- # Chat Interface Parameters
26
- DEFAULT_TEMPERATURE = 0.9
27
- DEFAULT_MAX_NEW_TOKENS = 2048
28
- DEFAULT_TOP_P = 0.95
29
- DEFAULT_REPETITION_PENALTY = 1.2
30
-
31
- # Local Server
32
- LOCAL_HOST_PORT = 7860
33
-
34
- # --- Agent Roles ---
35
-
36
- agent_roles: Dict[str, Dict[str, bool]] = {
37
- "Web Developer": {"description": "A master of front-end and back-end web development.", "active": False},
38
- "Prompt Engineer": {"description": "An expert in crafting effective prompts for AI models.", "active": False},
39
- "Python Code Developer": {"description": "A skilled Python programmer who can write clean and efficient code.", "active": False},
40
- "Hugging Face Hub Expert": {"description": "A specialist in navigating and utilizing the Hugging Face Hub.", "active": False},
41
- "AI-Powered Code Assistant": {"description": "An AI assistant that can help with coding tasks and provide code snippets.", "active": False},
42
- }
43
-
44
- # --- Initial Prompt ---
45
-
46
- selected_agent = list(agent_roles.keys())[0]
47
- initial_prompt = f"""
48
- You are an expert {selected_agent} who responds with complete program coding to client requests.
49
- Using available tools, please explain the researched information.
50
- Please don't answer based solely on what you already know. Always perform a search before providing a response.
51
- In special cases, such as when the user specifies a page to read, there's no need to search.
52
- Please read the provided page and answer the user's question accordingly.
53
- If you find that there's not much information just by looking at the search results page, consider these two options and try them out:
54
- - Try clicking on the links of the search results to access and read the content of each page.
55
- - Change your search query and perform a new search.
56
- Users are extremely busy and not as free as you are.
57
- Therefore, to save the user's effort, please provide direct answers.
58
- BAD ANSWER EXAMPLE
59
- - Please refer to these pages.
60
- - You can write code referring these pages.
61
- - Following page will be helpful.
62
- GOOD ANSWER EXAMPLE
63
- - This is the complete code: -- complete code here --
64
- - The answer of you question is -- answer here --
65
- Please make sure to list the URLs of the pages you referenced at the end of your answer. (This will allow users to verify your response.)
66
- Please make sure to answer in the language used by the user. If the user asks in Japanese, please answer in Japanese. If the user asks in Spanish, please answer in Spanish.
67
- But, you can go ahead and search in English, especially for programming-related questions. PLEASE MAKE SURE TO ALWAYS SEARCH IN ENGLISH FOR THOSE.
68
  """
69
-
70
- # --- Custom CSS ---
71
-
72
- customCSS = """
73
- #component-7 {
74
- height: 1600px;
75
- flex-grow: 4;
76
- }
77
- """
78
-
79
- # --- Functions ---
80
-
81
- # Function to toggle the active state of an agent
82
- def toggle_agent(agent_name: str) -> str:
83
- """Toggles the active state of an agent."""
84
- global agent_roles
85
- agent_roles[agent_name]["active"] = not agent_roles[agent_name]["active"]
86
- return f"{agent_name} is now {'active' if agent_roles[agent_name]['active'] else 'inactive'}"
87
-
88
- # Function to get the active agent cluster
89
- def get_agent_cluster() -> Dict[str, bool]:
90
- """Returns a dictionary of active agents."""
91
- return {agent: agent_roles[agent]["active"] for agent in agent_roles}
92
-
93
- # Function to execute code
94
- def run_code(code: str) -> str:
95
- """Executes the provided code and returns the output."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  try:
97
- output = subprocess.check_output(
98
- ['python', '-c', code],
99
- stderr=subprocess.STDOUT,
100
- universal_newlines=True,
101
- )
102
- return output
103
- except subprocess.CalledProcessError as e:
104
- return f"Error: {e.output}"
105
-
106
- # Function to format the prompt
107
- def format_prompt(message: str, history: list[Tuple[str, str]], agent_roles: list[str]) -> str:
108
- """Formats the prompt with the selected agent roles and conversation history."""
109
- prompt = f"""
110
- You are an expert agent cluster, consisting of {', '.join(agent_roles)}.
111
- Respond with complete program coding to client requests.
112
- Using available tools, please explain the researched information.
113
- Please don't answer based solely on what you already know. Always perform a search before providing a response.
114
- In special cases, such as when the user specifies a page to read, there's no need to search.
115
- Please read the provided page and answer the user's question accordingly.
116
- If you find that there's not much information just by looking at the search results page, consider these two options and try them out:
117
- - Try clicking on the links of the search results to access and read the content of each page.
118
- - Change your search query and perform a new search.
119
- Users are extremely busy and not as free as you are.
120
- Therefore, to save the user's effort, please provide direct answers.
121
- BAD ANSWER EXAMPLE
122
- - Please refer to these pages.
123
- - You can write code referring these pages.
124
- - Following page will be helpful.
125
- GOOD ANSWER EXAMPLE
126
- - This is the complete code: -- complete code here --
127
- - The answer of you question is -- answer here --
128
- Please make sure to list the URLs of the pages you referenced at the end of your answer. (This will allow users to verify your response.)
129
- Please make sure to answer in the language used by the user. If the user asks in Japanese, please answer in Japanese. If the user asks in Spanish, please answer in Spanish.
130
- But, you can go ahead and search in English, especially for programming-related questions. PLEASE MAKE SURE TO ALWAYS SEARCH IN ENGLISH FOR THOSE.
131
- """
132
-
133
- for user_prompt, bot_response in history:
134
- prompt += f"[INST] {user_prompt} [/INST]"
135
- prompt += f" {bot_response}</s> "
136
 
137
- prompt += f"[INST] {message} [/INST]"
138
- return prompt
139
-
140
- # Function to generate a response
141
- def generate(prompt: str, history: list[Tuple[str, str]], agent_roles: list[str], temperature: float = DEFAULT_TEMPERATURE, max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS, top_p: float = DEFAULT_TOP_P, repetition_penalty: float = DEFAULT_REPETITION_PENALTY) -> str:
142
- """Generates a response using the selected agent roles and parameters."""
143
- temperature = float(temperature)
144
- if temperature < 1e-2:
145
- temperature = 1e-2
146
- top_p = float(top_p)
147
-
148
- generate_kwargs = dict(
149
- temperature=temperature,
150
- max_new_tokens=max_new_tokens,
151
- top_p=top_p,
152
- repetition_penalty=repetition_penalty,
153
- do_sample=True,
154
- seed=random.randint(0, 10**7),
155
  )
156
-
157
- formatted_prompt = format_prompt(prompt, history, agent_roles)
158
-
159
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
160
- output = ""
161
-
162
- for response in stream:
163
- output += response.token.text
164
- yield output
165
- return output
166
-
167
- # Function to handle user input and generate responses
168
- def chat_interface(message: str, history: list[Tuple[str, str]], agent_cluster: Dict[str, bool], temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float) -> Tuple[str, str]:
169
- """Handles user input and generates responses."""
170
- rprint(f"[bold blue]User:[/bold blue] {message}") # Log user message
171
- if message.startswith("python"):
172
- # User entered code, execute it
173
- code = message[9:-3]
174
- output = run_code(code)
175
- rprint(f"[bold green]Code Output:[/bold green] {output}") # Log code output
176
- return (message, output)
177
  else:
178
- # User entered a normal message, generate a response
179
- active_agents = [agent for agent, is_active in agent_cluster.items() if is_active]
180
- response = generate(message, history, active_agents, temperature, max_new_tokens, top_p, repetition_penalty)
181
- rprint(f"[bold purple]Agent Response:[/bold purple] {response}") # Log agent response
182
- return (message, response)
183
-
184
- # Function to create a new web app instance
185
- def create_web_app(app_name: str, code: str) -> None:
186
- """Creates a new web app instance with the given name and code."""
187
- # Create a new directory for the app
188
- os.makedirs(app_name, exist_ok=True)
189
-
190
- # Create the app.py file
191
- with open(os.path.join(app_name, 'app.py'), 'w') as f:
192
- f.write(code)
193
-
194
- # Create the requirements.txt file
195
- with open(os.path.join(app_name, 'requirements.txt'), 'w') as f:
196
- f.write("gradio\nhuggingface_hub")
197
-
198
- # Print a success message
199
- print(f"Web app '{app_name}' created successfully!")
200
-
201
- # Function to handle the "Create Web App" button click
202
- def create_web_app_button_click(code: str) -> str:
203
- """Handles the "Create Web App" button click."""
204
- # Get the app name from the user
205
- app_name = gr.Textbox.get().strip()
206
-
207
- # Validate the app name
208
- if not app_name:
209
- return "Please enter a valid app name."
210
-
211
- # Create the web app instance
212
- create_web_app(app_name, code)
213
-
214
- # Return a success message
215
- return f"Web app '{app_name}' created successfully!"
216
-
217
- # Function to handle the "Deploy" button click
218
- def deploy_button_click(app_name: str, code: str) -> str:
219
- """Handles the "Deploy" button click."""
220
- # Get the app name from the user
221
- app_name = gr.Textbox.get().strip()
222
-
223
- # Validate the app name
224
- if not app_name:
225
- return "Please enter a valid app name."
226
-
227
- # Get Hugging Face token
228
- hf_token = gr.Textbox.get("hf_token").strip()
229
-
230
- # Validate Hugging Face token
231
- if not hf_token:
232
- return "Please enter a valid Hugging Face token."
233
-
234
- # Create a new directory for the app
235
- os.makedirs(app_name, exist_ok=True)
236
-
237
- # Copy the code to the app directory
238
- with open(os.path.join(app_name, 'app.py'), 'w') as f:
239
- f.write(code)
240
-
241
- # Create the requirements.txt file
242
- with open(os.path.join(app_name, 'requirements.txt'), 'w') as f:
243
- f.write("gradio\nhuggingface_hub")
244
-
245
- # Deploy the app to Hugging Face Spaces
246
- try:
247
- subprocess.run(
248
- ['huggingface-cli', 'login', '--token', hf_token],
249
- check=True,
250
- )
251
- subprocess.run(
252
- ['huggingface-cli', 'space', 'create', app_name, '--repo_type', 'spaces', '--private', '--branch', 'main'],
253
- check=True,
254
- )
255
- subprocess.run(
256
- ['git', 'init'],
257
- cwd=app_name,
258
- check=True,
259
- )
260
- subprocess.run(
261
- ['git', 'add', '.'],
262
- cwd=app_name,
263
- check=True,
264
- )
265
- subprocess.run(
266
- ['git', 'commit', '-m', 'Initial commit'],
267
- cwd=app_name,
268
- check=True,
269
- )
270
- subprocess.run(
271
- ['git', 'remote', 'add', 'origin', hf_hub_url(username='your_username', repo_id=app_name)],
272
- cwd=app_name,
273
- check=True,
274
- )
275
- subprocess.run(
276
- ['git', 'push', '-u', 'origin', 'main'],
277
- cwd=app_name,
278
- check=True,
279
- )
280
- return f"Web app '{app_name}' deployed successfully to Hugging Face Spaces!"
281
- except subprocess.CalledProcessError as e:
282
- return f"Error: {e}"
283
-
284
- # Function to handle the "Local Host" button click
285
- def local_host_button_click(app_name: str, code: str) -> str:
286
- """Handles the "Local Host" button click."""
287
- # Get the app name from the user
288
- app_name = gr.Textbox.get().strip()
289
-
290
- # Validate the app name
291
- if not app_name:
292
- return "Please enter a valid app name."
293
-
294
- # Create a new directory for the app
295
- os.makedirs(app_name, exist_ok=True)
296
-
297
- # Copy the code to the app directory
298
- with open(os.path.join(app_name, 'app.py'), 'w') as f:
299
- f.write(code)
300
-
301
- # Create the requirements.txt file
302
- with open(os.path.join(app_name, 'requirements.txt'), 'w') as f:
303
- f.write("gradio\nhuggingface_hub")
304
-
305
- # Start the local server
306
- os.chdir(app_name)
307
- subprocess.Popen(['gradio', 'run', 'app.py', '--share', '--server_port', str(LOCAL_HOST_PORT)])
308
-
309
- # Return a success message
310
- return f"Web app '{app_name}' running locally on port {LOCAL_HOST_PORT}!"
311
-
312
- # Function to handle the "Ship" button click
313
- def ship_button_click(app_name: str, code: str) -> str:
314
- """Handles the "Ship" button click."""
315
- # Get the app name from the user
316
- app_name = gr.Textbox.get().strip()
317
-
318
- # Validate the app name
319
- if not app_name:
320
- return "Please enter a valid app name."
321
-
322
- # Ship the web app instance
323
- # ... (Implement shipping logic here)
324
-
325
- # Return a success message
326
- return f"Web app '{app_name}' shipped successfully!"
327
-
328
- # --- Gradio Interface ---
329
-
330
- with gr.Blocks(theme='ParityError/Interstellar') as demo:
331
- # --- Agent Selection ---
332
- with gr.Row():
333
- for agent_name, agent_data in agent_roles.items():
334
- button = gr.Button(agent_name, variant="secondary")
335
- textbox = gr.Textbox(agent_data["description"], interactive=False)
336
- button.click(toggle_agent, inputs=[button], outputs=[textbox])
337
-
338
- # --- Chat Interface ---
339
- with gr.Row():
340
- chatbot = gr.Chatbot()
341
- chat_interface_input = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
342
- chat_interface_output = gr.Textbox(label="Response", interactive=False)
343
-
344
- # Parameters
345
- temperature_slider = gr.Slider(
346
- label="Temperature",
347
- value=DEFAULT_TEMPERATURE,
348
- minimum=0.0,
349
- maximum=1.0,
350
- step=0.05,
351
- interactive=True,
352
- info="Higher values generate more diverse outputs",
353
- )
354
- max_new_tokens_slider = gr.Slider(
355
- label="Maximum New Tokens",
356
- value=DEFAULT_MAX_NEW_TOKENS,
357
- minimum=64,
358
- maximum=4096,
359
- step=64,
360
- interactive=True,
361
- info="The maximum number of new tokens",
362
- )
363
- top_p_slider = gr.Slider(
364
- label="Top-p (Nucleus Sampling)",
365
- value=DEFAULT_TOP_P,
366
- minimum=0.0,
367
- maximum=1,
368
- step=0.05,
369
- interactive=True,
370
- info="Higher values sample more low-probability tokens",
371
- )
372
- repetition_penalty_slider = gr.Slider(
373
- label="Repetition Penalty",
374
- value=DEFAULT_REPETITION_PENALTY,
375
- minimum=1.0,
376
- maximum=2.0,
377
- step=0.05,
378
- interactive=True,
379
- info="Penalize repeated tokens",
380
- )
381
-
382
- # Submit Button
383
- submit_button = gr.Button("Submit")
384
-
385
- # Chat Interface Logic
386
- submit_button.click(
387
- chat_interface,
388
- inputs=[
389
- chat_interface_input,
390
- chatbot,
391
- get_agent_cluster,
392
- temperature_slider,
393
- max_new_tokens_slider,
394
- top_p_slider,
395
- repetition_penalty_slider,
396
- ],
397
- outputs=[
398
- chatbot,
399
- chat_interface_output,
400
- ],
401
- )
402
-
403
- # --- Web App Creation ---
404
- with gr.Row():
405
- app_name_input = gr.Textbox(label="App Name", placeholder="Enter your app name")
406
- code_output = gr.Textbox(label="Code", interactive=False)
407
- create_web_app_button = gr.Button("Create Web App")
408
- deploy_button = gr.Button("Deploy")
409
- local_host_button = gr.Button("Local Host")
410
- ship_button = gr.Button("Ship")
411
- hf_token_input = gr.Textbox(label="Hugging Face Token", placeholder="Enter your Hugging Face token")
412
-
413
- # Web App Creation Logic
414
- create_web_app_button.click(
415
- create_web_app_button_click,
416
- inputs=[code_output],
417
- outputs=[gr.Textbox(label="Status", interactive=False)],
418
- )
419
-
420
- # Deploy the web app
421
- deploy_button.click(
422
- deploy_button_click,
423
- inputs=[app_name_input, code_output, hf_token_input],
424
- outputs=[gr.Textbox(label="Status", interactive=False)],
425
- )
426
-
427
- # Local host the web app
428
- local_host_button.click(
429
- local_host_button_click,
430
- inputs=[app_name_input, code_output],
431
- outputs=[gr.Textbox(label="Status", interactive=False)],
432
- )
433
-
434
- # Ship the web app
435
- ship_button.click(
436
- ship_button_click,
437
- inputs=[app_name_input, code_output],
438
- outputs=[gr.Textbox(label="Status", interactive=False)],
439
- )
440
-
441
- # --- Connect Chat Output to Code Output ---
442
- chat_interface_output.change(
443
- lambda x: x,
444
- inputs=[chat_interface_output],
445
- outputs=[code_output],
446
  )
447
-
448
- # --- Initialize Hugging Face Client ---
449
- client = InferenceClient(repo_id=MODEL_NAME, token=os.environ.get("HF_TOKEN"))
450
-
451
- # --- Launch Gradio ---
452
- demo.queue().launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os  # FIX: used throughout (save_agent_to_file, workspace_interface, ...) but was never imported
import subprocess
import streamlit as st
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import black
from pylint import lint
from io import StringIO

# Repository and filesystem layout constants.
HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
PROJECT_ROOT = "projects"
AGENT_DIRECTORY = "agents"

# Global state to manage communication between Tool Box and Workspace Chat App.
# Streamlit re-runs this script on every interaction, so anything that must
# survive across interactions has to live in st.session_state.
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []
if 'terminal_history' not in st.session_state:
    st.session_state.terminal_history = []
if 'workspace_projects' not in st.session_state:
    st.session_state.workspace_projects = {}
if 'available_agents' not in st.session_state:
    st.session_state.available_agents = []
if 'current_state' not in st.session_state:
    st.session_state.current_state = {
        'toolbox': {},
        'workspace_chat': {}
    }
26
+
27
class AIAgent:
    """A saved AI agent: a name, a one-line description, and a list of skills."""

    def __init__(self, name, description, skills):
        self.name = name
        self.description = description
        self.skills = skills

    def create_agent_prompt(self):
        """Render this agent as the system prompt handed to the chat model."""
        skills_str = '\n'.join(f"* {skill}" for skill in self.skills)
        agent_prompt = f"""
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
{skills_str}

I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
"""
        return agent_prompt

    def autonomous_build(self, chat_history, workspace_projects):
        """
        Autonomous build logic that continues based on the state of chat history and workspace projects.
        """
        turns = (f"User: {u}\nAgent: {a}" for u, a in chat_history)
        projects = (f"{p}: {details}" for p, details in workspace_projects.items())
        summary = "Chat History:\n" + "\n".join(turns)
        summary += "\n\nWorkspace Projects:\n" + "\n".join(projects)

        # Placeholder heuristic: the "next step" is currently a fixed message.
        next_step = "Based on the current state, the next logical step is to implement the main application logic."

        return summary, next_step
53
+
54
def save_agent_to_file(agent):
    """Saves the agent's prompt to a file locally and then commits to the Hugging Face repository."""
    # Make sure the agents directory exists before writing into it.
    if not os.path.exists(AGENT_DIRECTORY):
        os.makedirs(AGENT_DIRECTORY)
    prompt_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
    config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
    with open(prompt_path, "w") as fh:
        fh.write(agent.create_agent_prompt())
    with open(config_path, "w") as fh:
        fh.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
    # Register the agent so the UI can offer it for selection.
    st.session_state.available_agents.append(agent.name)

    commit_and_push_changes(f"Add agent {agent.name}")
67
+
68
def load_agent_prompt(agent_name):
    """Loads an agent prompt from disk; returns None when no such agent exists."""
    path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
    if not os.path.exists(path):
        return None
    with open(path, "r") as fh:
        return fh.read()
77
+
78
def create_agent_from_text(name, text):
    """Builds an AIAgent whose skills are the lines of *text*, persists it, and returns its prompt."""
    agent = AIAgent(name, "AI agent created from text input.", text.split('\n'))
    save_agent_to_file(agent)
    return agent.create_agent_prompt()
83
+
84
# Chat interface using a selected agent
def chat_interface_with_agent(input_text, agent_name):
    """Chat with a saved agent: prepend its prompt to the user input and reply with GPT-2.

    Returns the generated reply, or an error string when the agent or model
    cannot be loaded.
    """
    agent_prompt = load_agent_prompt(agent_name)
    if agent_prompt is None:
        return f"Agent {agent_name} not found."

    # Load the GPT-2 model which is compatible with AutoModelForCausalLM.
    model_name = "gpt2"
    try:
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # Combine the agent prompt with user input.
    combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"

    # Truncate to stay under the model's context limit.  FIX: keep the *end*
    # of the prompt — the original kept the first 900 tokens, which cut off
    # the user's message entirely for long agent prompts.
    max_input_length = 900
    input_ids = tokenizer.encode(combined_input, return_tensors="pt")
    if input_ids.shape[1] > max_input_length:
        input_ids = input_ids[:, -max_input_length:]

    # Generate the reply.  FIX: decode only the newly generated tokens so the
    # whole prompt is not echoed back to the user.  (Also removed an unused
    # text-generation pipeline the original built and never called.)
    outputs = model.generate(
        input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True,
        pad_token_id=tokenizer.eos_token_id  # GPT-2 has no pad token; reuse EOS
    )
    response = tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)
    return response
114
+
115
def workspace_interface(project_name):
    """Creates a new project directory under PROJECT_ROOT and registers it in session state."""
    project_path = os.path.join(PROJECT_ROOT, project_name)
    if not os.path.exists(PROJECT_ROOT):
        os.makedirs(PROJECT_ROOT)
    # Guard clause: bail out early when the project already exists.
    if os.path.exists(project_path):
        return f"Project {project_name} already exists."
    os.makedirs(project_path)
    st.session_state.workspace_projects[project_name] = {"files": []}
    st.session_state.current_state['workspace_chat']['project_name'] = project_name
    commit_and_push_changes(f"Create project {project_name}")
    return f"Project {project_name} created successfully."
127
+
128
def add_code_to_workspace(project_name, code, file_name):
    """Writes *code* into *file_name* inside an existing project and records it in session state."""
    project_path = os.path.join(PROJECT_ROOT, project_name)
    # Guard clause: refuse to write into a project that was never created.
    if not os.path.exists(project_path):
        return f"Project {project_name} does not exist."
    with open(os.path.join(project_path, file_name), "w") as fh:
        fh.write(code)
    st.session_state.workspace_projects[project_name]["files"].append(file_name)
    st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
    commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
    return f"Code added to {file_name} in project {project_name} successfully."
140
+
141
def terminal_interface(command, project_name=None):
    """Runs a shell command — optionally inside a project directory — and returns its output.

    On success returns stdout; on failure returns stderr.  Either way the
    output is mirrored into session state for the Tool Box panel.
    """
    # NOTE(review): shell=True executes arbitrary user-supplied strings.  That
    # is the point of a local dev toolbox, but never expose this to untrusted
    # users.
    cwd = None
    if project_name:
        cwd = os.path.join(PROJECT_ROOT, project_name)
        if not os.path.exists(cwd):
            return f"Project {project_name} does not exist."
    result = subprocess.run(command, cwd=cwd, shell=True, capture_output=True, text=True)
    output = result.stdout if result.returncode == 0 else result.stderr
    st.session_state.current_state['toolbox']['terminal_output'] = output
    return output
155
+
156
# NOTE(review): a second, stub definition of chat_interface_with_agent lived
# here with a comment-only body ("# ... [rest of the chat_interface_with_agent
# function] ...").  A def whose body is only a comment is a SyntaxError, and
# even if completed it would have shadowed the working implementation defined
# above.  Removed.
159
+
160
+
161
def summarize_text(text):
    """Summarizes *text* with the default Hugging Face summarization pipeline."""
    # NOTE(review): the pipeline (and its model) is rebuilt on every call —
    # consider caching it if this tool sees real use.
    result = pipeline("summarization")(text, max_length=50, min_length=25, do_sample=False)
    summary_text = result[0]['summary_text']
    st.session_state.current_state['toolbox']['summary'] = summary_text
    return summary_text
166
+
167
def sentiment_analysis(text):
    """Runs the default sentiment-analysis pipeline on *text* and returns the first result dict."""
    result = pipeline("sentiment-analysis")(text)[0]
    st.session_state.current_state['toolbox']['sentiment'] = result
    return result
172
+
173
+ # ... [rest of the translate_code function, but remove the OpenAI API call and replace it with your own logic] ...
174
+
175
def generate_code(code_idea):
    """Generates code for *code_idea* using a local GPT-2 text-generation pipeline."""
    text_gen = pipeline('text-generation', model='gpt2')
    generated_code = text_gen(code_idea, max_length=100, num_return_sequences=1)[0]['generated_text']
    st.session_state.current_state['toolbox']['generated_code'] = generated_code
    return generated_code
182
+
183
def translate_code(code, input_language, output_language):
    """Translates *code* from one programming language to another with a local LLM.

    Raises:
        ValueError: when either language is not in the supported set.
    """
    # Map supported languages to their file extensions.  FIX: the original
    # dict was empty, so every call raised ValueError regardless of input.
    language_extensions = {
        "Python": ".py",
        "JavaScript": ".js",
        "Java": ".java",
        "C": ".c",
        "C++": ".cpp",
        "Ruby": ".rb",
        "Go": ".go",
        "Rust": ".rs",
    }

    # Handle edge cases: invalid input and unsupported programming languages.
    if input_language not in language_extensions:
        raise ValueError(f"Invalid input language: {input_language}")
    if output_language not in language_extensions:
        raise ValueError(f"Invalid output language: {output_language}")

    # FIX: the original called openai.ChatCompletion through an `openai`
    # module that is never imported (NameError), and assigned translated_code
    # twice.  Use the local text-generation pipeline already used by
    # generate_code instead.
    prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
    translator = pipeline('text-generation', model='gpt2')
    translated_code = translator(prompt, max_new_tokens=200, num_return_sequences=1)[0]['generated_text'].strip()

    st.session_state.current_state['toolbox']['translated_code'] = translated_code
    return translated_code
214
+
215
# NOTE(review): a second definition of generate_code lived here.  It shadowed
# the working pipeline-based implementation above and called the OpenAI API
# via an `openai` module that is never imported, so every call raised
# NameError.  Removed in favor of the earlier implementation.
226
+
227
def commit_and_push_changes(commit_message):
    """Commits and pushes changes to the Hugging Face repository.

    Stops at the first failing git command and surfaces its stderr in the UI.
    """
    # FIX: the original built "git commit -m '{commit_message}'" as a shell
    # string — a commit message containing a single quote broke the command
    # (and allowed shell injection).  Argument lists with shell=False pass the
    # message through verbatim.
    commands = [
        ["git", "add", "."],
        ["git", "commit", "-m", commit_message],
        ["git", "push"],
    ]
    for command in commands:
        result = subprocess.run(command, capture_output=True, text=True)
        if result.returncode != 0:
            st.error(f"Error executing command '{' '.join(command)}': {result.stderr}")
            break
239
+
240
+ # Streamlit App
241
+ st.title("AI Agent Creator")
242
+
243
+ # Sidebar navigation
244
+ st.sidebar.title("Navigation")
245
+ app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
246
+
247
+ if app_mode == "AI Agent Creator":
248
+ # AI Agent Creator
249
+ st.header("Create an AI Agent from Text")
250
+
251
+ st.subheader("From Text")
252
+ agent_name = st.text_input("Enter agent name:")
253
+ text_input = st.text_area("Enter skills (one per line):")
254
+ if st.button("Create Agent"):
255
+ agent_prompt = create_agent_from_text(agent_name, text_input)
256
+ st.success(f"Agent '{agent_name}' created and saved successfully.")
257
+ st.session_state.available_agents.append(agent_name)
258
+
259
+ elif app_mode == "Tool Box":
260
+ # Tool Box
261
+ st.header("AI-Powered Tools")
262
+
263
+ # Chat Interface
264
+ st.subheader("Chat with CodeCraft")
265
+ chat_input = st.text_area("Enter your message:")
266
+ if st.button("Send"):
267
+ if chat_input.startswith("@"):
268
+ agent_name = chat_input.split(" ")[0][1:] # Extract agent_name from @agent_name
269
+ chat_input = " ".join(chat_input.split(" ")[1:]) # Remove agent_name from input
270
+ chat_response = chat_interface_with_agent(chat_input, agent_name)
271
+ else:
272
+ chat_response = chat_interface(chat_input)
273
+ st.session_state.chat_history.append((chat_input, chat_response))
274
+ st.write(f"CodeCraft: {chat_response}")
275
+
276
+ # Terminal Interface
277
+ st.subheader("Terminal")
278
+ terminal_input = st.text_input("Enter a command:")
279
+ if st.button("Run"):
280
+ terminal_output = terminal_interface(terminal_input)
281
+ st.session_state.terminal_history.append((terminal_input, terminal_output))
282
+ st.code(terminal_output, language="bash")
283
+
284
+ # Code Editor Interface
285
+ st.subheader("Code Editor")
286
+ code_editor = st.text_area("Write your code:", height=300)
287
+ if st.button("Format & Lint"):
288
+ formatted_code, lint_message = code_editor_interface(code_editor)
289
+ st.code(formatted_code, language="python")
290
+ st.info(lint_message)
291
+
292
+ # Text Summarization Tool
293
+ st.subheader("Summarize Text")
294
+ text_to_summarize = st.text_area("Enter text to summarize:")
295
+ if st.button("Summarize"):
296
+ summary = summarize_text(text_to_summarize)
297
+ st.write(f"Summary: {summary}")
298
+
299
+ # Sentiment Analysis Tool
300
+ st.subheader("Sentiment Analysis")
301
+ sentiment_text = st.text_area("Enter text for sentiment analysis:")
302
+ if st.button("Analyze Sentiment"):
303
+ sentiment = sentiment_analysis(sentiment_text)
304
+ st.write(f"Sentiment: {sentiment}")
305
+
306
+ # Text Translation Tool (Code Translation)
307
+ st.subheader("Translate Code")
308
+ code_to_translate = st.text_area("Enter code to translate:")
309
+ source_language = st.text_input("Enter source language (e.g. 'Python'):")
310
+ target_language = st.text_input("Enter target language (e.g. 'JavaScript'):")
311
+ if st.button("Translate Code"):
312
+ translated_code = translate_code(code_to_translate, source_language, target_language)
313
+ st.code(translated_code, language=target_language.lower())
314
+
315
+ # Code Generation
316
+ st.subheader("Code Generation")
317
+ code_idea = st.text_input("Enter your code idea:")
318
+ if st.button("Generate Code"):
319
+ generated_code = generate_code(code_idea)
320
+ st.code(generated_code, language="python")
321
+
322
+ # Display Preset Commands
323
+ st.subheader("Preset Commands")
324
+ preset_commands = {
325
+ "Create a new project": "create_project('project_name')",
326
+ "Add code to workspace": "add_code_to_workspace('project_name', 'code', 'file_name')",
327
+ "Run terminal command": "terminal_interface('command', 'project_name')",
328
+ "Generate code": "generate_code('code_idea')",
329
+ "Summarize text": "summarize_text('text')",
330
+ "Analyze sentiment": "sentiment_analysis('text')",
331
+ "Translate code": "translate_code('code', 'source_language', 'target_language')",
332
+ }
333
+ for command_name, command in preset_commands.items():
334
+ st.write(f"{command_name}: `{command}`")
335
+
336
+ elif app_mode == "Workspace Chat App":
337
+ # Workspace Chat App
338
+ st.header("Workspace Chat App")
339
+
340
+ # Project Workspace Creation
341
+ st.subheader("Create a New Project")
342
+ project_name = st.text_input("Enter project name:")
343
+ if st.button("Create Project"):
344
+ workspace_status = workspace_interface(project_name)
345
+ st.success(workspace_status)
346
+
347
+ # Add Code to Workspace
348
+ st.subheader("Add Code to Workspace")
349
+ code_to_add = st.text_area("Enter code to add to workspace:")
350
+ file_name = st.text_input("Enter file name (e.g. 'app.py'):")
351
+ if st.button("Add Code"):
352
+ add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
353
+ st.success(add_code_status)
354
+
355
+ # Terminal Interface with Project Context
356
+ st.subheader("Terminal (Workspace Context)")
357
+ terminal_input = st.text_input("Enter a command within the workspace:")
358
+ if st.button("Run Command"):
359
+ terminal_output = terminal_interface(terminal_input, project_name)
360
+ st.code(terminal_output, language="bash")
361
+
362
+ # Chat Interface for Guidance
363
+ st.subheader("Chat with CodeCraft for Guidance")
364
+ chat_input = st.text_area("Enter your message for guidance:")
365
+ if st.button("Get Guidance"):
366
+ chat_response = chat_interface(chat_input)
367
+ st.session_state.chat_history.append((chat_input, chat_response))
368
+ st.write(f"CodeCraft: {chat_response}")
369
+
370
+ # Display Chat History
371
+ st.subheader("Chat History")
372
+ for user_input, response in st.session_state.chat_history:
373
+ st.write(f"User: {user_input}")
374
+ st.write(f"CodeCraft: {response}")
375
+
376
+ # Display Terminal History
377
+ st.subheader("Terminal History")
378
+ for command, output in st.session_state.terminal_history:
379
+ st.write(f"Command: {command}")
380
+ st.code(output, language="bash")
381
+
382
+ # Display Projects and Files
383
+ st.subheader("Workspace Projects")
384
+ for project, details in st.session_state.workspace_projects.items():
385
+ st.write(f"Project: {project}")
386
+ for file in details['files']:
387
+ st.write(f" - {file}")
388
+
389
+ # Chat with AI Agents
390
+ st.subheader("Chat with AI Agents")
391
+ selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
392
+ agent_chat_input = st.text_area("Enter your message for the agent:")
393
+ if st.button("Send to Agent"):
394
+ agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
395
+ st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
396
+ st.write(f"{selected_agent}: {agent_chat_response}")
397
+
398
+ # Automate Build Process
399
+ st.subheader("Automate Build Process")
400
+ if st.button("Automate"):
401
+ agent = AIAgent(selected_agent, "", []) # Load the agent without skills for now
402
+ summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
403
+ st.write("Autonomous Build Summary:")
404
+ st.write(summary)
405
+ st.write("Next Step:")
406
+ st.write(next_step)
407
+
408
+ # Display current state for debugging
409
+ st.sidebar.subheader("Current State")
410
+ st.sidebar.json(st.session_state.current_state)