acecalisto3 committed on
Commit b0453e4 · verified · 1 Parent(s): e6d482f

Update app.py

Files changed (1): app.py (+246 −351)
app.py CHANGED
@@ -1,356 +1,251 @@
- import streamlit as st
- from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
- import json
  import os
- import requests
- import torch
- from gensim import summarize, corpora, models, dictionary
- import re
- from pygments import highlight
- from pygments.lexers import PythonLexer
- from pygments.formatters import HtmlFormatter
- import sys
- import time
- from threading import Thread
  import subprocess
- import collections.abc as collections
-
- client = InferenceClient(
-     "mistralai/Mixtral-8x7B-Instruct-v0.1"
- )
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
- # --- Constants ---
- MODEL_URL = "https://huggingface.co/models"
- TASKS_FILE = "tasks.json"
- CODE_EXECUTION_ENV = {}
- PIPELINE_RUNNING = False
-
- # --- Model Initialization ---
- generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B')
- sentiment_model_name = "distilbert-base-uncased-finetuned-sst-2-english"
- sentiment_tokenizer = AutoTokenizer.from_pretrained(sentiment_model_name)
- sentiment_model = AutoModelForSequenceClassification.from_pretrained(sentiment_model_name)
-
- # --- Helper Functions ---
-
- def generate_code(prompt):
-     """Generates code based on the given prompt."""
-     generated = generator(prompt, max_length=200, do_sample=True, temperature=0.9)
-     return generated[0]['generated_text']
-
- def add_task(task_description):
-     """Adds a new task to the task list."""
-     try:
-         with open(TASKS_FILE, "r") as outfile:
-             tasks = json.load(outfile)
-     except FileNotFoundError:
-         tasks = []
-     tasks.append({"task": task_description["task"], "description": task_description["description"], "status": "Pending"})
-     with open(TASKS_FILE, "w") as outfile:
-         json.dump(tasks, outfile)
-
- def display_code(code):
-     """Displays the code in a formatted manner."""
-     formatter = HtmlFormatter(style='default')
-     lexer = PythonLexer()
-     html = highlight(code, lexer, formatter)
-     st.markdown(html, unsafe_allow_html=True)
-
- def summarize_text(text):
-     """Summarizes the given text."""
-     return summarize(text)
-
- def analyze_sentiment(text):
-     """Analyzes the sentiment of the given text."""
-     inputs = sentiment_tokenizer(text, return_tensors='pt')
-     outputs = sentiment_model(**inputs)
-     probs = torch.nn.functional.softmax(outputs.logits, dim=1)
-     return probs.tolist()[0][1]
-
- def run_tests(code):
-     """Runs tests on the given code."""
-     # Placeholder for testing logic
-     return "Tests passed."
-
- def load_model(model_name):
-     """Loads a pre-trained model."""
-     tokenizer = AutoTokenizer.from_pretrained(model_name)
-     model = AutoModelForSequenceClassification.from_pretrained(model_name)
-     return model, tokenizer
-
- def save_model(model, tokenizer, file_name):
-     """Saves the model and tokenizer."""
-     model.save_pretrained(file_name)
-     tokenizer.save_pretrained(file_name)
-
- def load_dataset(file_name):
-     """Loads a dataset from a file."""
-     data = []
-     with open(file_name, "r") as infile:
-         for line in infile:
-             data.append(line.strip())
-     return data
-
- def save_dataset(data, file_name):
-     """Saves a dataset to a file."""
-     with open(file_name, "w") as outfile:
-         for item in data:
-             outfile.write("%s\n" % item)
-
- def download_file(url, file_name):
-     """Downloads a file from a URL."""
-     response = requests.get(url)
-     if response.status_code == 200:
-         with open(file_name, "wb") as outfile:
-             outfile.write(response.content)
-
- def get_model_list():
-     """Gets a list of available models."""
-     response = requests.get(MODEL_URL)
-     models = []
-     for match in re.finditer("<a href='/models/(\w+/\w+)'", response.text):
-         models.append(match.group(1))
-     return models
-
- def predict_text(model, tokenizer, text):
-     """Predicts the text using the given model and tokenizer."""
-     inputs = tokenizer(text, return_tensors='pt')
-     outputs = model(**inputs)
-     probs = torch.nn.functional.softmax(outputs.logits, dim=1)
-     return probs.tolist()[0]
-
- def get_user_input():
-     """Gets user input."""
-     input_type = st.selectbox("Select an input type", ["Text", "File", "Model"])
-     if input_type == "Text":
-         prompt = st.text_input("Enter text:")
          return prompt
-     elif input_type == "File":
-         uploaded_file = st.file_uploader("Choose a file")
-         if uploaded_file:
-             return uploaded_file.read().decode("utf-8")
-         else:
-             return ""
-     elif input_type == "Model":
-         model_name = st.selectbox("Select a model", get_model_list())
-         model, tokenizer = load_model(model_name)
-         text = st.text_area("Enter text:")
-         return text
-
- def get_tasks():
-     """Loads tasks from tasks.json."""
-     try:
-         with open(TASKS_FILE, "r") as outfile:
-             tasks = json.load(outfile)
-         return tasks
-     except FileNotFoundError:
-         return []
-
- def complete_task(task_id):
-     """Completes a task."""
-     tasks = get_tasks()
-     if 0 <= task_id < len(tasks):
-         tasks[task_id]["status"] = "Completed"
-         with open(TASKS_FILE, "w") as outfile:
-             json.dump(tasks, outfile)
-         st.write(f"Task {task_id} completed.")
-     else:
-         st.write(f"Invalid task ID: {task_id}")
-
- def delete_task(task_id):
-     """Deletes a task."""
-     tasks = get_tasks()
-     if 0 <= task_id < len(tasks):
-         del tasks[task_id]
-         with open(TASKS_FILE, "w") as outfile:
-             json.dump(tasks, outfile)
-         st.write(f"Task {task_id} deleted.")
      else:
-         st.write(f"Invalid task ID: {task_id}")
-
- def run_pipeline():
-     """Runs the pipeline."""
-     global PIPELINE_RUNNING
-     PIPELINE_RUNNING = True
-     while PIPELINE_RUNNING:
-         tasks = get_tasks()
-         for i, task in enumerate(tasks):
-             if task["status"] == "Pending":
-                 st.write(f"Processing task {i}: {task['task']}")
-                 try:
-                     code = generate_code(task['description'])
-                     st.write(f"Generated code:\n{code}")
-                     # Execute code in a separate process
-                     process = subprocess.Popen(["python", "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                     output, error = process.communicate()
-                     st.write(f"Code output:\n{output.decode('utf-8')}")
-                     st.write(f"Code error:\n{error.decode('utf-8')}")
-                     # Run tests (replace with actual logic)
-                     test_result = run_tests(code)
-                     st.write(f"Test result: {test_result}")
-                     # Update task status
-                     tasks[i]["status"] = "Completed"
-                     with open(TASKS_FILE, "w") as outfile:
-                         json.dump(tasks, outfile)
-                 except Exception as e:
-                     st.write(f"Error processing task {i}: {e}")
-                     tasks[i]["status"] = "Failed"
-                     with open(TASKS_FILE, "w") as outfile:
-                         json.dump(tasks, outfile)
-         time.sleep(1)  # Adjust delay as needed
-
- def stop_pipeline():
-     """Stops the pipeline."""
-     global PIPELINE_RUNNING
-     PIPELINE_RUNNING = False
-     st.write("Pipeline stopped.")
-
- def load_model(file_name):
-     """Loads a saved model."""
-     try:
-         with open(file_name, "rb") as f:
-             model = pickle.load(f)
-         with open(file_name.replace(".sav", "_tokenizer.pkl"), "rb") as f:
-             tokenizer = pickle.load(f)
-         return model, tokenizer
-     except FileNotFoundError:
-         st.write(f"Model not found: {file_name}")
-         return None, None
-
- def delete_model(file_name):
-     """Deletes a saved model."""
      try:
-         os.remove(file_name)
-         os.remove(file_name.replace(".sav", "_tokenizer.pkl"))
-         st.write(f"Model deleted: {file_name}")
-     except FileNotFoundError:
-         st.write(f"Model not found: {file_name}")
-
- # --- Streamlit App ---
-
- def main():
-     """Main function."""
-     st.title("AI-Powered Code Interpreter")
-
-     # --- Code Generation and Analysis ---
-     st.subheader("Code Generation and Analysis")
-     text = get_user_input()
-
-     if text:
-         prompt = "Generate a python function that:\n\n" + text
-         code = generate_code(prompt)
-
-         summarized_text = ""
-         if len(text) > 100:
-             summarized_text = summarize_text(text)
-
-         sentiment = ""
-         if text:
-             sentiment = "Positive" if analyze_sentiment(text) > 0.5 else "Negative"
-
-         tests_passed = ""
-         if code:
-             tests_passed = run_tests(code)
-
-         st.subheader("Summary:")
-         st.write(summarized_text)
-
-         st.subheader("Sentiment:")
-         st.write(sentiment)
-
-         st.subheader("Code:")
-         display_code(code)
-
-         st.subheader("Tests:")
-         st.write(tests_passed)
-
-         if st.button("Save code"):
-             file_name = st.text_input("Enter file name:")
-             with open(file_name, "w") as outfile:
-                 outfile.write(code)
-
-     # --- Dataset Management ---
-     st.subheader("Dataset Management")
-     if st.button("Load dataset"):
-         file_name = st.text_input("Enter file name:")
-         data = load_dataset(file_name)
-         st.write(data)
-
-     if st.button("Save dataset"):
-         data = st.text_area("Enter data:")
-         file_name = st.text_input("Enter file name:")
-         save_dataset(data, file_name)
-
-     # --- Model Management ---
-     st.subheader("Model Management")
-     if st.button("Download model"):
-         model_name = st.selectbox("Select a model", get_model_list())
-         url = f"{MODEL_URL}/models/{model_name}/download"
-         file_name = model_name.replace("/", "-") + ".tar.gz"
-         download_file(url, file_name)
-
-     if st.button("Load model"):
-         model_name = st.selectbox("Select a model", get_model_list())
-         model, tokenizer = load_model(model_name)
-
-     if st.button("Predict text"):
-         text = st.text_area("Enter text:")
-         probs = predict_text(model, tokenizer, text)
-         st.write(probs)
-
-     if st.button("Save model"):
-         file_name = st.text_input("Enter file name:")
-         save_model(model, tokenizer, file_name)
-
-     # --- Saved Model Management ---
-     st.subheader("Saved Model Management")
-     file_name = st.text_input("Enter file name:")
-     model, tokenizer = load_model(file_name)
-
-     if st.button("Delete model"):
-         delete_model(file_name)
-
-     # --- Task Management ---
-     st.subheader("Task Management")
-     if st.button("Add task"):
-         task = st.text_input("Enter task:")
-         description = st.text_area("Enter description:")
-         add_task({"task": task, "description": description})
-
-     if st.button("Show tasks"):
-         tasks = get_tasks()
-         st.write(tasks)
-
-     if st.button("Complete task"):
-         task_id = st.number_input("Enter task ID:")
-         complete_task(task_id)
-
-     if st.button("Delete task"):
-         task_id = st.number_input("Enter task ID:")
-         delete_task(task_id)
-
-     # --- Pipeline Management ---
-     st.subheader("Pipeline Management")
-     if st.button("Run pipeline") and not PIPELINE_RUNNING:
-         Thread(target=run_pipeline).start()
-     if st.button("Stop pipeline") and PIPELINE_RUNNING:
-         stop_pipeline()
-
-     # --- Console Management ---
-     st.subheader("Console Management")
-     if st.button("Clear console"):
-         st.write("")
-
-     if st.button("Quit"):
-         sys.exit()
-
- if __name__ == "__main__":
-     main()
  import os
+ import streamlit as st
+ from huggingface_hub import InferenceClient
+ import gradio as gr
+ import random
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
  import subprocess
+
+ # --- Agent Definitions ---
+ class AIAgent:
+     def __init__(self, name, description, skills, model_name="mistralai/Mixtral-8x7B-Instruct-v0.1"):
+         self.name = name
+         self.description = description
+         self.skills = skills
+         self.model_name = model_name
+         self.client = InferenceClient(self.model_name)
+
+     def create_agent_prompt(self):
+         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
+         agent_prompt = f"""
+ As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
+ {skills_str}
+ I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
+ """
+         return agent_prompt
+
+     def generate_response(self, prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+         formatted_prompt = self.format_prompt(prompt, history)
+         stream = self.client.text_generation(formatted_prompt,
+                                              temperature=temperature,
+                                              max_new_tokens=max_new_tokens,
+                                              top_p=top_p,
+                                              repetition_penalty=repetition_penalty,
+                                              do_sample=True,
+                                              seed=random.randint(1, 1111111111111111),
+                                              stream=True,
+                                              details=True,
+                                              return_full_text=False)
+         output = ""
+         for response in stream:
+             output += response.token.text
+             yield output
+
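Note that generate_response is a generator: each yield is the cumulative completion so far, so a caller that wants a plain string must drain it and keep the last value. A minimal consumption sketch (editor's illustration, not part of the commit; assumes Inference API access to the model):

    agent = AIAgent("PYTHON_CODE_DEV", "Python code development expert", ["Python"])
    final_text = ""
    for partial in agent.generate_response("Explain list comprehensions", history=[]):
        final_text = partial  # each yield is the full text generated so far
    print(final_text)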
+     def format_prompt(self, message, history):
+         prompt = "<s>"
+         for user_prompt, bot_response in history:
+             prompt += f"[INST] {user_prompt} [/INST]"
+             prompt += f" {bot_response}</s> "
+         prompt += f"[INST] {message} [/INST]"
          return prompt
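For reference, format_prompt emits the Mixtral-instruct chat layout. A worked example of what it returns for a one-turn history (illustrative values):

    history = [("Hi", "Hello! How can I help?")]
    prompt = agent.format_prompt("Write a Flask app", history)
    # '<s>[INST] Hi [/INST] Hello! How can I help?</s> [INST] Write a Flask app [/INST]'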
+
+     def autonomous_build(self, chat_history, workspace_projects):
+         summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
+         summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
+         next_step = "Based on the current state, the next logical step is to implement the main application logic."
+         return summary, next_step
+
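autonomous_build only formats the current state; the "next step" is a fixed string, not model output. For a one-turn history and one project it returns, for example:

    agent = AIAgent("WEB_DEV", "Web development expert", ["Flask"])
    summary, next_step = agent.autonomous_build(
        [("Build a landing page", "Sure, let's start with Flask.")],
        {"demo": {"files": ["app.py"]}},
    )
    # summary == "Chat History:\nUser: Build a landing page\nAgent: Sure, let's start with Flask.\n\nWorkspace Projects:\ndemo: {'files': ['app.py']}"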
+ # --- Agent Instances ---
+ agents = {
+     "WEB_DEV": AIAgent("WEB_DEV", "Web development expert", ["HTML", "CSS", "JavaScript", "Flask", "React"]),
+     "AI_SYSTEM_PROMPT": AIAgent("AI_SYSTEM_PROMPT", "AI system prompt expert", ["Prompt Engineering", "LLM Interaction", "Fine-tuning"]),
+     "PYTHON_CODE_DEV": AIAgent("PYTHON_CODE_DEV", "Python code development expert", ["Python", "Data Structures", "Algorithms", "Libraries"]),
+     "CODE_REVIEW_ASSISTANT": AIAgent("CODE_REVIEW_ASSISTANT", "Code review assistant", ["Code Quality", "Best Practices", "Security"]),
+     "CONTENT_WRITER_EDITOR": AIAgent("CONTENT_WRITER_EDITOR", "Content writer and editor", ["Writing", "Editing", "SEO"]),
+     "QUESTION_GENERATOR": AIAgent("QUESTION_GENERATOR", "Question generator", ["Question Generation", "Knowledge Testing"]),
+     "HUGGINGFACE_FILE_DEV": AIAgent("HUGGINGFACE_FILE_DEV", "Hugging Face file development expert", ["Hugging Face Hub", "Model Training", "Dataset Creation"]),
+ }
+
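The UI below reads st.session_state.chat_history, st.session_state.terminal_history, st.session_state.workspace_projects, and st.session_state.current_state, but the commit never initializes them, so a fresh session would raise AttributeError. A minimal guard with assumed empty defaults (editor's sketch, not in the commit):

    for key, default in (("chat_history", []), ("terminal_history", []),
                         ("workspace_projects", {}), ("current_state", {})):
        if key not in st.session_state:
            st.session_state[key] = default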
+ # --- Streamlit UI ---
+ st.title("DevToolKit: AI-Powered Development Environment")
+
+ # --- Project Management ---
+ st.header("Project Management")
+ project_name = st.text_input("Enter project name:")
+ if st.button("Create Project"):
+     if project_name not in st.session_state.workspace_projects:
+         st.session_state.workspace_projects[project_name] = {'files': []}
+         st.success(f"Created project: {project_name}")
      else:
+         st.warning(f"Project {project_name} already exists")
+
+ # --- Code Addition ---
+ st.subheader("Add Code to Workspace")
+ code_to_add = st.text_area("Enter code to add to workspace:")
+ file_name = st.text_input("Enter file name (e.g. 'app.py'):")
+ if st.button("Add Code"):
+     add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
+     st.success(add_code_status)
+
+ # --- Terminal Interface ---
+ st.subheader("Terminal (Workspace Context)")
+ terminal_input = st.text_input("Enter a command within the workspace:")
+ if st.button("Run Command"):
+     terminal_output = terminal_interface(terminal_input, project_name)
+     st.code(terminal_output, language="bash")
+
+ # --- Chat Interface ---
+ st.subheader("Chat with DevToolKit for Guidance")
+ chat_input = st.text_area("Enter your message for guidance:")
+ if st.button("Get Guidance"):
+     chat_response = chat_interface(chat_input)
+     st.session_state.chat_history.append((chat_input, chat_response))
+     st.write(f"DevToolKit: {chat_response}")
+
+ # --- Display Chat History ---
+ st.subheader("Chat History")
+ for user_input, response in st.session_state.chat_history:
+     st.write(f"User: {user_input}")
+     st.write(f"DevToolKit: {response}")
+
+ # --- Display Terminal History ---
+ st.subheader("Terminal History")
+ for command, output in st.session_state.terminal_history:
+     st.write(f"Command: {command}")
+     st.code(output, language="bash")
+
+ # --- Display Projects and Files ---
+ st.subheader("Workspace Projects")
+ for project, details in st.session_state.workspace_projects.items():
+     st.write(f"Project: {project}")
+     for file in details['files']:
+         st.write(f"  - {file}")
+
+ # --- Chat with AI Agents ---
+ st.subheader("Chat with AI Agents")
+ selected_agent_name = st.selectbox("Select an AI agent", list(agents.keys()))
+ selected_agent = agents[selected_agent_name]
+ agent_chat_input = st.text_area("Enter your message for the agent:")
+ if st.button("Send to Agent"):
+     agent_chat_response = ""
+     for partial in selected_agent.generate_response(agent_chat_input, st.session_state.chat_history):
+         agent_chat_response = partial  # drain the stream; each yield is the full text so far
+     st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
+     st.write(f"{selected_agent.name}: {agent_chat_response}")
+
+ # --- Automate Build Process ---
+ st.subheader("Automate Build Process")
+ if st.button("Automate"):
+     summary, next_step = selected_agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
+     st.write("Autonomous Build Summary:")
+     st.write(summary)
+     st.write("Next Step:")
+     st.write(next_step)
+
+ # --- Display current state for debugging ---
+ st.sidebar.subheader("Current State")
+ st.sidebar.json(st.session_state.current_state)
+
+ # --- Gradio Interface ---
+ additional_inputs = [
+     gr.Dropdown(label="Agents", choices=list(agents.keys()), value=list(agents.keys())[0], interactive=True),
+     gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
+     gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
+     gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum number of new tokens"),
+     gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
+     gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
+ ]
+
+ examples = [
+     ["Create a simple web application using Flask", "WEB_DEV", None, None, None, None],
+     ["Generate a Python script to perform a linear regression analysis", "PYTHON_CODE_DEV", None, None, None, None],
+     ["Create a Dockerfile for a Node.js application", "AI_SYSTEM_PROMPT", None, None, None, None],
+     # Add more examples as needed
+ ]
+
+ # NOTE: fn=generate is referenced here but only defined below (in utils.py); the
+ # ordering in this commit would raise a NameError when this call runs.
+ gr.ChatInterface(
+     fn=generate,
+     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+     additional_inputs=additional_inputs,
+     title="DevToolKit AI Assistant",
+     examples=examples,
+     concurrency_limit=20,
+ ).launch(show_api=True)
+
+ # --- Helper Functions (moved to utils.py; stubs kept here for reference) ---
+ def generate(prompt, history, agent_name, sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+     ...  # Implementation in utils.py
+
+ def chat_interface(chat_input):
+     ...  # Implementation in utils.py
+
+ def chat_interface_with_agent(chat_input, agent_name):
+     ...  # Implementation in utils.py
+
+ def terminal_interface(command, project_name):
+     ...  # Implementation in utils.py
+
+ def add_code_to_workspace(project_name, code, file_name):
+     ...  # Implementation in utils.py
+ 2. requirements.txt (Dependencies)
+
+ streamlit
+ huggingface_hub
+ gradio
+ transformers
+ 3. utils.py (Helper Functions)
+
+ import os
+ import random
+ import subprocess
+ import streamlit as st
+
+ def generate(prompt, history, agent_name, sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+     seed = random.randint(1, 1111111111111111)
+     agent = agents[agent_name]  # the agents registry is defined in app.py
+     system_prompt = agent.create_agent_prompt() if not sys_prompt else sys_prompt
+
+     generate_kwargs = dict(
+         temperature=float(temperature),
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=seed,
+     )
+
+     formatted_prompt = agent.format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = agent.client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+
+ def chat_interface(chat_input):
+     response = ""
+     # generate requires an agent name; "WEB_DEV" is an assumed default, not specified in the commit
+     for response in generate(chat_input, st.session_state.chat_history, agent_name="WEB_DEV"):
+         pass  # each yield is the cumulative text; keep the last
+     return response
+
+ def chat_interface_with_agent(chat_input, agent_name):
+     agent_prompt = agents[agent_name].create_agent_prompt()
+     response = ""
+     for response in generate(chat_input, st.session_state.chat_history, agent_name=agent_name, sys_prompt=agent_prompt):
+         pass
+     return response
+
+ def terminal_interface(command, project_name):
      try:
+         # shell=True executes arbitrary user input; acceptable for a local dev tool, risky if deployed
+         result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_name)
+         return result.stdout if result.returncode == 0 else result.stderr
+     except Exception as e:
+         return str(e)
+
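A quick behavioral check of terminal_interface (illustrative only; assumes a 'demo' workspace directory already exists):

    print(terminal_interface("echo hello", "demo"))       # exit code 0: returns stdout ("hello\n")
    print(terminal_interface("ls no-such-file", "demo"))  # non-zero exit code: returns stderr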
+ def add_code_to_workspace(project_name, code, file_name):
+     project_path = os.path.join(os.getcwd(), project_name)
+     if not os.path.exists(project_path):
+         os.makedirs(project_path)
+     file_path = os.path.join(project_path, file_name)
+     with open(file_path, 'w') as file:
+         file.write(code)
+     if project_name not in st.session_state.workspace_projects:
+         st.session_state.workspace_projects[project_name] = {'files': []}
+     st.session_state.workspace_projects[project_name]['files'].append(file_name)
+     return f"Added {file_name} to {project_name}"