acecalisto3 committed
Commit 80629bf
1 Parent(s): f379f50
Update app.py

app.py CHANGED
@@ -86,9 +86,6 @@ class AIAgent:
             "READ_PROMPT": self.read_prompt,
             "TASK_PROMPT": self.task_prompt,
             "UNDERSTAND_TEST_RESULTS_PROMPT": self.understand_test_results_prompt,
-            "EXECUTE_COMMAND": self.execute_command,  # Add command execution
-            "PYTHON_INTERPRET": self.python_interpret,  # Add Python interpretation
-            "NLP": self.nlp,  # Add NLP capabilities
         }
         self.task_history: List[Dict[str, str]] = []
         self.current_task: Optional[str] = None
@@ -102,7 +99,6 @@ class AIAgent:
             "bigscience/T0_3B",
         ]  # Add more as needed
         self.selected_model = "gpt2"  # Default model
-        self.nlp_pipeline = None  # Initialize NLP pipeline later
 
     # --- Search Functionality ---
     def search(self, query: str) -> List[str]:
@@ -255,12 +251,6 @@ class AIAgent:
                 return "Enter the new task to start."
             elif action == "UNDERSTAND_TEST_RESULTS_PROMPT":
                 return "Enter your question about the test results."
-            elif action == "EXECUTE_COMMAND":
-                return "Enter the command to execute."
-            elif action == "PYTHON_INTERPRET":
-                return "Enter the Python code to interpret."
-            elif action == "NLP":
-                return "Enter the text for NLP analysis."
             else:
                 raise InvalidActionError("Please provide a valid action.")
         except InvalidActionError as e:
@@ -324,39 +314,6 @@ class AIAgent:
         """Provides a prompt to understand the test results."""
         return "What do you want to know about the test results?"
 
-    # --- Command Execution Functionality ---
-    def execute_command(self, command: str) -> str:
-        """Executes the provided command in the terminal."""
-        try:
-            process = subprocess.run(
-                command.split(), capture_output=True, text=True
-            )
-            return f"Command output:\n{process.stdout}"
-        except subprocess.CalledProcessError as e:
-            return f"Error executing command: {e}"
-
-    # --- Python Interpretation Functionality ---
-    def python_interpret(self, code: str) -> str:
-        """Interprets the provided Python code."""
-        try:
-            exec(code)
-            return "Python code executed successfully."
-        except Exception as e:
-            return f"Error interpreting Python code: {e}"
-
-    # --- NLP Functionality ---
-    def nlp(self, text: str) -> str:
-        """Performs NLP analysis on the provided text."""
-        try:
-            if not self.nlp_pipeline:
-                self.nlp_pipeline = pipeline(
-                    "sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-5-lit"
-                )  # Example NLP pipeline
-            analysis = self.nlp_pipeline(text)
-            return f"NLP Analysis: {analysis}"
-        except Exception as e:
-            return f"Error performing NLP analysis: {e}"
-
     # --- Input Handling Functionality ---
     def handle_input(self, input_str: str):
         """Handles user input and executes the corresponding action."""
@@ -448,7 +405,6 @@ if __name__ == "__main__":
     # --- Tabbed Navigation ---
     tabs = st.tabs(["Agent Generation", "Chat App"])
 
-    # --- Agent Generation Tab ---
     with tabs[0]:
         st.title("AI Agent Generation")
         st.sidebar.title("Agent Settings")
@@ -479,9 +435,7 @@ if __name__ == "__main__":
 
         # --- Model Dropdown ---
         selected_model = st.sidebar.selectbox(
-            "Model",
-            agent.available_models,
-            index=agent.available_models.index(agent.selected_model),
+            "Model", agent.available_models, index=agent.available_models.index(agent.selected_model)
         )
         agent.selected_model = selected_model
 
@@ -510,45 +464,39 @@ if __name__ == "__main__":
         except WorkspaceExplorerError as e:
             st.error(f"Error exploring workspace: {e}")
 
-    # --- Chat App Tab ---
     with tabs[1]:
         st.title("Chat App")
+        st.sidebar.title("Chat Settings")
+
+        # --- Model Dropdown ---
+        selected_chat_model = st.sidebar.selectbox(
+            "Model", agent.available_models, index=agent.available_models.index(agent.selected_model)
+        )
+        agent.selected_model = selected_chat_model
 
         # --- Chat History ---
         chat_history = st.empty()
-
+        chat_input = st.text_input("Enter your message:")
 
-        # --- Input
-        user_input = st.text_input("Enter your message:")
-
-        # --- Send Message ---
+        # --- Chat Input Handling ---
         if st.button("Send"):
-            if
-
-            chat_history.
+            if chat_input:
+                user_message = chat_input
+                chat_history.markdown(f"**You:** {user_message}")
+                chat_input.empty()
 
-            # ---
+            # --- Generate Response ---
             try:
-
-
-
-
-
-
-
-
-                chat_history.text(f"Agent: {output}")
-            else:
-                # --- Treat as regular chat message ---
-                output = agent.code_generation(user_input)
-                # --- Display Agent Response ---
-                chat_history.text(f"Agent: {output}")
+                if not agent.code_generator:
+                    agent.code_generator = pipeline(
+                        "text-generation", model=agent.selected_model
+                    )
+                response = agent.code_generator(
+                    user_message, max_length=500, num_return_sequences=1
+                )[0]["generated_text"]
+                chat_history.markdown(f"**Agent:** {response}")
             except Exception as e:
-
-                chat_history.text(f"Agent: Error: {e}")
-
-                # --- Clear Input Field ---
-                user_input = ""
 
     # --- Gradio Integration ---
     def gradio_interface(input_text):
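Note: the removed nlp() helper pointed the pipeline at "distilbert-base-uncased-finetuned-sst-5-lit", which does not appear to be a published Hub checkpoint; the widely available DistilBERT sentiment checkpoint is "distilbert-base-uncased-finetuned-sst-2-english". A minimal standalone sketch of that call, assuming the transformers library is installed:

from transformers import pipeline

# Sketch only: uses the public sst-2 sentiment checkpoint instead of the
# "sst-5-lit" id referenced by the removed nlp() method.
sentiment = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)
print(sentiment("The removed helper would have analyzed text like this."))
# e.g. [{'label': 'POSITIVE', 'score': 0.99...}]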
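Note: the rewritten chat tab generates its reply with a lazily created text-generation pipeline (agent.code_generator), so the model is only loaded on the first Send. A minimal standalone sketch of that generation call, assuming the transformers library and the default "gpt2" checkpoint, with the Streamlit wiring omitted:

from transformers import pipeline

# Sketch only: mirrors the generation call added in the chat tab, using the
# assumed default "gpt2" model; the first run downloads the checkpoint.
generator = pipeline("text-generation", model="gpt2")
response = generator(
    "Hello, agent!", max_length=50, num_return_sequences=1
)[0]["generated_text"]
print(response)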