acecalisto3 committed on
Commit b8b7a36 · verified · 1 Parent(s): 58ddfa5

Update app.py

Files changed (1):
  1. app.py +238 -876

app.py CHANGED
@@ -1,886 +1,248 @@
  import os
- import sys
  import subprocess
- import streamlit as st
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
- import black
- from pylint import lint
- from io import StringIO
- import openai
-
- # Set your OpenAI API key here
- openai.api_key = "YOUR_OPENAI_API_KEY"
-
- HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
- PROJECT_ROOT = "projects"
- AGENT_DIRECTORY = "agents"
-
- # Global state to manage communication between Tool Box and Workspace Chat App
- if 'chat_history' not in st.session_state:
-     st.session_state.chat_history = []
- if 'terminal_history' not in st.session_state:
-     st.session_state.terminal_history = []
- if 'workspace_projects' not in st.session_state:
-     st.session_state.workspace_projects = {}
- if 'available_agents' not in st.session_state:
-     st.session_state.available_agents = []
- if 'current_state' not in st.session_state:
-     st.session_state.current_state = {
-         'toolbox': {},
-         'workspace_chat': {}
-     }
-
- class AIAgent:
-     def __init__(self, name, description, skills):
-         self.name = name
-         self.description = description
-         self.skills = skills
-
-     def create_agent_prompt(self):
-         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
-         agent_prompt = f"""
- As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
- {skills_str}
-
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
- """
-         return agent_prompt
-
-     def autonomous_build(self, chat_history, workspace_projects):
-         """
-         Autonomous build logic that continues based on the state of chat history and workspace projects.
-         """
-         summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
-         summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
-
-         next_step = "Based on the current state, the next logical step is to implement the main application logic."
-
-         return summary, next_step
-
- def save_agent_to_file(agent):
-     """Saves the agent's prompt to a file locally and then commits to the Hugging Face repository."""
-     if not os.path.exists(AGENT_DIRECTORY):
-         os.makedirs(AGENT_DIRECTORY)
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
-     config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
-     with open(file_path, "w") as file:
-         file.write(agent.create_agent_prompt())
-     with open(config_path, "w") as file:
-         file.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
-     st.session_state.available_agents.append(agent.name)
-
-     commit_and_push_changes(f"Add agent {agent.name}")
-
- def load_agent_prompt(agent_name):
-     """Loads an agent prompt from a file."""
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
-     if os.path.exists(file_path):
-         with open(file_path, "r") as file:
-             agent_prompt = file.read()
-         return agent_prompt
-     else:
-         return None
-
- def create_agent_from_text(name, text):
-     skills = text.split('\n')
-     agent = AIAgent(name, "AI agent created from text input.", skills)
-     save_agent_to_file(agent)
-     return agent.create_agent_prompt()
-
- # Chat interface using a selected agent
- def chat_interface_with_agent(input_text, agent_name):
-     agent_prompt = load_agent_prompt(agent_name)
-     if agent_prompt is None:
-         return f"Agent {agent_name} not found."
-
-     # Load the GPT-2 model which is compatible with AutoModelForCausalLM
-     model_name = "gpt2"
-     try:
-         model = AutoModelForCausalLM.from_pretrained(model_name)
-         tokenizer = AutoTokenizer.from_pretrained(model_name)
-         generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
-     except EnvironmentError as e:
-         return f"Error loading model: {e}"
-
-     # Combine the agent prompt with user input
-     combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
-
-     # Truncate input text to avoid exceeding the model's maximum length
-     max_input_length = 900
-     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
-     if input_ids.shape[1] > max_input_length:
-         input_ids = input_ids[:, :max_input_length]
-
-     # Generate chatbot response
-     outputs = model.generate(
-         input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id  # Set pad_token_id to eos_token_id
-     )
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response
-
- def workspace_interface(project_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(PROJECT_ROOT):
-         os.makedirs(PROJECT_ROOT)
-     if not os.path.exists(project_path):
-         os.makedirs(project_path)
-         st.session_state.workspace_projects[project_name] = {"files": []}
-         st.session_state.current_state['workspace_chat']['project_name'] = project_name
-         commit_and_push_changes(f"Create project {project_name}")
-         return f"Project {project_name} created successfully."
-     else:
-         return f"Project {project_name} already exists."
-
- def add_code_to_workspace(project_name, code, file_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if os.path.exists(project_path):
-         file_path = os.path.join(project_path, file_name)
-         with open(file_path, "w") as file:
-             file.write(code)
-         st.session_state.workspace_projects[project_name]["files"].append(file_name)
-         st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
-         commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
-         return f"Code added to {file_name} in project {project_name} successfully."
-     else:
-         return f"Project {project_name} does not exist."
-
- def terminal_interface(command, project_name=None):
-     if project_name:
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         if not os.path.exists(project_path):
-             return f"Project {project_name} does not exist."
-         result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
-     else:
-         result = subprocess.run(command, shell=True, capture_output=True, text=True)
-     if result.returncode == 0:
-         st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
-         return result.stdout
-     else:
-         st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
-         return result.stderr
-
- def code_editor_interface(code):
-     try:
-         formatted_code = black.format_str(code, mode=black.FileMode())
-     except black.NothingChanged:
-         formatted_code = code
-     result = StringIO()
-     sys.stdout = result
-     sys.stderr = result
-     (pylint_stdout, pylint_stderr) = lint.py_run(code, return_std=True)
-     sys.stdout = sys.__stdout__
-     sys.stderr = sys.__stderr__
-     lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
-     st.session_state.current_state['toolbox']['formatted_code'] = formatted_code
-     st.session_state.current_state['toolbox']['lint_message'] = lint_message
-     return formatted_code, lint_message
-
- def summarize_text(text):
-     summarizer = pipeline("summarization")
-     summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
-     st.session_state.current_state['toolbox']['summary'] = summary[0]['summary_text']
-     return summary[0]['summary_text']
-
- def sentiment_analysis(text):
-     analyzer = pipeline("sentiment-analysis")
-     sentiment = analyzer(text)
-     st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
-     return sentiment[0]
-
- def translate_code(code, input_language, output_language):
-     # Define a dictionary to map programming languages to their corresponding file extensions
-     language_extensions = {
-         # ignore the specific languages right now, and continue to EOF
  }
-     # Add code to handle edge cases such as invalid input and unsupported programming languages
-     if input_language not in language_extensions:
-         raise ValueError(f"Invalid input language: {input_language}")
-     if output_language not in language_extensions:
-         raise ValueError(f"Invalid output language: {output_language}")
-
-     # Use the dictionary to map the input and output languages to their corresponding file extensions
-     input_extension = language_extensions[input_language]
-     output_extension = language_extensions[output_language]
-
-     # Translate the code using the OpenAI API
-     prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
-     response = openai.ChatCompletion.create(
-         model="gpt-4",
-         messages=[
-             {"role": "system", "content": "You are an expert software developer."},
-             {"role": "user", "content": prompt}
-         ]
-     )
-     translated_code = response.choices[0].message['content'].strip()
-
-     # Return the translated code
-     translated_code = response.choices[0].message['content'].strip()
-     st.session_state.current_state['toolbox']['translated_code'] = translated_code
-     return translated_code
-
- def generate_code(code_idea):
-     response = openai.ChatCompletion.create(
-         model="gpt-4",
-         messages=[
-             {"role": "system", "content": "You are an expert software developer."},
-             {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
-         ]
  )
-     generated_code = response.choices[0].message['content'].strip()
-     st.session_state.current_state['toolbox']['generated_code'] = generated_code
-     return generated_code
-
- def commit_and_push_changes(commit_message):
-     """Commits and pushes changes to the Hugging Face repository."""
-     commands = [
-         "git add .",
-         f"git commit -m '{commit_message}'",
-         "git push"
-     ]
-     for command in commands:
-         result = subprocess.run(command, shell=True, capture_output=True, text=True)
-         if result.returncode != 0:
-             st.error(f"Error executing command '{command}': {result.stderr}")
-             break
-
- # Streamlit App
- st.title("AI Agent Creator")
-
- # Sidebar navigation
- st.sidebar.title("Navigation")
- app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
-
- if app_mode == "AI Agent Creator":
-     # AI Agent Creator
-     st.header("Create an AI Agent from Text")
-
-     st.subheader("From Text")
-     agent_name = st.text_input("Enter agent name:")
-     text_input = st.text_area("Enter skills (one per line):")
-     if st.button("Create Agent"):
-         agent_prompt = create_agent_from_text(agent_name, text_input)
-         st.success(f"Agent '{agent_name}' created and saved successfully.")
-         st.session_state.available_agents.append(agent_name)
-
- elif app_mode == "Tool Box":
-     # Tool Box
-     st.header("AI-Powered Tools")
-
-     # Chat Interface
-     st.subheader("Chat with CodeCraft")
-     chat_input = st.text_area("Enter your message:")
-     if st.button("Send"):
-         if chat_input.startswith("@"):
-             agent_name = chat_input.split(" ")[0][1:]  # Extract agent_name from @agent_name
-             chat_input = " ".join(chat_input.split(" ")[1:])  # Remove agent_name from input
-             chat_response = chat_interface_with_agent(chat_input, agent_name)
-         else:
-             chat_response = chat_interface(chat_input)
-         st.session_state.chat_history.append((chat_input, chat_response))
-         st.write(f"CodeCraft: {chat_response}")
-
-     # Terminal Interface
-     st.subheader("Terminal")
-     terminal_input = st.text_input("Enter a command:")
-     if st.button("Run"):
-         terminal_output = terminal_interface(terminal_input)
-         st.session_state.terminal_history.append((terminal_input, terminal_output))
-         st.code(terminal_output, language="bash")
-
-     # Code Editor Interface
-     st.subheader("Code Editor")
-     code_editor = st.text_area("Write your code:", height=300)
-     if st.button("Format & Lint"):
-         formatted_code, lint_message = code_editor_interface(code_editor)
-         st.code(formatted_code, language="python")
-         st.info(lint_message)
-
-     # Text Summarization Tool
-     st.subheader("Summarize Text")
-     text_to_summarize = st.text_area("Enter text to summarize:")
-     if st.button("Summarize"):
-         summary = summarize_text(text_to_summarize)
-         st.write(f"Summary: {summary}")
-
-     # Sentiment Analysis Tool
-     st.subheader("Sentiment Analysis")
-     sentiment_text = st.text_area("Enter text for sentiment analysis:")
-     if st.button("Analyze Sentiment"):
-         sentiment = sentiment_analysis(sentiment_text)
-         st.write(f"Sentiment: {sentiment}")
-
-     # Text Translation Tool (Code Translation)
-     st.subheader("Translate Code")
-     code_to_translate = st.text_area("Enter code to translate:")
-     source_language = st.text_input("Enter source language (e.g. 'Python'):")
-     target_language = st.text_input("Enter target language (e.g. 'JavaScript'):")
-     if st.button("Translate Code"):
-         translated_code = translate_code(code_to_translate, source_language, target_language)
-         st.code(translated_code, language=target_language.lower())
-
-     # Code Generation
-     st.subheader("Code Generation")
-     code_idea = st.text_input("Enter your code idea:")
-     if st.button("Generate Code"):
-         generated_code = generate_code(code_idea)
-         st.code(generated_code, language="python")
-
-     # Display Preset Commands
-     st.subheader("Preset Commands")
-     preset_commands = {
-         "Create a new project": "create_project('project_name')",
-         "Add code to workspace": "add_code_to_workspace('project_name', 'code', 'file_name')",
-         "Run terminal command": "terminal_interface('command', 'project_name')",
-         "Generate code": "generate_code('code_idea')",
-         "Summarize text": "summarize_text('text')",
-         "Analyze sentiment": "sentiment_analysis('text')",
-         "Translate code": "translate_code('code', 'source_language', 'target_language')",
-     }
-     for command_name, command in preset_commands.items():
-         st.write(f"{command_name}: `{command}`")
-
- elif app_mode == "Workspace Chat App":
-     # Workspace Chat App
-     st.header("Workspace Chat App")
-
-     # Project Workspace Creation
-     st.subheader("Create a New Project")
-     project_name = st.text_input("Enter project name:")
-     if st.button("Create Project"):
-         workspace_status = workspace_interface(project_name)
-         st.success(workspace_status)
-
-     # Add Code to Workspace
-     st.subheader("Add Code to Workspace")
-     code_to_add = st.text_area("Enter code to add to workspace:")
-     file_name = st.text_input("Enter file name (e.g. 'app.py'):")
-     if st.button("Add Code"):
-         add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
-         st.success(add_code_status)
-
-     # Terminal Interface with Project Context
-     st.subheader("Terminal (Workspace Context)")
-     terminal_input = st.text_input("Enter a command within the workspace:")
-     if st.button("Run Command"):
-         terminal_output = terminal_interface(terminal_input, project_name)
-         st.code(terminal_output, language="bash")
-
-     # Chat Interface for Guidance
-     st.subheader("Chat with CodeCraft for Guidance")
-     chat_input = st.text_area("Enter your message for guidance:")
-     if st.button("Get Guidance"):
-         chat_response = chat_interface(chat_input)
-         st.session_state.chat_history.append((chat_input, chat_response))
-         st.write(f"CodeCraft: {chat_response}")
-
-     # Display Chat History
-     st.subheader("Chat History")
-     for user_input, response in st.session_state.chat_history:
-         st.write(f"User: {user_input}")
-         st.write(f"CodeCraft: {response}")
-
-     # Display Terminal History
-     st.subheader("Terminal History")
-     for command, output in st.session_state.terminal_history:
-         st.write(f"Command: {command}")
-         st.code(output, language="bash")
-
-     # Display Projects and Files
-     st.subheader("Workspace Projects")
-     for project, details in st.session_state.workspace_projects.items():
-         st.write(f"Project: {project}")
-         for file in details['files']:
-             st.write(f"  - {file}")
-
-     # Chat with AI Agents
-     st.subheader("Chat with AI Agents")
-     selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
-     agent_chat_input = st.text_area("Enter your message for the agent:")
-     if st.button("Send to Agent"):
-         agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
-         st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
-         st.write(f"{selected_agent}: {agent_chat_response}")
-
-     # Automate Build Process
-     st.subheader("Automate Build Process")
-     if st.button("Automate"):
-         agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
-         summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
-         st.write("Autonomous Build Summary:")
-         st.write(summary)
-         st.write("Next Step:")
-         st.write(next_step)
-
-     # Display current state for debugging
-     st.sidebar.subheader("Current State")
-     st.sidebar.json(st.session_state.current_state)
 
  import os
  import subprocess
+ import random
+ import json
+ from datetime import datetime
+
+ from huggingface_hub import (
+     InferenceClient,
+     cached_download,
+     hf_hub_url
+ )
+ import gradio as gr
+
+ from safe_search import safe_search
+ from i_search import google
+ from i_search import i_search as i_s
+
+ from agent import (
+     ACTION_PROMPT,
+     ADD_PROMPT,
+     COMPRESS_HISTORY_PROMPT,
+     LOG_PROMPT,
+     LOG_RESPONSE,
+     MODIFY_PROMPT,
+     PRE_PREFIX,
+     SEARCH_QUERY,
+     READ_PROMPT,
+     TASK_PROMPT,
+     UNDERSTAND_TEST_RESULTS_PROMPT,
+ )
+
+ from utils import (
+     parse_action,
+     parse_file_content,
+     read_python_module_structure
+ )
+ from datetime import datetime
+ import json
+
+ #--- Global Variables for App State ---
+ app_state = {"components": []}
+
+ terminal_history = ""
+
+ #--- Component Library ---
+ components_registry = {
+     "Button": {
+         "properties": {"label": "Click Me", "onclick": ""},
+         "description": "A clickable button",
+         "code_snippet": 'gr.Button(value="{label}", variant="primary")',
+     },
+     "Text Input": {
+         "properties": {"value": "", "placeholder": "Enter text"},
+         "description": "A field for entering text",
+         "code_snippet": 'gr.Textbox(label="{placeholder}")',
+     },
+     "Image": {
+         "properties": {"src": "#", "alt": "Image"},
+         "description": "Displays an image",
+         "code_snippet": 'gr.Image(label="{alt}")',
+     },
+     "Dropdown": {
+         "properties": {"choices": ["Option 1", "Option 2"], "value": ""},
+         "description": "A dropdown menu for selecting options",
+         "code_snippet": 'gr.Dropdown(choices={choices}, label="Dropdown")',
+     },
+     # Add more components here...
+ }
+
+ #--- NLP Model (Example using Hugging Face) ---
+ nlp_model_name = "google/flan-t5-small"
+
+ # Check if the model exists in the cache
+ try:
+     cached_download(hf_hub_url(nlp_model_name, revision="main"))
+     nlp_model = InferenceClient(nlp_model_name)
+ except:
+     nlp_model = None
+
+ #--- Function to get NLP model response ---
+ def get_nlp_response(input_text):
+     if nlp_model:
+         response = nlp_model.text_generation(input_text)
+         return response.generated_text
+     else:
+         return "NLP model not available."
+
+ # --- Component Class ---
+ class Component:
+     def __init__(self, type, properties=None, id=None):
+         self.id = id or random.randint(1000, 9999)
+         self.type = type
+         self.properties = properties or components_registry[type]["properties"].copy()
+
+     def to_dict(self):
+         return {
+             "id": self.id,
+             "type": self.type,
+             "properties": self.properties,
+         }
+
+     def render(self):
+         # Properly format choices for Dropdown
+         if self.type == "Dropdown":
+             self.properties["choices"] = (
+                 str(self.properties["choices"])
+                 .replace("[", "")
+                 .replace("]", "")
+                 .replace("'", "")
+             )
+         return components_registry[self.type]["code_snippet"].format(
+             **self.properties
+         )
+
+ # --- Function to update the app canvas (for preview) ---
+ def update_app_canvas():
+     components_html = "".join(
+         [
+             f"<div>Component ID: {component['id']}, Type: {component['type']}, Properties: {component['properties']}</div>"
+             for component in app_state["components"]
+         ]
+     )
+     return components_html
+
+ # --- Function to handle component addition ---
+ def add_component(component_type):
+     if component_type in components_registry:
+         new_component = Component(component_type)
+         app_state["components"].append(new_component.to_dict())
+         return (
+             update_app_canvas(),
+             f"System: Added component: {component_type}\n",
+         )
+     else:
+         return None, f"Error: Invalid component type: {component_type}\n"
+
+ # --- Function to handle terminal input ---
+ def run_terminal_command(command, history):
+     global terminal_history
+     output = ""
+     try:
+         # Basic command parsing (expand with NLP)
+         if command.startswith("add "):
+             component_type = command.split("add ", 1)[1].strip()
+             _, output = add_component(component_type)
+         elif command.startswith("set "):
+             _, output = set_component_property(command)
+         elif command.startswith("search "):
+             search_query = command.split("search ", 1)[1].strip()
+             output = i_s(search_query)
+         elif command.startswith("deploy "):
+             app_name = command.split("deploy ", 1)[1].strip()
+             output = deploy_to_huggingface(app_name)
+         else:
+             # Attempt to execute command as Python code
+             try:
+                 result = subprocess.check_output(
+                     command, shell=True, stderr=subprocess.STDOUT, text=True
+                 )
+                 output = result
+             except Exception as e:
+                 output = f"Error executing Python code: {str(e)}"
+     except Exception as e:
+         output = f"Error: {str(e)}"
+     finally:
+         terminal_history += f"User: {command}\n"
+         terminal_history += f"{output}\n"
+         return terminal_history
+
+ def set_component_property(command):
+     try:
+         # Improved 'set' command parsing
+         set_parts = command.split(" ", 2)[1:]
+         if len(set_parts) != 2:
+             raise ValueError("Invalid 'set' command format.")
+
+         component_id = int(set_parts[0])  # Use component ID
+         property_name, property_value = set_parts[1].split("=", 1)
+
+         # Find component by ID
+         component_found = False
+         for component in app_state["components"]:
+             if component["id"] == component_id:
+                 if property_name in component["properties"]:
+                     component["properties"][
+                         property_name.strip()
+                     ] = property_value.strip()
+                     component_found = True
+                     return (
+                         update_app_canvas(),
+                         f"System: Property '{property_name}' set to '{property_value}' for component {component_id}\n",
+                     )
+                 else:
+                     return (
+                         None,
+                         f"Error: Property '{property_name}' not found in component {component_id}\n",
+                     )
+         if not component_found:
+             return (
+                 None,
+                 f"Error: Component with ID {component_id} not found.\n",
+             )
+
+     except Exception as e:
+         return None, f"Error: Invalid 'set' command format or error setting property: {str(e)}\n"
+
+ #--- Function to handle chat interaction ---
+ def run_chat(message, history):
+     global terminal_history
+     if message.startswith("!"):
+         command = message[1:]
+         terminal_history = run_terminal_command(command, history)
+         return history, terminal_history
+     else:
+         # ... (Your regular chat response generation)
+         return history, terminal_history
+
+ # --- Code Generation ---
+ def generate_python_code(app_name):
+     code = f"""import gradio as gr
+
+ # Define your Gradio components here
+ with gr.Blocks() as {app_name}:
+ """
+     for component in app_state["components"]:
+         code += "    " + Component(**component).render() + "\n"
+
+     code += f"""
+ {app_name}.launch()
+ """
+     return code
+
+ # --- Save/Load App State ---
+ def save_app_state(filename="app_state.json"):
+     with open(filename, "w") as f:
+         json.dump(app_state, f)
+
+ def load_app_state(filename="app_state.json"):
+     global app_state
+     try:
+         with open(filename, "r") as f:
+             app_state = json.load(f)
+     except FileNotFoundError:
+         print("App state file not found. Starting with a blank slate.")
+
+ # --- Hugging Face Deployment ---
+ def deploy_to_huggingface(app_name):
+     # Generate Python code
+     code = generate_python_code(app_name)
+
+     # Create requirements.txt
+     with open("requirements.txt", "w") as f:
+         f.write("gradio==3.32.0\n")
+
+     # Create the app.py file
+     with open("app.py", "w") as f:
+         f.write(code)
+
+     # Execute the deployment command
+     try:
+         subprocess.run(
+             [
+                 "huggingface-cli",
+                 "repo",
+                 "create",
+                 "--type",
+                 "space",
+                 "--space_sdk",
+                 "gradio",
+                 app_name,
+             ],
+             check=True,
+         )
+         subprocess.run(
+             ["git", "init"], cwd=f"./{app_name}", check=True
+         )
+         subprocess.run(
+             ["git", "add", "."], cwd=f"./{app_name}", check=True
+         )
+         subprocess.run(
+             ['git', 'commit', '-m', '"Initial commit"'], cwd=f"./{app_name}", check=True
+         )
+         subprocess.run(
+             ["git", "push", "https://huggingface.co/spaces/" + app_name, "main"], cwd=f"./{app_name}", check=True
+         )
+         return (
+             f"Successfully deployed to Hugging Face Spaces: https://huggingface.co/spaces/{app_name}"
+         )
+     except Exception as e:
+         return f"Error deploying to Hugging Face Spaces: {e}"
+ # --- Gradio Interface ---
+ with gr.Blocks() as iface:
+     with gr.Row():
+         # --- Chat Interface ---
+         chat_history = gr.Chatbot(label="Chat with Agent")
+         chat_input = gr.Textbox(label="Your Message")
+         chat_button = gr.Button("Send")
+
+         chat_button.click(
+             run_chat,
+             inputs=[chat_input, chat_history],
+             outputs=[chat_history, terminal_output],
+         )
+
+     with gr.Row():
+         # --- App Builder Section ---
+         app_canvas = gr.HTML(
+             "<div>App Canvas Preview:</div>", label="App Canvas"
+         )
+         with gr.Column():
+             component_list = gr.Dropdown(
+                 choices=list(components_registry.keys()), label="Components"
+             )
+             add_button = gr.Button("Add Component")
+
+             add_button.click(
+                 add_component,
+                 inputs=component_list,
+                 outputs=[app_canvas, terminal_output],
+             )
+
+     with gr.Row():
+         # --- Terminal ---
+         terminal_output = gr.Textbox(
+             lines=8, label="Terminal", value=terminal_history
+         )
+         terminal_input = gr.Textbox(label="Enter Command")
+         terminal_button = gr.Button("Run")
+
+         terminal_button.click(
+             run_terminal_command,
+             inputs=[terminal_input, terminal_output],
+             outputs=terminal_output,
+         )
+
+     with gr.Row():
+         # --- Code Generation ---
+         code_output = gr.Code(
+             generate_python_code("app_name"),
+             language="python",
+             label="Generated Code",
+         )
+         app_name_input = gr.Textbox(label="App Name")
+         generate_code_button = gr.Button("Generate Code")
+         generate_code_button.click(
+             generate_python_code,
+             inputs=[app_name_input],
+             outputs=code_output,
+         )
+
+     with gr.Row():
+         # --- Save/Load Buttons ---
+         save_button = gr.Button("Save App State")
+         load_button = gr.Button("Load App State")
+
+         save_button.click(save_app_state)
+         load_button.click(load_app_state)
+
+     with gr.Row():
+         # --- Deploy Button ---
+         deploy_button = gr.Button("Deploy to Hugging Face")
+         deploy_output = gr.Textbox(label="Deployment Output")
+         deploy_button.click(
+             deploy_to_huggingface,
+             inputs=[app_name_input],
+             outputs=[deploy_output],
+         )
+
+ iface.launch()