callanwu commited on
Commit
9fdf94d
1 Parent(s): b2168d8
Files changed (7) hide show
  1. config.json +511 -0
  2. gradio_backend.py +124 -0
  3. gradio_base.py +559 -0
  4. gradio_config.py +437 -0
  5. image.jpg +0 -0
  6. requirements.txt +1 -0
  7. run_gradio.py +243 -0
config.json ADDED
@@ -0,0 +1,511 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ { "config":{
2
+ "API_KEY" : "API_KEY",
3
+ "PROXY" : "PROXY",
4
+ "MAX_CHAT_HISTORY" : "3",
5
+ "TOP_K" : "0"
6
+ },
7
+ "LLM_type": "OpenAI",
8
+ "LLM": {
9
+ "temperature": 0.3,
10
+ "model": "gpt-3.5-turbo-16k-0613",
11
+ "log_path": "logs/god"
12
+ },
13
+ "root": "design_state",
14
+ "relations": {
15
+ "design_state": {
16
+ "0": "design_state",
17
+ "1": "develop_state"
18
+ },
19
+ "develop_state": {
20
+ "0": "develop_state",
21
+ "1": "debug_state"
22
+ },
23
+ "debug_state": {
24
+ "0": "debug_state",
25
+ "1": "end_state"
26
+ }
27
+ },
28
+ "agents": {
29
+ "Alice": {
30
+ "style": "august",
31
+ "roles": {
32
+ "design_state": "Boss",
33
+ "develop_state": "Boss",
34
+ "debug_state": "Boss"
35
+ }
36
+ },
37
+ "Bob": {
38
+ "style": "professional",
39
+ "roles": {
40
+ "design_state": "Architect_1"
41
+ }
42
+ },
43
+ "Belle": {
44
+ "style": "professional",
45
+ "roles": {
46
+ "design_state": "Architect_2"
47
+ }
48
+ },
49
+ "Candy": {
50
+ "style": "professional",
51
+ "roles": {
52
+ "develop_state": "Developer_1",
53
+ "debug_state": "Developer_1"
54
+ }
55
+ },
56
+ "Carl": {
57
+ "style": "professional",
58
+ "roles": {
59
+ "develop_state": "Developer_2",
60
+ "debug_state": "Developer_2"
61
+ }
62
+ },
63
+ "David": {
64
+ "style": "professional",
65
+ "roles": {
66
+ "debug_state": "Debugger"
67
+ }
68
+ },
69
+ "Eva": {
70
+ "style": "professional",
71
+ "roles": {
72
+ "debug_state": "Coder"
73
+ }
74
+ },
75
+ "Michael": {
76
+ "style": "professional",
77
+ "roles": {
78
+ "design_state": "Leader",
79
+ "develop_state": "Leader",
80
+ "debug_state": "Leader"
81
+ }
82
+ }
83
+ },
84
+ "states": {
85
+ "end_state": {
86
+ "agent_states":{}
87
+ },
88
+ "design_state": {
89
+ "LLM_type": "OpenAI",
90
+ "LLM": {
91
+ "temperature": 0.3,
92
+ "model": "gpt-3.5-turbo-16k-0613",
93
+ "log_path": "logs/god"
94
+ },
95
+ "roles": [
96
+ "Boss",
97
+ "Architect_1",
98
+ "Leader",
99
+ "Architect_2"
100
+ ],
101
+ "controller": {
102
+ "controller_type": "order",
103
+ "max_chat_nums": 8,
104
+ "judge_system_prompt": "",
105
+ "judge_last_prompt": "",
106
+ "judge_extract_words": "end"
107
+ },
108
+ "environment_prompt": "Imagine a scenario where the boss has presented a requirement. The architect is tasked with proposing a framework based on this requirement. The leader's role is to provide feedback on the architect's proposal, and another architect will finalize the framework based on the leader's comments.The target program is:<target>a snake game with python</target>",
109
+ "begin_role": "Boss",
110
+ "begin_query": "Please write code for the target game",
111
+ "agent_states": {
112
+ "Boss": {
113
+ "LLM_type": "OpenAI",
114
+ "LLM": {
115
+ "temperature": 0.3,
116
+ "model": "gpt-3.5-turbo-16k-0613",
117
+ "log_path": "logs/Boss"
118
+ },
119
+ "style": {
120
+ "role": "Boss"
121
+ },
122
+ "task": {
123
+ "task": "Present the project requirements to the team and articulate the project's objectives clearly."
124
+ },
125
+ "rule": {
126
+ "rule": "It's crucial to communicate the project's objectives, key deliverables, and any specific requirements comprehensively. This ensures that the entire team understands the project's significance and direction."
127
+ },
128
+ "demonstrations":{
129
+ "demonstrations":"Prepare a comprehensive project overview that encompasses the project's scope, objectives, any constraints, and desired outcomes. This document should outline the required features, performance goals, and budget constraints, for example."
130
+
131
+ }
132
+ },
133
+ "Architect_1": {
134
+ "LLM_type": "OpenAI",
135
+ "LLM": {
136
+ "temperature": 0.3,
137
+ "model": "gpt-3.5-turbo-16k-0613",
138
+ "log_path": "logs/Architect"
139
+ },
140
+ "style": {
141
+ "role": "Architect",
142
+ "style": "professional"
143
+ },
144
+ "task": {
145
+ "task": "Propose a Python framework based on the BOSS's requirements."
146
+ },
147
+ "rule": {
148
+ "rule": "Thoroughly analyze the project requirements, evaluate potential technologies, and select suitable design principles to meet the project's needs."
149
+ },
150
+ "demonstrations":{
151
+ "demonstrations":"Create a detailed Architect proposal document, including the rationale for choosing the proposed framework and accompanying design diagrams. For instance, provide an Architect diagram outlining the framework's high-level structure and a detailed explanation of why this architecture was selected."
152
+
153
+ }
154
+ },
155
+ "Leader": {
156
+ "LLM_type": "OpenAI",
157
+ "LLM": {
158
+ "temperature": 0.3,
159
+ "model": "gpt-3.5-turbo-16k-0613",
160
+ "log_path": "logs/Leader"
161
+ },
162
+ "style": {
163
+ "role": "Leader",
164
+ "style": "professional"
165
+ },
166
+ "task": {
167
+ "task": "evaluate the architecture proposal and provide specific feedback for improvement"
168
+ },
169
+ "rule": {
170
+ "rule": " Offer constructive feedback aligned with the project's objectives to enhance the proposed framework."
171
+ },
172
+ "demonstrations":{
173
+ "demonstrations":"Review Architect1's proposal meticulously and provide written feedback. Ensure the feedback is specific and includes actionable suggestions for improvement. For instance, you can point out areas that need improvement and explain how suggested changes align with project goals."
174
+
175
+ }
176
+ },
177
+ "Architect_2": {
178
+ "LLM_type": "OpenAI",
179
+ "LLM": {
180
+ "temperature": 0.3,
181
+ "model": "gpt-3.5-turbo-16k-0613",
182
+ "log_path": "logs/Architect"
183
+ },
184
+ "style": {
185
+ "role": "Architect_2",
186
+ "style": "professional"
187
+ },
188
+ "task": {
189
+ "task": "Finalize the Python framework based on Leader's feedback."
190
+ },
191
+ "rule": {
192
+ "rule": " Integrate Leader's feedback into the Architect proposal and make necessary adjustments to refine the framework."
193
+ },
194
+ "demonstrations":{
195
+ "demonstrations":"Revise the Architect proposal document to reflect the improvements suggested by Leader. Provide clear documentation of the changes made, including any revised design diagrams and explanations for incorporating Leader's feedback."
196
+
197
+ },
198
+ "ExtractComponent": {
199
+ "extract_words": [
200
+ "system"
201
+ ],
202
+ "system_prompt": "Please extract the modified system as completely as possible.",
203
+ "last_prompt": ""
204
+ }
205
+ }
206
+ }
207
+ },
208
+ "develop_state": {
209
+ "LLM_type": "OpenAI",
210
+ "LLM": {
211
+ "temperature": 0.3,
212
+ "model": "gpt-3.5-turbo-16k-0613",
213
+ "log_path": "logs/god"
214
+ },
215
+ "roles": [
216
+ "Boss",
217
+ "Developer_1",
218
+ "Leader",
219
+ "Developer_2"
220
+ ],
221
+ "controller": {
222
+ "controller_type": "order",
223
+ "max_chat_nums": 8,
224
+ "judge_system_prompt": "",
225
+ "judge_last_prompt": "",
226
+ "judge_extract_words": "end"
227
+ },
228
+ "environment_prompt": "In this scenario, the boss has provided a requirement. The developer's task is to write code based on the architecture proposed by the architect. The leader evaluates the written code for elegance, readability, and functionality, providing feedback. Another developer makes necessary modifications to the code.The target program is:<target>a snake game with python</target>",
229
+ "begin_role": "Boss",
230
+ "begin_query": "Please write code for the target game",
231
+ "agent_states": {
232
+ "Boss": {
233
+ "LLM_type": "OpenAI",
234
+ "LLM": {
235
+ "temperature": 0.3,
236
+ "model": "gpt-3.5-turbo-16k-0613",
237
+ "log_path": "logs/Boss"
238
+ },
239
+ "style": {
240
+ "role": "Boss",
241
+ "style": "august"
242
+ },
243
+ "task": {
244
+ "task": "Communicate the project requirements and vision to the team."
245
+ },
246
+ "rule": {
247
+ "rule": "Clearly define the project's objectives, functionality, and any specific requirements."
248
+ },
249
+ "demonstrations":{
250
+ "demonstrations":"Create a detailed project brief that outlines the scope, objectives, and specific features required for the project in a clear and concise manner. This document should provide a comprehensive understanding of what the project aims to achieve."
251
+
252
+ }
253
+ },
254
+ "Developer_1": {
255
+ "LLM_type": "OpenAI",
256
+ "LLM": {
257
+ "temperature": 0.3,
258
+ "model": "gpt-3.5-turbo-16k-0613",
259
+ "log_path": "logs/Developer"
260
+ },
261
+ "style": {
262
+ "role": "Developer",
263
+ "style": "professional"
264
+ },
265
+ "task": {
266
+ "task": "write elegant, readable, extensible, and efficient code"
267
+ },
268
+ "rule": {
269
+ "rule": "1.write code that conforms to standards like PEP8, is modular, easy to read, and maintainable. 2.Output the code only,Ensure that the code adheres to the Architect guidelines, coding standards, and best practices.3.The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
270
+ },
271
+ "demonstrations":{
272
+ "demonstrations":"Follow the Architect proposal closely while writing code. Document the code adequately, use meaningful variable names, and maintain proper code structure. For example, provide code snippets that demonstrate adherence to coding standards and Architect design.Output the code only."
273
+
274
+ },
275
+ "CustomizeComponent": {
276
+ "template": "You need to write code based on the following framework: {system}",
277
+ "keywords": [
278
+ "system"
279
+ ]
280
+ },
281
+ "last":{
282
+ "last_prompt":"The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
283
+ }
284
+ },
285
+ "Leader": {
286
+ "LLM_type": "OpenAI",
287
+ "LLM": {
288
+ "temperature": 0.3,
289
+ "model": "gpt-3.5-turbo-16k-0613",
290
+ "log_path": "logs/Leader"
291
+ },
292
+ "style": {
293
+ "role": "Leader",
294
+ "style": "professional"
295
+ },
296
+ "task": {
297
+ "task": "Evaluate the written code for elegance, readability, and functionality."
298
+ },
299
+ "rule": {
300
+ "rule": "Provide constructive feedback that helps improve code quality and alignment with project goals."
301
+ },
302
+ "demonstrations":{
303
+ "demonstrations":" Thoroughly review the code written by Developer1. Offer feedback on code organization, naming conventions, code efficiency, and any functional improvements needed. For instance, provide specific examples of code sections that require refinement and explain how these changes enhance code quality."
304
+
305
+ }
306
+ },
307
+ "Developer_2": {
308
+ "LLM_type": "OpenAI",
309
+ "LLM": {
310
+ "temperature": 0.3,
311
+ "model": "gpt-3.5-turbo-16k-0613",
312
+ "log_path": "logs/Developer"
313
+ },
314
+ "style": {
315
+ "role": "Developer",
316
+ "style": "professional"
317
+ },
318
+ "task": {
319
+ "task": " Make necessary modifications to the code based on Leader's feedback."
320
+ },
321
+ "rule": {
322
+ "rule": "1.make code modifications that conform to standards like PEP8, are modular, easy to read, and maintainable. 2.Output the code only,Incorporate Leader's feedback into the code and address any issues or improvements identified.3.The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
323
+ },
324
+ "demonstrations":{
325
+ "demonstrations":" Review the feedback provided by Leader and apply the suggested modifications to the code. Document the changes made and ensure that the updated code aligns with the project's goals and Architect guidelines. Provide examples of code segments before and after the modifications to illustrate the improvements."
326
+
327
+ },
328
+ "last":{
329
+ "last_prompt":"The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
330
+ },
331
+ "ExtractComponent": {
332
+ "extract_words": [
333
+ "code"
334
+ ],
335
+ "system_prompt": "Please extract the code as completely as possible.",
336
+ "last_prompt": ""
337
+ }
338
+ }
339
+ }
340
+ },
341
+ "debug_state": {
342
+ "LLM_type": "OpenAI",
343
+ "LLM": {
344
+ "temperature": 0.3,
345
+ "model": "gpt-3.5-turbo-16k-0613",
346
+ "log_path": "logs/god"
347
+ },
348
+ "roles": [
349
+ "Boss",
350
+ "Debugger",
351
+ "Developer_1",
352
+ "Leader",
353
+ "Developer_2"
354
+ ],
355
+ "controller": {
356
+ "controller_type": "order",
357
+ "max_chat_nums": 10,
358
+ "judge_system_prompt": "",
359
+ "judge_last_prompt": "",
360
+ "judge_extract_words": "end"
361
+ },
362
+ "environment_prompt": "In this scenario, the boss has provided a requirement. The debugger simulates a compiler to determine whether the code is runnable and provides feedback. The developer writes code based on the debugger's feedback. The leader evaluates whether the final code meets the boss's requirements and provides feedback for further modifications. The coder writes the final code to a file.The target program <target>a snake game with python</target>",
363
+ "begin_role": "Boss",
364
+ "begin_query": "Please make the code both runnable and more efficient.",
365
+ "agent_states": {
366
+ "Boss": {
367
+ "LLM_type": "OpenAI",
368
+ "LLM": {
369
+ "temperature": 0.3,
370
+ "model": "gpt-3.5-turbo-16k-0613",
371
+ "log_path": "logs/Boss"
372
+ },
373
+ "style": {
374
+ "role": "Boss",
375
+ "style": "august"
376
+ },
377
+ "task": {
378
+ "task": " Communicate the project requirements and vision to the team."
379
+ },
380
+ "rule": {
381
+ "rule": "Clearly define the project's objectives, functionality, and any specific requirements."
382
+ },
383
+ "demonstrations":{
384
+ "demonstrations":" Create a detailed project brief that outlines the scope, objectives, and specific features required for the project in a clear and concise manner. This document should provide a comprehensive understanding of what the project aims to achieve."
385
+
386
+ }
387
+ },
388
+ "Debugger": {
389
+ "LLM_type": "OpenAI",
390
+ "LLM": {
391
+ "temperature": 0.3,
392
+ "model": "gpt-3.5-turbo-16k-0613",
393
+ "log_path": "logs/Debugger"
394
+ },
395
+ "style": {
396
+ "role": "Debugger",
397
+ "style": "professional"
398
+ },
399
+ "task": {
400
+ "task": "Simulate a compiler to determine whether the code is runnable and provide feedback."
401
+ },
402
+ "rule": {
403
+ "rule": "Thoroughly test the code for syntax errors, logical issues, and other potential problems. Offer detailed feedback that helps the developer understand and resolve any issues.Please pay special attention to some logic bugs in the game, such as whether the game can run normally."
404
+ },
405
+ "demonstrations":{
406
+ "demonstrations":" Run the code provided by Developer1 through a simulated compiler or debugger. Document any errors, warnings, or issues encountered during the process. Provide feedback that includes specific examples of code problems and suggested solutions."
407
+
408
+ },
409
+ "CustomizeComponent": {
410
+ "template": "You need to Run the following code: {code}, through a simulated compiler or debugger. Document any errors, warnings, or issues encountered during the process. Provide feedback that includes specific examples of code problems and suggested solutions.",
411
+ "keywords": [
412
+ "code"
413
+ ]
414
+ }
415
+ },
416
+ "Developer_1": {
417
+ "LLM_type": "OpenAI",
418
+ "LLM": {
419
+ "temperature": 0.3,
420
+ "model": "gpt-3.5-turbo-16k-0613",
421
+ "log_path": "logs/Developer"
422
+ },
423
+ "style": {
424
+ "role": "Developer",
425
+ "style": "professional"
426
+ },
427
+ "task": {
428
+ "task": "write elegant, readable, extensible, and efficient code based on the debugger's feedback."
429
+ },
430
+ "rule": {
431
+ "rule": "1.write code that conforms to standards like PEP8, is modular, easy to read, and maintainable.\n2.Address the issues identified by the Debugger and ensure that the code meets the project's requirements.\n3.The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
432
+ },
433
+ "demonstrations":{
434
+ "demonstrations":" Review the feedback provided by the Debugger and make the necessary modifications to the code. Document the changes made and ensure that the code is free of errors and warnings. Provide examples of code segments before and after the modifications.Output the code only."
435
+ },
436
+ "last":{
437
+ "last_prompt":"The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
438
+ }
439
+ },
440
+ "Leader": {
441
+ "LLM_type": "OpenAI",
442
+ "LLM": {
443
+ "temperature": 0.3,
444
+ "model": "gpt-3.5-turbo-16k-0613",
445
+ "log_path": "logs/Leader"
446
+ },
447
+ "style": {
448
+ "role": "Leader",
449
+ "style": "professional"
450
+ },
451
+ "task": {
452
+ "task": "Evaluate whether the final code meets the boss's requirements and provide feedback for further modifications."
453
+ },
454
+ "rule": {
455
+ "rule": " Assess the code's alignment with the project's objectives, functionality, and quality standards. Offer constructive feedback to enhance the code's overall quality."
456
+ },
457
+ "demonstrations":{
458
+ "demonstrations":" Carefully review the code provided by Developer1 after addressing Debugger's feedback. Offer feedback on code organization, readability, and any functional improvements needed. Provide specific examples of code sections that require further refinement and explain how these changes enhance the code's quality."
459
+ }
460
+ },
461
+ "Developer_2": {
462
+ "LLM_type": "OpenAI",
463
+ "LLM": {
464
+ "temperature": 0.3,
465
+ "model": "gpt-3.5-turbo-16k-0613",
466
+ "log_path": "logs/Developer"
467
+ },
468
+ "style": {
469
+ "role": "Developer",
470
+ "style": "professional"
471
+ },
472
+ "task": {
473
+ "task": "Make further modifications to the code based on Leader's feedback."
474
+ },
475
+ "rule": {
476
+ "rule": "1.Incorporate Leader's feedback into the code and address any issues or improvements identified.,make code modifications that conform to standards like PEP8, are modular, easy to read, and maintainable. 2.The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
477
+ },
478
+ "demonstrations":{
479
+ "demonstrations":" Review the feedback provided by Leader and apply the suggested modifications to the code. Document the changes made and ensure that the updated code aligns with the project's goals and quality standards. Provide examples of code segments before and after the modifications.Output the code only."
480
+ },
481
+ "last":{
482
+ "last_prompt":"The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>"
483
+ },
484
+ "ExtractComponent": {
485
+ "extract_words": [
486
+ "code"
487
+ ],
488
+ "system_prompt": "Please extract the code for the target game,must be fully operational",
489
+ "last_prompt": ""
490
+ }
491
+ },
492
+ "Coder": {
493
+ "LLM_type": "OpenAI",
494
+ "LLM": {
495
+ "temperature": 0.3,
496
+ "model": "gpt-3.5-turbo-16k-0613",
497
+ "log_path": "logs/Coder"
498
+ },
499
+ "style": {
500
+ "role": "Coder",
501
+ "style": "professional"
502
+ },
503
+ "CodeComponent": {
504
+ "file_name": "snake_game.py",
505
+ "keyword": "code"
506
+ }
507
+ }
508
+ }
509
+ }
510
+ }
511
+ }
gradio_backend.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import argparse
3
+ import sys
4
+ from agents.utils import extract
5
+ from agents.SOP import SOP
6
+ from agents.Agent import Agent
7
+ from agents.Environment import Environment
8
+ from agents.Memory import Memory
9
+ from gradio_base import Client, convert2list4agentname
10
+
11
def process(action):
    """Turn a finished agent action into a Memory record.

    Echoes the reply to stdout for non-user turns, then wraps the
    role/name/response triple in a Memory object for the environment log.
    """
    reply = action.response
    speaker = action.name
    speaker_role = action.role
    if not action.is_user:
        print(f"{speaker}({speaker_role}):{reply}")
    return Memory(speaker_role, speaker, reply)
19
+
20
def gradio_process(action, current_state):
    """Stream an action's response to the UI server chunk by chunk.

    Each chunk is sent with a status code: 30 = user's turn (block for
    input), 12 = first message after a state transition, 11 = continuation
    chunk, 10 = first chunk of a normal agent message. Afterwards
    ``action.response`` is rewritten to the user's typed text (user turn)
    or the fully accumulated output (agent turn).
    """
    accumulated = ""
    sender = f"{action.name}({action.role})"
    for idx, chunk in enumerate(action.response):
        accumulated += chunk
        if action.is_user:
            status = 30
        elif action.state_begin:
            status = 12
            action.state_begin = False
        elif idx > 0:
            status = 11
        else:
            status = 10
        Client.send_server(str([status, sender, chunk, current_state.name]))
        if status == 30:
            # Block until the frontend relays the user's message.
            data: list = next(Client.receive_server)
            user_text = ""
            for item in data:
                if item.startswith("<USER>"):
                    user_text = item.split("<USER>")[1]
                    break
            action.response = user_text
            break
    else:
        action.response = accumulated
48
+
49
def init(config):
    """Build the SOP, agents, and environment from a config file path.

    Wires the cross-references (agents into the environment, name/role
    mappings into both SOP and environment) and returns the triple.
    """
    if not os.path.exists("logs"):
        os.mkdir("logs")
    sop = SOP.from_config(config)
    agents, roles_to_names, names_to_roles = Agent.from_config(config)
    environment = Environment.from_config(config)
    environment.agents = agents
    environment.roles_to_names = roles_to_names
    environment.names_to_roles = names_to_roles
    sop.roles_to_names = roles_to_names
    sop.names_to_roles = names_to_roles
    for agent in agents.values():
        agent.environment = environment
    return agents, sop, environment
61
+
62
def block_when_next(current_agent, current_state):
    """Pause before the next agent turn when the UI runs in single-step mode.

    User turns are never blocked here (gradio_process handles them), and
    the agent turn immediately following a user turn also runs freely.
    """
    if Client.LAST_USER:
        # The previous turn belonged to the user; let this agent turn run.
        assert not current_agent.is_user
        Client.LAST_USER = False
        return
    if current_agent.is_user:
        # If the next turn is the user's, we don't handle it here.
        Client.LAST_USER = True
        return
    if Client.FIRST_RUN:
        Client.FIRST_RUN = False
        return
    # Block the current process until the frontend signals "next step".
    if Client.mode == Client.SINGLE_MODE:
        role = current_agent.state_roles[current_state.name]
        Client.send_server(str([98, f"{current_agent.name}({role})", " ", current_state.name]))
        data: list = next(Client.receive_server)
78
+
79
def run(agents, sop, environment):
    """Drive the SOP turn loop until it reports completion.

    Each iteration picks the next state/agent, optionally blocks for the
    single-step UI, executes the agent's step, streams it to the UI, and
    records the result in the environment memory.
    """
    while True:
        current_state, current_agent = sop.next(environment, agents)
        block_when_next(current_agent, current_state)
        if sop.finished:
            print("finished!")
            Client.send_server(str([99, ' ', ' ', current_state.name]))
            os.environ.clear()
            break
        # component_dict = current_state[self.role[current_node.name]];
        # current_agent.compile(component_dict)
        action = current_agent.step(current_state)
        gradio_process(action, current_state)
        memory = process(action)
        environment.update_memory(memory, current_state)
92
+
93
def prepare(agents, sop, environment):
    """Handshake with the Gradio frontend and inject the user's requirement.

    Sends the default requirement and agent list to the UI, waits for the
    user to press start, then substitutes the user's requirement into every
    state's environment prompt.

    Note: the config wraps the target program in <target>...</target> tags
    (see "environment_prompt" entries in config.json), so both the
    extraction keyword and the replacement pattern must use "target".
    The previous code used "game"/"<game>...</game>", which never matched
    and silently left the prompts unchanged.
    """
    client = Client()
    Client.send_server = client.send_message

    # Pull the default target (e.g. "a snake game with python") out of the
    # <target> tags of the design state's prompt.
    requirement_game_name = extract(sop.states['design_state'].environment_prompt, "target")
    client.send_message(
        {
            "requirement": requirement_game_name,
            "agents_name": convert2list4agentname(sop)[0],
            # "only_name": DebateUI.convert2list4agentname(sop)[1],
            "only_name": convert2list4agentname(sop)[0],
            "default_cos_play_id": -1
        }
    )
    # Block until the user presses start in the frontend.
    client.listening_for_start_()
    client.mode = Client.mode = client.cache["mode"]
    new_requirement = Client.cache['requirement']
    for state in sop.states.values():
        # Swap the placeholder target for the user's requirement while
        # preserving the surrounding <target> tags the prompts rely on.
        state.environment_prompt = state.environment_prompt.replace(
            f"<target>{requirement_game_name}</target>",
            f"<target>{new_requirement}</target>",
        )
114
+
115
if __name__ == '__main__':
    # Command-line entry point: load the SOP config, perform the UI
    # handshake, then run the multi-agent loop.
    arg_parser = argparse.ArgumentParser(description='A demo of chatbot')
    arg_parser.add_argument('--agent', type=str, help='path to SOP json', default="config.json")
    cli_args = arg_parser.parse_args()

    agents, sop, environment = init(cli_args.agent)
    # add================================
    prepare(agents, sop, environment)
    # ===================================
    run(agents, sop, environment)
gradio_base.py ADDED
@@ -0,0 +1,559 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Emoji comes from this website:
18
+ # https://emojipedia.org/
19
+ import subprocess
20
+ from gradio_config import GradioConfig as gc
21
+ import gradio as gr
22
+ from typing import List, Tuple, Any
23
+ import time
24
+ import socket
25
+ import psutil
26
+ import os
27
+ from abc import abstractmethod
28
+
29
def convert2list4agentname(sop):
    """Collect agent display names across all SOP states.

    Returns:
        tuple:
            - sorted, de-duplicated list of "name(role)" display strings
            - list of bare names in encounter order (duplicates across
              states are kept, matching the original behavior)
    """
    decorated = []
    bare = []
    for roles_names in sop.roles_to_names.values():
        for role, name in roles_names.items():
            decorated.append(f"{name}({role})")
            bare.append(name)
    decorated = sorted(set(decorated))
    return decorated, bare
46
+
47
def is_port_in_use(port):
    """Return True if any local connection already occupies `port`."""
    return any(conn.laddr.port == port for conn in psutil.net_connections())
53
+
54
def check_port(port):
    """Determine an available port, persisting the choice in PORT.txt.

    If PORT.txt exists, reuse the port chosen by a previous process.
    Otherwise probe `port`..`port+9` for the first free one, write it to
    PORT.txt, and return it. (If all ten are busy, the original port is
    kept unchanged — same as the original behavior.)

    Fix: the original leaked file handles by calling open() without ever
    closing; both reads and writes now use context managers.
    """
    if os.path.isfile("PORT.txt"):
        with open("PORT.txt", "r", encoding='utf-8') as f:
            port = int(f.readlines()[0])
    else:
        for offset in range(10):
            if not is_port_in_use(port + offset):
                port += offset
                break
        with open("PORT.txt", "w") as f:
            f.writelines(str(port))
    return port
66
+
67
# Protocol sentinels shared with the frontend ("determine some heads").
SPECIAL_SIGN = {
    "START": "<START>",
    "SPLIT": "<SELFDEFINESEP>",
    "END": "<ENDSEP>"
}
HOST = "127.0.0.1"
# 15000 is the starting port number for the availability search.
PORT = check_port(15000)
77
+
78
def print_log(message: str):
    """Print `message` prefixed with the current wall-clock time."""
    print("[" + time.ctime() + "]" + message)
80
+
81
# Shared transcript of the whole session, grouped by speaker type:
# "user" and "system" are flat message lists; "agent" maps each agent
# to its own messages.
global_dialog = {
    "user": [],
    "agent": {},
    "system": [],
}
86
+
87
+ class UIHelper:
88
+ """Static Class"""
89
+
90
+ @classmethod
91
+ def wrap_css(cls, content, name) -> str:
92
+ """
93
+ Description:
94
+ Wrap CSS around each output, and return it in HTML format for rendering with Markdown.
95
+ Input:
96
+ content: Output content
97
+ name: Whose output is it
98
+ Output:
99
+ HTML
100
+ """
101
+ assert name in gc.OBJECT_INFO, \
102
+ f"The current name `{name}` is not registered with an image. The names of the currently registered agents are in `{gc.OBJECT_INFO.keys()}`. Please use `GradioConfig.add_agent()` from `Gradio_Config/gradio_config.py` to bind the name of the new agent."
103
+ output = ""
104
+ info = gc.OBJECT_INFO[name]
105
+ if info["id"] == "USER":
106
+ output = gc.BUBBLE_CSS["USER"].format(
107
+ info["bubble_color"], # Background-color
108
+ info["text_color"], # Color of the agent's name
109
+ name, # Agent name
110
+ info["text_color"], # Font color
111
+ info["font_size"], # Font size
112
+ content, # Content
113
+ info["head_url"] # URL of the avatar
114
+ )
115
+ elif info["id"] == "SYSTEM":
116
+ output = gc.BUBBLE_CSS["SYSTEM"].format(
117
+ info["bubble_color"], # Background-color
118
+ info["font_size"], # Font size
119
+ info["text_color"], # Font color
120
+ name, # Agent name
121
+ content # Content
122
+ )
123
+ elif info["id"] == "AGENT":
124
+ output = gc.BUBBLE_CSS["AGENT"].format(
125
+ info["head_url"], # URL of the avatar
126
+ info["bubble_color"], # Background-color
127
+ info["text_color"], # Font color
128
+ name, # Agent name
129
+ info["text_color"], # Font color
130
+ info["font_size"], # Font size
131
+ content, # Content
132
+ )
133
+ else:
134
+ assert False, f"Id `{info['id']}` is invalid. The valid id is in ['SYSTEM', 'AGENT', 'USER']"
135
+ return output
136
+
137
+ @classmethod
138
+ def novel_filter(cls, content, agent_name):
139
+
140
+ """比如<CONTENT>...</CONTENT>,就应该输出CONTENT:..."""
141
+ IS_RECORDER = agent_name.lower() in ["recorder", "summary"]
142
+ if IS_RECORDER:
143
+ BOLD_FORMAT = """<div style="color: #000000; display:inline">
144
+ <b>{}</b>
145
+ </div>
146
+ <span style="color: black;">
147
+ """
148
+ else:
149
+ BOLD_FORMAT = "<b>{}</b>"
150
+ CENTER_FORMAT = """<div style="background-color: #F0F0F0; text-align: center; padding: 5px; color: #000000">
151
+ <b>{}</b>
152
+ </div>
153
+ """
154
+ START_FORMAT = "<{}>"
155
+ END_FORMAT = "</{}>"
156
+ mapping = {
157
+ "TARGET": "🎯 Current Target: ",
158
+ "NUMBER": "🍖 Required Number: ",
159
+ "THOUGHT": "🤔 Overall Thought: ",
160
+ "FIRST NAME": "⚪ First Name: ",
161
+ "LAST NAME": "⚪ Last Name: ",
162
+ "ROLE": "🤠 Character Properties: ",
163
+ "RATIONALES": "🤔 Design Rationale: ",
164
+ "BACKGROUND": "🚊 Character Background: ",
165
+ "ID": "🔴 ID: ",
166
+ "TITLE": "🧩 Chapter Title: ",
167
+ "ABSTRACT": "🎬 Abstract: ",
168
+ "CHARACTER INVOLVED": "☃️ Character Involved: ",
169
+ "ADVICE": "💬 Advice:",
170
+ "NAME": "📛 Name: ",
171
+ "GENDER": "👩‍👩‍👦‍👦 Gender: ",
172
+ "AGE": "⏲️ Age: ",
173
+ "WORK": "👨‍🔧 Work: ",
174
+ "PERSONALITY": "🧲 Character Personality: ",
175
+ "SPEECH STYLE": "🗣️ Speaking Style: ",
176
+ "RELATION": "🏠 Relation with Others: ",
177
+ "WORD COUNT": "🎰 Word Count: ",
178
+ "CHARACTER DESIGN": "📈 Character Design: ",
179
+ "CHARACTER REQUIRE": "📈 Character Require: ",
180
+ "CHARACTER NAME": "📈 Character Naming Analysis: ",
181
+ "CHARACTER NOW": "📈 Character Now: ",
182
+ "OUTLINE DESIGN": "📈 Outline Design: ",
183
+ "OUTLINE REQUIRE": "📈 Outline Require: ",
184
+ "OUTLINE NOW": "📈 Outline Now: ",
185
+ "SUB TASK": "🎯 Current Sub Task: ",
186
+ "CHARACTER ADVICE": "💬 Character Design Advice: ",
187
+ "OUTLINE ADVANTAGE": "📈 Outline Advantage: ",
188
+ "OUTLINE DISADVANTAGE": "📈 Outline Disadvantage: ",
189
+ "OUTLINE ADVICE": "💬 Outline Advice: ",
190
+ "NEXT": "➡️ Next Advice: ",
191
+ "TOTAL NUMBER": "🔢 Total Number: "
192
+ }
193
+ for i in range(1, 10):
194
+ mapping[f"CHARACTER {i}"] = f"🦄 Character {i}"
195
+ mapping[f"SECTION {i}"] = f"🏷️ Chapter {i}"
196
+ for key in mapping:
197
+ if key in [f"CHARACTER {i}" for i in range(1, 10)] \
198
+ or key in [f"SECTION {i}" for i in range(1, 10)] \
199
+ :
200
+ content = content.replace(
201
+ START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key])
202
+ )
203
+ elif key in ["TOTAL NUMBER"]:
204
+ content = content.replace(
205
+ START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key]) + """<span style="color: black;">"""
206
+ )
207
+ content = content.replace(
208
+ END_FORMAT.format(key), "</span>"
209
+ )
210
+ else:
211
+ content = content.replace(
212
+ START_FORMAT.format(key), BOLD_FORMAT.format(mapping[key])
213
+ )
214
+
215
+ content = content.replace(
216
+ END_FORMAT.format(key), "</span>" if IS_RECORDER else ""
217
+ )
218
+ return content
219
+
220
+ @classmethod
221
+ def singleagent_filter(cls, content, agent_name):
222
+ return content
223
+
224
+ @classmethod
225
+ def debate_filter(cls, content, agent_name):
226
+ return content
227
+
228
+ @classmethod
229
+ def code_filter(cls, content, agent_name):
230
+ # return content.replace("```python", "<pre><code>").replace("```","</pre></code>")
231
+ return content
232
+
233
+ @classmethod
234
+ def general_filter(cls, content, agent_name):
235
+ return content
236
+
237
+ @classmethod
238
+ def filter(cls, content: str, agent_name: str, ui_name: str):
239
+ """
240
+ Description:
241
+ Make certain modifications to the output content to enhance its aesthetics when content is showed in gradio.
242
+ Input:
243
+ content: output content
244
+ agent_name: Whose output is it
245
+ ui_name: What UI is currently launching
246
+ Output:
247
+ Modified content
248
+ """
249
+ mapping = {
250
+ "SingleAgentUI": cls.singleagent_filter,
251
+ "DebateUI": cls.debate_filter,
252
+ "NovelUI": cls.novel_filter,
253
+ "CodeUI": cls.code_filter,
254
+ "GeneralUI": cls.general_filter
255
+ }
256
+ if ui_name in mapping:
257
+ return mapping[ui_name](content, agent_name)
258
+ else:
259
+ return content
260
+
261
class Client:
    """
    For inter-process communication, this is the client.
    `gradio_backend.PY` serves as the backend, while `run_gradio` is the frontend.
    Communication between the frontend and backend is accomplished using Sockets.
    """
    # =======================Radio Const String======================
    SINGLE_MODE = "Single Mode"
    AUTO_MODE = "Auto Mode"
    MODE_LABEL = "Select the execution mode"
    MODE_INFO = "Single mode refers to when the current agent output ends, it will stop running until you click to continue. Auto mode refers to when you complete the input, all agents will continue to output until the task ends."
    # ===============================================================
    # Currently selected execution mode; class-wide state shared by all instances.
    mode = AUTO_MODE
    # True until the first run has been triggered from the UI.
    FIRST_RUN:bool = True
    # if last agent is user, then next agent will be executed automatically rather than click button
    LAST_USER:bool = False

    # Generator created from `receive_message()` in `listening_for_start_()`.
    receive_server = None
    # Generator created from `start_server()` — NOTE(review): set up elsewhere; confirm in gradio_backend.
    send_server = None
    # NOTE(review): current SOP node — appears to be assigned outside this class; confirm.
    current_node = None
    # Data shared between backend and frontend (e.g. agent names), filled during the handshake.
    cache = {}

    def __init__(self, host=HOST, port=PORT, bufsize=1024):
        """Connect to the frontend's socket server and run the hi/hello/check handshake.

        Args:
            host: server host to connect to.
            port: server port to connect to.
            bufsize: receive buffer size in bytes (must be positive).
        """
        assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
        self.SIGN = SPECIAL_SIGN
        self.bufsize = bufsize
        assert bufsize > 0
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect((host, port))
        # Handshake: server sends "hi", we answer "hello agent", server confirms with "check".
        while True:
            data = self.client_socket.recv(self.bufsize).decode('utf-8')
            if data == "hi":
                self.client_socket.send("hello agent".encode('utf-8'))
                time.sleep(1)
            elif data == "check":
                break
        print_log("Client: connecting successfully......")

    def start_server(self):
        """Generator loop: each value pushed via `.send(msg)` is forwarded to the server.

        Sending the string 'exit' terminates the generator.
        """
        while True:
            message = yield
            if message == 'exit':
                break
            self.send_message(message=message)

    def send_message(self, message):
        """Send the message to the server."""
        # Lists/dicts are serialized with str(); the receiving side reverses this with eval().
        if isinstance(message, list) or isinstance(message, dict):
            message = str(message)
        assert isinstance(message, str)
        # The SPLIT sign delimits messages inside the TCP byte stream.
        message = message + self.SIGN["SPLIT"]
        self.client_socket.send(message.encode('utf-8'))

    def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
        """Receive messages from the server, and it will block the process. Supports receiving long text.

        Yields lists of complete messages; the caller may push a new
        (end_identifier, split_identifier) pair back via `.send(...)`.
        """
        remaining = ""
        while True:
            # receive message
            dataset = self.client_socket.recv(self.bufsize)
            try:
                # If decoding fails, it indicates that the current transmission is a long text.
                dataset = dataset.decode('utf-8')
            except UnicodeDecodeError:
                # A multi-byte UTF-8 character was cut at the buffer boundary:
                # accumulate raw bytes until they decode cleanly.
                if not isinstance(remaining, bytes):
                    remaining = remaining.encode('utf-8')
                assert isinstance(dataset, bytes)
                remaining += dataset
                try:
                    dataset = remaining.decode('utf-8')
                    remaining = ""
                except UnicodeDecodeError:
                    continue
            assert isinstance(remaining, str)
            dataset = remaining + dataset
            list_dataset = dataset.split(split_identifier)
            if len(list_dataset) == 1:
                # If there is only one result from the split, it indicates that the current sequence itself has not yet ended.
                remaining = list_dataset[0]
                continue
            else:
                remaining = list_dataset[-1]
            # Receive successfully
            list_dataset = list_dataset[:-1]
            return_value = []
            for item in list_dataset:
                if end_identifier is not None and item == end_identifier:
                    break
                return_value.append(item)
            identifier = yield return_value
            if identifier is not None:
                end_identifier, split_identifier = identifier

    def listening_for_start_(self):
        """
        When the server starts, the client is automatically launched.
        At this point, process synchronization is required,
        such as sending client data to the server for rendering,
        then the server sending the modified data back to the client,
        and simultaneously sending a startup command.
        Once the client receives the data, it will start running.
        """
        Client.receive_server = self.receive_message()
        # Waiting for information from the server.
        data: list = next(Client.receive_server)
        assert len(data) == 1
        # NOTE(review): eval() on socket data — acceptable only because the peer
        # is our own trusted local process; never expose this port externally.
        data = eval(data[0])
        assert isinstance(data, dict)
        Client.cache.update(data)
        # Waiting for start command from the server.
        data:list = Client.receive_server.send(None)
        assert len(data) == 1
        assert data[0] == "<START>"
373
+
374
class WebUI:
    """
    The base class for the frontend, which encapsulates some functions for process information synchronization.
    When a new frontend needs to be created, you should inherit from this class,
    then implement the `construct_ui()` method and set up event listeners.
    Finally, execute `run()` to load it.
    """

    def receive_message(
        self,
        end_identifier:str=None,
        split_identifier:str=SPECIAL_SIGN["SPLIT"]
    )->List:
        """This is the same as in Client class."""
        # Priming value: `_connect()` calls next() once and checks it receives "hello".
        yield "hello"
        remaining = ""
        while True:
            dataset = self.client_socket.recv(self.bufsize)
            try:
                dataset = dataset.decode('utf-8')
            except UnicodeDecodeError:
                # A multi-byte UTF-8 character was split across recv() calls:
                # buffer raw bytes until they decode cleanly.
                if not isinstance(remaining, bytes):
                    remaining = remaining.encode('utf-8')
                assert isinstance(dataset, bytes)
                remaining += dataset
                try:
                    dataset = remaining.decode('utf-8')
                    remaining = ""
                except UnicodeDecodeError:
                    continue
            assert isinstance(remaining, str)
            dataset = remaining + dataset
            list_dataset = dataset.split(split_identifier)
            if len(list_dataset) == 1:
                # No split marker yet: the current message is still incomplete.
                remaining = list_dataset[0]
                continue
            else:
                remaining = list_dataset[-1]
            list_dataset = list_dataset[:-1]
            return_value = []
            for item in list_dataset:
                if end_identifier is not None and item == end_identifier:
                    break
                return_value.append(item)
            identifier = yield return_value
            # Caller may push a new (end_identifier, split_identifier) pair via send().
            if identifier is not None:
                end_identifier, split_identifier = identifier

    def send_message(self, message:str):
        """Send message to client."""
        SEP = self.SIGN["SPLIT"]
        self.client_socket.send(
            (message+SEP).encode("utf-8")
        )

    def _connect(self):
        """Spawn the backend subprocess and establish the socket connection to it."""
        # check
        if self.server_socket:
            self.server_socket.close()
        # NOTE(review): PORT.txt seems to be created by check_port()/the backend and
        # removed below once the handshake completes — confirm in check_port().
        assert not os.path.isfile("PORT.txt")
        self.socket_port = check_port(PORT)
        # Step1. initialize
        self.server_socket = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM
        )
        # Step2. binding ip and port
        self.server_socket.bind((self.socket_host, self.socket_port))
        # Step3. run client
        self._start_client()

        # Step4. listening for connect
        self.server_socket.listen(1)

        # Step5. test connection
        client_socket, client_address = self.server_socket.accept()
        print_log("server: establishing connection......")
        self.client_socket = client_socket
        # Mirror of Client.__init__'s handshake: hi -> hello agent -> check.
        while True:
            client_socket.send("hi".encode('utf-8'))
            time.sleep(1)
            data = client_socket.recv(self.bufsize).decode('utf-8')
            if data == "hello agent":
                client_socket.send("check".encode('utf-8'))
                print_log("server: connect successfully")
                break
        assert os.path.isfile("PORT.txt")
        os.remove("PORT.txt")
        if self.receive_server:
            del self.receive_server
        self.receive_server = self.receive_message()
        assert next(self.receive_server) == "hello"

    @abstractmethod
    def render_and_register_ui(self):
        # You need to implement this function.
        # The function's purpose is to bind the name of the agent with an image.
        # The name of the agent is stored in `self.cache[]`,
        # and the function for binding is in the method `add_agents` of the class `GradioConfig` in `Gradio_Config/gradio_config.py``.
        # This function will be executed in `self.first_recieve_from_client()`
        pass

    def first_recieve_from_client(self, reset_mode:bool=False):
        """
        This function is used to receive information from the client and is typically executed during the initialization of the class.
        If `reset_mode` is False, it will bind the name of the agent with an image.
        """
        self.FIRST_RECIEVE_FROM_CLIENT = True
        data_list:List = self.receive_server.send(None)
        assert len(data_list) == 1
        # NOTE(review): eval() on socket data is tolerated only because the peer
        # is the backend subprocess we launched ourselves.
        data = eval(data_list[0])
        assert isinstance(data, dict)
        self.cache.update(data)
        if not reset_mode:
            self.render_and_register_ui()

    def _second_send(self, message:dict):
        # Send the modified message.
        # It will be executed in `self.send_start_cmd()` automatically.
        self.send_message(str(message))

    def _third_send(self):
        # Send start command.
        # It will be executed in `self.send_start_cmd()` automatically.
        self.send_message(self.SIGN['START'])

    def send_start_cmd(self, message:dict={"hello":"hello"}):
        # If you have no message to send, you can ignore the args `message`.
        # NOTE(review): mutable default argument — harmless here because it is
        # never mutated, but a None default would be safer if this signature changes.
        assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
        self._second_send(message=message)
        time.sleep(1)
        self._third_send()
        self.FIRST_RECIEVE_FROM_CLIENT = False

    def __init__(
        self,
        client_cmd: list, # ['python','test.py','--a','b','--c','d']
        socket_host: str = HOST,
        socket_port: int = PORT,
        bufsize: int = 1024,
        ui_name: str = ""
    ):
        """Store connection settings and immediately connect to the backend.

        Args:
            client_cmd: command line used to spawn the backend subprocess.
            socket_host: host for the frontend<->backend socket.
            socket_port: preferred port (the actual port is chosen by check_port()).
            bufsize: socket receive buffer size in bytes (must be positive).
            ui_name: name used by UIHelper.filter to select a content filter.
        """
        self.ui_name = ui_name
        self.server_socket = None
        self.SIGN = SPECIAL_SIGN
        self.socket_host = socket_host
        self.socket_port = socket_port
        self.bufsize = bufsize
        self.client_cmd = client_cmd

        self.receive_server = None
        self.cache = {}
        assert self.bufsize > 0
        self._connect()

    def _start_client(self):
        """Launch the backend process with the stored command line."""
        print(f"server: executing `{' '.join(self.client_cmd)}` ...")
        self.backend = subprocess.Popen(self.client_cmd)

    def _close_client(self):
        """Terminate the backend process."""
        print(f"server: killing `{' '.join(self.client_cmd)}` ...")
        self.backend.terminate()

    def reset(self):
        """Restart the backend and re-establish the socket connection."""
        print("server: restarting ...")
        self._close_client()
        time.sleep(1)
        self._connect()

    def render_bubble(self, rendered_data, agent_response, node_name, render_node_name:bool=True):
        # Rendered bubbles (HTML format) are used for gradio output.
        output = f"**{node_name}**<br>" if render_node_name else ""
        for item in agent_response:
            for agent_name in item:
                content = item[agent_name].replace("\n", "<br>")
                content = UIHelper.filter(content, agent_name, self.ui_name)
                output = f"{output}<br>{UIHelper.wrap_css(content, agent_name)}"
        # Replace the text of the most recent chat row with the rendered HTML.
        rendered_data[-1] = [rendered_data[-1][0], output]
        return rendered_data

    def run(self,share: bool = True):
        """Launch the Gradio app.

        NOTE(review): the `share` argument is currently ignored — launch() is
        called without it; confirm whether that is intentional (e.g. HF Spaces).
        """
        self.demo.queue()
        self.demo.launch()
556
+
557
+
558
if __name__ == "__main__":
    # Library module: nothing to do when executed directly.
    pass
gradio_config.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ from PIL import Image
19
+ import requests
20
+ from typing import List, Tuple
21
+
22
class GradioConfig:
    """Registry of per-speaker display styles (avatar, bubble/text color, font size)
    used to render chat bubbles in the Gradio frontend."""

    # How many avatars are currently registered
    POINTER = 0

    # Avatar image. You can add or replace.
    AGENT_HEAD_URL = [
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687579617434043.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687592097408547.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561699613.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561275758.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021090300/ry5k31wt33c.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021090300/0ls2gmwhrf5.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/03/202303271679886128550253.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711344407060.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711345834296.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311194291520.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311196958993.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/vr0bkov0dwl.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/auqx5zfsv5g.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/llofpivtwls.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/3j2sdot3ye0.jpg",
        "https://img.touxiangwu.com/2020/3/nQfYf2.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068774532.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068289945.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918069785183.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561292003.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561578616.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726564597524.jpg"
    ]
    USER_HEAD_URL = "https://img.touxiangwu.com/zb_users/upload/2023/05/202305301685407468585486.jpg"

    # The css style of gradio.Chatbot
    CSS = """
    #chatbot1 .user {
        background-color:transparent;
        border-color:transparent;
    }
    #chatbot1 .bot {
        background-color:transparent;
        border-color:transparent;
    }
    #btn {color: red; border-color: red;}
    """

    # Valid values for an OBJECT_INFO entry's "id" field.
    ID = ["USER", "AGENT", "SYSTEM"]

    # Bubble template
    BUBBLE_CSS = {
        # Background-color Name-color Name-content Font-color Font-size Content Avatar-URL
        "USER": """
        <div style="display: flex; align-items: flex-start; justify-content: flex-end;">
            <div style="background-color: {}; border-radius: 20px 0px 20px 20px; padding: 15px; min-width: 100px; max-width: 300px;">
                <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
                <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
            </div>
            <img src="{}" alt="USER" style="width: 50px; height: 50px; border-radius: 50%; margin-left: 10px;">
        </div>
        """,

        # Avatar-URL Background-color Name-color Name-Content Font-color Font-size Content
        "AGENT": """
        <div style="display: flex; align-items: flex-start;">
            <img src="{}" alt="AGENT" style="width: 50px; height: 50px; border-radius: 50%; margin-right: 10px;">
            <div style="background-color: {}; border-radius: 0px 20px 20px 20px; padding: 15px; min-width: 100px; max-width: 600px;">
                <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
                <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
            </div>
        </div>
        """,

        # Background-color Font-size Font-color Name Content
        "SYSTEM": """
        <div style="display: flex; align-items: center; justify-content: center;">
            <div style="background-color: {}; border-radius: 20px; padding: 1px; min-width: 200px; max-width: 1000px;">
                <p style="margin: 0; padding: 0; text-align: center; font-size: {}px; font-weight: bold; font-family: '微软雅黑', sans-serif; color: {};">{}:{}</p>
            </div>
        </div>
        """
    }

    # Maps an SOP role identifier to the agent's display name (filled by init()).
    ROLE_2_NAME = {}

    # Per-speaker style table consumed by UIHelper.wrap_css; agents are appended
    # by init()/add_agent().
    OBJECT_INFO = {

        "User": {
            # https://img-blog.csdnimg.cn/img_convert/7c20bc39ac69b6972a22e18762d02db3.jpeg
            "head_url": USER_HEAD_URL,
            "bubble_color": "#95EC69",
            "text_color": "#000000",
            "font_size": 0,
            "id": "USER"
        },

        "System": {
            # https://img-blog.csdnimg.cn/img_convert/e7e5887cfff67df8c2205c2ef0e5e7fa.png
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/03/202303141678768524747045.jpg",
            "bubble_color": "#7F7F7F", ##FFFFFF
            "text_color": "#FFFFFF", ##000000
            "font_size": 0,
            "id": "SYSTEM"
        },

        "wait": {
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2022/12/202212011669881536145501.jpg",
            "bubble_color": "#E7CBA6",
            "text_color": "#000000",
            "font_size": 0,
            "id": "AGENT"
        },

        "Recorder": {
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
            "bubble_color": "#F7F7F7",
            "text_color": "#000000",
            "font_size": 0,
            "id": "AGENT"
        }
    }

    @classmethod
    def color_for_img(cls, url):
        """
        Extract the main colors from the picture and set them as the background color,
        then determine the corresponding text color.
        """

        def get_main_color(image):
            """Return the most frequent RGB pixel value in the image."""
            image = image.convert("RGB")
            width, height = image.size
            pixels = image.getcolors(width * height)
            most_common_pixel = max(pixels, key=lambda item: item[0])
            return most_common_pixel[1]

        def is_dark_color(rgb_color):
            """True when the color's perceived luminance is below 50%."""
            r, g, b = rgb_color
            luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
            return luminance < 0.5

        def download_image(url):
            """Download the avatar to ./image.jpg in the CWD.

            On a non-200 response nothing is written, so get_color() below will
            open whatever image.jpg already exists (or raise if none does).
            """
            print(f"binding: {url}")
            response = requests.get(url)
            if response.status_code == 200:
                with open('image.jpg', 'wb') as f:
                    f.write(response.content)

        def rgb_to_hex(color):
            """Format an (r, g, b) tuple as an uppercase #RRGGBB string."""
            return "#{:02X}{:02X}{:02X}".format(color[0], color[1], color[2])

        def get_color(image_url):
            """Download the image and return (background hex color, text hex color)."""
            download_image(image_url)

            image = Image.open("image.jpg")
            main_color = get_main_color(image)
            is_dark = is_dark_color(main_color)

            if is_dark:
                font_color = "#FFFFFF"
            else:
                font_color = "#000000"

            return rgb_to_hex(main_color), font_color

        return get_color(url)

    @classmethod
    def init(cls, JSON):
        # Deprecated
        # Registers every agent found in an SOP config file and returns the roles
        # present in the first node. (FISRT_NODE is a historical typo kept as-is.)
        with open(JSON) as f:
            sop = json.load(f)
        cnt = 0
        FISRT_NODE = True
        fisrt_node_roles = []
        for node_name in sop['nodes']:
            node_info = sop['nodes'][node_name]
            agent_states = node_info['agent_states']
            for agent_role in agent_states:
                name = agent_states[agent_role]['style']['name']
                cls.ROLE_2_NAME[agent_role] = name
                if FISRT_NODE:
                    fisrt_node_roles.append(agent_role)
                bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cnt])
                cls.OBJECT_INFO[name] = {
                    "head_url": f"{cls.AGENT_HEAD_URL[cnt]}",
                    "bubble_color": bubble_color,
                    "text_color": text_color,
                    "font_size": 0,
                    "id": "AGENT"
                }
                cnt += 1
            if FISRT_NODE:
                FISRT_NODE = False
        print(cls.OBJECT_INFO)
        # System messages render smaller than user/agent bubbles.
        for usr_name in cls.OBJECT_INFO:
            if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
                cls.OBJECT_INFO[usr_name]["font_size"] = 12
            elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
                cls.OBJECT_INFO[usr_name]["font_size"] = 16
            else:
                assert False
        return fisrt_node_roles

    @classmethod
    def add_agent(cls, agents_name:List):
        """Assign the next free avatar (and colors derived from it) to each given agent name."""
        for name in agents_name:
            bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cls.POINTER])
            cls.OBJECT_INFO[name] = {
                "head_url": f"{cls.AGENT_HEAD_URL[cls.POINTER]}",
                "bubble_color": bubble_color,
                "text_color": text_color,
                "font_size": 0,
                "id": "AGENT"
            }
            cls.POINTER += 1
        # System messages render smaller than user/agent bubbles.
        for usr_name in cls.OBJECT_INFO:
            if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
                cls.OBJECT_INFO[usr_name]["font_size"] = 12
            elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
                cls.OBJECT_INFO[usr_name]["font_size"] = 16
            else:
                assert False
243
+
244
+
245
class StateConfig:
    """UI configuration for the step progress bar (indicating the current node)"""

    CSS = """
    :root {
        --gradient-start: 100%;
        --gradient-end: 0%;
    }
    .container.progress-bar-container {
        position: relative;
        display: flex;
        align-items: flex-end;
        width: 100%;
        overflow-x: auto;
        padding-bottom: 30px;
        padding-top: 20px
    }
    .container.progress-bar-container::-webkit-scrollbar {
        width: 8px;
        background-color: transparent;
    }

    .container.progress-bar-container::-webkit-scrollbar-thumb {
        background-color: transparent;
    }

    .progress-bar-container .progressbar {
        counter-reset: step;
        white-space: nowrap;
    }
    .progress-bar-container .progressbar li {
        list-style: none;
        display: inline-block;
        width: 200px;
        position: relative;
        text-align: center;
        cursor: pointer;
        white-space: normal;
    }
    .progress-bar-container .progressbar li:before {
        content: counter(step);
        counter-increment: step;
        width: 30px;
        height: 30px;
        line-height: 30px;
        border: 1px solid #ddd;
        border-radius: 100%;
        display: block;
        text-align: center;
        margin: 0 auto 10px auto;
        background-color: #ffffff;
    }
    .progress-bar-container .progressbar li:after {
        content: attr(data-content);
        position: absolute;
        width: 87%;
        height: 2px;
        background-color: #dddddd;
        top: 15px;
        left: -45%;
    }
    .progress-bar-container .progressbar li:first-child:after {
        content: none;
    }
    .progress-bar-container .progressbar li.active {
        color: green;
    }
    .progress-bar-container .progressbar li.active:before {
        border-color: green;
        background-color: green;
        color: white;
    }
    .progress-bar-container .progressbar li.active + li:after {
        background: linear-gradient(to right, green var(--gradient-start), lightgray var(--gradient-end));
    }
    .progress-bar-container .small-element {
        transform: scale(0.8);
    }
    .progress-bar-container .progressbar li span {
        position: absolute;
        top: 40px;
        left: 0;
        width: 100%;
        text-align: center;
    }
    .progress-bar-container .progressbar li .data-content {
        position: absolute;
        width: 100%;
        top: -10px;
        left: -100px;
        text-align: center;
    }
    """

    # Full HTML page skeleton: first placeholder receives CSS, second the <li> items.
    FORMAT = """
    <html>
    <head>
    <style>
    {}
    </style>
    </head>
    <body>
    <br>
    <center>
    <div class="container progress-bar-container">
    <ul class="progressbar">
    {}
    </ul>
    </div>
    </center>
    </body>
    </html>
    """

    # Display names of the progress-bar states; set by create_states().
    STATES_NAME:List[str] = None

    @classmethod
    def _generate_template(cls, types:str)->str:
        """Return the HTML <li> template for one progress-bar state of the given kind."""
        # normal: A state with no execution.
        # active-show-up: Active state, and content displayed above the horizontal line.
        # active-show-down: Active state, and content displayed below the horizontal line.
        # active-show-both: Active state, and content displayed both above and below the horizontal line.
        # active-show-none: Active state, with no content displayed above the horizontal line.

        assert types.lower() in ["normal","active-show-up", "active-show-down", "active-show-both", "active", "active-show-none"]
        both_templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
        <div class="data-content">
            <center>
            <p style="line-height: 1px;"></p>
            {}
            <p>
            {}
            </p>
            </center>
        </div>
        <span>{}</span>
        </li>"""

        if types.lower() == "normal":
            templates = "<li><span>{}</span></li>"
        elif types.lower() == "active":
            templates = """<li class="active"><span>{}</span></li>"""
        elif types.lower() == "active-show-up":
            # Pre-fill the "below" slot with an empty string; remaining {} stay open.
            templates = both_templates.format("{}","{}", "{}", "", "{}")
        elif types.lower() == "active-show-down":
            # Pre-fill the "above" slot with an empty string.
            templates = both_templates.format("{}","{}", "", "{}", "{}")
        elif types.lower() == "active-show-both":
            templates = both_templates
        elif types.lower() == "active-show-none":
            templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
            <span>{}</span>
            </li>"""
        else:
            assert False
        return templates

    @classmethod
    def update_states(cls, current_states:List[int], current_templates:List[str], show_content:List[Tuple[str]])->str:
        """Render the whole progress bar as a string of <li> items.

        Args:
            current_states: progress percentage (0-100) per state; 0 means not started.
            current_templates: template kind per state (see _generate_template).
            show_content: per-state (above, below) text pairs shown around the connector.

        Returns:
            The newline-joined HTML for all states.
        """
        assert len(current_states) == len(current_templates)
        # You can dynamically change the number of states.
        # assert len(current_states) == len(cls.STATES_NAME)
        css_code = []
        for idx in range(len(current_states)):
            if idx == 0:
                # The first state has no incoming connector: plain active/normal node.
                if current_states[idx] != 0:
                    css_code = [f"{cls._generate_template('active').format(cls.STATES_NAME[idx])}"]
                else:
                    css_code = [f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"]
                continue
            if current_states[idx-1] == 0:
                # new_code = f"{cls._generate_template('normal').format(*(show_content[idx]))}"
                new_code = f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"
            else:
                # Connector gradient reflects how far the previous state has progressed.
                new_code = f"{cls._generate_template(current_templates[idx]).format(current_states[idx-1], 100-current_states[idx-1],*(show_content[idx-1]), cls.STATES_NAME[idx])}"
            if current_states[idx-1] != 100 or (current_states[idx]==0 and current_states[idx-1]==100):
                # Demote the node from "active" when its predecessor is unfinished
                # (or it has not itself started right after a finished predecessor).
                new_code = new_code.replace("""li class="active" ""","""li """)
            css_code.append(new_code)
        return "\n".join(css_code)

    @classmethod
    def create_states(cls, states_name:List[str], manual_create_end_nodes:bool=False):
        # Create states
        # NOTE(review): when manual_create_end_nodes is True the caller's list is
        # mutated in place ("Done" appended) — confirm callers expect this.
        if manual_create_end_nodes:
            states_name.append("Done")
        css_code = ""
        cls.STATES_NAME: List[str] = states_name
        for name in states_name:
            css_code = f"{css_code}\n{cls._generate_template('normal').format(name)}"
        return css_code
434
+
435
+
436
if __name__ == "__main__":
    # Configuration module: nothing to do when executed directly.
    pass
image.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ ai-agents
run_gradio.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os
3
+ from gradio_base import WebUI, UIHelper, PORT, HOST, Client
4
+ from gradio_config import GradioConfig as gc
5
+ from typing import List, Tuple, Any
6
+ import gradio as gr
7
+ import time
8
+
9
class CodeUI(WebUI):
    """Gradio front-end for the code-generation demo.

    Builds a two-column Blocks layout (chat transcript on the left, generated
    files and a code preview on the right) and streams tokens from a backend
    process reached through the socket helpers inherited from ``WebUI``.
    """

    def render_and_register_ui(self):
        """Register the agent name(s) received from the backend with the style config."""
        # `agents_name` in the handshake cache may be a single string or a list.
        self.agent_name:list = [self.cache["agents_name"]] if isinstance(self.cache["agents_name"], str) else self.cache['agents_name']
        gc.add_agent(self.agent_name)

    def __init__(
        self,
        client_cmd: list,
        socket_host: str = HOST,
        socket_port: int = PORT,
        bufsize: int = 1024,
        ui_name: str = "CodeUI"
    ):
        """Spawn the backend process and perform the initial handshake.

        Args:
            client_cmd: Command line used to spawn the backend process.
            socket_host: Host of the UI<->backend socket.
            socket_port: Port of the UI<->backend socket.
            bufsize: Socket receive buffer size in bytes.
            ui_name: Identifier for this UI instance.
        """
        super(CodeUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
        self.first_recieve_from_client()
        # Streaming bubbles for the node currently being rendered.
        self.data_history = list()
        # Set to 1 when re-entered via "Next Agent" so data_history is kept.
        self.caller = 0

    def construct_ui(self):
        """Build the Gradio Blocks layout and wire up all event handlers."""
        with gr.Blocks(css=gc.CSS) as demo:
            with gr.Row():
                with gr.Column():
                    self.radio_mode = gr.Radio(
                        [Client.AUTO_MODE, Client.SINGLE_MODE],
                        value=Client.AUTO_MODE,
                        interactive=True,
                        label = Client.MODE_LABEL,
                        info = Client.MODE_INFO
                    )
                    self.chatbot = gr.Chatbot(
                        elem_id="chatbot1"
                    )
                    self.btn_next = gr.Button(
                        value="Next Agent",
                        visible=False, elem_id="btn"
                    )
                    with gr.Row():
                        self.text_requirement = gr.Textbox(
                            value=self.cache['requirement'],
                            placeholder="Please enter your content",
                            scale=9,
                        )
                        self.btn_start = gr.Button(
                            value="Start!",
                            scale=1
                        )
                    self.btn_reset = gr.Button(
                        value="Restart",
                        visible=False
                    )

                with gr.Column():
                    self.file = gr.File(visible=False)
                    self.chat_code_show = gr.Chatbot(
                        elem_id="chatbot1",
                        visible=False
                    )

            self.btn_start.click(
                fn=self.btn_send_when_click,
                inputs=[self.chatbot, self.text_requirement, self.radio_mode],
                outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
            ).then(
                fn=self.btn_send_after_click,
                inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
                outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
            )
            self.text_requirement.submit(
                fn=self.btn_send_when_click,
                # BUG FIX: btn_send_when_click takes (chatbot, text_requirement, mode);
                # the mode radio was missing here, so pressing Enter raised a TypeError.
                inputs=[self.chatbot, self.text_requirement, self.radio_mode],
                outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
            ).then(
                fn=self.btn_send_after_click,
                inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
                outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
            )
            self.btn_reset.click(
                fn=self.btn_reset_when_click,
                inputs=[],
                outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
            ).then(
                fn=self.btn_reset_after_click,
                inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
                outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
            )
            self.file.select(
                fn=self.file_when_select,
                inputs=[self.file],
                outputs=[self.chat_code_show]
            )
            self.btn_next.click(
                fn = self.btn_next_when_click,
                inputs=[],
                outputs=[self.btn_next]
            ).then(
                fn=self.btn_send_after_click,
                inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
                outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
            )

        self.demo = demo


    def handle_message(self, history:list, state, agent_name, token, node_name):
        """Fold one streamed token into the transcript and re-render it.

        The low decimal digit of `state` encodes the streaming event:
        0 -> a new agent starts speaking in the current node,
        1 -> continuation of the current agent's message,
        2 -> a new node starts (fresh chat bubble, per-node history reset).
        """
        if state % 10 == 0:
            # New agent within the current node: open a new bubble.
            self.data_history.append({agent_name: token})
        elif state % 10 == 1:
            # Continuation: append the token to the current agent's bubble.
            self.data_history[-1][agent_name] += token
        elif state % 10 == 2:
            # New node: add a fresh bubble pair and reset the per-node history.
            history.append([None, ""])
            self.data_history.clear()
            self.data_history.append({agent_name: token})
        else:
            assert False, "Invalid state."
        render_data = self.render_bubble(history, self.data_history, node_name, render_node_name=True)
        return render_data

    def btn_send_when_click(self, chatbot, text_requirement, mode):
        """Start a run: echo the user's requirement, lock the inputs, notify backend.

        inputs=[self.chatbot, self.text_requirement, self.radio_mode],
        outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
        """
        chatbot = [[UIHelper.wrap_css(content=text_requirement, name="User"), None]]
        yield chatbot,\
            gr.Button.update(visible=True, interactive=False, value="Running"),\
            gr.Textbox.update(visible=True, interactive=False, value=""),\
            gr.Button.update(visible=False, interactive=False)
        self.send_start_cmd({'requirement': text_requirement, "mode": mode})
        return

    def btn_send_after_click(
        self,
        file,
        history,
        show_code,
        btn_send,
        btn_reset,
        text_requirement
    ):
        """Stream backend messages into the UI until the run finishes or pauses.

        outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
        """
        if self.caller == 0:
            # Fresh run: drop bubbles from any previous run. When re-entered
            # via "Next Agent" (caller == 1) the history is kept.
            self.data_history = list()
        self.caller = 0
        receive_server = self.receive_server
        while True:
            data_list: List = receive_server.send(None)
            for item in data_list:
                # NOTE(review): eval() on socket data — acceptable only because the
                # backend is a locally spawned, trusted process; consider
                # ast.literal_eval for defense in depth.
                data = eval(item)
                assert isinstance(data, list)
                state, agent_name, token, node_name = data
                assert isinstance(state, int)
                assert state in [10, 11, 12, 99, 98]
                if state == 99:
                    # Finished: expose the generated files and unlock the inputs.
                    fs = [self.cache['pwd']+'/output_code/'+_ for _ in os.listdir(self.cache['pwd']+'/output_code')]
                    yield gr.File.update(value=fs, visible=True, interactive=True),\
                        history, \
                        gr.Chatbot.update(visible=True),\
                        gr.Button.update(visible=True, interactive=True, value="Start"),\
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Textbox.update(visible=True, interactive=True, placeholder="Please input your requirement", value=""),\
                        gr.Button.update(visible=False)
                    return
                elif state == 98:
                    # Paused in single-step mode: show the "Next Agent" button.
                    yield gr.File.update(visible=False),\
                        history, \
                        gr.Chatbot.update(visible=False),\
                        gr.Button.update(visible=True, interactive=False),\
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Textbox.update(visible=True, interactive=False),\
                        gr.Button.update(visible=True, value=f"Next Agent: 🤖{agent_name} | Next Node: ⭕{node_name}")
                    return
                # Streaming token (10/11/12): update the transcript and keep going.
                history = self.handle_message(history, state, agent_name, token, node_name)
                yield gr.File.update(visible=False),\
                    history, \
                    gr.Chatbot.update(visible=False),\
                    gr.Button.update(visible=True, interactive=False),\
                    gr.Button.update(visible=False, interactive=False),\
                    gr.Textbox.update(visible=True, interactive=False),\
                    gr.Button.update(visible=False)

    def btn_reset_when_click(self):
        """Disable the controls while the backend restarts.

        inputs = []
        outputs = [self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
        """
        return gr.File.update(visible=False),\
            None, None, gr.Button.update(value="Restarting...", interactive=False),\
            gr.Button.update(value="Restarting...", interactive=False),\
            gr.Textbox.update(value="Restarting", interactive=False),\
            gr.Button.update(visible=False)

    def btn_reset_after_click(
        self,
        file,
        chatbot,
        show_code,
        btn_send,
        btn_reset,
        text_requirement
    ):
        """Restart the backend, redo the handshake, and restore the initial UI state."""
        self.reset()
        self.first_recieve_from_client(reset_mode=True)
        return gr.File.update(value=None, visible=False),\
            gr.Chatbot.update(value=None, visible=True),\
            gr.Chatbot.update(value=None, visible=False),\
            gr.Button.update(value="Start", visible=True, interactive=True),\
            gr.Button.update(value="Restart", interactive=False, visible=False),\
            gr.Textbox.update(value=self.cache['requirement'], interactive=True, visible=True),\
            gr.Button.update(visible=False)

    def file_when_select(self, file):
        """Show the selected output file's contents as a Python code bubble."""
        CODE_PREFIX = "```python\n{}\n```"
        with open(file.name, "r", encoding='utf-8') as f:
            contents = f.readlines()
        codes = "".join(contents)
        return [[CODE_PREFIX.format(codes),None]]

    def btn_next_when_click(self):
        """Resume a paused single-step run; keeps self.data_history intact."""
        self.caller = 1  # preserve self.data_history in btn_send_after_click
        self.send_message("nothing")
        time.sleep(0.5)
        yield gr.Button.update(visible=False)
        return
238
+
239
+
240
if __name__ == '__main__':
    # Launch the UI; the backend process is spawned with the given command line
    # and talks to this front-end over the socket configured in gradio_base.
    ui = CodeUI(client_cmd=["python","gradio_backend.py"])
    ui.construct_ui()
    ui.run()