AgentVerse committed on
Commit
1d266a5
1 Parent(s): 01523b5

first commit

Files changed (1)
app.py +598 -0
app.py ADDED
@@ -0,0 +1,598 @@
import base64
import openai
import itertools
import json
from typing import Dict, List, Tuple

import cv2
import gradio as gr

from agentverse import TaskSolving
from agentverse.simulation import Simulation
from agentverse.message import Message

def cover_img(background, img, place: Tuple[int, int]):
    """
    Overlay the specified image at the specified position on the background image.
    :param background: background image (3-channel BGR)
    :param img: the image to overlay (4-channel BGRA; only non-transparent pixels are copied)
    :param place: the top-left coordinate of the target location
    """
    back_h, back_w, _ = background.shape
    height, width, _ = img.shape
    for i, j in itertools.product(range(height), range(width)):
        if img[i, j, 3]:
            background[place[0] + i, place[1] + j] = img[i, j, :3]

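# A minimal usage sketch for cover_img (not part of the app's control flow): it copies
# every non-transparent pixel of a BGRA sprite onto a BGR background, so the sprite
# must be loaded with cv2.IMREAD_UNCHANGED to keep its alpha channel.
#
#     background = cv2.imread("./imgs/background.png")                   # BGR
#     sprite = cv2.imread("./imgs/speaking.png", cv2.IMREAD_UNCHANGED)   # BGRA
#     cover_img(background, sprite, (370, 1250))  # top-left corner at row 370, col 1250
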
class GUI:
    """
    The frontend UI.
    """

    def __init__(
        self,
        task: str = "simulation/nlp_classroom_9players",
        tasks_dir: str = "agentverse/tasks",
    ):
        """
        Initialize the UI for the given task.
        The number of students is derived from the task's agents.
        """
        self.messages = []
        self.task = task
        self.tasks_dir = tasks_dir
        if task == "pipeline_brainstorming":
            self.backend = TaskSolving.from_task(task, tasks_dir)
        else:
            self.backend = Simulation.from_task(task, tasks_dir)
        self.turns_remain = 0
        self.agent_id = {
            self.backend.agents[idx].name: idx
            for idx in range(len(self.backend.agents))
        }
        self.stu_num = len(self.agent_id) - 1
        self.autoplay = False
        self.image_now = None
        self.text_now = None
        self.tot_solutions = 5
        self.solution_status = [False] * self.tot_solutions

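    # Note on the data model (illustrative only, not an extra code path): agent_id maps
    # each agent's name from the task config to its index, and stu_num treats every
    # agent except the first as a "student" for the classroom layout, e.g. roughly
    #     {"Professor": 0, "Student A": 1, "Student B": 2, ...}
    # where the actual names come from the selected task's config.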
    def get_avatar(self, idx):
        if idx == -1:
            img = cv2.imread("./imgs/db_diag/-1.png")
        elif self.task == "prisoner_dilemma":
            img = cv2.imread(f"./imgs/prison/{idx}.png")
        elif self.task == "db_diag":
            img = cv2.imread(f"./imgs/db_diag/{idx}.png")
        elif "sde" in self.task:
            img = cv2.imread(f"./imgs/sde/{idx}.png")
        else:
            img = cv2.imread(f"./imgs/{idx}.png")
        # tobytes() replaces the deprecated ndarray.tostring() with identical output
        base64_str = cv2.imencode(".png", img)[1].tobytes()
        return "data:image/png;base64," + base64.b64encode(base64_str).decode("utf-8")

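    # get_avatar returns a "data:image/png;base64,..." URI that gen_message drops
    # straight into an <img> tag; a rough standalone sketch of the same idea:
    #
    #     avatar = ui.get_avatar(0)   # assuming ui = GUI(...)
    #     html = f'<img src="{avatar}" style="width: 5%; height: 5%;">'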
    def stop_autoplay(self):
        self.autoplay = False
        return (
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=False),
        )

    def start_autoplay(self):
        self.autoplay = True
        yield (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=True),
            gr.Button.update(interactive=False),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

        while self.autoplay and self.turns_remain > 0:
            outputs = self.gen_output()
            self.image_now, self.text_now = outputs

            yield (
                *outputs,
                gr.Button.update(
                    interactive=not self.autoplay and self.turns_remain > 0
                ),
                gr.Button.update(interactive=self.autoplay and self.turns_remain > 0),
                gr.Button.update(
                    interactive=not self.autoplay and self.turns_remain > 0
                ),
                *[gr.Button.update(visible=statu) for statu in self.solution_status],
                gr.Box.update(visible=any(self.solution_status)),
            )

    def delay_gen_output(
        self,
    ):
        yield (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=False),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

        outputs = self.gen_output()
        self.image_now, self.text_now = outputs

        yield (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=self.turns_remain > 0),
            gr.Button.update(interactive=self.turns_remain > 0),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

    def delay_reset(self, task_dropdown, api_key_text, organization_text):
        self.autoplay = False
        self.image_now, self.text_now = self.reset(
            task_dropdown, api_key_text, organization_text
        )
        return (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=True),
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=True),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

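    # Design note: delay_gen_output and delay_reset yield twice on purpose. The first
    # yield immediately disables the buttons while the backend step runs; the second
    # yield delivers the new frame and HTML and re-enables whatever is still usable.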
    def reset(
        self,
        task_dropdown="simulation/nlp_classroom_9players",
        api_key_text="",
        organization_text="",
    ):
        """
        Rebuild the backend for the selected task and generate a new empty scene.
        :param task_dropdown: the selected task
        :param api_key_text: OpenAI API key
        :param organization_text: OpenAI organization
        :return: [empty image, empty message]
        """
        openai.api_key = api_key_text
        openai.organization = organization_text
        # if not 0 <= stu_num <= 30:
        #     raise gr.Error("the number of students must be between 0 and 30.")

        # [To-Do] Need to add a function to assign agent numbers into the backend.
        # self.backend.reset(stu_num)
        # self.stu_num = stu_num

        # [To-Do] Pass the parameters to reset
        if task_dropdown == "pipeline_brainstorming":
            self.backend = TaskSolving.from_task(task_dropdown, self.tasks_dir)
        else:
            self.backend = Simulation.from_task(task_dropdown, self.tasks_dir)
        self.backend.reset()
        self.turns_remain = self.backend.environment.max_turns

        if task_dropdown == "simulation/prisoner_dilemma":
            background = cv2.imread("./imgs/prison/case_1.png")
        elif task_dropdown == "simulation/db_diag":
            background = cv2.imread("./imgs/db_diag/background.png")
        elif "sde" in task_dropdown:
            background = cv2.imread("./imgs/sde/background.png")
        else:
            # Classroom layout: place student sprites on a grid over the background
            background = cv2.imread("./imgs/background.png")
            back_h, back_w, _ = background.shape
            stu_cnt = 0
            for h_begin, w_begin in itertools.product(
                range(800, back_h, 300), range(135, back_w - 200, 200)
            ):
                stu_cnt += 1
                img = cv2.imread(
                    f"./imgs/{(stu_cnt - 1) % 11 + 1 if stu_cnt <= self.stu_num else 'empty'}.png",
                    cv2.IMREAD_UNCHANGED,
                )
                cover_img(
                    background,
                    img,
                    (h_begin - 30 if img.shape[0] > 190 else h_begin, w_begin),
                )
        self.messages = []
        self.solution_status = [False] * self.tot_solutions
        return [cv2.cvtColor(background, cv2.COLOR_BGR2RGB), ""]

    def gen_img(self, data: List[Dict]):
        """
        Generate a new scene image and mark the agents that are currently speaking.
        :param data: one entry per agent, each a dict with "message" and "sender"
        :return: the new image
        """
        # The following code needs to be more general. This one is too task-specific.
        # if len(data) != self.stu_num:
        if len(data) != self.stu_num + 1:
            raise gr.Error("data length is not equal to the total number of students.")
        if self.task == "prisoner_dilemma":
            img = cv2.imread("./imgs/speaking.png", cv2.IMREAD_UNCHANGED)
            if (
                len(self.messages) < 2
                or self.messages[-1][0] == 1
                or self.messages[-2][0] == 2
            ):
                background = cv2.imread("./imgs/prison/case_1.png")
                if data[0]["message"] != "":
                    cover_img(background, img, (400, 480))
            else:
                background = cv2.imread("./imgs/prison/case_2.png")
                if data[0]["message"] != "":
                    cover_img(background, img, (400, 880))
            if data[1]["message"] != "":
                cover_img(background, img, (550, 480))
            if data[2]["message"] != "":
                cover_img(background, img, (550, 880))
        elif self.task == "db_diag":
            background = cv2.imread("./imgs/db_diag/background.png")
            img = cv2.imread("./imgs/db_diag/speaking.png", cv2.IMREAD_UNCHANGED)
            if data[0]["message"] != "":
                cover_img(background, img, (750, 80))
            if data[1]["message"] != "":
                cover_img(background, img, (310, 220))
            if data[2]["message"] != "":
                cover_img(background, img, (522, 11))
        elif "sde" in self.task:
            background = cv2.imread("./imgs/sde/background.png")
            img = cv2.imread("./imgs/sde/speaking.png", cv2.IMREAD_UNCHANGED)
            if data[0]["message"] != "":
                cover_img(background, img, (692, 330))
            if data[1]["message"] != "":
                cover_img(background, img, (692, 660))
            if data[2]["message"] != "":
                cover_img(background, img, (692, 990))
        else:
            background = cv2.imread("./imgs/background.png")
            back_h, back_w, _ = background.shape
            stu_cnt = 0
            if data[stu_cnt]["message"] not in ["", "[RaiseHand]"]:
                img = cv2.imread("./imgs/speaking.png", cv2.IMREAD_UNCHANGED)
                cover_img(background, img, (370, 1250))
            for h_begin, w_begin in itertools.product(
                range(800, back_h, 300), range(135, back_w - 200, 200)
            ):
                stu_cnt += 1
                if stu_cnt <= self.stu_num:
                    img = cv2.imread(
                        f"./imgs/{(stu_cnt - 1) % 11 + 1}.png", cv2.IMREAD_UNCHANGED
                    )
                    cover_img(
                        background,
                        img,
                        (h_begin - 30 if img.shape[0] > 190 else h_begin, w_begin),
                    )
                    if "[RaiseHand]" in data[stu_cnt]["message"]:
                        # elif data[stu_cnt]["message"] == "[RaiseHand]":
                        img = cv2.imread("./imgs/hand.png", cv2.IMREAD_UNCHANGED)
                        cover_img(background, img, (h_begin - 90, w_begin + 10))
                    elif data[stu_cnt]["message"] not in ["", "[RaiseHand]"]:
                        img = cv2.imread("./imgs/speaking.png", cv2.IMREAD_UNCHANGED)
                        cover_img(background, img, (h_begin - 90, w_begin + 10))

                else:
                    img = cv2.imread("./imgs/empty.png", cv2.IMREAD_UNCHANGED)
                    cover_img(background, img, (h_begin, w_begin))
        return cv2.cvtColor(background, cv2.COLOR_BGR2RGB)

    def return_format(self, messages: List[Message]):
        _format = [{"message": "", "sender": idx} for idx in range(len(self.agent_id))]

        for message in messages:
            if self.task == "db_diag":
                content_json: dict = message.content
                content_json[
                    "diagnose"
                ] = f"[{message.sender}]: {content_json['diagnose']}"
                _format[self.agent_id[message.sender]]["message"] = json.dumps(
                    content_json
                )
            elif "sde" in self.task:
                if message.sender == "code_tester":
                    pre_message, message_ = message.content.split("\n")
                    message_ = "{}\n{}".format(
                        pre_message, json.loads(message_)["feedback"]
                    )
                    _format[self.agent_id[message.sender]][
                        "message"
                    ] = "[{}]: {}".format(message.sender, message_)
                else:
                    _format[self.agent_id[message.sender]][
                        "message"
                    ] = "[{}]: {}".format(message.sender, message.content)

            else:
                _format[self.agent_id[message.sender]]["message"] = "[{}]: {}".format(
                    message.sender, message.content
                )

        return _format

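    # return_format normalizes the backend's messages into one entry per agent, which
    # is the shape gen_img and gen_output expect; roughly (illustrative values, the
    # real names come from the task config):
    #
    #     [
    #         {"message": "[Professor]: Let's start today's lecture.", "sender": 0},
    #         {"message": "", "sender": 1},
    #         {"message": "", "sender": 2},
    #     ]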
    def gen_output(self):
        """
        Generate the new image and message for the next step.
        :return: [new image, new message]
        """

        # data = self.backend.next_data()
        return_message = self.backend.next()
        data = self.return_format(return_message)

        # data.sort(key=lambda item: item["sender"])
        # [To-Do] Check the message from the backend: only 1 person can speak

        for item in data:
            if item["message"] not in ["", "[RaiseHand]"]:
                self.messages.append((item["sender"], item["message"]))

        message = self.gen_message()
        self.turns_remain -= 1
        return [self.gen_img(data), message]

    def gen_message(self):
        # If the backend cannot handle this error, use the following code.
        message = ""
        """
        for item in data:
            if item["message"] not in ["", "[RaiseHand]"]:
                message = item["message"]
                break
        """
        for sender, msg in self.messages:
            if sender == 0:
                avatar = self.get_avatar(0)
            elif sender == -1:
                avatar = self.get_avatar(-1)
            else:
                avatar = self.get_avatar((sender - 1) % 11 + 1)
            if self.task == "db_diag":
                msg_json = json.loads(msg)
                self.solution_status = [False] * self.tot_solutions
                msg = msg_json["diagnose"]
                if msg_json["solution"] != "":
                    solution: List[str] = msg_json["solution"]
                    for solu in solution:
                        if "query" in solu or "queries" in solu:
                            self.solution_status[0] = True
                            solu = solu.replace(
                                "query", '<span style="color:yellow;">query</span>'
                            )
                            solu = solu.replace(
                                "queries", '<span style="color:yellow;">queries</span>'
                            )
                        if "join" in solu:
                            self.solution_status[1] = True
                            solu = solu.replace(
                                "join", '<span style="color:yellow;">join</span>'
                            )
                        if "index" in solu:
                            self.solution_status[2] = True
                            solu = solu.replace(
                                "index", '<span style="color:yellow;">index</span>'
                            )
                        if "system configuration" in solu:
                            self.solution_status[3] = True
                            solu = solu.replace(
                                "system configuration",
                                '<span style="color:yellow;">system configuration</span>',
                            )
                        if (
                            "monitor" in solu
                            or "Monitor" in solu
                            or "Investigate" in solu
                        ):
                            self.solution_status[4] = True
                            solu = solu.replace(
                                "monitor", '<span style="color:yellow;">monitor</span>'
                            )
                            solu = solu.replace(
                                "Monitor", '<span style="color:yellow;">Monitor</span>'
                            )
                            solu = solu.replace(
                                "Investigate",
                                '<span style="color:yellow;">Investigate</span>',
                            )
                        msg = f"{msg}<br>{solu}"
                if msg_json["knowledge"] != "":
                    msg = f'{msg}<hr style="margin: 5px 0"><span style="font-style: italic">{msg_json["knowledge"]}</span>'
            else:
                msg = msg.replace("<", "&lt;")
                msg = msg.replace(">", "&gt;")
            message = (
                f'<div style="display: flex; align-items: center; margin-bottom: 10px;overflow:auto;">'
                f'<img src="{avatar}" style="width: 5%; height: 5%; border-radius: 25px; margin-right: 10px;">'
                f'<div style="background-color: gray; color: white; padding: 10px; border-radius: 10px;'
                f'max-width: 70%; white-space: pre-wrap">'
                f"{msg}"
                f"</div></div>" + message
            )
        message = (
            '<div id="divDetail" style="height:600px;overflow:auto;">'
            + message
            + "</div>"
        )
        return message

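    # For the db_diag task, gen_message also flips solution_status flags; each flag
    # toggles one of the "Optimization Solutions" buttons wired up in launch() below,
    # in this order (derived from the outputs lists there):
    #     [0] "query"/"queries"        -> Rewrite Slow Query
    #     [1] "join"                   -> Add Query Hints
    #     [2] "index"                  -> Update Indexes
    #     [3] "system configuration"   -> Tune Parameters
    #     [4] "monitor"/"Investigate"  -> Gather More Info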
    def submit(self, message: str):
        """
        Submit a user message to the backend.
        :param message: the user's message
        :return: [new image, new message]
        """
        self.backend.submit(message)
        self.messages.append((-1, f"[User]: {message}"))
        return self.gen_img([{"message": ""}] * len(self.agent_id)), self.gen_message()

    def launch(self, single_agent=False, discussion_mode=False):
        if self.task == "pipeline_brainstorming":
            with gr.Blocks() as demo:
                chatbot = gr.Chatbot(height=800, show_label=False)
                msg = gr.Textbox(label="Input")

                def respond(message, chat_history):
                    chat_history.append((message, None))
                    yield "", chat_history
                    for response in self.backend.iter_run(
                        single_agent=single_agent, discussion_mode=discussion_mode
                    ):
                        print(response)
                        chat_history.append((None, response))
                        yield "", chat_history

                msg.submit(respond, [msg, chatbot], [msg, chatbot])
        else:
            with gr.Blocks() as demo:
                with gr.Row():
                    task_dropdown = gr.Dropdown(
                        choices=[
                            "simulation/nlp_classroom_9players",
                            "simulation/prisoner_dilemma",
                            "simulation/sde_team/sde_team_2players",
                            "simulation/db_diag",
                        ],
                        value="simulation/nlp_classroom_9players",
                        label="Task",
                    )
                    api_key_text = gr.Textbox(label="OPENAI API KEY")
                    organization_text = gr.Textbox(label="Organization")
                with gr.Row():
                    with gr.Column():
                        image_output = gr.Image()
                        with gr.Row():
                            reset_btn = gr.Button("Build/Reset")
                            # next_btn = gr.Button("Next", variant="primary")
                            next_btn = gr.Button("Next", interactive=False)
                            stop_autoplay_btn = gr.Button(
                                "Stop Autoplay", interactive=False
                            )
                            start_autoplay_btn = gr.Button(
                                "Start Autoplay", interactive=False
                            )
                        with gr.Box(visible=False) as solutions:
                            with gr.Column():
                                gr.HTML("Optimization Solutions:")
                                with gr.Row():
                                    rewrite_slow_query_btn = gr.Button(
                                        "Rewrite Slow Query", visible=False
                                    )
                                    add_query_hints_btn = gr.Button(
                                        "Add Query Hints", visible=False
                                    )
                                    update_indexes_btn = gr.Button(
                                        "Update Indexes", visible=False
                                    )
                                    tune_parameters_btn = gr.Button(
                                        "Tune Parameters", visible=False
                                    )
                                    gather_more_info_btn = gr.Button(
                                        "Gather More Info", visible=False
                                    )
                    # text_output = gr.Textbox()
                    text_output = gr.HTML(self.reset()[1])

                # Give a button to provide student numbers and their info.
                # stu_num = gr.Number(label="Student Number", precision=0)
                # stu_num = self.stu_num

                if self.task == "db_diag":
                    user_msg = gr.Textbox()
                    submit_btn = gr.Button("Submit", variant="primary")

                    submit_btn.click(
                        fn=self.submit,
                        inputs=user_msg,
                        outputs=[image_output, text_output],
                        show_progress=False,
                    )
                else:
                    pass

                # next_btn.click(fn=self.gen_output, inputs=None,
                #                outputs=[image_output, text_output], show_progress=False)
                next_btn.click(
                    fn=self.delay_gen_output,
                    inputs=None,
                    outputs=[
                        image_output,
                        text_output,
                        next_btn,
                        start_autoplay_btn,
                        rewrite_slow_query_btn,
                        add_query_hints_btn,
                        update_indexes_btn,
                        tune_parameters_btn,
                        gather_more_info_btn,
                        solutions,
                    ],
                    show_progress=False,
                )

                # [To-Do] Add button: re-start (load different people and env)
                # reset_btn.click(fn=self.reset, inputs=stu_num,
                #                 outputs=[image_output, text_output], show_progress=False)
                # reset_btn.click(fn=self.reset, inputs=None,
                #                 outputs=[image_output, text_output], show_progress=False)
                reset_btn.click(
                    fn=self.delay_reset,
                    inputs=[task_dropdown, api_key_text, organization_text],
                    outputs=[
                        image_output,
                        text_output,
                        next_btn,
                        stop_autoplay_btn,
                        start_autoplay_btn,
                        rewrite_slow_query_btn,
                        add_query_hints_btn,
                        update_indexes_btn,
                        tune_parameters_btn,
                        gather_more_info_btn,
                        solutions,
                    ],
                    show_progress=False,
                )

                stop_autoplay_btn.click(
                    fn=self.stop_autoplay,
                    inputs=None,
                    outputs=[next_btn, stop_autoplay_btn, start_autoplay_btn],
                    show_progress=False,
                )
                start_autoplay_btn.click(
                    fn=self.start_autoplay,
                    inputs=None,
                    outputs=[
                        image_output,
                        text_output,
                        next_btn,
                        stop_autoplay_btn,
                        start_autoplay_btn,
                        rewrite_slow_query_btn,
                        add_query_hints_btn,
                        update_indexes_btn,
                        tune_parameters_btn,
                        gather_more_info_btn,
                        solutions,
                    ],
                    show_progress=False,
                )

        demo.queue(concurrency_count=5, max_size=20).launch()
        # demo.launch()


ui = GUI("simulation/nlp_classroom_9players", "agentverse/tasks")
ui.launch()
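
# A minimal sketch for trying one of the other bundled tasks instead of the classroom
# demo (assuming its config exists under agentverse/tasks and an API key is supplied
# through the UI):
#
#     ui = GUI("simulation/prisoner_dilemma", "agentverse/tasks")
#     ui.launch()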