suhyun.kang committed on
Commit
6b89337
1 Parent(s): 67812d2

update (https://screen.yanolja.in/eKUGn6i6wCYX2HBS.png)

Browse files
Files changed (1) hide show
  1. app.py +31 -39
app.py CHANGED
@@ -8,24 +8,18 @@ import gradio as gr
8
  SUPPORTED_MODELS = ["gpt-4", "gpt-4-turbo", "gpt-3.5-turbo"]
9
 
10
 
11
- def user(state_a, state_b, user_message):
12
- if state_a is None and state_b is None:
13
- model_pair = sample(SUPPORTED_MODELS, 2)
14
- state_a = gradio_web_server.State(model_pair[0])
15
- state_b = gradio_web_server.State(model_pair[1])
16
 
17
- for state in [state_a, state_b]:
18
  state.conv.append_message(state.conv.roles[0], user_message)
19
  state.conv.append_message(state.conv.roles[1], None)
20
  state.skip_next = False
21
 
22
- empty_prompt = ""
23
-
24
  return [
25
- state_a, state_b,
26
- state_a.to_gradio_chatbot(),
27
- state_b.to_gradio_chatbot(), state_a.model_name, state_b.model_name,
28
- empty_prompt
29
  ]
30
 
31
 
@@ -33,8 +27,10 @@ def bot(state_a, state_b, request: gr.Request):
33
  if state_a is None or state_b is None:
34
  raise RuntimeError(f"states cannot be None, got [{state_a}, {state_b}]")
35
 
 
 
36
  generators = []
37
- for state in [state_a, state_b]:
38
  try:
39
  # TODO(#1): Allow user to set configuration.
40
  # bot_response returns a generator yielding states and chatbots.
@@ -50,56 +46,52 @@ def bot(state_a, state_b, request: gr.Request):
50
  print(f"Error in bot_response: {e}")
51
  raise e
52
 
53
- new_chatbots = [None, None]
 
 
54
  while True:
55
  stop = True
56
 
57
  for i in range(2):
58
  try:
59
  generator = next(generators[i])
60
- states[i], new_chatbots[i] = generator[0], generator[1]
 
 
 
61
  stop = False
62
  except StopIteration:
63
  pass
64
 
65
- yield [state_a, state_b] + new_chatbots
66
 
67
  if stop:
68
  break
69
 
70
 
71
  with gr.Blocks() as app:
 
 
 
 
 
 
 
 
 
72
  with gr.Row():
73
- response_type = gr.Radio(
74
- ["Summarization", "Translation"],
75
- value="Summarization",
76
- label="Response type",
77
- info="Choose the type of response you want from the model.")
78
- language = gr.Dropdown(["Korean", "English"],
79
- value="Korean",
80
- label="Language",
81
- info="Choose the target language.")
82
-
83
- chatbots = [None, None]
84
- with gr.Row():
85
- chatbots[0] = gr.Chatbot(label="Model A")
86
- chatbots[1] = gr.Chatbot(label="Model B")
87
 
88
- model_names = [None, None]
89
  with gr.Accordion("Show models", open=False):
90
  with gr.Row():
91
  model_names[0] = gr.Textbox(label="Model A", interactive=False)
92
  model_names[1] = gr.Textbox(label="Model B", interactive=False)
93
 
94
- prompt = gr.Textbox(label="Prompt")
95
-
96
- states = [gr.State(None), gr.State(None)]
97
- prompt.submit(user,
98
- states + [prompt],
99
- states + chatbots + model_names + [prompt],
100
- queue=False).then(bot, states, states + chatbots)
101
 
102
  if __name__ == "__main__":
103
  # We need to enable queue to use generators.
104
  app.queue()
105
- app.launch()
 
8
  SUPPORTED_MODELS = ["gpt-4", "gpt-4-turbo", "gpt-3.5-turbo"]
9
 
10
 
11
def user(user_message):
    """Start a fresh arena round for the submitted prompt.

    Draws two distinct models at random from SUPPORTED_MODELS, creates a new
    FastChat conversation state for each, and seeds both conversations with
    the user's message (the assistant slot is left as None so the bot stage
    can stream into it).

    Returns:
        [state_a, state_b, model_name_a, model_name_b] — matching the Gradio
        output components wired to this handler.
    """
    chosen = sample(SUPPORTED_MODELS, 2)
    fresh_states = [gradio_web_server.State(name) for name in chosen]

    for st in fresh_states:
        st.conv.append_message(st.conv.roles[0], user_message)
        st.conv.append_message(st.conv.roles[1], None)
        st.skip_next = False

    return [
        fresh_states[0], fresh_states[1],
        fresh_states[0].model_name, fresh_states[1].model_name
    ]
24
 
25
 
 
27
  if state_a is None or state_b is None:
28
  raise RuntimeError(f"states cannot be None, got [{state_a}, {state_b}]")
29
 
30
+ new_states = [state_a, state_b]
31
+
32
  generators = []
33
+ for state in new_states:
34
  try:
35
  # TODO(#1): Allow user to set configuration.
36
  # bot_response returns a generator yielding states and chatbots.
 
46
  print(f"Error in bot_response: {e}")
47
  raise e
48
 
49
+ new_responses = [None, None]
50
+
51
+ # It simulates concurrent response generation from two models.
52
  while True:
53
  stop = True
54
 
55
  for i in range(2):
56
  try:
57
  generator = next(generators[i])
58
+ new_state = generator[0]
59
+ new_states[i] = new_state
60
+ # conv.messages is a list of [role, message].
61
+ new_responses[i] = new_state.conv.messages[-1][-1]
62
  stop = False
63
  except StopIteration:
64
  pass
65
 
66
+ yield new_states + new_responses
67
 
68
  if stop:
69
  break
70
 
71
 
72
with gr.Blocks() as app:
    # Plain placeholders: the real Textbox components are assigned below.
    # (Previously these were gr.State(None) objects that were immediately
    # overwritten, needlessly registering dead State components in the
    # Blocks graph.)
    model_names = [None, None]
    responses = [None, None]

    # states stores FastChat-specific conversation states; these ARE real
    # gr.State components because they are wired into the event chain below.
    states = [gr.State(None), gr.State(None)]

    prompt = gr.TextArea(label="Prompt", lines=4)
    submit = gr.Button()

    with gr.Row():
        responses[0] = gr.Textbox(label="Model A", interactive=False)
        responses[1] = gr.Textbox(label="Model B", interactive=False)

    with gr.Accordion("Show models", open=False):
        with gr.Row():
            model_names[0] = gr.Textbox(label="Model A", interactive=False)
            model_names[1] = gr.Textbox(label="Model B", interactive=False)

    # Stage 1 (user): sample a model pair and seed the conversation states.
    # Stage 2 (bot): stream both models' responses into the two textboxes.
    submit.click(user, prompt, states + model_names,
                 queue=False).then(bot, states, states + responses)
 
 
 
 
 
93
 
94
if __name__ == "__main__":
    # Generator-based event handlers only stream when the queue is enabled,
    # so enable it before launching. Blocks.queue() returns the app, allowing
    # the chained launch call.
    app.queue().launch(debug=True)