xxx1 committed
Commit f97f3fb
1 Parent(s): e379c02

Update app.py

Files changed (1):
    app.py  +31 -35
app.py CHANGED
@@ -32,50 +32,46 @@ with gr.Blocks(
     with gr.Row():
         with gr.Column(scale=1):
             image_input = gr.Image(type="pil")
-
             with gr.Row():
-
                 with gr.Column(scale=1):
-                    caption_output = gr.Textbox(lines=0, label="VQA Output(模型答案输出)")
                     chat_input = gr.Textbox(lines=1, label="VQA Input(问题输入)")
-                    chat_input.submit(
-                        inference_chat,
-                        [
-                            image_input,
-                            chat_input,
-                        ],
-                        [caption_output],
-                    )
-
                     with gr.Row():
                         clear_button = gr.Button(value="Clear", interactive=True)
-                        clear_button.click(
-                            lambda: ("", [], []),
-                            [],
-                            [chat_input, state],
-                            queue=False,
-                        )
-
                         submit_button = gr.Button(
                             value="Submit", interactive=True, variant="primary"
                         )
-                        submit_button.click(
-                            inference_chat,
-                            [
-                                image_input,
-                                chat_input,
-                            ],
-                            [caption_output],
-                        )
+                with gr.Column(scale=1):
+                    caption_output = gr.Textbox(lines=0, label="VQA Output(模型答案输出)")

-
-    image_input.change(
-        lambda: ("", "", []),
-        [],
-        [caption_output, state],
-        queue=False,
-    )
-
+
+    image_input.change(
+        lambda: ("", "", []),
+        [],
+        [caption_output, state],
+        queue=False,
+    )
+    chat_input.submit(
+        inference_chat,
+        [
+            image_input,
+            chat_input,
+        ],
+        [caption_output],
+    )
+    clear_button.click(
+        lambda: ("", [], []),
+        [],
+        [chat_input, state],
+        queue=False,
+    )
+    submit_button.click(
+        inference_chat,
+        [
+            image_input,
+            chat_input,
+        ],
+        [caption_output],
+    )

     # examples = gr.Examples(
     #     examples=examples,
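For context, a minimal runnable sketch of the structure app.py ends up with after this commit: the VQA output textbox sits in its own column beside the input column, and all event handlers are registered after the layout instead of being scattered inside it. The inference_chat function and the state component are defined elsewhere in app.py, so the stub below is an assumption; the lambda return values are matched to the number of outputs, and lines=1 is used where the commit itself passes lines=0.

# Sketch only: inference_chat is stubbed; the real model call lives in app.py.
import gradio as gr

def inference_chat(image, question):
    # Placeholder answer; the actual app runs a VQA model here.
    return f"(answer for: {question})"

with gr.Blocks() as demo:
    state = gr.State([])  # assumed to be defined above the diffed hunk

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil")
            with gr.Row():
                with gr.Column(scale=1):
                    chat_input = gr.Textbox(lines=1, label="VQA Input(问题输入)")
                    with gr.Row():
                        clear_button = gr.Button(value="Clear", interactive=True)
                        submit_button = gr.Button(
                            value="Submit", interactive=True, variant="primary"
                        )
                with gr.Column(scale=1):
                    caption_output = gr.Textbox(lines=1, label="VQA Output(模型答案输出)")

    # Handlers registered after the layout, as in the new version of app.py.
    image_input.change(lambda: ("", []), [], [caption_output, state], queue=False)
    chat_input.submit(inference_chat, [image_input, chat_input], [caption_output])
    clear_button.click(lambda: ("", []), [], [chat_input, state], queue=False)
    submit_button.click(inference_chat, [image_input, chat_input], [caption_output])

demo.launch()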