xxx1 committed
Commit 3cc0902 · 1 Parent(s): 6b85915

Create app.py

Files changed (1):
  app.py  +77 -0
app.py ADDED
@@ -0,0 +1,77 @@
+ import io
+ 
+ import gradio as gr
+ import requests
+ 
+ # Remote VQA (visual question answering) inference endpoint
+ url = "http://202.85.216.23:5000/vqa_img"
+ 
+ 
+ def inference_chat(input_image, input_text):
+     """Send the uploaded image and the question to the VQA endpoint and return its answer."""
+     # Serialize the PIL image to JPEG bytes in memory
+     with io.BytesIO() as buf:
+         input_image.save(buf, "jpeg")
+         image_bytes = buf.getvalue()
+     files = {"img": ("input_image.jpg", image_bytes, "image/jpeg")}
+     res = requests.post(url, data={"input_text": input_text}, files=files)
+     return res.json()["answer"]
+ 
+ 
+ with gr.Blocks(
+     css="""
+     .message.svelte-w6rprc.svelte-w6rprc.svelte-w6rprc {font-size: 20px; margin-top: 20px}
+     #component-21 > div.wrap.svelte-w6rprc {height: 600px;}
+     """
+ ) as iface:
+     state = gr.State([])
+     # gr.Markdown(title)
+     # gr.Markdown(description)
+     # gr.Markdown(article)
+ 
+     with gr.Row():
+         with gr.Column(scale=1):
+             image_input = gr.Image(type="pil")
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     chat_input = gr.Textbox(lines=1, label="VQA Input (question)")
+                     with gr.Row():
+                         clear_button = gr.Button(value="Clear", interactive=True)
+                         submit_button = gr.Button(
+                             value="Submit", interactive=True, variant="primary"
+                         )
+         with gr.Column():
+             caption_output = gr.Textbox(lines=1, label="VQA Output (model answer)")
+ 
+     # Uploading a new image clears the previous answer and the session state
+     image_input.change(
+         lambda: ("", []),
+         [],
+         [caption_output, state],
+         queue=False,
+     )
+     # Pressing Enter in the question box runs inference ...
+     chat_input.submit(
+         inference_chat,
+         [image_input, chat_input],
+         [caption_output],
+     )
+     # Clear resets the question box and the session state
+     clear_button.click(
+         lambda: ("", []),
+         [],
+         [chat_input, state],
+         queue=False,
+     )
+     # ... and so does the Submit button
+     submit_button.click(
+         inference_chat,
+         [image_input, chat_input],
+         [caption_output],
+     )
+ 
+     # examples = gr.Examples(
+     #     examples=examples,
+     #     inputs=[image_input, chat_input],
+     # )
+ 
+ iface.queue(concurrency_count=1, api_open=False, max_size=10)
+ iface.launch(enable_queue=True)
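
Since inference_chat is only a thin wrapper around an HTTP call, the backend can be smoke-tested without launching the Gradio UI. Below is a minimal sketch, assuming the endpoint at http://202.85.216.23:5000/vqa_img is reachable and accepts the same multipart form as app.py; the image path example.jpg and the question text are placeholders.

    import requests

    url = "http://202.85.216.23:5000/vqa_img"

    # Placeholder image path; any local JPEG will do
    with open("example.jpg", "rb") as f:
        files = {"img": ("example.jpg", f.read(), "image/jpeg")}

    res = requests.post(url, data={"input_text": "What is in the picture?"}, files=files)
    res.raise_for_status()
    # app.py expects the response body to contain an "answer" field
    print(res.json()["answer"])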