xxx1 committed
Commit 1d42b83
Parent: dc9a7dd

Create app.py

Files changed (1):
  app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
import gradio as gr
import torch

from transformers import BlipForQuestionAnswering, BlipProcessor

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# BLIP VQA checkpoint: the processor prepares image/question pairs,
# the model generates free-form answers.
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-capfilt-large")
model_vqa = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-capfilt-large").to(device)


def inference_chat(input_image, input_text):
    # Preprocess the image/question pair and move the tensors to the
    # model's device so generation also works when CUDA is available.
    inputs = processor(images=input_image, text=input_text, return_tensors="pt").to(device)
    # Beam search returning four sequences yields several candidate answers.
    out = model_vqa.generate(**inputs, max_length=20, num_beams=5, num_return_sequences=4)
    return "\n".join(processor.batch_decode(out, skip_special_tokens=True))


with gr.Blocks(
    css="""
    .message.svelte-w6rprc.svelte-w6rprc.svelte-w6rprc {font-size: 20px; margin-top: 20px}
    #component-21 > div.wrap.svelte-w6rprc {height: 600px;}
    """
) as iface:
    state = gr.State([])
    # gr.Markdown(title)
    # gr.Markdown(description)
    # gr.Markdown(article)

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil")
            with gr.Row():
                with gr.Column(scale=1):
                    chat_input = gr.Textbox(lines=1, label="VQA Input (question input)")
                    with gr.Row():
                        clear_button = gr.Button(value="Clear", interactive=True)
                        submit_button = gr.Button(
                            value="Submit", interactive=True, variant="primary"
                        )
        with gr.Column():
            # Four rows so all four candidate answers are visible.
            caption_output = gr.Textbox(lines=4, label="VQA Output (model answer output)")

    # Uploading a new image clears the previous answer and the session state.
    image_input.change(
        lambda: ("", []),
        [],
        [caption_output, state],
        queue=False,
    )
    # Pressing Enter in the question box runs the model.
    chat_input.submit(
        inference_chat,
        [image_input, chat_input],
        [caption_output],
    )
    # "Clear" empties the question box, the answer box, and the session state.
    clear_button.click(
        lambda: ("", "", []),
        [],
        [chat_input, caption_output, state],
        queue=False,
    )
    submit_button.click(
        inference_chat,
        [image_input, chat_input],
        [caption_output],
    )

    # examples = gr.Examples(
    #     examples=examples,
    #     inputs=[image_input, chat_input],
    # )

iface.queue(concurrency_count=1, api_open=False, max_size=10)
iface.launch(enable_queue=True)
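
Note: queue(concurrency_count=...) and launch(enable_queue=True) follow the Gradio 3.x API this commit targets. On Gradio 4.x those arguments were removed or renamed; a minimal sketch of an equivalent setup, assuming a 4.x install (not part of the original commit):

# Gradio 4.x replaces concurrency_count with default_concurrency_limit
# and enables the queue via queue() alone.
iface.queue(max_size=10, default_concurrency_limit=1, api_open=False)
iface.launch()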
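
For a quick check without the UI, inference_chat can also be called directly once the processor and model above are loaded. A minimal sketch; the image URL is a hypothetical placeholder, and PIL and requests are assumed to be installed:

from PIL import Image
import requests

url = "https://example.com/dog.jpg"  # hypothetical example image
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
print(inference_chat(image, "How many dogs are in the picture?"))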