Ma Jinyu committed 3504b1c (1 parent: 28ec2a0): Re-arrange GUI

app.py CHANGED
@@ -65,31 +65,143 @@ def inference(raw_image, model_n , input_tag):
     return tag_1[0],'none',caption[0]


-    [old inline GUI code (previous lines 68-95) deleted; its content is not rendered in this view]
+def build_gui():
+
+    description = """
+    <center><strong><font size='10'>Recognize Anything Model</font></strong></center>
+    <br>
+    Welcome to the Recognize Anything Model (RAM) and Tag2Text Model demo! <br><br>
+    <li>
+    <b>Recognize Anything Model:</b> Upload your image to get the <b>English and Chinese outputs of the image tags</b>!
+    </li>
+    <li>
+    <b>Tag2Text Model:</b> Upload your image to get the <b>tags</b> and <b>caption</b> of the image.
+    Optional: You can also input specified tags to get the corresponding caption.
+    </li>
+    """  # noqa
+
+    article = """
+    <p style='text-align: center'>
+    RAM and Tag2Text are trained on open-source datasets, and we are continuing to refine and iterate on them.<br/>
+    <a href='https://recognize-anything.github.io/' target='_blank'>Recognize Anything: A Strong Image Tagging Model</a>
+    |
+    <a href='https://tag2text.github.io/' target='_blank'>Tag2Text: Guiding Language-Image Model via Image Tagging</a>
+    |
+    <a href='https://github.com/xinyu1205/Tag2Text' target='_blank'>Github Repo</a>
+    </p>
+    """  # noqa
+
+    def inference_with_ram(img):
+        res = inference(img, "Recognize Anything Model", None)
+        return res[0], res[1]
+
+    def inference_with_t2t(img, input_tags):
+        res = inference(img, "Tag2Text Model", input_tags)
+        return res[0], res[2]
+
+    with gr.Blocks(title="Recognize Anything Model") as demo:
+        ###############
+        # components
+        ###############
+        gr.HTML(description)
+
+        with gr.Tab(label="Recognize Anything Model"):
+            with gr.Row():
+                with gr.Column():
+                    ram_in_img = gr.Image(type="pil")
+                    with gr.Row():
+                        ram_btn_run = gr.Button(value="Run")
+                        ram_btn_clear = gr.Button(value="Clear")
+                with gr.Column():
+                    ram_out_tag = gr.Textbox(label="Tags")
+                    ram_out_biaoqian = gr.Textbox(label="标签")
+                    gr.Examples(
+                        examples=[
+                            ["images/demo1.jpg"],
+                            ["images/demo2.jpg"],
+                            ["images/demo4.jpg"],
+                        ],
+                        fn=inference_with_ram,
+                        inputs=[ram_in_img],
+                        outputs=[ram_out_tag, ram_out_biaoqian],
+                        cache_examples=True
+                    )
+
+        with gr.Tab(label="Tag2Text Model"):
+            with gr.Row():
+                with gr.Column():
+                    t2t_in_img = gr.Image(type="pil")
+                    t2t_in_tag = gr.Textbox(label="User Specified Tags (Optional, separated by comma)")
+                    with gr.Row():
+                        t2t_btn_run = gr.Button(value="Run")
+                        t2t_btn_clear = gr.Button(value="Clear")
+                with gr.Column():
+                    t2t_out_tag = gr.Textbox(label="Tags")
+                    t2t_out_cap = gr.Textbox(label="Caption")
+                    gr.Examples(
+                        examples=[
+                            ["images/demo4.jpg", ""],
+                            ["images/demo4.jpg", "power line"],
+                            ["images/demo4.jpg", "track, train"],
+                        ],
+                        fn=inference_with_t2t,
+                        inputs=[t2t_in_img, t2t_in_tag],
+                        outputs=[t2t_out_tag, t2t_out_cap],
+                        cache_examples=True
+                    )
+
+        gr.HTML(article)
+
+        ###############
+        # events
+        ###############
+        # run inference
+        ram_btn_run.click(
+            fn=inference_with_ram,
+            inputs=[ram_in_img],
+            outputs=[ram_out_tag, ram_out_biaoqian]
+        )
+        t2t_btn_run.click(
+            fn=inference_with_t2t,
+            inputs=[t2t_in_img, t2t_in_tag],
+            outputs=[t2t_out_tag, t2t_out_cap]
+        )
+
+        # # the images of the two image panels should stay in sync,
+        # # and old outputs should be cleared when the image changes;
+        # # slow due to network latency when deployed on Hugging Face, so commented out
+        # def sync_img(v):
+        #     return [gr.update(value=v)] + [gr.update(value="")] * 4
+
+        # ram_in_img.upload(fn=sync_img, inputs=[ram_in_img], outputs=[
+        #     t2t_in_img, ram_out_tag, ram_out_biaoqian, t2t_out_tag, t2t_out_cap
+        # ])
+        # ram_in_img.clear(fn=sync_img, inputs=[ram_in_img], outputs=[
+        #     t2t_in_img, ram_out_tag, ram_out_biaoqian, t2t_out_tag, t2t_out_cap
+        # ])
+        # t2t_in_img.clear(fn=sync_img, inputs=[t2t_in_img], outputs=[
+        #     ram_in_img, ram_out_tag, ram_out_biaoqian, t2t_out_tag, t2t_out_cap
+        # ])
+        # t2t_in_img.upload(fn=sync_img, inputs=[t2t_in_img], outputs=[
+        #     ram_in_img, ram_out_tag, ram_out_biaoqian, t2t_out_tag, t2t_out_cap
+        # ])
+
+        # clear all
+        def clear_all():
+            return [gr.update(value=None)] * 2 + [gr.update(value="")] * 5
+
+        ram_btn_clear.click(fn=clear_all, inputs=[], outputs=[
+            ram_in_img, t2t_in_img,
+            ram_out_tag, ram_out_biaoqian, t2t_in_tag, t2t_out_tag, t2t_out_cap
+        ])
+        t2t_btn_clear.click(fn=clear_all, inputs=[], outputs=[
+            ram_in_img, t2t_in_img,
+            ram_out_tag, ram_out_biaoqian, t2t_in_tag, t2t_out_tag, t2t_out_cap
+        ])
+
+    return demo
+
+
+if __name__ == "__main__":
+    demo = build_gui()
+    demo.launch(enable_queue=True)
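
Note: the two wrapper functions added in this commit rely on the tuple layout returned by the existing inference() (its body is not part of this diff). A minimal, hypothetical stub for illustration only, assuming that layout is (English tags, Chinese tags, caption):

# Hypothetical stand-in for the existing inference() used above (illustration only;
# the real function runs the RAM / Tag2Text models).
# Assumed contract: res[0] -> English tags, res[1] -> Chinese tags, res[2] -> caption.
def inference(raw_image, model_n, input_tag):
    if model_n == "Recognize Anything Model":
        # RAM path: tags in English and Chinese, no caption
        return "dog | grass | park", "狗 | 草地 | 公园", "none"
    # Tag2Text path: tags plus a caption, no Chinese tags
    return "dog | grass", "none", "a dog chasing a ball on the grass"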
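On the final launch call: launch(enable_queue=True) is the older Gradio way of queuing requests; newer Gradio releases dropped that keyword in favor of Blocks.queue(). If the Space is later upgraded, a rough sketch of the equivalent (not part of this commit) would be:

# Sketch under the assumption of a newer Gradio release where
# launch(enable_queue=...) is no longer accepted.
if __name__ == "__main__":
    demo = build_gui()
    demo.queue()   # queue requests so long-running inference does not block the UI
    demo.launch()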