Update app.py
app.py CHANGED
@@ -146,49 +146,55 @@ def inference_image(img):
     return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)}
 
 
-demo = gr.Blocks()
-with demo:
-    # gr.Markdown(
-    # """
-    # # VideoMamba-Ti
-    # Gradio demo for <a href='https://github.com/OpenGVLab/VideoMamba' target='_blank'>VideoMamba</a>: To use it, simply upload your video, or click one of the examples to load them. Read more at the links below.
-    # """
-    # )
-
-    # with gr.Tab("Video"):
-    #     # with gr.Box():
-    with gr.Row():
-        with gr.Column():
-            with gr.Row():
-                input_video = gr.Video(label='Input Video', height=360)
-                # input_video = load_video(input_video)
-            with gr.Row():
-                submit_video_button = gr.Button('Submit')
-        with gr.Column():
-            label_video = gr.Label(num_top_classes=5)
-    # with gr.Row():
-    #     gr.Examples(examples=['./videos/hitting_baseball.mp4', './videos/hoverboarding.mp4', './videos/yoga.mp4'], inputs=input_video, outputs=label_video, fn=inference_video, cache_examples=True)
-
-    # with gr.Tab("Image"):
-    #     # with gr.Box():
-    #     with gr.Row():
-    #         with gr.Column():
-    #             with gr.Row():
-    #                 input_image = gr.Image(label='Input Image', type='pil', height=360)
-    #             with gr.Row():
-    #                 submit_image_button = gr.Button('Submit')
-    #         with gr.Column():
-    #             label_image = gr.Label(num_top_classes=5)
-    #     with gr.Row():
-    #         gr.Examples(examples=['./images/cat.png', './images/dog.png', './images/panda.png'], inputs=input_image, outputs=label_image, fn=inference_image, cache_examples=True)
-
-    gr.Markdown(
-        """
-        <p style='text-align: center'><a href='https://arxiv.org/abs/2403.06977' target='_blank'>VideoMamba: State Space Model for Efficient Video Understanding</a> | <a href='https://github.com/OpenGVLab/VideoMamba' target='_blank'>Github Repo</a></p>
-        """
-    )
-
-    submit_video_button.click(fn=ultra_inference_video, inputs=input_video, outputs=label_video)
-    # submit_image_button.click(fn=inference_image, inputs=input_image, outputs=label_image)
-
-demo.launch()
+demo = gr.Interface(
+    fn = ultra_inference_video,
+    inputs = gr.Video(),
+    outputs = "label",
+
+)
+# demo = gr.Blocks()
+# with demo:
+#     # gr.Markdown(
+#     # """
+#     # # VideoMamba-Ti
+#     # Gradio demo for <a href='https://github.com/OpenGVLab/VideoMamba' target='_blank'>VideoMamba</a>: To use it, simply upload your video, or click one of the examples to load them. Read more at the links below.
+#     # """
+#     # )
+
+#     # with gr.Tab("Video"):
+#     #     # with gr.Box():
+#     with gr.Row():
+#         with gr.Column():
+#             with gr.Row():
+#                 input_video = gr.Video(label='Input Video', height=360)
+#                 # input_video = load_video(input_video)
+#             with gr.Row():
+#                 submit_video_button = gr.Button('Submit')
+#         with gr.Column():
+#             label_video = gr.Label(num_top_classes=5)
+#     # with gr.Row():
+#     #     gr.Examples(examples=['./videos/hitting_baseball.mp4', './videos/hoverboarding.mp4', './videos/yoga.mp4'], inputs=input_video, outputs=label_video, fn=inference_video, cache_examples=True)
+
+#     # with gr.Tab("Image"):
+#     #     # with gr.Box():
+#     #     with gr.Row():
+#     #         with gr.Column():
+#     #             with gr.Row():
+#     #                 input_image = gr.Image(label='Input Image', type='pil', height=360)
+#     #             with gr.Row():
+#     #                 submit_image_button = gr.Button('Submit')
+#     #         with gr.Column():
+#     #             label_image = gr.Label(num_top_classes=5)
+#     #     with gr.Row():
+#     #         gr.Examples(examples=['./images/cat.png', './images/dog.png', './images/panda.png'], inputs=input_image, outputs=label_image, fn=inference_image, cache_examples=True)
+
+#     gr.Markdown(
+#         """
+#         <p style='text-align: center'><a href='https://arxiv.org/abs/2403.06977' target='_blank'>VideoMamba: State Space Model for Efficient Video Understanding</a> | <a href='https://github.com/OpenGVLab/VideoMamba' target='_blank'>Github Repo</a></p>
+#         """
+#     )
+
+#     submit_video_button.click(fn=ultra_inference_video, inputs=input_video, outputs=label_video)
+#     # submit_image_button.click(fn=inference_image, inputs=input_image, outputs=label_image)
+
+demo.launch()
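For context, the commit swaps the hand-built `Blocks` layout for Gradio's higher-level `Interface` API. Below is a minimal self-contained sketch of that pattern; the stub standing in for `ultra_inference_video` is hypothetical, since the real function is defined earlier in `app.py` and runs the VideoMamba model:

```python
import gradio as gr

def ultra_inference_video(video_path):
    # Hypothetical stub: the real ultra_inference_video in app.py runs the
    # VideoMamba classifier and returns a {class_name: probability} dict,
    # which the "label" output renders as a ranked list of classes.
    return {"hitting baseball": 0.9, "yoga": 0.1}

demo = gr.Interface(
    fn=ultra_inference_video,  # called with the uploaded video's file path
    inputs=gr.Video(),         # upload/record widget; passes a filepath string
    outputs="label",           # string shorthand for gr.Label()
)

demo.launch()
```

Compared with the commented-out `Blocks` version, `Interface` builds the input column, submit button, and output label automatically, which is why the explicit `Row`/`Column` layout and the `submit_video_button.click(...)` binding are no longer needed.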