import gradio as gr
from gradio_image_prompter import ImagePrompter
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
import XGBoost_utils
import numpy as np

def calculate_areas(prompts, brand_num, pictorial_num, text_num):
    """Return the surface of each ad element as a percentage of the whole page.

    Each row of prompts["points"] is one ImagePrompter box:
    [x1, y1, click_type, x2, y2, click_type2]. Boxes are expected in the order
    brand, pictorial, text, then the ad box, and the whole page last.
    """
    # gr.Number passes floats by default; cast so the counts work with range()
    brand_num, pictorial_num, text_num = int(brand_num), int(pictorial_num), int(text_num)
    points_all = prompts["points"]

    def box_area(row):
        x1, y1, x2, y2 = row[0], row[1], row[3], row[4]
        return np.abs((x1 - x2) * (y1 - y2))

    brand_surf = sum(box_area(points_all[i]) for i in range(brand_num))
    pictorial_surf = sum(box_area(points_all[i])
                         for i in range(brand_num, brand_num + pictorial_num))
    text_surf = sum(box_area(points_all[i])
                    for i in range(brand_num + pictorial_num,
                                   brand_num + pictorial_num + text_num))
    ad_size = box_area(points_all[-2])     # second-to-last box: the ad itself
    whole_size = box_area(points_all[-1])  # last box: the whole page

    # All surfaces are reported as percentages of the whole page, matching the output labels
    return (brand_surf / whole_size * 100,
            pictorial_surf / whole_size * 100,
            text_surf / whole_size * 100,
            ad_size / whole_size * 100,
            points_all)
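
# Illustrative sanity check (not part of the app; assumes the ImagePrompter
# points format [x1, y1, click_type, x2, y2, click_type2] and the drawing order
# brand boxes, pictorial boxes, text boxes, then the ad box, and the whole page last).
# Uncomment to run it locally before the interface is launched:
# _example_prompts = {"points": [
#     [0, 0, 2, 10, 10, 3],   # one brand box, area 100
#     [0, 0, 2, 20, 10, 3],   # one pictorial box, area 200
#     [0, 0, 2, 30, 10, 3],   # one text box, area 300
#     [0, 0, 2, 40, 25, 3],   # ad box, area 1000
#     [0, 0, 2, 50, 40, 3],   # whole page, area 2000
# ]}
# print(calculate_areas(_example_prompts, 1, 1, 1))
# # expected output: (5.0, 10.0, 15.0, 50.0, [...the points list...])
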
demo = gr.Interface(
    # lambda prompts1, prompts2: (prompts1["image"], prompts1["points"]),
    fn=calculate_areas,
    inputs=[ImagePrompter(label="Draw bounding boxes"),
            gr.Number(label="Number of brand bounding boxes", precision=0),
            gr.Number(label="Number of pictorial bounding boxes", precision=0),
            gr.Number(label="Number of text bounding boxes", precision=0)],
    outputs=[gr.Number(label="Brand surface %"),
             gr.Number(label="Pictorial surface %"),
             gr.Number(label="Text surface %"),
             gr.Number(label="Ad size %"),
             gr.Dataframe(label="Points")],
    theme=gr.themes.Soft()
)

# demo2 = gr.Interface(
#     lambda prompts: (prompts["image"], prompts["points"]),
#     ImagePrompter(show_label=False),
#     [gr.Image(show_label=False), gr.Dataframe(label="Points")],
# )
# app = gr.TabbedInterface(interface_list=[demo, demo2],
#                          tab_names=["Image inference", "Video inference"])

demo.launch()