# NOTE(review): removed non-code page artifacts captured during scraping
# (Space status banner, file size, commit hash, line-number gutter).
import gradio as gr
from gradio_image_prompter import ImagePrompter
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
import XGBoost_utils
import numpy as np
def calculate_areas(prompts, brand_num, pictorial_num, text_num):
    """Compute surface-area percentages for annotated bounding boxes.

    The user is expected to draw boxes in a fixed order: brand boxes
    first, then pictorial boxes, then text boxes, then (second-to-last)
    the ad boundary and (last) the whole-image boundary.

    Args:
        prompts: dict from ``ImagePrompter``; ``prompts["points"]`` is a
            list of boxes where indices 0/1 hold x1/y1 and indices 3/4
            hold x2/y2 (index 2/5 are prompter flags and are ignored).
        brand_num: number of brand boxes (first entries in the list).
        pictorial_num: number of pictorial boxes (after the brand boxes).
        text_num: number of text boxes (after the pictorial boxes).

    Returns:
        Tuple of (brand %, pictorial %, text %, ad-size %) relative to
        the whole-image box area, plus the raw points list.
    """
    points_all = prompts["points"]
    # gr.Number delivers floats by default; range() requires ints.
    brand_num = int(brand_num)
    pictorial_num = int(pictorial_num)
    text_num = int(text_num)

    def _box_area(box):
        # Box layout: [x1, y1, <flag>, x2, y2, <flag>]
        x1, y1, x2, y2 = box[0], box[1], box[3], box[4]
        return np.abs((x1 - x2) * (y1 - y2))

    brand_surf = sum(_box_area(points_all[i]) for i in range(brand_num))
    pictorial_surf = sum(
        _box_area(points_all[i])
        for i in range(brand_num, brand_num + pictorial_num)
    )
    text_surf = sum(
        _box_area(points_all[i])
        for i in range(brand_num + pictorial_num,
                       brand_num + pictorial_num + text_num)
    )
    ad_size = _box_area(points_all[-2])      # second-to-last box: ad boundary
    whole_size = _box_area(points_all[-1])   # last box: whole image

    # BUG FIX: brand_surf was previously returned as a raw pixel area,
    # while the UI labels it "brand surf %" and all sibling outputs are
    # percentages of the whole-image area. Normalize it the same way.
    return (brand_surf / whole_size * 100,
            pictorial_surf / whole_size * 100,
            text_surf / whole_size * 100,
            ad_size / whole_size * 100,
            prompts["points"])
# --- Gradio UI -------------------------------------------------------------
# Inputs: an image annotated with bounding boxes plus the count of boxes in
# each category; outputs: the computed surface percentages and raw points.
_box_inputs = [
    ImagePrompter(label="Draw bounding boxes"),
    gr.Number(label="Number of brand bounding boxes"),
    gr.Number(label="Number of pictorial bounding boxes"),
    gr.Number(label="Number of text bounding boxes"),
]
_area_outputs = [
    gr.Number(label="brand surf %"),
    gr.Number(label="pictorial surf %"),
    gr.Number(label="text surf %"),
    gr.Number(label="ad size %"),
    gr.Dataframe(label="Points"),
]
demo = gr.Interface(
    fn=calculate_areas,
    inputs=_box_inputs,
    outputs=_area_outputs,
    theme=gr.themes.Soft(),
)
demo.launch()
# end of file (stray line-gutter character removed)