import gradio as gr
import pandas as pd
import json
from constants import (
    BANNER,
    INTRODUCTION_TEXT,
    CITATION_TEXT,
    METRICS_TAB_TEXT,
    DIR_OUTPUT_REQUESTS,
)
from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
from utils_display import (
    AutoEvalColumn,
    fields,
    make_clickable_model,
    styled_error,
    styled_message,
)
from datetime import datetime, timezone
LAST_UPDATED = "September 7th, 2023"
GPU_MODEL = "NVIDIA Tesla M60"
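# Map the raw COCO-evaluator metric names to the short display names shown in the leaderboard table.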
column_names = {
    "model": "model",
    "AP-IoU=0.50:0.95-area=all-maxDets=100": "AP",
    "AP-IoU=0.50-area=all-maxDets=100": "AP@.50",
    "AP-IoU=0.75-area=all-maxDets=100": "AP@.75",
    "AP-IoU=0.50:0.95-area=small-maxDets=100": "AP-S",
    "AP-IoU=0.50:0.95-area=medium-maxDets=100": "AP-M",
    "AP-IoU=0.50:0.95-area=large-maxDets=100": "AP-L",
    "AR-IoU=0.50:0.95-area=all-maxDets=1": "AR1",
    "AR-IoU=0.50:0.95-area=all-maxDets=10": "AR10",
    "AR-IoU=0.50:0.95-area=all-maxDets=100": "AR100",
    "AR-IoU=0.50:0.95-area=small-maxDets=100": "AR-S",
    "AR-IoU=0.50:0.95-area=medium-maxDets=100": "AR-M",
    "AR-IoU=0.50:0.95-area=large-maxDets=100": "AR-L",
    "estimated_fps": "FPS(*)",
    "hub_license": "hub license",
}
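# Fetch the evaluation queue repo, the list of already-requested models, and the results CSV
# from the dataset hub.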
eval_queue_repo, requested_models, csv_results = load_all_info_from_dataset_hub()
if not csv_results.exists():
    raise Exception(f"CSV file {csv_results} does not exist locally")
# Get csv with data and parse columns
original_df = pd.read_csv(csv_results)
lst_evaluated_models = original_df["model"].tolist()
lst_evaluated_models = list(map(str.lower, lst_evaluated_models))
# Formats the columns
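# decimal_formatter renders FPS with two decimals; perc_formatter renders metrics as
# zero-padded percentage strings (e.g. "5.00%" -> "05.00%") so they sort correctly as text.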
def decimal_formatter(x):
    x = "{:.2f}".format(x)
    return x


def perc_formatter(x):
    x = "{:.2%}".format(x)
    while len(x) < 6:
        x = f"0{x}"
    return x
# Drop columns not specified in dictionary
cols_to_drop = [col for col in original_df.columns if col not in column_names]
original_df.drop(cols_to_drop, axis=1, inplace=True)
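# Apply per-column formatting: clickable model links, two-decimal FPS, and percentage metrics.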
for col in original_df.columns:
    if col == "model":
        original_df[col] = original_df[col].apply(make_clickable_model)
    elif col == "estimated_fps":
        original_df[col] = original_df[col].apply(decimal_formatter)  # For decimal values
    elif col == "hub_license":
        continue
    else:
        original_df[col] = original_df[col].apply(perc_formatter)  # For % values
original_df.rename(columns=column_names, inplace=True)
COLS = [c.name for c in fields(AutoEvalColumn)]
TYPES = [c.type for c in fields(AutoEvalColumn)]
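# Gradio callback for the request tab: validates the model name against the Hub,
# rejects duplicate requests, and uploads a request file to the evaluation queue.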
def request_model(model_text, chbcoco2017):
    # Determine the selected checkboxes
    dataset_selection = []
    if chbcoco2017:
        dataset_selection.append("COCO validation 2017 dataset")
    if len(dataset_selection) == 0:
        return styled_error("You need to select at least one dataset")

    # Check if model exists on the hub
    base_model_on_hub, error_msg = is_model_on_hub(model_text)
    if not base_model_on_hub:
        return styled_error(f"Base model '{model_text}' {error_msg}")
    # Check if model is already evaluated
    model_text = model_text.replace(" ", "")
    if model_text.lower() in lst_evaluated_models:
        return styled_error(
            f"Results of the model '{model_text}' are already available on the leaderboard."
        )
    # Construct the output dictionary
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    required_datasets = ", ".join(dataset_selection)
    eval_entry = {
        "date": current_time,
        "model": model_text,
        "datasets_selected": required_datasets,
    }

    # Prepare file path
    DIR_OUTPUT_REQUESTS.mkdir(parents=True, exist_ok=True)
    fn_datasets = "@ ".join(dataset_selection)
    filename = model_text.replace("/", "@") + "@@" + fn_datasets
    if filename in requested_models:
        return styled_error(
            f"A request for this model '{model_text}' and dataset(s) was already made."
        )
    try:
        filename_ext = filename + ".txt"
        out_filepath = DIR_OUTPUT_REQUESTS / filename_ext

        # Write the results to a text file
        with open(out_filepath, "w") as f:
            f.write(json.dumps(eval_entry))
        upload_file(filename, out_filepath)

        # Include file in the list of uploaded files
        requested_models.append(filename)

        # Remove the local file
        out_filepath.unlink()
        return styled_message(
            "🤗 Your request has been submitted and will be evaluated soon!"
        )
    except Exception:
        return styled_error("Error submitting request!")
with gr.Blocks() as demo:
    gr.HTML(BANNER, elem_id="banner")
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 COCO val 2017", elem_id="od-benchmark-tab-table", id=0):
            leaderboard_table = gr.components.Dataframe(
                value=original_df,
                datatype=TYPES,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
            )
        with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
            gr.Markdown(METRICS_TAB_TEXT, elem_classes="markdown-text")
        with gr.TabItem(
            "✉️✨ Request a model here!", elem_id="od-benchmark-tab-table", id=2
        ):
            with gr.Column():
                gr.Markdown(
                    "# ✉️✨ Request results for a new model here!",
                    elem_classes="markdown-text",
                )
            with gr.Column():
                gr.Markdown("Select a dataset:", elem_classes="markdown-text")
            with gr.Column():
                model_name_textbox = gr.Textbox(
                    label="Model name (user_name/model_name)"
                )
                chb_coco2017 = gr.Checkbox(
                    label="COCO validation 2017 dataset",
                    visible=False,
                    value=True,
                    interactive=False,
                )
            with gr.Column():
                mdw_submission_result = gr.Markdown()
                btn_submit = gr.Button(value="🚀 Request")
                btn_submit.click(
                    request_model,
                    [model_name_textbox, chb_coco2017],
                    mdw_submission_result,
                )
    gr.Markdown(
        f'(*) FPS was measured using an *{GPU_MODEL}*, processing 1 image per batch. Refer to the 📈 "Metrics" tab for further details.',
        elem_classes="markdown-text",
    )
    gr.Markdown(f"Last updated on **{LAST_UPDATED}**", elem_classes="markdown-text")
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            gr.Textbox(
                value=CITATION_TEXT,
                lines=7,
                label="Copy the BibTeX snippet to cite this source",
                elem_id="citation-button",
                show_copy_button=True,
            )
demo.launch()