"""Gradio demo: runtime monitoring of computer-vision models.

Builds box-abstraction monitors from training-data features and lets the
user (1) evaluate monitor performance on ID/OOD data and (2) run single-image
inference with a monitor verdict and an explainability (CAM) view.
"""
import torch
import numpy as np
import time
import gradio as gr
import tqdm

# Reproducibility: fix all seeds and force deterministic cuDNN kernels.
torch.manual_seed(0)
np.random.seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

from detectron2monitor import Detectron2Monitor


# NOTE(review): the parameter name `id` shadows the builtin; kept as-is so the
# positional Gradio wiring below stays untouched.
def eval_gradio(id, backbone, clustering_algo, nb_clusters, eps, min_samples,
                progress=gr.Progress(track_tqdm=True)):
    """Construct monitors for (id, backbone) and evaluate them.

    Args:
        id: in-distribution dataset name (radio choice).
        backbone: feature-extractor backbone name (radio choice).
        clustering_algo: clustering algorithm used to abstract features.
        nb_clusters: cluster count (used by kmeans/spectral).
        eps, min_samples: DBSCAN/OPTICS parameters.
        progress: Gradio progress tracker; track_tqdm mirrors tqdm bars
            raised inside Detectron2Monitor into the UI.

    Returns:
        (df_id, df_ood): pandas DataFrames with ID and OOD performance.
    """
    monitor = Detectron2Monitor(id, backbone)
    df_id, df_ood = monitor._evaluate(clustering_algo, nb_clusters, eps, min_samples)
    return df_id, df_ood


def inference_gradio(id, backbone, clustering_algo, nb_clusters, eps, min_samples, file):
    """Run monitored inference on a single input image.

    Loads the pre-built monitors for the given configuration, then returns
    the detection image, the verdict overlay, the CAM visualization, and the
    two summary DataFrames.
    """
    # 0.5 is the confidence threshold used for single-image inference
    # (presumably a score cutoff — confirm against Detectron2Monitor).
    monitor = Detectron2Monitor(id, backbone, 0.5)
    monitors_dict = monitor._load_monitors(clustering_algo, nb_clusters, eps, min_samples)
    image_dict, df, df_verdict = monitor.get_output(monitors_dict, file)
    return image_dict["detection"], image_dict["verdict"], image_dict["cam"], df, df_verdict


with gr.Blocks(theme='soft') as demo:
    gr.Markdown("# Runtime Monitoring Computer Vision Models")
    gr.Markdown(
        """
        This interactive demo presents an approach to monitoring neural networks-based computer vision models using box abstraction-based techniques. Our method involves abstracting features extracted from training data to construct monitors. The demo walks users through the entire process, from monitor construction to evaluation. 
    """
    )
    # NOTE(review): the two tabs originally bound to the same `id`/`backbone`
    # variables, so the Object Detection components shadowed the Image
    # Classification ones — the classification radios are never wired to any
    # handler. Behavior is preserved here (handlers receive the detection-tab
    # components); distinct names make the shadowing explicit. TODO: wire the
    # classification tab or share components via gr.State.
    with gr.Tab("Image Classification"):
        id_cls = gr.Radio(['MNIST', 'CIFAR-10', 'CIFAR-100', 'ImageNet-100', 'ImageNet-1K'], label="Dataset")
        backbone_cls = gr.Radio(['LeNet-5', 'ResNet-18', 'WideResNet-28', 'ResNet-50'], label="Backbone")
    with gr.Tab("Object Detection"):
        id = gr.Radio(['PASCAL-VOC', 'BDD100K', 'KITTI', 'Speed signs', 'NuScenes'], label="Dataset")
        backbone = gr.Radio(['regnet', 'resnet'], label="Backbone")

    clustering_algo = gr.Dropdown(['kmeans', 'spectral', 'dbscan', 'opticals'], label="Clustering algorithm")
    with gr.Row():
        nb_clusters = gr.Number(value=5, label="Number of clusters", precision=0)
        eps = gr.Number(value=5, label="Epsilon", precision=0)
        min_samples = gr.Number(value=10, label="Minimum samples", precision=0)

    with gr.Column():
        with gr.Group():
            eval_btn = gr.Button("Monitor Evaluation")
            eval_id = gr.Dataframe(type="pandas", label="ID performance")
            eval_ood = gr.Dataframe(type="pandas", label="OOD performance")

    with gr.Row():
        with gr.Column():
            image = gr.Image(type="filepath", label="Input")
            button = gr.Button("Infer")
        with gr.Column():
            with gr.Tab("Detection"):
                detection = gr.Image(label="Output")
                df = gr.Dataframe(label="Detection summary")
            with gr.Tab("Verdict"):
                verdict = gr.Image(label="Output")
                df_verdict = gr.Dataframe(label="Verdict summary")
            with gr.Tab("Explainable AI"):
                cam = gr.Image(label="Output")

    # Wire the handlers. Both currently read the Object Detection tab's
    # dataset/backbone radios (see shadowing note above).
    button.click(
        fn=inference_gradio,
        inputs=[id, backbone, clustering_algo, nb_clusters, eps, min_samples, image],
        outputs=[detection, verdict, cam, df, df_verdict],
    )
    eval_btn.click(
        fn=eval_gradio,
        inputs=[id, backbone, clustering_algo, nb_clusters, eps, min_samples],
        outputs=[eval_id, eval_ood],
    )

if __name__ == "__main__":
    # queue() enables the progress tracking used by eval_gradio.
    demo.queue().launch()