"""Gradio demo comparing two Faster R-CNN pedestrian detectors:
a COCO baseline and a model trained on MOTSynth, both used for MOT17-style scenes."""

import logging

import gradio as gr
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import (
    FastRCNNPredictor,
    fasterrcnn_resnet50_fpn,
)

from src.detection.graph_utils import add_bbox
from src.detection.vision import presets

# Silence PIL's verbose logging.
logging.getLogger("PIL").setLevel(logging.CRITICAL)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def load_model(baseline: bool = False):
    """Return the COCO-pretrained baseline or the MOTSynth model fine-tuned on MOT17."""
    if baseline:
        model = fasterrcnn_resnet50_fpn(weights="DEFAULT")
    else:
        model = fasterrcnn_resnet50_fpn()
        # Replace the box predictor with a two-class head (background + pedestrian)
        # and load the MOTSynth checkpoint fine-tuned on MOT17.
        in_features = model.roi_heads.box_predictor.cls_score.in_features
        model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 2)
        checkpoint = torch.load(
            "model_split_3_FT_MOT17.pth", map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    model.to(device)
    model.eval()
    return model


def frcnn_motsynth(image):
    """Run the MOTSynth-trained detector on a PIL image and return the annotated PNG."""
    model = load_model(baseline=False)
    transform_eval = presets.DetectionPresetEval()
    image_tensor = transform_eval(image, None)[0].to(device)
    with torch.no_grad():
        prediction = model([image_tensor])[0]
    # Draw boxes with confidence above 0.80.
    image_w_bbox = add_bbox(image_tensor, prediction, 0.80)
    torchvision.io.write_png(image_w_bbox, "custom_out.png")
    return "custom_out.png"


def frcnn_coco(image):
    """Run the COCO baseline detector on a PIL image and return the annotated PNG."""
    model = load_model(baseline=True)
    transform_eval = presets.DetectionPresetEval()
    image_tensor = transform_eval(image, None)[0].to(device)
    with torch.no_grad():
        prediction = model([image_tensor])[0]
    # Draw boxes with confidence above 0.80.
    image_w_bbox = add_bbox(image_tensor, prediction, 0.80)
    torchvision.io.write_png(image_w_bbox, "baseline_out.png")
    return "baseline_out.png"


title = "Domain shift adaptation on pedestrian detection with Faster R-CNN"
description = "![alt text](http://www.aiacademy.unimore.it/media/news/ai-logo-white_2ND_EDITION.png)"
examples = ["001.jpg", "002.jpg", "003.jpg",
            "004.jpg", "005.jpg", "006.jpg", "007.jpg"]

io_baseline = gr.Interface(
    frcnn_coco,
    gr.Image(type="pil"),
    gr.Image(type="file", shape=(1920, 1080),
             label="Baseline Model trained on COCO + FT on MOT17"),
)

io_custom = gr.Interface(
    frcnn_motsynth,
    gr.Image(type="pil"),
    gr.Image(type="file", shape=(1920, 1080),
             label="Faster R-CNN trained on MOTSynth + FT on MOT17"),
)

gr.Parallel(
    io_baseline,
    io_custom,
    title=title,
    description=description,
    examples=examples,
    theme="default",
).launch(enable_queue=True)