File size: 2,391 Bytes
24910f2
a4c8bb5
 
24910f2
e3dc417
 
b17bb63
24910f2
 
 
 
 
 
a4c8bb5
24910f2
b17bb63
24910f2
 
b17bb63
 
 
 
6955470
b17bb63
4378f9c
24910f2
a4c8bb5
e679284
b17bb63
05d4fcf
9810614
4378f9c
b942514
24910f2
24797c0
ef7cf07
a4c8bb5
24797c0
 
 
 
 
 
dea2364
 
84b4836
dea2364
24797c0
 
88d8a4f
24910f2
59bd0c5
5525934
a4c8bb5
 
5525934
24910f2
ef7cf07
 
b17bb63
 
 
24910f2
 
e06d371
5525934
ef7cf07
742d503
ef7cf07
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
"""
building-segmentation
Proof of concept showing effectiveness of a fine tuned instance segmentation model for detecting buildings.
"""
import os
import cv2
os.system("pip install 'git+https://github.com/facebookresearch/detectron2.git'")
from transformers import DetrFeatureExtractor, DetrForSegmentation
from PIL import Image
import gradio as gr
import numpy as np
import torch
import torchvision
import detectron2

# import some common detectron2 utilities
import itertools
import seaborn as sns
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.checkpoint import DetectionCheckpointer

# Build the detectron2 config: start from the config saved during fine-tuning,
# then override it for CPU-only inference with the local checkpoint.
# NOTE: order matters — merge_from_file must run before the overrides below.
cfg = get_cfg()
cfg.merge_from_file("model_weights/buildings_poc_cfg.yml")
cfg.MODEL.DEVICE='cpu'  # demo host has no GPU
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.35  # coarse pre-filter; segment_buildings applies the UI slider's threshold on top
cfg.MODEL.WEIGHTS = "model_weights/model_final.pth"  
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 8  # must match the number of classes the model was fine-tuned with
predictor = DefaultPredictor(cfg)

def segment_buildings(im, confidence_threshold):
    """Run the building-segmentation predictor on an image and render the masks.

    Parameters
    ----------
    im : PIL.Image
        Input aerial image.
    confidence_threshold : float
        Minimum detection score an instance must exceed to be drawn.

    Returns
    -------
    PIL.Image
        Visualization of the surviving instances, rendered at half scale.
    """
    frame = np.array(im)
    predictions = predictor(frame)

    # Move predictions to the CPU and keep only sufficiently confident instances.
    detected = predictions["instances"].to("cpu")
    keep = detected[detected.scores > confidence_threshold]

    # Reverse the channel order for the Visualizer; mirrored again on the way out.
    drawer = Visualizer(
        frame[:, :, ::-1],
        scale=0.5,
        instance_mode=ColorMode.SEGMENTATION,
    )
    rendered = drawer.draw_instance_predictions(keep)

    return Image.fromarray(rendered.get_image()[:, :, ::-1])

# --- Gradio UI wiring ---

# Slider controlling the per-instance score cutoff fed to segment_buildings.
confidence_slider = gr.inputs.Slider(0, 1, .1, .7,
                                     label='Set confidence threshold % for masks')

# Input/output image widgets (images travel as PIL objects).
image_in = gr.inputs.Image(type="pil", label="Input Image")
image_out = gr.outputs.Image(type="pil", label="Output Image")

demo_title = "Building Segmentation"
demo_description = "An instance segmentation demo for identifying boundaries of buildings in aerial images using DETR (End-to-End Object Detection) model with MaskRCNN-101 backbone"

# Assemble the interface and start the app.
gr.Interface(
    segment_buildings,
    inputs=[image_in, confidence_slider],
    outputs=image_out,
    title=demo_title,
    description=demo_description,
).launch(debug=True)