RashiAgarwal committed
Commit 4258cd5 · Parent(s): 92299a3
Update app.py

app.py CHANGED
@@ -1,36 +1,136 @@
-
-
+import torch
+import pandas as pd
 import numpy as np
 import gradio as gr
+from PIL import Image
+from torch.nn import functional as F
+from collections import OrderedDict
+from torchvision import transforms
 from pytorch_grad_cam import GradCAM
 from pytorch_grad_cam.utils.image import show_cam_on_image
 from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
-from
+from pytorch_lightning import LightningModule, Trainer, seed_everything
+import albumentations as A
+from albumentations.pytorch import ToTensorV2
+import torchvision.transforms as T
+from model import YOLOv3
+from train import YOLOTraining
+import config
+from utils import *
+import numpy as np
+import cv2
+import albumentations as A
+from utils import *
+import random
+from albumentations.pytorch import ToTensorV2
 
+model = YOLOv3(num_classes=config.NUM_CLASSES)
+model = YOLOTraining(model)
+model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu')), strict=False)
+model.eval()
 
-
-
-
-
-
-
+def yolo_predict(image: np.ndarray, iou_thresh: float = 0.5, thresh: float = 0.5):
+
+    transforms = A.Compose(
+        [
+            A.LongestMaxSize(max_size=config.IMAGE_SIZE),
+            A.PadIfNeeded(
+                min_height=config.IMAGE_SIZE, min_width=config.IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+            ),
+            A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+            ToTensorV2(),
         ],
-
-
+    )
+    with torch.no_grad():
+        transformed_image = transforms(image=image)["image"].unsqueeze(0).to(config.DEVICE)
+        output = model(transformed_image)
+
+    bboxes = [[] for _ in range(1)]
+    for i in range(3):
+        batch_size, A1, S, _, _ = output[i].shape
+        anchor = config.SCALED_ANCHORS[i].to(config.DEVICE)
+        boxes_scale_i = cells_to_bboxes(
+            output[i].to(config.DEVICE), anchor, S=S, is_preds=True
+        )
+        for idx, (box) in enumerate(boxes_scale_i):
+            bboxes[idx] += box
+
+    nms_boxes = non_max_suppression(
+        bboxes[0], iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
+    )
+    plot_img = draw_predictions(image, nms_boxes, class_labels=config.PASCAL_CLASSES)
 
-
-
-
-
-
-
-
-
-
-
-
+    return [plot_img]
+
+
+def draw_predictions(image: np.ndarray, boxes: list[list], class_labels: list[str]) -> np.ndarray:
+    """Plots predicted bounding boxes on the image"""
+
+    colors = [[random.randint(0, 255) for _ in range(3)] for name in class_labels]
+
+    im = np.array(image)
+    height, width, _ = im.shape
+    bbox_thick = int(0.6 * (height + width) / 600)
+
+    # Create a Rectangle patch
+    for box in boxes:
+        assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
+        class_pred = box[0]
+        conf = box[1]
+        box = box[2:]
+        upper_left_x = box[0] - box[2] / 2
+        upper_left_y = box[1] - box[3] / 2
+
+        x1 = int(upper_left_x * width)
+        y1 = int(upper_left_y * height)
+
+        x2 = x1 + int(box[2] * width)
+        y2 = y1 + int(box[3] * height)
+
+        cv2.rectangle(
+            image,
+            (x1, y1), (x2, y2),
+            color=colors[int(class_pred)],
+            thickness=bbox_thick
+        )
+        text = f"{class_labels[int(class_pred)]}: {conf:.2f}"
+        t_size = cv2.getTextSize(text, 0, 0.7, thickness=bbox_thick // 2)[0]
+        c3 = (x1 + t_size[0], y1 - t_size[1] - 3)
+
+        cv2.rectangle(image, (x1, y1), c3, colors[int(class_pred)], -1)
+        cv2.putText(
+            image,
+            text,
+            (x1, y1 - 2),
+            cv2.FONT_HERSHEY_SIMPLEX,
+            0.7,
+            (0, 0, 0),
+            bbox_thick // 2,
+            lineType=cv2.LINE_AA,
+        )
+
+    return image
+
+demo = gr.Interface(
+    fn=yolo_predict,
+    inputs=[
+        gr.Image(shape=(config.IMAGE_SIZE,config.IMAGE_SIZE), label="Input Image"),
+        gr.Slider(0, 1, value=0.5, step=0.05, label="IOU Threshold"),
+        gr.Slider(0, 1, value=0.5, step=0.05, label="Threshold")
+    ],
+    outputs=gr.Gallery(rows=1, columns=1),
+    examples=[
+        ["examples/000001.jpg", 0.5, 0.5],
+        ["examples/000002.jpg", 0.5, 0.5],
+        ["examples/000003.jpg", 0.5, 0.5],
+        ["examples/000004.jpg", 0.5, 0.5],
+        ["examples/000005.jpg", 0.5, 0.5],
+        ["examples/000006.jpg", 0.5, 0.5],
+        ["examples/000007.jpg", 0.5, 0.5],
+        ["examples/000008.jpg", 0.5, 0.5],
+        ["examples/000009.jpg", 0.5, 0.5],
+        ["examples/000010.jpg", 0.5, 0.5]
     ]
-
-
-
-).launch()
+)
+
+demo.launch()
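A note on the preprocessing inside yolo_predict: LongestMaxSize plus PadIfNeeded letterboxes the input onto a square config.IMAGE_SIZE canvas, and the zero-mean/unit-std Normalize plus ToTensorV2 simply scales pixels to [0, 1] and converts HWC uint8 to a CHW tensor. A minimal shape-check sketch of that pipeline, assuming an image size of 416 (typical for YOLOv3 on Pascal VOC; config.IMAGE_SIZE itself is not shown in this commit):

# Sanity-check sketch of the letterbox preprocessing used above.
# Assumes IMAGE_SIZE = 416; the Space reads it from config.py instead.
import cv2
import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensorV2

IMAGE_SIZE = 416
tf = A.Compose([
    A.LongestMaxSize(max_size=IMAGE_SIZE),                 # scale longest side to 416
    A.PadIfNeeded(min_height=IMAGE_SIZE, min_width=IMAGE_SIZE,
                  border_mode=cv2.BORDER_CONSTANT),        # pad the short side
    A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255),  # just /255
    ToTensorV2(),                                          # HWC uint8 -> CHW float tensor
])

out = tf(image=np.zeros((300, 500, 3), dtype=np.uint8))["image"]
assert tuple(out.shape) == (3, IMAGE_SIZE, IMAGE_SIZE)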
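draw_predictions expects each box as [class_pred, confidence, x, y, width, height] with coordinates in normalized midpoint form, matching box_format="midpoint" in the NMS call. A small sketch of the midpoint-to-corner arithmetic it performs; midpoint_to_corners is a hypothetical helper, not part of the commit:

# Hypothetical helper mirroring the corner math inside draw_predictions.
def midpoint_to_corners(box, img_w, img_h):
    """box = (x_center, y_center, width, height), all normalized to [0, 1]."""
    x, y, w, h = box
    x1 = int((x - w / 2) * img_w)   # upper-left corner in pixels
    y1 = int((y - h / 2) * img_h)
    return x1, y1, x1 + int(w * img_w), y1 + int(h * img_h)

# A centered box covering half of a 416x416 image spans the 1/4 to 3/4 marks.
assert midpoint_to_corners((0.5, 0.5, 0.5, 0.5), 416, 416) == (104, 104, 312, 312)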
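Because demo.launch() runs at module import, app.py cannot currently be imported without starting the Gradio server; moving the launch under an `if __name__ == "__main__":` guard would allow a headless smoke test along these lines (a sketch assuming that guard plus the Space's model.pth, config.py, utils.py, and examples/ files, none of which are part of this commit):

# Hypothetical smoke test, runnable only if demo.launch() is guarded so
# that importing app has no side effects.
import cv2
from app import yolo_predict

img = cv2.cvtColor(cv2.imread("examples/000001.jpg"), cv2.COLOR_BGR2RGB)
annotated, = yolo_predict(img, iou_thresh=0.5, thresh=0.5)  # returns [image]
cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))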