RashiAgarwal committed
Commit 98375b6
1 Parent(s): 39230cc
Update display.py

Files changed: display.py (+11 -10)

display.py CHANGED
@@ -9,16 +9,16 @@ from pytorch_grad_cam.utils.image import show_cam_on_image
 from utils import YoloCAM, cells_to_bboxes, non_max_suppression
 from model import YOLOv3
 
-def inference(image: np.ndarray, iou_thresh: float = 0.5, thresh: float = 0.5,
+def inference(image: np.ndarray, iou_thresh: float = 0.5, thresh: float = 0.5, transparency: float = 0.5):
     model = YOLOV3_PL() #YOLOv3(num_classes=20)
     model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu')), strict=False)
     # iou_thresh = 0.75
     # thresh = 0.75
     scaled_anchors = config.SCALED_ANCHORS
 
-    target_layer_list = model.layers[-2]
+    # target_layer_list = model.layers[-2]
 
-    cam = YoloCAM(model=model, target_layers = target_layer_list, use_cuda=False)
+    # cam = YoloCAM(model=model, target_layers = target_layer_list, use_cuda=False)
     transforms = A.Compose(
         [
             A.LongestMaxSize(max_size=config.IMAGE_SIZE),
@@ -47,15 +47,16 @@ def inference(image: np.ndarray, iou_thresh: float = 0.5, thresh: float = 0.5,sh
         bboxes[0], iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
     )
     plot_img = draw_predictions(image, nms_boxes, class_labels=config.PASCAL_CLASSES)
-    if not show_cam:
-        return [plot_img]
+    # if not show_cam:
+    #     return [plot_img]
 
-    grayscale_cam = cam(transformed_image, scaled_anchors)[0, :, :]
-    img = cv2.resize(image, (416, 416))
-    img = np.float32(img) / 255
-    cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True, image_weight=transparency)
-    return [plot_img, cam_image]
+    # grayscale_cam = cam(transformed_image, scaled_anchors)[0, :, :]
+    # img = cv2.resize(image, (416, 416))
+    # img = np.float32(img) / 255
+    # cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True, image_weight=transparency)
+    # return [plot_img, cam_image]
 
+    return [plot_img]
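For orientation only, a minimal usage sketch that is not part of the commit: it assumes display.py is importable as a module, uses a hypothetical local test image named sample.jpg, and relies on the effect of this change, namely that with the Grad-CAM branch commented out inference() always returns a one-element list holding the annotated detection image.

# Hedged sketch, not from the repository: exercise the updated inference()
# after this commit. "sample.jpg" is a placeholder path.
import cv2

from display import inference

# OpenCV loads BGR; convert to RGB (assumed input format for the ndarray argument).
image = cv2.cvtColor(cv2.imread("sample.jpg"), cv2.COLOR_BGR2RGB)

outputs = inference(image, iou_thresh=0.5, thresh=0.5, transparency=0.5)
assert len(outputs) == 1   # CAM overlay is no longer produced by this code path
plot_img = outputs[0]      # annotated prediction image

Note that the new signature keeps the transparency parameter even though the only line that used it (show_cam_on_image) is now commented out, so after this change the argument has no effect.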