BhumikaMak committed on
Commit 60af537 · 1 Parent(s): 59b21e0

Added application file

Files changed (1)
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
+ import warnings
+ warnings.filterwarnings('ignore')
+ warnings.simplefilter('ignore')
+
+ import torch
+ import cv2
+ import numpy as np
+ import torchvision.transforms as transforms
+ from pytorch_grad_cam import EigenCAM
+ from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
+ from PIL import Image
+ import gradio as gr
+
+ # Global Color Palette
+ COLORS = np.random.uniform(0, 255, size=(80, 3))
+
+ # Function to parse YOLO detections
+ def parse_detections(results):
+     detections = results.pandas().xyxy[0].to_dict()
+     boxes, colors, names = [], [], []
+     for i in range(len(detections["xmin"])):
+         confidence = detections["confidence"][i]
+         if confidence < 0.2:
+             continue
+         xmin, ymin = int(detections["xmin"][i]), int(detections["ymin"][i])
+         xmax, ymax = int(detections["xmax"][i]), int(detections["ymax"][i])
+         name, category = detections["name"][i], int(detections["class"][i])
+         boxes.append((xmin, ymin, xmax, ymax))
+         colors.append(COLORS[category])
+         names.append(name)
+     return boxes, colors, names
+
+ # Draw bounding boxes and labels
+ def draw_detections(boxes, colors, names, img):
+     for box, color, name in zip(boxes, colors, names):
+         xmin, ymin, xmax, ymax = box
+         cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
+         cv2.putText(img, name, (xmin, ymin - 5),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2,
+                     lineType=cv2.LINE_AA)
+     return img
+
+ # Main function for Grad-CAM visualization
+ def process_image(image):
+     image = np.array(image)
+     image = cv2.resize(image, (640, 640))
+     rgb_img = image.copy()
+     img_float = np.float32(image) / 255
+
+     # Image transformation
+     transform = transforms.ToTensor()
+     tensor = transform(img_float).unsqueeze(0)
+
+     # Load YOLOv5 model
+     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
+     model.eval()
+     model.cpu()
+     target_layers = [model.model.model.model[-2]]
+
+     # Run YOLO detection
+     results = model([rgb_img])
+     boxes, colors, names = parse_detections(results)
+     detections_img = draw_detections(boxes, colors, names, rgb_img.copy())
+
+     # Grad-CAM visualization
+     cam = EigenCAM(model, target_layers)
+     grayscale_cam = cam(tensor)[0, :, :]
+     cam_image = show_cam_on_image(img_float, grayscale_cam, use_rgb=True)
+
+     # Renormalize Grad-CAM inside bounding boxes
+     renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32)
+     for x1, y1, x2, y2 in boxes:
+         renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy())
+     renormalized_cam = scale_cam_image(renormalized_cam)
+     renormalized_cam_image = show_cam_on_image(img_float, renormalized_cam, use_rgb=True)
+
+     # Concatenate detections, CAM, and box-renormalized CAM side by side
+     final_image = np.hstack((detections_img, cam_image, renormalized_cam_image))
+
+     return Image.fromarray(final_image)
+
+ # Gradio Interface
+ interface = gr.Interface(
+     fn=process_image,
+     inputs=gr.Image(type="pil", label="Upload an Image"),
+     outputs=gr.Image(type="pil", label="Result"),
+     title="YOLOv5 Object Detection with Grad-CAM",
+     description="Upload an image to visualize object detection (YOLOv5) and Grad-CAM explanations."
+ )
+
+ if __name__ == "__main__":
+     interface.launch()
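
Note: a minimal sketch for exercising process_image directly, outside the Gradio UI. It assumes this file is saved as app.py on the import path and that a hypothetical test image test.jpg exists in the working directory; importing app builds the Gradio interface but does not launch it, thanks to the __main__ guard.

# Sketch: call process_image on a local image (test.jpg is a hypothetical path).
from PIL import Image
from app import process_image

result = process_image(Image.open("test.jpg").convert("RGB"))
result.save("result.jpg")  # detections | EigenCAM overlay | box-renormalized overlay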