BhumikaMak committed
Commit 4b26edc · Parent: 0318d64

Add: DFF support

Files changed (1)
  1. yolov5.py +0 -195
yolov5.py DELETED
@@ -1,195 +0,0 @@
-import torch
-import cv2
-import numpy as np
-from PIL import Image
-import torchvision.transforms as transforms
-from pytorch_grad_cam import EigenCAM
-from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
-import gradio as gr
-"""
-# Global Color Palette
-COLORS = np.random.uniform(0, 255, size=(80, 3))
-
-
-def parse_detections(results):
-    detections = results.pandas().xyxy[0].to_dict()
-    boxes, colors, names, classes = [], [], [], []
-    for i in range(len(detections["xmin"])):
-        confidence = detections["confidence"][i]
-        if confidence < 0.2:
-            continue
-        xmin, ymin = int(detections["xmin"][i]), int(detections["ymin"][i])
-        xmax, ymax = int(detections["xmax"][i]), int(detections["ymax"][i])
-        name, category = detections["name"][i], int(detections["class"][i])
-        boxes.append((xmin, ymin, xmax, ymax))
-        colors.append(COLORS[category])
-        names.append(name)
-        classes.append(category)
-    return boxes, colors, names, classes
-
-
-def draw_detections(boxes, colors, names, classes, img):
-    for box, color, name, cls in zip(boxes, colors, names, classes):
-        xmin, ymin, xmax, ymax = box
-        label = f"{cls}: {name}"  # Combine class ID and name
-        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
-        cv2.putText(
-            img, label, (xmin, ymin - 5),
-            cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2,
-            lineType=cv2.LINE_AA
-        )
-    return img
-
-
-def generate_cam_image(model, target_layers, tensor, rgb_img, boxes):
-    cam = EigenCAM(model, target_layers)
-    grayscale_cam = cam(tensor)[0, :, :]
-    img_float = np.float32(rgb_img) / 255
-    cam_image = show_cam_on_image(img_float, grayscale_cam, use_rgb=True)
-    renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32)
-    for x1, y1, x2, y2 in boxes:
-        renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy())
-    renormalized_cam = scale_cam_image(renormalized_cam)
-    renormalized_cam_image = show_cam_on_image(img_float, renormalized_cam, use_rgb=True)
-
-    return cam_image, renormalized_cam_image
-
-
-def xai_yolov5(image):
-    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
-    model.eval()
-    model.cpu()
-
-    target_layers = [model.model.model.model[-2]]  # Grad-CAM target layer
-
-    # Run YOLO detection
-    results = model([image])
-    boxes, colors, names, classes = parse_detections(results)
-    detections_img = draw_detections(boxes, colors, names,classes, image.copy())
-
-    # Prepare input tensor for Grad-CAM
-    img_float = np.float32(image) / 255
-    transform = transforms.ToTensor()
-    tensor = transform(img_float).unsqueeze(0)
-
-    # Grad-CAM visualization
-    cam_image, renormalized_cam_image = generate_cam_image(model, target_layers, tensor, image, boxes)
-
-    # Combine results
-    final_image = np.hstack((image, detections_img, renormalized_cam_image))
-    caption = "Results using YOLOv5"
-    return Image.fromarray(final_image), caption
-
-
-"""
-
-import torch
-import cv2
-import numpy as np
-from PIL import Image
-import torchvision.transforms as transforms
-from pytorch_grad_cam import EigenCAM
-from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
-import gradio as gr
-from sklearn.decomposition import NMF  # For feature factorization
-
-# Global Color Palette
-COLORS = np.random.uniform(0, 255, size=(80, 3))
-
-
-def parse_detections(results):
-    detections = results.pandas().xyxy[0].to_dict()
-    boxes, colors, names, classes = [], [], [], []
-    for i in range(len(detections["xmin"])):
-        confidence = detections["confidence"][i]
-        if confidence < 0.2:
-            continue
-        xmin, ymin = int(detections["xmin"][i]), int(detections["ymin"][i])
-        xmax, ymax = int(detections["xmax"][i]), int(detections["ymax"][i])
-        name, category = detections["name"][i], int(detections["class"][i])
-        boxes.append((xmin, ymin, xmax, ymax))
-        colors.append(COLORS[category])
-        names.append(name)
-        classes.append(category)
-    return boxes, colors, names, classes
-
-
-def draw_detections(boxes, colors, names, classes, img):
-    for box, color, name, cls in zip(boxes, colors, names, classes):
-        xmin, ymin, xmax, ymax = box
-        label = f"{cls}: {name}"  # Combine class ID and name
-        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
-        cv2.putText(
-            img, label, (xmin, ymin - 5),
-            cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2,
-            lineType=cv2.LINE_AA
-        )
-    return img
-
-
-def generate_cam_image(model, target_layers, tensor, rgb_img, boxes):
-    cam = EigenCAM(model, target_layers)
-    grayscale_cam = cam(tensor)[0, :, :]
-    img_float = np.float32(rgb_img) / 255
-    cam_image = show_cam_on_image(img_float, grayscale_cam, use_rgb=True)
-    renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32)
-    for x1, y1, x2, y2 in boxes:
-        renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy())
-    renormalized_cam = scale_cam_image(renormalized_cam)
-    renormalized_cam_image = show_cam_on_image(img_float, renormalized_cam, use_rgb=True)
-
-    return cam_image, renormalized_cam_image
-
-
-def deep_feature_factorization(features):
-    # Reshape the features for factorization (flatten spatial dimensions)
-    n, c, h, w = features.shape
-    reshaped_features = features.view(c, -1).detach().cpu().numpy()
-
-    # Apply Non-Negative Matrix Factorization (NMF)
-    nmf = NMF(n_components=10, init='random', random_state=42, max_iter=300)
-    basis = nmf.fit_transform(reshaped_features)
-    coefficients = nmf.components_
-
-    # Reconstruct the feature map
-    reconstructed = np.dot(basis, coefficients).reshape((c, h, w))
-
-    return torch.tensor(reconstructed, dtype=torch.float32).unsqueeze(0)
-
-
-def xai_yolov5(image):
-    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
-    model.eval()
-    model.cpu()
-
-    target_layers = [model.model.model.model[-2]]
-
-    # Run YOLO detection
-    results = model([image])
-    boxes, colors, names, classes = parse_detections(results)
-    detections_img = draw_detections(boxes, colors, names, classes, image.copy())
-
-    # Extract intermediate features
-    def hook(module, input, output):
-        return output
-
-    hook_handle = target_layers[0].register_forward_hook(hook)
-    with torch.no_grad():
-        model([image])
-    intermediate_features = hook_handle.remove()
-
-    # Apply Deep Feature Factorization
-    factored_features = deep_feature_factorization(intermediate_features)
-
-    # Prepare input tensor for Grad-CAM
-    img_float = np.float32(image) / 255
-    transform = transforms.ToTensor()
-    tensor = transform(img_float).unsqueeze(0)
-
-    # Grad-CAM visualization using factored features
-    cam_image, renormalized_cam_image = generate_cam_image(model, target_layers, factored_features, image, boxes)
-
-    # Combine results
-    final_image = np.hstack((image, detections_img, renormalized_cam_image))
-    caption = "Results using YOLOv5 with Deep Feature Factorization"
-    return Image.fromarray(final_image), caption
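
Note on the removed feature-extraction step: hook_handle.remove() returns None (it only unregisters the hook), so intermediate_features in the deleted xai_yolov5 never receives the layer output. A minimal sketch of the intended capture, reusing the model, target_layers and image names from the deleted file; the features list and capture_hook function below are illustrative and not part of this commit:

import torch

features = []  # filled by the hook during the forward pass

def capture_hook(module, inputs, output):
    # Store a detached copy of the layer output here; remove() only
    # unregisters the hook and returns None.
    features.append(output.detach())

handle = target_layers[0].register_forward_hook(capture_hook)
with torch.no_grad():
    model([image])  # forward pass populates the features list
handle.remove()

intermediate_features = features[0]  # activation of shape (N, C, H, W)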
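The removed deep_feature_factorization reconstructs the activation from its NMF factors, whereas Deep Feature Factorization typically visualizes the spatial factors themselves as concept heatmaps. A rough sketch under the same assumptions (intermediate_features holds a (1, C, H, W) activation captured as above; dff_concept_maps is a hypothetical helper, not part of this commit; the pytorch_grad_cam package also provides a DeepFeatureFactorization helper for this idea):

import cv2
import numpy as np
from sklearn.decomposition import NMF
from pytorch_grad_cam.utils.image import show_cam_on_image

def dff_concept_maps(features, n_components=4):
    # features: torch tensor of shape (1, C, H, W)
    _, c, h, w = features.shape
    flat = features[0].reshape(c, h * w).cpu().numpy()
    flat = np.maximum(flat, 0)  # NMF requires non-negative input
    nmf = NMF(n_components=n_components, init='nndsvda', random_state=0, max_iter=300)
    nmf.fit_transform(flat)  # (C, k) channel loadings, not needed for the maps
    maps = nmf.components_.reshape(n_components, h, w)  # (k, H, W) spatial factors
    maps = maps - maps.min(axis=(1, 2), keepdims=True)
    maps = maps / (maps.max(axis=(1, 2), keepdims=True) + 1e-7)
    return maps

# Overlay the first concept on the original RGB image (H x W x 3, uint8):
concepts = dff_concept_maps(intermediate_features)
heat = cv2.resize(np.float32(concepts[0]), (image.shape[1], image.shape[0]))
overlay = show_cam_on_image(np.float32(image) / 255, heat, use_rgb=True)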