Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -20,7 +20,7 @@ logger = logging.getLogger(__name__)


# Download the YOLOv8 pretrained model
-model_yolo = YOLO('
+model_yolo = YOLO('yolov8s.pt')  # use the YOLOv8 pretrained model


dog_breeds = ["Afghan_Hound", "African_Hunting_Dog", "Airedale", "American_Staffordshire_Terrier",
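For readers skimming the diff: YOLO here is the Ultralytics detector class, and passing a checkpoint name such as 'yolov8s.pt' loads (and, if missing, downloads) the pretrained weights. A minimal loading sketch, assuming the import that sits above this hunk in app.py:

from ultralytics import YOLO  # assumed import; not visible in this hunk

# Load the small YOLOv8 pretrained checkpoint; the weights are fetched on first use
model_yolo = YOLO('yolov8s.pt')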
@@ -167,56 +167,34 @@ async def predict_single_dog(image):
    return top1_prob, topk_breeds, topk_probs_percent


-
-# results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
-# dogs = []
-# boxes = []
-# for box in results.boxes:
-#     if box.cls == 16:  # COCO dataset class for dog is 16
-#         xyxy = box.xyxy[0].tolist()
-#         confidence = box.conf.item()
-#         boxes.append((xyxy, confidence))
-
-# if not boxes:
-#     dogs.append((image, 1.0, [0, 0, image.width, image.height]))
-# else:
-#     nms_boxes = non_max_suppression(boxes, iou_threshold)
-
-#     for box, confidence in nms_boxes:
-#         x1, y1, x2, y2 = box
-#         w, h = x2 - x1, y2 - y1
-#         x1 = max(0, x1 - w * 0.05)
-#         y1 = max(0, y1 - h * 0.05)
-#         x2 = min(image.width, x2 + w * 0.05)
-#         y2 = min(image.height, y2 + h * 0.05)
-#         cropped_image = image.crop((x1, y1, x2, y2))
-#         dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
-
-# return dogs
-
-async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.5):
+async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.6):
    results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
    dogs = []
    boxes = []
    for box in results.boxes:
-        if box.cls == 16:  #
+        if box.cls == 16:  # COCO dataset class for dog is 16
            xyxy = box.xyxy[0].tolist()
            confidence = box.conf.item()
            boxes.append((xyxy, confidence))

    if not boxes:
-        # When no dog is detected, use the full image
        dogs.append((image, 1.0, [0, 0, image.width, image.height]))
    else:
        nms_boxes = non_max_suppression(boxes, iou_threshold)
+
        for box, confidence in nms_boxes:
            x1, y1, x2, y2 = box
+            w, h = x2 - x1, y2 - y1
+            x1 = max(0, x1 - w * 0.05)
+            y1 = max(0, y1 - h * 0.05)
+            x2 = min(image.width, x2 + w * 0.05)
+            y2 = min(image.height, y2 + h * 0.05)
            cropped_image = image.crop((x1, y1, x2, y2))
            dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
+
    return dogs


-
def non_max_suppression(boxes, iou_threshold):
    keep = []
    boxes = sorted(boxes, key=lambda x: x[1], reverse=True)
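The hunk ends inside non_max_suppression, a helper that app.py defines itself. A sketch of how a greedy IoU-based version typically continues from the two lines shown, assuming boxes holds ([x1, y1, x2, y2], confidence) tuples as built above; calculate_iou is a hypothetical helper name, and the actual body in app.py may differ:

def non_max_suppression(boxes, iou_threshold):
    keep = []
    boxes = sorted(boxes, key=lambda x: x[1], reverse=True)  # highest confidence first
    while boxes:
        current = boxes.pop(0)
        keep.append(current)
        # discard remaining boxes that overlap the kept one beyond the IoU threshold
        boxes = [b for b in boxes if calculate_iou(current[0], b[0]) < iou_threshold]
    return keep

def calculate_iou(box1, box2):
    # intersection over union of two [x1, y1, x2, y2] boxes
    x1, y1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    x2, y2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - inter
    return inter / union if union > 0 else 0.0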
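For context on how the reworked coroutine would be exercised, a minimal usage sketch; the file name is hypothetical and the asyncio wrapper stands in for whatever async handler the Space uses to await it:

import asyncio
from PIL import Image

async def main():
    image = Image.open('example_dog.jpg')  # hypothetical input image
    dogs = await detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.6)
    for cropped, confidence, (x1, y1, x2, y2) in dogs:
        print(f"dog at ({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f}) with confidence {confidence:.2f}")

asyncio.run(main())

The new defaults (conf_threshold=0.25, iou_threshold=0.6) admit more candidate boxes than the previous 0.35/0.5 pair, relying on the NMS pass and the added 5% margin expansion to produce cleaner crops for downstream breed classification.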