DawnC committed
Commit 2156e3a
Parent(s): 7393f93

Update app.py

Files changed (1): app.py (+18, -82)
app.py CHANGED
@@ -194,7 +194,7 @@ async def predict_single_dog(image):
     return top1_prob, topk_breeds, topk_probs_percent
 
 
-async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.3):
+async def detect_multiple_dogs(image, conf_threshold=0.2, iou_threshold=0.3):
     results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
     dogs = []
     boxes = []
@@ -203,28 +203,27 @@ async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.3):
         xyxy = box.xyxy[0].tolist()
         confidence = box.conf.item()
         boxes.append((xyxy, confidence))
-
-    # If no dog is detected, use the whole image
+
     if not boxes:
         dogs.append((image, 1.0, [0, 0, image.width, image.height]))
     else:
-        # Sort by confidence
         sorted_boxes = sorted(boxes, key=lambda x: x[1], reverse=True)
-
-        # Use non-maximum suppression (NMS) to merge overlapping boxes
         nms_boxes = non_max_suppression(sorted_boxes, iou_threshold)
 
         for box, confidence in nms_boxes:
             x1, y1, x2, y2 = box
-            # Expand the box to include more context
             w, h = x2 - x1, y2 - y1
-            x1 = max(0, x1 - w * 0.15)
-            y1 = max(0, y1 - h * 0.15)
-            x2 = min(image.width, x2 + w * 0.15)
-            y2 = min(image.height, y2 + h * 0.15)
+            x1 = max(0, x1 - w * 0.1)
+            y1 = max(0, y1 - h * 0.1)
+            x2 = min(image.width, x2 + w * 0.1)
+            y2 = min(image.height, y2 + h * 0.1)
             cropped_image = image.crop((x1, y1, x2, y2))
             dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
-
+
+    # If only one dog is detected but with low confidence, add the whole image as a fallback
+    if len(dogs) == 1 and dogs[0][1] < 0.5:
+        dogs.append((image, 1.0, [0, 0, image.width, image.height]))
+
     return dogs
 
 def non_max_suppression(boxes, iou_threshold):
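Note: the diff only shows the signature of non_max_suppression as context. For reference, a minimal sketch of a greedy NMS over (box, confidence) tuples, consistent with how detect_multiple_dogs calls it, could look like the following; the iou helper and the exact behavior in app.py are assumptions, not part of this commit.

# Assumed sketch, not necessarily the repo's actual implementation:
# keep the highest-confidence box, drop later boxes that overlap it by more
# than iou_threshold, and repeat for the remaining boxes.
def non_max_suppression(boxes, iou_threshold):
    selected = []
    for box, confidence in boxes:  # boxes are pre-sorted by confidence, descending
        if all(iou(box, kept_box) < iou_threshold for kept_box, _ in selected):
            selected.append((box, confidence))
    return selected

def iou(box_a, box_b):
    # Intersection over union of two [x1, y1, x2, y2] boxes.
    x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter + 1e-6)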
@@ -447,69 +446,6 @@ async def process_single_dog(image):
     # iface.launch()
 
 
-# async def predict(image):
-#     if image is None:
-#         return "Please upload an image to start.", None, gr.update(visible=False, choices=[]), None
-
-#     try:
-#         if isinstance(image, np.ndarray):
-#             image = Image.fromarray(image)
-
-#         dogs = await detect_multiple_dogs(image)
-
-#         color_list = ['#FF0000', '#00FF00', '#0000FF', '#FFFF00', '#00FFFF', '#FF00FF', '#800080', '#FFA500']
-#         explanations = []
-#         buttons = []
-#         annotated_image = image.copy()
-#         draw = ImageDraw.Draw(annotated_image)
-#         font = ImageFont.load_default()
-
-#         for i, (cropped_image, _, box) in enumerate(dogs):
-#             top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(cropped_image)
-#             color = color_list[i % len(color_list)]
-#             draw.rectangle(box, outline=color, width=3)
-#             draw.text((box[0], box[1]), f"Dog {i+1}", fill=color, font=font)
-
-#             if top1_prob >= 0.5:
-#                 breed = topk_breeds[0]
-#                 description = get_dog_description(breed)
-#                 formatted_description = format_description(description, breed)
-#                 explanations.append(f"Dog {i+1}: {formatted_description}")
-#             elif top1_prob >= 0.2:
-#                 dog_explanation = f"Dog {i+1}: Top 3 possible breeds:\n"
-#                 dog_explanation += "\n".join([f"{j+1}. **{breed}** ({prob} confidence)" for j, (breed, prob) in enumerate(zip(topk_breeds[:3], topk_probs_percent[:3]))])
-#                 explanations.append(dog_explanation)
-#                 buttons.extend([f"Dog {i+1}: More about {breed}" for breed in topk_breeds[:3]])
-#             else:
-#                 explanations.append(f"Dog {i+1}: The image is unclear or the breed is not in the dataset.")
-
-#         final_explanation = "\n\n".join(explanations)
-#         if buttons:
-#             final_explanation += "\n\nClick on a button to view more information about the breed."
-#             initial_state = {
-#                 "explanation": final_explanation,
-#                 "buttons": buttons,
-#                 "show_back": True,
-#                 "image": annotated_image,
-#                 "is_multi_dog": len(dogs) > 1,
-#                 "dogs_info": explanations
-#             }
-#             return final_explanation, annotated_image, gr.update(visible=True, choices=buttons), initial_state
-#         else:
-#             initial_state = {
-#                 "explanation": final_explanation,
-#                 "buttons": [],
-#                 "show_back": False,
-#                 "image": annotated_image,
-#                 "is_multi_dog": len(dogs) > 1,
-#                 "dogs_info": explanations
-#             }
-#             return final_explanation, annotated_image, gr.update(visible=False, choices=[]), initial_state
-
-#     except Exception as e:
-#         error_msg = f"An error occurred: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
-#         print(error_msg)
-#         return error_msg, None, gr.update(visible=False, choices=[]), None
 
 async def predict(image):
     if image is None:
@@ -528,27 +464,27 @@ async def predict(image):
     draw = ImageDraw.Draw(annotated_image)
     font = ImageFont.load_default()
 
-    for i, (cropped_image, _, box) in enumerate(dogs):
+    for i, (cropped_image, detection_confidence, box) in enumerate(dogs):
         top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(cropped_image)
         color = color_list[i % len(color_list)]
         draw.rectangle(box, outline=color, width=3)
         draw.text((box[0], box[1]), f"Dog {i+1}", fill=color, font=font)
 
-        if top1_prob >= 0.5:
+        # Combine the YOLO detection confidence with the breed-classification confidence
+        combined_confidence = detection_confidence * top1_prob
+
+        if combined_confidence >= 0.4:
             breed = topk_breeds[0]
             description = get_dog_description(breed)
             formatted_description = format_description(description, breed)
             explanations.append(f"Dog {i+1}: {formatted_description}")
-        elif top1_prob >= 0.2:
+        elif combined_confidence >= 0.2:
             dog_explanation = f"Dog {i+1}: Top 3 possible breeds:\n"
             dog_explanation += "\n".join([f"{j+1}. **{breed}** ({prob} confidence)" for j, (breed, prob) in enumerate(zip(topk_breeds[:3], topk_probs_percent[:3]))])
             explanations.append(dog_explanation)
             buttons.extend([f"Dog {i+1}: More about {breed}" for breed in topk_breeds[:3]])
         else:
-            if len(dogs) == 1:
-                explanations.append("The image is unclear or does not contain a recognized dog breed.")
-            else:
-                explanations.append(f"Dog {i+1}: The image is unclear or the breed is not in the dataset.")
+            explanations.append(f"Dog {i+1}: The image is unclear or the breed is not in the dataset.")
 
     final_explanation = "\n\n".join(explanations)
     if buttons:
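For context, a minimal sketch of how the updated detect_multiple_dogs could be exercised. It assumes the function (and the model_yolo it uses) is in scope from app.py; the image path and the asyncio plumbing are placeholders for this sketch only.

import asyncio
from PIL import Image

async def main():
    # "dog_photo.jpg" is a placeholder path used only for this sketch.
    image = Image.open("dog_photo.jpg").convert("RGB")
    dogs = await detect_multiple_dogs(image, conf_threshold=0.2, iou_threshold=0.3)
    for cropped_image, confidence, box in dogs:
        print(round(confidence, 3), box, cropped_image.size)
    # With the new fallback, a single detection below 0.5 confidence is
    # followed by the whole image as an extra candidate crop.

asyncio.run(main())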