import os import numpy as np import torch import torch.nn as nn import gradio as gr from torchvision.models import efficientnet_v2_m, EfficientNet_V2_M_Weights from torchvision.ops import nms, box_iou import torch.nn.functional as F from torchvision import transforms from PIL import Image, ImageDraw, ImageFont, ImageFilter from data_manager import get_dog_description from urllib.parse import quote from ultralytics import YOLO import asyncio import traceback # 下載YOLOv8預訓練模型 model_yolo = YOLO('yolov8n.pt') # 使用 YOLOv8 預訓練模型 dog_breeds = ["Afghan_Hound", "African_Hunting_Dog", "Airedale", "American_Staffordshire_Terrier", "Appenzeller", "Australian_Terrier", "Bedlington_Terrier", "Bernese_Mountain_Dog", "Blenheim_Spaniel", "Border_Collie", "Border_Terrier", "Boston_Bull", "Bouvier_Des_Flandres", "Brabancon_Griffon", "Brittany_Spaniel", "Cardigan", "Chesapeake_Bay_Retriever", "Chihuahua", "Dandie_Dinmont", "Doberman", "English_Foxhound", "English_Setter", "English_Springer", "EntleBucher", "Eskimo_Dog", "French_Bulldog", "German_Shepherd", "German_Short-Haired_Pointer", "Gordon_Setter", "Great_Dane", "Great_Pyrenees", "Greater_Swiss_Mountain_Dog", "Ibizan_Hound", "Irish_Setter", "Irish_Terrier", "Irish_Water_Spaniel", "Irish_Wolfhound", "Italian_Greyhound", "Japanese_Spaniel", "Kerry_Blue_Terrier", "Labrador_Retriever", "Lakeland_Terrier", "Leonberg", "Lhasa", "Maltese_Dog", "Mexican_Hairless", "Newfoundland", "Norfolk_Terrier", "Norwegian_Elkhound", "Norwich_Terrier", "Old_English_Sheepdog", "Pekinese", "Pembroke", "Pomeranian", "Rhodesian_Ridgeback", "Rottweiler", "Saint_Bernard", "Saluki", "Samoyed", "Scotch_Terrier", "Scottish_Deerhound", "Sealyham_Terrier", "Shetland_Sheepdog", "Shih-Tzu", "Siberian_Husky", "Staffordshire_Bullterrier", "Sussex_Spaniel", "Tibetan_Mastiff", "Tibetan_Terrier", "Walker_Hound", "Weimaraner", "Welsh_Springer_Spaniel", "West_Highland_White_Terrier", "Yorkshire_Terrier", "Affenpinscher", "Basenji", "Basset", "Beagle", "Black-and-Tan_Coonhound", "Bloodhound", "Bluetick", "Borzoi", "Boxer", "Briard", "Bull_Mastiff", "Cairn", "Chow", "Clumber", "Cocker_Spaniel", "Collie", "Curly-Coated_Retriever", "Dhole", "Dingo", "Flat-Coated_Retriever", "Giant_Schnauzer", "Golden_Retriever", "Groenendael", "Keeshond", "Kelpie", "Komondor", "Kuvasz", "Malamute", "Malinois", "Miniature_Pinscher", "Miniature_Poodle", "Miniature_Schnauzer", "Otterhound", "Papillon", "Pug", "Redbone", "Schipperke", "Silky_Terrier", "Soft-Coated_Wheaten_Terrier", "Standard_Poodle", "Standard_Schnauzer", "Toy_Poodle", "Toy_Terrier", "Vizsla", "Whippet", "Wire-Haired_Fox_Terrier"] class MultiHeadAttention(nn.Module): def __init__(self, in_dim, num_heads=8): super().__init__() self.num_heads = num_heads self.head_dim = max(1, in_dim // num_heads) self.scaled_dim = self.head_dim * num_heads self.fc_in = nn.Linear(in_dim, self.scaled_dim) self.query = nn.Linear(self.scaled_dim, self.scaled_dim) self.key = nn.Linear(self.scaled_dim, self.scaled_dim) self.value = nn.Linear(self.scaled_dim, self.scaled_dim) self.fc_out = nn.Linear(self.scaled_dim, in_dim) def forward(self, x): N = x.shape[0] x = self.fc_in(x) q = self.query(x).view(N, self.num_heads, self.head_dim) k = self.key(x).view(N, self.num_heads, self.head_dim) v = self.value(x).view(N, self.num_heads, self.head_dim) energy = torch.einsum("nqd,nkd->nqk", [q, k]) attention = F.softmax(energy / (self.head_dim ** 0.5), dim=2) out = torch.einsum("nqk,nvd->nqd", [attention, v]) out = out.reshape(N, self.scaled_dim) out = self.fc_out(out) return out class BaseModel(nn.Module): def __init__(self, num_classes, device='cuda' if torch.cuda.is_available() else 'cpu'): super().__init__() self.device = device self.backbone = efficientnet_v2_m(weights=EfficientNet_V2_M_Weights.IMAGENET1K_V1) self.feature_dim = self.backbone.classifier[1].in_features self.backbone.classifier = nn.Identity() self.num_heads = max(1, min(8, self.feature_dim // 64)) self.attention = MultiHeadAttention(self.feature_dim, num_heads=self.num_heads) self.classifier = nn.Sequential( nn.LayerNorm(self.feature_dim), nn.Dropout(0.3), nn.Linear(self.feature_dim, num_classes) ) self.to(device) def forward(self, x): x = x.to(self.device) features = self.backbone(x) attended_features = self.attention(features) logits = self.classifier(attended_features) return logits, attended_features num_classes = 120 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = BaseModel(num_classes=num_classes, device=device) checkpoint = torch.load('best_model_81_dog.pth', map_location=torch.device('cpu')) model.load_state_dict(checkpoint['model_state_dict']) # evaluation mode model.eval() # Image preprocessing function def preprocess_image(image): # If the image is numpy.ndarray turn into PIL.Image if isinstance(image, np.ndarray): image = Image.fromarray(image) # Use torchvision.transforms to process images transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) return transform(image).unsqueeze(0) def get_akc_breeds_link(): return "https://www.akc.org/dog-breeds/" def format_description(description, breed): if isinstance(description, dict): # 確保每一個描述項目換行顯示 formatted_description = "\n\n".join([f"**{key}**: {value}" for key, value in description.items()]) else: formatted_description = description akc_link = get_akc_breeds_link() formatted_description += f"\n\n**Want to learn more about dog breeds?** [Visit the AKC dog breeds page]({akc_link}) and search for {breed} to find detailed information." disclaimer = ("\n\n*Disclaimer: The external link provided leads to the American Kennel Club (AKC) dog breeds page. " "You may need to search for the specific breed on that page. " "I am not responsible for the content on external sites. " "Please refer to the AKC's terms of use and privacy policy.*") formatted_description += disclaimer return formatted_description async def predict_single_dog(image): return await asyncio.to_thread(_predict_single_dog, image) def _predict_single_dog(image): image_tensor = preprocess_image(image) with torch.no_grad(): output = model(image_tensor) logits = output[0] if isinstance(output, tuple) else output probabilities = F.softmax(logits, dim=1) topk_probs, topk_indices = torch.topk(probabilities, k=3) top1_prob = topk_probs[0][0].item() topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]] topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]] return top1_prob, topk_breeds, topk_probs_percent async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4): results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0] dogs = [] for box in results.boxes: if box.cls == 16: # COCO 資料集中狗的類別是 16 xyxy = box.xyxy[0].tolist() confidence = box.conf.item() cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3])) dogs.append((cropped_image, confidence, xyxy)) return dogs async def process_single_dog(image): top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(image) if top1_prob < 0.2: initial_state = { "explanation": "The image is unclear or the breed is not in the dataset. Please upload a clearer image of a dog.", "buttons": [], "show_back": False, "image": None, "is_multi_dog": False } return initial_state["explanation"], None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), initial_state breed = topk_breeds[0] description = get_dog_description(breed) if top1_prob >= 0.5: formatted_description = format_description(description, breed) initial_state = { "explanation": formatted_description, "buttons": [], "show_back": False, "image": image, "is_multi_dog": False } return formatted_description, image, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), initial_state else: explanation = ( f"The model couldn't confidently identify the breed. Here are the top 3 possible breeds:\n\n" f"1. **{topk_breeds[0]}** ({topk_probs_percent[0]} confidence)\n" f"2. **{topk_breeds[1]}** ({topk_probs_percent[1]} confidence)\n" f"3. **{topk_breeds[2]}** ({topk_probs_percent[2]} confidence)\n\n" "Click on a button to view more information about the breed." ) buttons = [ gr.update(visible=True, value=f"More about {topk_breeds[0]}"), gr.update(visible=True, value=f"More about {topk_breeds[1]}"), gr.update(visible=True, value=f"More about {topk_breeds[2]}") ] initial_state = { "explanation": explanation, "buttons": buttons, "show_back": True, "image": image, "is_multi_dog": False } return explanation, image, buttons[0], buttons[1], buttons[2], gr.update(visible=True), initial_state async def predict(image): if image is None: return "Please upload an image to start.", None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), None try: if isinstance(image, np.ndarray): image = Image.fromarray(image) dogs = await detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4) if len(dogs) <= 1: return await process_single_dog(image) color_list = ['#FF0000', '#00FF00', '#0000FF', '#FFFF00', '#00FFFF', '#FF00FF', '#800080', '#FFA500'] explanations = [] buttons = [] annotated_image = image.copy() draw = ImageDraw.Draw(annotated_image) font = ImageFont.load_default() for i, (cropped_image, _, box) in enumerate(dogs): top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(cropped_image) color = color_list[i % len(color_list)] draw.rectangle(box, outline=color, width=3) draw.text((box[0], box[1]), f"Dog {i+1}", fill=color, font=font) breed = topk_breeds[0] if top1_prob >= 0.5: description = get_dog_description(breed) formatted_description = format_description(description, breed) explanations.append(f"Dog {i+1}: {formatted_description}") elif top1_prob >= 0.2: dog_explanation = f"Dog {i+1}: Top 3 possible breeds:\n" dog_explanation += "\n".join([f"{j+1}. **{breed}** ({prob} confidence)" for j, (breed, prob) in enumerate(zip(topk_breeds[:3], topk_probs_percent[:3]))]) explanations.append(dog_explanation) buttons.extend([gr.update(visible=True, value=f"Dog {i+1}: More about {breed}") for breed in topk_breeds[:3]]) else: explanations.append(f"Dog {i+1}: The image is unclear or the breed is not in the dataset.") final_explanation = "\n\n".join(explanations) if buttons: final_explanation += "\n\nClick on a button to view more information about the breed." initial_state = { "explanation": final_explanation, "buttons": buttons, "show_back": True, "image": annotated_image, "is_multi_dog": True, "dogs_info": explanations } return (final_explanation, annotated_image, buttons[0] if len(buttons) > 0 else gr.update(visible=False), buttons[1] if len(buttons) > 1 else gr.update(visible=False), buttons[2] if len(buttons) > 2 else gr.update(visible=False), gr.update(visible=True), initial_state) else: initial_state = { "explanation": final_explanation, "buttons": [], "show_back": False, "image": annotated_image, "is_multi_dog": True, "dogs_info": explanations } return final_explanation, annotated_image, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), initial_state except Exception as e: error_msg = f"An error occurred: {str(e)}" print(error_msg) # 添加日誌輸出 return error_msg, None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), None # async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4, merge_threshold=0.5): # results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0] # dogs = [] # image_area = image.width * image.height # min_area_ratio = 0.005 # 最小檢測面積佔整個圖像的比例 # for box in results.boxes: # if box.cls == 16: # COCO 數據集中狗的類別是 16 # xyxy = box.xyxy[0].tolist() # area = (xyxy[2] - xyxy[0]) * (xyxy[3] - xyxy[1]) # if area / image_area >= min_area_ratio: # confidence = box.conf.item() # dogs.append((xyxy, confidence)) # if dogs: # boxes = torch.tensor([dog[0] for dog in dogs]) # scores = torch.tensor([dog[1] for dog in dogs]) # # 應用 NMS # keep = nms(boxes, scores, iou_threshold) # merged_dogs = [] # for i in keep: # xyxy = boxes[i].tolist() # confidence = scores[i].item() # merged_dogs.append((xyxy, confidence)) # # 後處理:分離過於接近的檢測框 # final_dogs = [] # while merged_dogs: # base_dog = merged_dogs.pop(0) # to_merge = [base_dog] # i = 0 # while i < len(merged_dogs): # iou = box_iou(torch.tensor([base_dog[0]]), torch.tensor([merged_dogs[i][0]]))[0][0].item() # if iou > merge_threshold: # to_merge.append(merged_dogs.pop(i)) # else: # i += 1 # if len(to_merge) == 1: # final_dogs.append(base_dog) # else: # # 如果檢測到多個重疊框,嘗試分離它們 # centers = torch.tensor([[((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)] for box, _ in to_merge]) # distances = torch.cdist(centers, centers) # if torch.any(distances > 0): # 確保不是完全重疊 # max_distance = distances.max() # if max_distance > (base_dog[0][2] - base_dog[0][0]) * 0.5: # 如果最大距離大於框寬度的一半 # final_dogs.extend(to_merge) # else: # # 合併為一個框 # merged_box = torch.tensor([box for box, _ in to_merge]).mean(dim=0) # merged_confidence = max(conf for _, conf in to_merge) # final_dogs.append((merged_box.tolist(), merged_confidence)) # else: # # 完全重疊的情況,保留置信度最高的 # best_dog = max(to_merge, key=lambda x: x[1]) # final_dogs.append(best_dog) # # 擴展邊界框並創建剪裁的圖像 # expanded_dogs = [] # for xyxy, confidence in final_dogs: # expanded_xyxy = [ # max(0, xyxy[0] - 20), # max(0, xyxy[1] - 20), # min(image.width, xyxy[2] + 20), # min(image.height, xyxy[3] + 20) # ] # cropped_image = image.crop(expanded_xyxy) # expanded_dogs.append((cropped_image, confidence, expanded_xyxy)) # return expanded_dogs # # 如果沒有檢測到狗狗,返回整張圖片 # return [(image, 1.0, [0, 0, image.width, image.height])] # async def predict(image): # if image is None: # return "Please upload an image to start.", None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), None # try: # if isinstance(image, np.ndarray): # image = Image.fromarray(image) # dogs = await detect_multiple_dogs(image) # # 如果沒有檢測到狗狗或只檢測到一隻,使用整張圖像進行分類 # if len(dogs) <= 1: # top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(image) # if top1_prob >= 0.5: # return await process_single_dog(image) # else: # dogs = [(image, 1.0, [0, 0, image.width, image.height])] # # 多狗情境處理保持不變 # color_list = ['#FF0000', '#00FF00', '#0000FF', '#FFFF00', '#00FFFF', '#FF00FF', '#800080', '#FFA500'] # explanations = [] # buttons = [] # annotated_image = image.copy() # draw = ImageDraw.Draw(annotated_image) # font = ImageFont.load_default() # for i, (cropped_image, _, box) in enumerate(dogs): # top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(cropped_image) # color = color_list[i % len(color_list)] # draw.rectangle(box, outline=color, width=3) # draw.text((box[0], box[1]), f"Dog {i+1}", fill=color, font=font) # breed = topk_breeds[0] # if top1_prob >= 0.5: # description = get_dog_description(breed) # formatted_description = format_description(description, breed) # explanations.append(f"Dog {i+1}: {formatted_description}") # else: # dog_explanation = f"Dog {i+1}: Top 3 possible breeds:\n" # dog_explanation += "\n".join([f"{j+1}. **{breed}** ({prob} confidence)" for j, (breed, prob) in enumerate(zip(topk_breeds[:3], topk_probs_percent[:3]))]) # explanations.append(dog_explanation) # buttons.extend([gr.update(visible=True, value=f"Dog {i+1}: More about {breed}") for breed in topk_breeds[:3]]) # final_explanation = "\n\n".join(explanations) # if buttons: # final_explanation += "\n\nClick on a button to view more information about the breed." # initial_state = { # "explanation": final_explanation, # "buttons": buttons, # "show_back": True # } # return (final_explanation, annotated_image, # buttons[0] if len(buttons) > 0 else gr.update(visible=False), # buttons[1] if len(buttons) > 1 else gr.update(visible=False), # buttons[2] if len(buttons) > 2 else gr.update(visible=False), # gr.update(visible=True), # initial_state) # else: # initial_state = { # "explanation": final_explanation, # "buttons": [], # "show_back": False # } # return final_explanation, annotated_image, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), initial_state # except Exception as e: # error_msg = f"An error occurred: {str(e)}" # print(error_msg) # 添加日誌輸出 # return error_msg, None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), None def show_details(choice, previous_output, initial_state): if not choice: return previous_output, gr.update(visible=True), initial_state try: breed = choice.split("More about ")[-1] description = get_dog_description(breed) formatted_description = format_description(description, breed) initial_state["current_description"] = formatted_description # 保存當前顯示的描述 initial_state["show_back"] = True # 確保 back 按鈕可見 return formatted_description, gr.update(visible=True), initial_state except Exception as e: error_msg = f"An error occurred while showing details: {e}" print(error_msg) return error_msg, gr.update(visible=True), initial_state def go_back(state): if state.get("is_multi_dog", False): # 恢復到多狗情境的初始狀態 buttons = state.get("buttons", []) return ( state["explanation"], state["image"], buttons[0] if len(buttons) > 0 else gr.update(visible=False), buttons[1] if len(buttons) > 1 else gr.update(visible=False), buttons[2] if len(buttons) > 2 else gr.update(visible=False), gr.update(visible=False), # 隱藏 back 按鈕 state ) else: # 單狗情境,不需要特殊處理 return ( state["explanation"], state["image"], gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), state ) with gr.Blocks() as iface: gr.HTML("
Upload a picture of a dog, and the model will predict its breed, provide detailed information, and include an extra information link!
") with gr.Row(): input_image = gr.Image(label="Upload a dog image", type="pil") output_image = gr.Image(label="Annotated Image") output = gr.Markdown(label="Prediction Results") with gr.Row(): btn1 = gr.Button("View More 1", visible=False) btn2 = gr.Button("View More 2", visible=False) btn3 = gr.Button("View More 3", visible=False) back_button = gr.Button("Back", visible=False) initial_state = gr.State() input_image.change( predict, inputs=input_image, outputs=[output, output_image, btn1, btn2, btn3, back_button, initial_state] ) for btn in [btn1, btn2, btn3]: btn.click( show_details, inputs=[btn, output, initial_state], outputs=[output, back_button, initial_state] ) back_button.click( go_back, inputs=[initial_state], outputs=[output, output_image, btn1, btn2, btn3, back_button, initial_state] ) gr.Examples( examples=['Border_Collie.jpg', 'Golden_Retriever.jpeg', 'Saint_Bernard.jpeg', 'French_Bulldog.jpeg', 'Samoyed.jpg'], inputs=input_image ) gr.HTML('For more details on this project and other work, feel free to visit my GitHub Dog Breed Classifier') if __name__ == "__main__": iface.launch()