import os import numpy as np import torch import torch.nn as nn import gradio as gr from torchvision.models import efficientnet_v2_m, EfficientNet_V2_M_Weights import torch.nn.functional as F from torchvision import transforms from PIL import Image, ImageDraw, ImageFont from data_manager import get_dog_description from urllib.parse import quote from ultralytics import YOLO import asyncio # 下載YOLOv8預訓練模型 model_yolo = YOLO('yolov8n.pt') # 使用 YOLOv8 預訓練模型 dog_breeds = ["Afghan_Hound", "African_Hunting_Dog", "Airedale", "American_Staffordshire_Terrier", "Appenzeller", "Australian_Terrier", "Bedlington_Terrier", "Bernese_Mountain_Dog", "Blenheim_Spaniel", "Border_Collie", "Border_Terrier", "Boston_Bull", "Bouvier_Des_Flandres", "Brabancon_Griffon", "Brittany_Spaniel", "Cardigan", "Chesapeake_Bay_Retriever", "Chihuahua", "Dandie_Dinmont", "Doberman", "English_Foxhound", "English_Setter", "English_Springer", "EntleBucher", "Eskimo_Dog", "French_Bulldog", "German_Shepherd", "German_Short-Haired_Pointer", "Gordon_Setter", "Great_Dane", "Great_Pyrenees", "Greater_Swiss_Mountain_Dog", "Ibizan_Hound", "Irish_Setter", "Irish_Terrier", "Irish_Water_Spaniel", "Irish_Wolfhound", "Italian_Greyhound", "Japanese_Spaniel", "Kerry_Blue_Terrier", "Labrador_Retriever", "Lakeland_Terrier", "Leonberg", "Lhasa", "Maltese_Dog", "Mexican_Hairless", "Newfoundland", "Norfolk_Terrier", "Norwegian_Elkhound", "Norwich_Terrier", "Old_English_Sheepdog", "Pekinese", "Pembroke", "Pomeranian", "Rhodesian_Ridgeback", "Rottweiler", "Saint_Bernard", "Saluki", "Samoyed", "Scotch_Terrier", "Scottish_Deerhound", "Sealyham_Terrier", "Shetland_Sheepdog", "Shih-Tzu", "Siberian_Husky", "Staffordshire_Bullterrier", "Sussex_Spaniel", "Tibetan_Mastiff", "Tibetan_Terrier", "Walker_Hound", "Weimaraner", "Welsh_Springer_Spaniel", "West_Highland_White_Terrier", "Yorkshire_Terrier", "Affenpinscher", "Basenji", "Basset", "Beagle", "Black-and-Tan_Coonhound", "Bloodhound", "Bluetick", "Borzoi", "Boxer", "Briard", "Bull_Mastiff", "Cairn", "Chow", "Clumber", "Cocker_Spaniel", "Collie", "Curly-Coated_Retriever", "Dhole", "Dingo", "Flat-Coated_Retriever", "Giant_Schnauzer", "Golden_Retriever", "Groenendael", "Keeshond", "Kelpie", "Komondor", "Kuvasz", "Malamute", "Malinois", "Miniature_Pinscher", "Miniature_Poodle", "Miniature_Schnauzer", "Otterhound", "Papillon", "Pug", "Redbone", "Schipperke", "Silky_Terrier", "Soft-Coated_Wheaten_Terrier", "Standard_Poodle", "Standard_Schnauzer", "Toy_Poodle", "Toy_Terrier", "Vizsla", "Whippet", "Wire-Haired_Fox_Terrier"] class MultiHeadAttention(nn.Module): def __init__(self, in_dim, num_heads=8): super().__init__() self.num_heads = num_heads self.head_dim = max(1, in_dim // num_heads) self.scaled_dim = self.head_dim * num_heads self.fc_in = nn.Linear(in_dim, self.scaled_dim) self.query = nn.Linear(self.scaled_dim, self.scaled_dim) self.key = nn.Linear(self.scaled_dim, self.scaled_dim) self.value = nn.Linear(self.scaled_dim, self.scaled_dim) self.fc_out = nn.Linear(self.scaled_dim, in_dim) def forward(self, x): N = x.shape[0] x = self.fc_in(x) q = self.query(x).view(N, self.num_heads, self.head_dim) k = self.key(x).view(N, self.num_heads, self.head_dim) v = self.value(x).view(N, self.num_heads, self.head_dim) energy = torch.einsum("nqd,nkd->nqk", [q, k]) attention = F.softmax(energy / (self.head_dim ** 0.5), dim=2) out = torch.einsum("nqk,nvd->nqd", [attention, v]) out = out.reshape(N, self.scaled_dim) out = self.fc_out(out) return out class BaseModel(nn.Module): def __init__(self, num_classes, device='cuda' if torch.cuda.is_available() else 'cpu'): super().__init__() self.device = device self.backbone = efficientnet_v2_m(weights=EfficientNet_V2_M_Weights.IMAGENET1K_V1) self.feature_dim = self.backbone.classifier[1].in_features self.backbone.classifier = nn.Identity() self.num_heads = max(1, min(8, self.feature_dim // 64)) self.attention = MultiHeadAttention(self.feature_dim, num_heads=self.num_heads) self.classifier = nn.Sequential( nn.LayerNorm(self.feature_dim), nn.Dropout(0.3), nn.Linear(self.feature_dim, num_classes) ) self.to(device) def forward(self, x): x = x.to(self.device) features = self.backbone(x) attended_features = self.attention(features) logits = self.classifier(attended_features) return logits, attended_features num_classes = 120 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = BaseModel(num_classes=num_classes, device=device) checkpoint = torch.load('best_model_81_dog.pth', map_location=torch.device('cpu')) model.load_state_dict(checkpoint['model_state_dict']) # evaluation mode model.eval() # Image preprocessing function def preprocess_image(image): # If the image is numpy.ndarray turn into PIL.Image if isinstance(image, np.ndarray): image = Image.fromarray(image) # Use torchvision.transforms to process images transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) return transform(image).unsqueeze(0) def get_akc_breeds_link(): return "https://www.akc.org/dog-breeds/" # def predict(image): # if image is None: # return "Please upload an image to get started.", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) # try: # image_tensor = preprocess_image(image) # with torch.no_grad(): # output = model(image_tensor) # logits = output[0] if isinstance(output, tuple) else output # probabilities = F.softmax(logits, dim=1) # topk_probs, topk_indices = torch.topk(probabilities, k=3) # top1_prob = topk_probs[0][0].item() # topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]] # topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]] # if top1_prob >= 0.5: # breed = topk_breeds[0] # description = get_dog_description(breed) # return format_description(description, breed), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) # elif top1_prob < 0.2: # return ("The image is too unclear or the dog breed is not in the dataset. Please upload a clearer image of the dog.", # gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)) # else: # explanation = ( # f"The model couldn't confidently identify the breed. Here are the top 3 possible breeds:\n\n" # f"1. **{topk_breeds[0]}** ({topk_probs_percent[0]} confidence)\n" # f"2. **{topk_breeds[1]}** ({topk_probs_percent[1]} confidence)\n" # f"3. **{topk_breeds[2]}** ({topk_probs_percent[2]} confidence)\n\n" # "Click on a button to view more information about the breed." # ) # return explanation, gr.update(visible=True, value=f"More about {topk_breeds[0]}"), gr.update(visible=True, value=f"More about {topk_breeds[1]}"), gr.update(visible=True, value=f"More about {topk_breeds[2]}") # except Exception as e: # return f"An error occurred: {e}", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) # def format_description(description, breed): # if isinstance(description, dict): # formatted_description = "\n\n".join([f"**{key}**: {value}" for key, value in description.items()]) # else: # formatted_description = description # akc_link = get_akc_breeds_link() # formatted_description += f"\n\n**Want to learn more about dog breeds?** [Visit the AKC dog breeds page]({akc_link}) and search for {breed} to find detailed information." # disclaimer = ("\n\n*Disclaimer: The external link provided leads to the American Kennel Club (AKC) dog breeds page. " # "You may need to search for the specific breed on that page. " # "I am not responsible for the content on external sites. " # "Please refer to the AKC's terms of use and privacy policy.*") # formatted_description += disclaimer # return formatted_description # def show_details(breed): # breed_name = breed.split("More about ")[-1] # description = get_dog_description(breed_name) # return format_description(description, breed_name) # with gr.Blocks(css=""" # .container { # max-width: 900px; # margin: 0 auto; # padding: 20px; # background-color: rgba(255, 255, 255, 0.9); # border-radius: 15px; # box-shadow: 0 0 20px rgba(0, 0, 0, 0.1); # } # .gr-form { display: flex; flex-direction: column; align-items: center; } # .gr-box { width: 100%; max-width: 500px; } # .output-markdown, .output-image { # margin-top: 20px; # padding: 15px; # background-color: #f5f5f5; # border-radius: 10px; # } # .examples { # display: flex; # justify-content: center; # flex-wrap: wrap; # gap: 10px; # margin-top: 20px; # } # .examples img { # width: 100px; # height: 100px; # object-fit: cover; # } # """) as iface: # gr.HTML("

🐶 Dog Breed Classifier 🔍

") # gr.HTML("

Upload a picture of a dog, and the model will predict its breed, provide detailed information, and include an extra information link!

") # with gr.Row(): # input_image = gr.Image(label="Upload a dog image", type="numpy") # output = gr.Markdown(label="Prediction Results") # with gr.Row(): # btn1 = gr.Button("View More 1", visible=False) # btn2 = gr.Button("View More 2", visible=False) # btn3 = gr.Button("View More 3", visible=False) # input_image.change(predict, inputs=input_image, outputs=[output, btn1, btn2, btn3]) # btn1.click(show_details, inputs=btn1, outputs=output) # btn2.click(show_details, inputs=btn2, outputs=output) # btn3.click(show_details, inputs=btn3, outputs=output) # gr.Examples( # examples=['Border_Collie.jpg', 'Golden_Retriever.jpeg', 'Saint_Bernard.jpeg', 'French_Bulldog.jpeg', 'Samoyed.jpg'], # inputs=input_image # ) # gr.HTML('For more details on this project and other work, feel free to visit my GitHub Dog Breed Classifier') # # launch the program # if __name__ == "__main__": # iface.launch() def format_description(description, breed, is_multi_dog=False, dog_number=None): if isinstance(description, dict): formatted_description = "\n\n".join([f"**{key}**: {value}" for key, value in description.items() if key != "Breed"]) else: formatted_description = description header = f"**Dog {dog_number}: {breed}**\n\n" if is_multi_dog else f"**Breed: {breed}**\n\n" formatted_description = f""" {header} {formatted_description} **Want to learn more about dog breeds?** [Visit the AKC dog breeds page]({get_akc_breeds_link()}) and search for {breed} to find detailed information. *Disclaimer: The external link provided leads to the American Kennel Club (AKC) dog breeds page. You may need to search for the specific breed on that page. I am not responsible for the content on external sites. Please refer to the AKC's terms of use and privacy policy.* """ return formatted_description async def predict_single_dog(image): image_tensor = preprocess_image(image) with torch.no_grad(): output = model(image_tensor.to(device)) logits = output[0] if isinstance(output, tuple) else output probabilities = F.softmax(logits, dim=1) topk_probs, topk_indices = torch.topk(probabilities, k=3) top1_prob = topk_probs[0][0].item() topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]] topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]] return top1_prob, topk_breeds, topk_probs_percent async def detect_multiple_dogs(image): try: img = image.copy() img.thumbnail((640, 640)) results = model_yolo(img, conf=0.1) # 降低閾值以檢測更多狗 dogs = [] for result in results: for box in result.boxes: if box.cls == 16: # COCO dataset class for dog is 16 xyxy = box.xyxy[0].tolist() confidence = box.conf.item() cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3])) dogs.append((cropped_image, confidence, xyxy)) # 如果只檢測到一隻狗,嘗試檢測其他可能的狗 if len(dogs) == 1: # 使用整張圖像進行品種預測 full_image_prob, full_image_breeds, _ = await predict_single_dog(image) if full_image_prob >= 0.3 and full_image_breeds[0] != dogs[0][0]: # 如果整張圖像的預測結果不同且置信度較高,添加為第二隻狗 dogs.append((image, full_image_prob, [0, 0, image.width, image.height])) return dogs except Exception as e: print(f"Error in detect_multiple_dogs: {e}") return [] async def predict(image): if image is None: return "Please upload an image to start.", None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) try: if isinstance(image, np.ndarray): image = Image.fromarray(image) dogs = await detect_multiple_dogs(image) if len(dogs) == 0: # 如果沒有檢測到狗,嘗試對整張圖像進行預測 top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(image) if top1_prob >= 0.3: return await process_single_dog_result(top1_prob, topk_breeds, topk_probs_percent, image, [0, 0, image.width, image.height]) else: return "No dogs detected in the image. Please upload a clear image of a dog.", None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) elif len(dogs) == 1: cropped_image, _, box = dogs[0] top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(cropped_image) return await process_single_dog_result(top1_prob, topk_breeds, topk_probs_percent, image, box) else: return await process_multiple_dogs_result(dogs, image) except Exception as e: return f"An error occurred: {str(e)}", None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) async def process_multiple_dogs_result(dogs, image): annotated_image = image.copy() draw = ImageDraw.Draw(annotated_image) font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 20) explanations = [] buttons = [] for i, (cropped_image, _, box) in enumerate(dogs, 1): top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(cropped_image) optimized_box = optimize_box(box, image.size) draw.rectangle(optimized_box, outline="red", width=3) draw.text((optimized_box[0], optimized_box[1]), f"Dog {i}", fill="yellow", font=font, stroke_width=2, stroke_fill="black") if top1_prob >= 0.2: breed = topk_breeds[0] description = get_dog_description(breed) explanation = f"Dog {i}: **{breed}**\n\n" explanation += "\n".join([f"**{key}**: {value}" for key, value in description.items() if key != "Breed"]) explanation += f"\n\n**Want to learn more about dog breeds?** [Visit the AKC dog breeds page]({get_akc_breeds_link()}) and search for {breed} to find detailed information." explanations.append(explanation) if top1_prob < 0.5: buttons.append(f"More about Dog {i}: {breed}") buttons.append(f"More about Dog {i}: {topk_breeds[1]}") buttons.append(f"More about Dog {i}: {topk_breeds[2]}") else: explanations.append(f"Dog {i}: The image is unclear or the breed is not in the dataset. Please upload a clearer image of this dog.") final_explanation = "\n\n---\n\n".join(explanations) if not explanations: return "No dogs were confidently detected in the image. Please upload a clearer image of a dog.", annotated_image, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) return final_explanation, annotated_image, gr.update(visible=bool(buttons), choices=buttons), gr.update(visible=False), gr.update(visible=False) async def process_single_dog_result(top1_prob, topk_breeds, topk_probs_percent, image, box): annotated_image = image.copy() draw = ImageDraw.Draw(annotated_image) optimized_box = optimize_box(box, image.size) draw.rectangle(optimized_box, outline="red", width=3) draw.text((optimized_box[0], optimized_box[1]), "Dog", fill="yellow", font=ImageFont.load_default()) if top1_prob >= 0.5: breed = topk_breeds[0] description = get_dog_description(breed) formatted_description = format_description(description, breed) return formatted_description, annotated_image, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) elif top1_prob >= 0.2: explanation = ( f"The model couldn't confidently identify the breed. Here are the top 3 possible breeds:\n\n" f"1. **{topk_breeds[0]}** ({topk_probs_percent[0]} confidence)\n" f"2. **{topk_breeds[1]}** ({topk_probs_percent[1]} confidence)\n" f"3. **{topk_breeds[2]}** ({topk_probs_percent[2]} confidence)\n\n" "Click on a button to view more information about the breed." ) breed_buttons = [f"More about {breed}" for breed in topk_breeds[:3]] return explanation, annotated_image, gr.update(visible=True, choices=breed_buttons), gr.update(visible=False), gr.update(visible=False) else: return "The image is unclear or the breed is not in the dataset. Please upload a clearer image of a dog.", annotated_image, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) def optimize_box(box, image_size): x1, y1, x2, y2 = box w, h = image_size # 擴大邊界框以確保完整包含狗 x1 = max(0, x1 - 10) y1 = max(0, y1 - 10) x2 = min(w, x2 + 10) y2 = min(h, y2 + 10) return [x1, y1, x2, y2] async def show_details(choice): if not choice: return "Please select a breed to view details." try: if "Dog" in choice: _, breed = choice.split(": ", 1) else: _, breed = choice.split("More about ", 1) description = get_dog_description(breed) return format_description(description, breed) except Exception as e: return f"An error occurred while showing details: {e}" with gr.Blocks(css=""" .container { max-width: 900px; margin: auto; padding: 20px; } .gr-box { border-radius: 15px; } .output-markdown { margin-top: 20px; padding: 15px; background-color: #f5f5f5; border-radius: 10px; } .examples { display: flex; justify-content: center; flex-wrap: wrap; gap: 10px; margin-top: 20px; } .examples img { width: 100px; height: 100px; object-fit: cover; } """) as iface: gr.HTML("

🐶 Dog Breed Classifier 🔍

") gr.HTML("

Upload a picture of a dog, and the model will predict its breed, provide detailed information, and include an extra information link!

") with gr.Row(): input_image = gr.Image(label="Upload a dog image", type="pil") output_image = gr.Image(label="Annotated Image") output = gr.Markdown(label="Prediction Results") breed_buttons = gr.Radio([], label="Select breed for more details", visible=False) breed_details = gr.Markdown(label="Breed Details") async def safe_predict(image): try: return await predict(image) except Exception as e: return str(e), None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) input_image.change( safe_predict, inputs=input_image, outputs=[output, output_image, breed_buttons, breed_details] ) breed_buttons.select( show_details, inputs=breed_buttons, outputs=breed_details ) gr.Examples( examples=['Border_Collie.jpg', 'Golden_Retriever.jpeg', 'Saint_Bernard.jpeg', 'French_Bulldog.jpeg', 'Samoyed.jpg'], inputs=input_image ) gr.HTML('For more details on this project and other work, feel free to visit my GitHub Dog Breed Classifier') if __name__ == "__main__": iface.launch()