import os
import numpy as np
import torch
import torch.nn as nn
import gradio as gr
from torchvision.models import efficientnet_v2_m, EfficientNet_V2_M_Weights
from torchvision.ops import nms, box_iou
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from data_manager import get_dog_description
from urllib.parse import quote
from ultralytics import YOLO
import asyncio
import traceback
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Download the pretrained YOLOv8 model used for dog detection
model_yolo = YOLO('yolov8n.pt')  # COCO-pretrained YOLOv8n weights
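# Note: 'yolov8n.pt' is the smallest COCO-pretrained YOLOv8 checkpoint; Ultralytics downloads
# it automatically on first use if it is not already cached locally.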
# The 120 breed names below follow the Stanford Dogs class set; the order is assumed to match
# the label indices used to train 'best_model_81_dog.pth', so it should not be changed.
dog_breeds = ["Afghan_Hound", "African_Hunting_Dog", "Airedale", "American_Staffordshire_Terrier",
              "Appenzeller", "Australian_Terrier", "Bedlington_Terrier", "Bernese_Mountain_Dog",
              "Blenheim_Spaniel", "Border_Collie", "Border_Terrier", "Boston_Bull", "Bouvier_Des_Flandres",
              "Brabancon_Griffon", "Brittany_Spaniel", "Cardigan", "Chesapeake_Bay_Retriever",
              "Chihuahua", "Dandie_Dinmont", "Doberman", "English_Foxhound", "English_Setter",
              "English_Springer", "EntleBucher", "Eskimo_Dog", "French_Bulldog", "German_Shepherd",
              "German_Short-Haired_Pointer", "Gordon_Setter", "Great_Dane", "Great_Pyrenees",
              "Greater_Swiss_Mountain_Dog", "Ibizan_Hound", "Irish_Setter", "Irish_Terrier",
              "Irish_Water_Spaniel", "Irish_Wolfhound", "Italian_Greyhound", "Japanese_Spaniel",
              "Kerry_Blue_Terrier", "Labrador_Retriever", "Lakeland_Terrier", "Leonberg", "Lhasa",
              "Maltese_Dog", "Mexican_Hairless", "Newfoundland", "Norfolk_Terrier", "Norwegian_Elkhound",
              "Norwich_Terrier", "Old_English_Sheepdog", "Pekinese", "Pembroke", "Pomeranian",
              "Rhodesian_Ridgeback", "Rottweiler", "Saint_Bernard", "Saluki", "Samoyed",
              "Scotch_Terrier", "Scottish_Deerhound", "Sealyham_Terrier", "Shetland_Sheepdog",
              "Shih-Tzu", "Siberian_Husky", "Staffordshire_Bullterrier", "Sussex_Spaniel",
              "Tibetan_Mastiff", "Tibetan_Terrier", "Walker_Hound", "Weimaraner",
              "Welsh_Springer_Spaniel", "West_Highland_White_Terrier", "Yorkshire_Terrier",
              "Affenpinscher", "Basenji", "Basset", "Beagle", "Black-and-Tan_Coonhound", "Bloodhound",
              "Bluetick", "Borzoi", "Boxer", "Briard", "Bull_Mastiff", "Cairn", "Chow", "Clumber",
              "Cocker_Spaniel", "Collie", "Curly-Coated_Retriever", "Dhole", "Dingo",
              "Flat-Coated_Retriever", "Giant_Schnauzer", "Golden_Retriever", "Groenendael", "Keeshond",
              "Kelpie", "Komondor", "Kuvasz", "Malamute", "Malinois", "Miniature_Pinscher",
              "Miniature_Poodle", "Miniature_Schnauzer", "Otterhound", "Papillon", "Pug", "Redbone",
              "Schipperke", "Silky_Terrier", "Soft-Coated_Wheaten_Terrier", "Standard_Poodle",
              "Standard_Schnauzer", "Toy_Poodle", "Toy_Terrier", "Vizsla", "Whippet",
              "Wire-Haired_Fox_Terrier"]
class MultiHeadAttention(nn.Module):
    def __init__(self, in_dim, num_heads=8):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = max(1, in_dim // num_heads)
        self.scaled_dim = self.head_dim * num_heads
        self.fc_in = nn.Linear(in_dim, self.scaled_dim)
        self.query = nn.Linear(self.scaled_dim, self.scaled_dim)
        self.key = nn.Linear(self.scaled_dim, self.scaled_dim)
        self.value = nn.Linear(self.scaled_dim, self.scaled_dim)
        self.fc_out = nn.Linear(self.scaled_dim, in_dim)

    def forward(self, x):
        N = x.shape[0]
        x = self.fc_in(x)
        # Split each pooled feature vector into (N, num_heads, head_dim) per projection
        q = self.query(x).view(N, self.num_heads, self.head_dim)
        k = self.key(x).view(N, self.num_heads, self.head_dim)
        v = self.value(x).view(N, self.num_heads, self.head_dim)
        # Attention scores across the heads: (N, num_heads, num_heads)
        energy = torch.einsum("nqd,nkd->nqk", [q, k])
        attention = F.softmax(energy / (self.head_dim ** 0.5), dim=2)
        out = torch.einsum("nqk,nvd->nqd", [attention, v])
        out = out.reshape(N, self.scaled_dim)
        out = self.fc_out(out)
        return out
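# Shape note (illustrative): with the EfficientNetV2-M backbone below (pooled feature size 1280
# in torchvision) and num_heads=8, a (N, 1280) batch of feature vectors maps back to (N, 1280),
# so the module slots between the backbone and the classifier head without changing dimensions.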
class BaseModel(nn.Module):
    def __init__(self, num_classes, device='cuda' if torch.cuda.is_available() else 'cpu'):
        super().__init__()
        self.device = device
        # EfficientNetV2-M backbone with its classification head replaced by an identity
        self.backbone = efficientnet_v2_m(weights=EfficientNet_V2_M_Weights.IMAGENET1K_V1)
        self.feature_dim = self.backbone.classifier[1].in_features
        self.backbone.classifier = nn.Identity()
        self.num_heads = max(1, min(8, self.feature_dim // 64))
        self.attention = MultiHeadAttention(self.feature_dim, num_heads=self.num_heads)
        self.classifier = nn.Sequential(
            nn.LayerNorm(self.feature_dim),
            nn.Dropout(0.3),
            nn.Linear(self.feature_dim, num_classes)
        )
        self.to(device)

    def forward(self, x):
        x = x.to(self.device)
        features = self.backbone(x)
        attended_features = self.attention(features)
        logits = self.classifier(attended_features)
        return logits, attended_features
num_classes = 120
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = BaseModel(num_classes=num_classes, device=device)

# Load the trained weights; the checkpoint is read on CPU and copied into the model's
# parameters, which BaseModel has already placed on `device`.
checkpoint = torch.load('best_model_81_dog.pth', map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['model_state_dict'])

# Evaluation mode (disables dropout)
model.eval()
# Image preprocessing function
def preprocess_image(image):
    # If the image is a numpy.ndarray, convert it to a PIL.Image
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # Resize, convert to a tensor, and normalize with the standard ImageNet statistics
    # used for the EfficientNetV2 pretraining
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    return transform(image).unsqueeze(0)
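# Optional sanity check -- a minimal sketch, gated behind a hypothetical DOG_APP_SANITY_CHECK
# environment variable (not part of the original app flow). It runs one forward pass on a blank
# image to confirm the checkpoint, preprocessing, and breed list line up before serving requests.
if os.getenv("DOG_APP_SANITY_CHECK"):
    _blank = Image.new("RGB", (224, 224))
    with torch.no_grad():
        _logits, _ = model(preprocess_image(_blank))
    logger.debug("Sanity check: logits shape %s for %d breeds", tuple(_logits.shape), len(dog_breeds))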
def get_akc_breeds_link():
    return "https://www.akc.org/dog-breeds/"

def format_description(description, breed):
    if isinstance(description, dict):
        # Make sure each description item is shown on its own line
        formatted_description = "\n\n".join([f"**{key}**: {value}" for key, value in description.items()])
    else:
        formatted_description = description

    akc_link = get_akc_breeds_link()
    formatted_description += f"\n\n**Want to learn more about dog breeds?** [Visit the AKC dog breeds page]({akc_link}) and search for {breed} to find detailed information."

    disclaimer = ("\n\n*Disclaimer: The external link provided leads to the American Kennel Club (AKC) dog breeds page. "
                  "You may need to search for the specific breed on that page. "
                  "I am not responsible for the content on external sites. "
                  "Please refer to the AKC's terms of use and privacy policy.*")
    formatted_description += disclaimer

    return formatted_description
def _predict_single_dog(image):
    # Classify a single (cropped) dog image and return the top-1 probability plus the top-3 breeds
    image_tensor = preprocess_image(image)
    with torch.no_grad():
        output = model(image_tensor)
        logits = output[0] if isinstance(output, tuple) else output
        probabilities = F.softmax(logits, dim=1)
        topk_probs, topk_indices = torch.topk(probabilities, k=3)
        top1_prob = topk_probs[0][0].item()
        topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]]
        topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]]
        return top1_prob, topk_breeds, topk_probs_percent

async def predict_single_dog(image):
    # Run the blocking classification in a worker thread so the async Gradio handlers stay responsive
    return await asyncio.to_thread(_predict_single_dog, image)
async def detect_multiple_dogs(image, conf_threshold=0.3, iou_threshold=0.45):
    results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
    dogs = []
    for box in results.boxes:
        if box.cls == 16:  # COCO dataset class for dog is 16
            xyxy = box.xyxy[0].tolist()
            confidence = box.conf.item()
            cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3]))
            dogs.append((cropped_image, confidence, xyxy))
    return dogs
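# Each entry in the returned list is a (cropped PIL image, YOLO confidence, [x1, y1, x2, y2])
# tuple for one detected dog; an empty list means no dog was found above conf_threshold.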
async def process_single_dog(image):
    top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(image)
    if top1_prob < 0.2:
        initial_state = {
            "explanation": "The image is unclear or the breed is not in the dataset. Please upload a clearer image of a dog.",
            "buttons": [],
            "show_back": False,
            "image": None,
            "is_multi_dog": False
        }
        return initial_state["explanation"], None, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), initial_state

    breed = topk_breeds[0]
    description = get_dog_description(breed)

    if top1_prob >= 0.5:
        formatted_description = format_description(description, breed)
        initial_state = {
            "explanation": formatted_description,
            "buttons": [],
            "show_back": False,
            "image": image,
            "is_multi_dog": False
        }
        return formatted_description, image, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), initial_state
    else:
        explanation = (
            f"The model couldn't confidently identify the breed. Here are the top 3 possible breeds:\n\n"
            f"1. **{topk_breeds[0]}** ({topk_probs_percent[0]} confidence)\n"
            f"2. **{topk_breeds[1]}** ({topk_probs_percent[1]} confidence)\n"
            f"3. **{topk_breeds[2]}** ({topk_probs_percent[2]} confidence)\n\n"
            "Click on a button to view more information about the breed."
        )
        buttons = [
            gr.update(visible=True, value=f"More about {topk_breeds[0]}"),
            gr.update(visible=True, value=f"More about {topk_breeds[1]}"),
            gr.update(visible=True, value=f"More about {topk_breeds[2]}")
        ]
        initial_state = {
            "explanation": explanation,
            "buttons": buttons,
            "show_back": True,
            "image": image,
            "is_multi_dog": False
        }
        return explanation, image, buttons[0], buttons[1], buttons[2], gr.update(visible=True), initial_state
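# Note: process_single_dog returns seven outputs (explanation, image, three button updates, the
# Back-button update, and state), matching an earlier three-button layout. The Blocks interface
# below routes uploads through predict() and its Radio component instead, so this helper is not
# attached to any event handler.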
async def predict(image):
    if image is None:
        return "Please upload an image to start.", None, gr.update(visible=False), None

    try:
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)

        dogs = await detect_multiple_dogs(image)
        if len(dogs) == 0:
            # Fall back to treating the whole image as a single dog
            dogs = [(image, 1.0, [0, 0, image.width, image.height])]

        color_list = ['#FF0000', '#00FF00', '#0000FF', '#FFFF00', '#00FFFF', '#FF00FF', '#800080', '#FFA500']
        explanations = []
        buttons = []
        annotated_image = image.copy()
        draw = ImageDraw.Draw(annotated_image)
        font = ImageFont.load_default()

        for i, (cropped_image, _, box) in enumerate(dogs):
            top1_prob, topk_breeds, topk_probs_percent = await predict_single_dog(cropped_image)
            color = color_list[i % len(color_list)]
            draw.rectangle(box, outline=color, width=3)
            draw.text((box[0], box[1]), f"Dog {i+1}", fill=color, font=font)

            if top1_prob >= 0.5:
                breed = topk_breeds[0]
                description = get_dog_description(breed)
                formatted_description = format_description(description, breed)
                explanations.append(f"Dog {i+1}: {formatted_description}")
            elif top1_prob >= 0.2:
                dog_explanation = f"Dog {i+1}: Top 3 possible breeds:\n"
                dog_explanation += "\n".join([f"{j+1}. **{breed}** ({prob} confidence)" for j, (breed, prob) in enumerate(zip(topk_breeds[:3], topk_probs_percent[:3]))])
                explanations.append(dog_explanation)
                buttons.extend([f"Dog {i+1}: More about {breed}" for breed in topk_breeds[:3]])
            else:
                explanations.append(f"Dog {i+1}: The image is unclear or the breed is not in the dataset.")

        final_explanation = "\n\n".join(explanations)
        if buttons:
            final_explanation += "\n\nClick on a button to view more information about the breed."
            initial_state = {
                "explanation": final_explanation,
                "buttons": buttons,
                "show_back": True,
                "image": annotated_image,
                "is_multi_dog": len(dogs) > 1,
                "dogs_info": explanations
            }
            return final_explanation, annotated_image, gr.update(visible=True, choices=buttons), initial_state
        else:
            initial_state = {
                "explanation": final_explanation,
                "buttons": [],
                "show_back": False,
                "image": annotated_image,
                "is_multi_dog": len(dogs) > 1,
                "dogs_info": explanations
            }
            return final_explanation, annotated_image, gr.update(visible=False), initial_state

    except Exception as e:
        error_msg = f"An error occurred: {str(e)}"
        logger.error(error_msg)
        return error_msg, None, gr.update(visible=False), None
def show_details(choice, previous_output, initial_state):
    if not choice:
        return previous_output, gr.update(visible=True), initial_state

    try:
        breed = choice.split("More about ")[-1]
        description = get_dog_description(breed)
        formatted_description = format_description(description, breed)

        # Remember the current description and the original button choices
        initial_state["current_description"] = formatted_description
        initial_state["original_buttons"] = initial_state.get("buttons", [])

        return formatted_description, gr.update(visible=True), initial_state
    except Exception as e:
        error_msg = f"An error occurred while showing details: {e}"
        logger.error(error_msg)
        return error_msg, gr.update(visible=True), initial_state

def go_back(state):
    buttons = state.get("buttons", [])
    return (
        state["explanation"],
        state["image"],
        gr.update(visible=True, choices=buttons),
        gr.update(visible=False),  # hide the Back button
        state
    )
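# Gradio UI: the Radio component lists the "Dog N: More about <breed>" options emitted by
# predict(), show_details() renders the chosen breed's profile, and the Back button restores
# the summary stored in initial_state.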
with gr.Blocks() as iface:
    gr.HTML("<h1 style='text-align: center;'>🐶 Dog Breed Classifier 🔍</h1>")
    gr.HTML("<p style='text-align: center;'>Upload a picture of a dog, and the model will predict its breed, provide detailed information, and include an extra information link!</p>")

    with gr.Row():
        input_image = gr.Image(label="Upload a dog image", type="pil")
        output_image = gr.Image(label="Annotated Image")

    output = gr.Markdown(label="Prediction Results")
    breed_buttons = gr.Radio(choices=[], label="More Information", visible=False)
    back_button = gr.Button("Back", visible=False)
    initial_state = gr.State()

    input_image.change(
        predict,
        inputs=input_image,
        outputs=[output, output_image, breed_buttons, initial_state]
    )

    breed_buttons.change(
        show_details,
        inputs=[breed_buttons, output, initial_state],
        outputs=[output, back_button, initial_state]
    )

    back_button.click(
        go_back,
        inputs=[initial_state],
        outputs=[output, output_image, breed_buttons, back_button, initial_state]
    )

    gr.Examples(
        examples=['Border_Collie.jpg', 'Golden_Retriever.jpeg', 'Saint_Bernard.jpeg', 'French_Bulldog.jpeg', 'Samoyed.jpg'],
        inputs=input_image
    )

    gr.HTML('For more details on this project and other work, feel free to visit my GitHub <a href="https://github.com/Eric-Chung-0511/Learning-Record/tree/main/Data%20Science%20Projects/Dog_Breed_Classifier">Dog Breed Classifier</a>')

if __name__ == "__main__":
    iface.launch()