import gradio as gr
import torch
from PIL import ImageDraw
from transformers import AutoModelForObjectDetection, AutoImageProcessor

# Load the image processor and the fine-tuned object detection model
processor = AutoImageProcessor.from_pretrained("tanukinet/hanko")
model = AutoModelForObjectDetection.from_pretrained(
    "tanukinet/hanko",
    ignore_mismatched_sizes=True,
)

def object_detection(image):
    # Work on a copy so the caller's image is left untouched
    image = image.copy()

    # Preprocess the image and run inference without tracking gradients
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Rescale predictions back to the original image size and keep
    # detections with a confidence of at least 0.8
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=0.8
    )[0]

    # Log each detection to the console
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        print(
            f"Detected {model.config.id2label[label.item()]} with confidence "
            f"{round(score.item(), 3)} at location {box}"
        )

    # Draw the bounding boxes and class labels onto the image
    draw = ImageDraw.Draw(image)
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        x, y, x2, y2 = tuple(box)
        draw.rectangle((x, y, x2, y2), outline="red", width=1)
        draw.text((x, y), model.config.id2label[label.item()], fill="white")

    return image

demo = gr.Interface(
    object_detection,
    gr.Image(type="pil"),
    "image",
    examples=[
        "sample1.png",
        "sample2.png",
    ],
)


if __name__ == "__main__":
    demo.launch()