import gradio as gr
from ultralytics import YOLOv10
import supervision as sv
import spaces
from huggingface_hub import hf_hub_download
import cv2
import tempfile
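
# Download the requested YOLOv10 checkpoint from the Hugging Face Hub into the
# working directory and return its local path (hf_hub_download reuses a
# previously downloaded copy where possible).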
def download_models(model_id):
    hf_hub_download("BoukamchaSmartVisions/Yolov10", filename=model_id, local_dir="./")
    return f"./{model_id}"
box_annotator = sv.BoxAnnotator()
category_dict = {
    0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus',
    6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant',
    11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat',
    16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear',
    22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',
    27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard',
    32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove',
    36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle',
    40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',
    46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli',
    51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake',
    56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table',
    61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard',
    67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink',
    72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors',
    77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'
}
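
# Image inference. The spaces.GPU decorator asks Hugging Face ZeroGPU hardware
# for a GPU slot of up to 200 seconds per call.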
@spaces.GPU(duration=200)
def yolov10_inference(inputs):
    # inputs is the full tuple of UI values:
    # (input_type, image, video, model_id, image_size, conf_threshold, iou_threshold)
    image, model_id, image_size, conf_threshold, iou_threshold = (
        inputs[1], inputs[3], inputs[4], inputs[5], inputs[6]
    )
    model_path = download_models(model_id)
    model = YOLOv10(model_path)
    results = model(source=image, imgsz=image_size, iou=iou_threshold, conf=conf_threshold, verbose=False)[0]
    detections = sv.Detections.from_ultralytics(results)
    labels = [
        f"{category_dict[class_id]} {confidence:.2f}"
        for class_id, confidence in zip(detections.class_id, detections.confidence)
    ]
    annotated_image = box_annotator.annotate(image, detections=detections, labels=labels)
    return annotated_image
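
# Video inference: decode with OpenCV, run the detector frame by frame, and
# re-encode the annotated frames into a temporary mp4 that Gradio can serve.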
def yolov10_video_inference(inputs):
    # Same input tuple as above; the video path sits at index 2.
    video, model_id, image_size, conf_threshold, iou_threshold = (
        inputs[2], inputs[3], inputs[4], inputs[5], inputs[6]
    )
    model_path = download_models(model_id)
    model = YOLOv10(model_path)

    cap = cv2.VideoCapture(video)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
    out_path = out.name

    ret, frame = cap.read()
    if not ret:
        cap.release()
        raise gr.Error("Could not read the input video.")
    height, width, _ = frame.shape
    # Preserve the source frame rate; fall back to 30 fps if it is unavailable.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))

    while ret:
        results = model(source=frame, imgsz=image_size, iou=iou_threshold, conf=conf_threshold, verbose=False)[0]
        detections = sv.Detections.from_ultralytics(results)
        labels = [
            f"{category_dict[class_id]} {confidence:.2f}"
            for class_id, confidence in zip(detections.class_id, detections.confidence)
        ]
        annotated_frame = box_annotator.annotate(frame, detections=detections, labels=labels)
        writer.write(annotated_frame)
        ret, frame = cap.read()

    cap.release()
    writer.release()
    return out_path
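
# Build the Gradio UI: a radio button toggles between image and video inputs,
# and the model/size/threshold controls feed both inference paths.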
def app():
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                image_or_video = gr.Radio(
                    label="Input Type",
                    choices=["Image", "Video"],
                    value="Image",
                )
                image = gr.Image(type="numpy", label="Image", visible=True)
                video = gr.Video(label="Video", visible=False)
                image_or_video.change(
                    lambda x: (gr.update(visible=x == "Image"), gr.update(visible=x == "Video")),
                    inputs=[image_or_video],
                    outputs=[image, video],
                )
                model_id = gr.Dropdown(
                    label="Model",
                    choices=[
                        "yolov10n.pt",
                        "yolov10s.pt",
                        "yolov10m.pt",
                        "yolov10b.pt",
                        "yolov10l.pt",
                        "yolov10x.pt",
                    ],
                    value="yolov10m.pt",
                )
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.25,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.45,
                )
                yolov10_infer = gr.Button(value="Detect Objects")
            with gr.Column():
                output_image = gr.Image(type="numpy", label="Annotated Image", visible=True)
                output_video = gr.Video(label="Annotated Video", visible=False)
        def process_inputs(*inputs):
            # Gradio passes each component value as a separate positional
            # argument, so collect them into a single tuple here.
            if inputs[0] == "Image":
                # Return a value for both output components so Gradio can
                # route the result to the visible one.
                return yolov10_inference(inputs), None
            else:
                return None, yolov10_video_inference(inputs)

        yolov10_infer.click(
            fn=process_inputs,
            inputs=[
                image_or_video,
                image,
                video,
                model_id,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_image, output_video],
        )
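
        # With cache_examples=True, Gradio runs process_inputs on every example
        # once at startup and serves the cached outputs thereafter.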
        gr.Examples(
            examples=[
                [
                    "Image",
                    "Animals_persones.jpg",
                    None,
                    "yolov10x.pt",
                    640,
                    0.25,
                    0.45,
                ],
                [
                    "Image",
                    "collage-horses-other-pets-white.jpg",
                    None,
                    "yolov10m.pt",
                    640,
                    0.25,
                    0.45,
                ],
                [
                    "Image",
                    "Ville.png",
                    None,
                    "yolov10b.pt",
                    640,
                    0.25,
                    0.45,
                ],
                [
                    "Video",
                    None,
                    "sample_video.mp4",
                    "yolov10m.pt",
                    640,
                    0.25,
                    0.45,
                ],
            ],
            fn=process_inputs,
            inputs=[
                image_or_video,
                image,
                video,
                model_id,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_image, output_video],
            cache_examples=True,
        )
gradio_app = gr.Blocks()
with gradio_app:
    gr.HTML(
        """
        <h1 style='text-align: center'>
        YOLOv10: Real-Time End-to-End Object Detection
        </h1>
        """)
    gr.HTML(
        """
        <h3 style='text-align: center'>
        Follow me for more!
        <a href='https://github.com/hamdiboukamcha/' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/hamdi-boukamcha-437830146/' target='_blank'>Linkedin</a> | <a href='https://huggingface.co/BoukamchaSmartVisions' target='_blank'>HuggingFace</a>
        </h3>
        """)
    with gr.Row():
        with gr.Column():
            app()

gradio_app.launch(debug=True)
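
# Minimal local-run sketch (an assumption about your environment, not part of
# the Space config): install the dependencies and launch with
#     pip install gradio spaces supervision huggingface_hub opencv-python
#     pip install git+https://github.com/THU-MIG/yolov10.git  # ships the YOLOv10 class
#     python app.py
# By default Gradio serves the app at http://127.0.0.1:7860.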