import gradio as gr
from ultralytics import YOLO
# Available YOLOv8 checkpoints
model_names = [
"yolov8n.pt",
"yolov8s.pt",
"yolov8m.pt",
"yolov8l.pt",
"yolov8x.pt",
]
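# Default checkpoint, loaded once at startup and swapped inside yolov8_inference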
current_model_name = "yolov8m.pt"
model = YOLO(current_model_name)
def yolov8_inference(
    image: str = None,
    model_name: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
"""
YOLOv8 inference function
Args:
image: Input image
model_name: Name of the model
image_size: Image size
conf_threshold: Confidence threshold
iou_threshold: IOU threshold
Returns:
Bounding box coordinates in xyxy format
"""
global model
global current_model_name
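    # Swap in a different checkpoint only when the dropdown selection changes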
if model_name != current_model_name:
model = YOLO(model_name)
current_model_name = model_name
    results = model(
        image,
        imgsz=image_size,
        conf=conf_threshold,
        iou=iou_threshold,
    )
    # Collect one entry per detection: class label plus xyxy box coordinates
    outputs = []
    for box in results[0].boxes:
        label = results[0].names[int(box.cls[0].item())]
        bbox = box.xyxy[0].tolist()
        outputs.append({"label": label, "bbox_coords": bbox})
    return outputs
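# Gradio input components: image upload, model selector, and inference parameters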
inputs = [
gr.Image(type="filepath", label="Input Image"),
gr.Dropdown(
model_names,
value=current_model_name,
label="Model type",
),
gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
gr.Slider(
minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
),
gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
]
outputs = gr.JSON(label="Bounding Boxes (xyxy format)")
title = "YOLOv8 Bounding Box Extraction Demo"
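# Assemble the Gradio interface and launch it with request queuing enabled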
demo_app = gr.Interface(
fn=yolov8_inference,
inputs=inputs,
outputs=outputs,
title=title,
theme="default"
)
demo_app.queue().launch(debug=True)