import gradio as gr
import yolov7
from yolov7.models.common import autoShape
from yolov7.models.experimental import attempt_load
from yolov7.utils.google_utils import attempt_download_from_hub, attempt_download
from yolov7.utils.torch_utils import TracedModel

YOLO_MODEL_FILE_NAME = "kadirnar/yolov7-v0.1"

def load_local_model(model_path, autoshape=True, device='cpu', trace=False, size=640, half=False, hf_model=False):
    """
    Creates a specified YOLOv7 model.

    Arguments:
        model_path (str): path to the weights file, or a Hugging Face Hub repo id if hf_model is True
        autoshape (bool): if True, wrap the model with autoShape so it accepts PIL/numpy inputs directly
        device (str): device the model will be loaded on ('cpu' or 'cuda')
        trace (bool): if True, the model will be traced
        size (int): size of the input image
        half (bool): if True, the model will run in half precision
        hf_model (bool): if True, the weights are downloaded from the Hugging Face Hub
    Returns:
        pytorch model
    (Adapted from yolov7.hubconf.create)
    """
    if hf_model:
        # attempt_download_from_hub is assumed to accept a Hub repo id and return the local weights path.
        model_file = attempt_download_from_hub(model_path)
    else:
        attempt_download(model_path)  # fetch the weights if they are not already present locally
        model_file = model_path
    model = attempt_load(model_file, map_location=device)
    if trace:
        model = TracedModel(model, device, size)
    if autoshape:
        model = autoShape(model)
    if half:
        model.half()
    return model
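
# Example usage of load_local_model (a sketch; assumes the "kadirnar/yolov7-v0.1"
# Hub checkpoint above, or a local weights file such as "yolov7.pt"):
#   model = load_local_model("kadirnar/yolov7-v0.1", device="cpu", hf_model=True)
#   results = model(["car.jpeg"], size=640)
#   results.print()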
# YOLO_MODEL_FILE_NAME = "kadirnar/yolov7-tiny-v0.1"

def yolov7_inference(
    image: gr.inputs.Image = None,
    image_size: gr.inputs.Slider = 640,
    conf_threshold: gr.inputs.Slider = 0.25,
    iou_threshold: gr.inputs.Slider = 0.45,
):
    # YOLO_MODEL_FILE_NAME is a Hugging Face Hub repo id, so the weights are pulled
    # from the Hub (hf_model=True). Reloading the model on every request keeps the
    # code simple; loading it once at module scope would make inference faster.
    model = load_local_model(YOLO_MODEL_FILE_NAME, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    # results.render() draws the detections and returns a list of annotated numpy arrays.
    return results.render()[0]
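
# Quick local smoke test (a sketch; assumes "car.jpeg" from the examples below is
# available next to this script):
#   from PIL import Image
#   annotated = yolov7_inference(Image.open("car.jpeg"), image_size=640,
#                                conf_threshold=0.25, iou_threshold=0.45)
#   Image.fromarray(annotated).save("car_annotated.jpg")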
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]
# yolov7_inference returns a rendered numpy array, so the output type is "numpy" rather than "filepath".
outputs = gr.outputs.Image(type="numpy", label="Output Image")
title = "YOLOv7: object detection with yolov7.pt"
examples = [['car.jpeg', 640, 0.5, 0.75],
            ['horse.jpeg', 640, 0.5, 0.75]]
demo_app = gr.Interface(
    fn=yolov7_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
)
demo_app.launch(debug=True, enable_queue=True)