import gradio as gr
import yolov7


def yolov7_inference(
    image=None,
    model_path: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """
    YOLOv7 inference function.

    Args:
        image: Input image (PIL)
        model_path: Hugging Face Hub ID of the model to load
        image_size: Inference image size
        conf_threshold: Confidence threshold
        iou_threshold: IOU threshold

    Returns:
        Rendered image with detections drawn
    """
    # Load the model from the Hugging Face Hub and apply the thresholds.
    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    # render() draws the boxes and returns one numpy array per input image.
    return results.render()[0]


inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    gr.inputs.Dropdown(
        choices=[
            "alshimaa/model_baseline",
            "alshimaa/model_yolo7",
            #"kadirnar/yolov7-v0.1",
        ],
        default="alshimaa/model_baseline",
        label="Model",
    ),
    # These sliders correspond to the remaining yolov7_inference arguments;
    # they are needed so the five-column example rows below line up.
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

# yolov7_inference returns a numpy array, so the output type must be
# "numpy" rather than "filepath".
outputs = gr.outputs.Image(type="numpy", label="Output Image")
title = "Smart Environmental Eye (SEE)"

# One row per example: [image, model, image_size, conf_threshold, iou_threshold]
examples = [
    ['image1.jpg', 'alshimaa/model_baseline', 640, 0.25, 0.45],
    ['image2.jpg', 'alshimaa/model_baseline', 640, 0.25, 0.45],
    ['image3.jpg', 'alshimaa/model_baseline', 640, 0.25, 0.45],
]

demo_app = gr.Interface(
    fn=yolov7_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True)