DHEIVER committed on
Commit
d7ba2cb
β€’
1 Parent(s): 0d56a85

Upload 4 files

Browse files
Files changed (4) hide show
  1. README (5).md +12 -0
  2. app (7).py +113 -0
  3. gitattributes (5) +35 -0
  4. requirements (6).txt +2 -0
README (5).md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Yolo9
3
+ emoji: πŸ“Š
4
+ colorFrom: red
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 4.26.0
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app (7).py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import spaces
3
+ from huggingface_hub import hf_hub_download
4
+
5
+
6
def download_models(model_id):
    """Download a YOLOv9 checkpoint from the merve/yolov9 Hub repo.

    :param model_id: Checkpoint filename, e.g. "gelan-c.pt".
    :return: Local filesystem path to the downloaded weights file.
    """
    # hf_hub_download already returns the local path of the downloaded
    # file, so use it directly instead of re-deriving the path by hand
    # (the original also wrapped model_id in a no-op f-string).
    return hf_hub_download("merve/yolov9", filename=model_id, local_dir="./")
9
+
10
@spaces.GPU
def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
    """Run YOLOv9 inference on one image and return the annotated result.

    :param img_path: Path to the input image file.
    :param model_id: Checkpoint filename on the merve/yolov9 Hub repo
        (e.g. "gelan-e.pt"); downloaded on demand via download_models.
    :param image_size: Input size (pixels) used for inference.
    :param conf_threshold: Confidence threshold applied during NMS.
    :param iou_threshold: IoU threshold applied during NMS.
    :return: The rendered image (numpy array) with detection boxes drawn.
    """
    # Imported lazily so the module can be loaded by the Spaces runtime
    # before the heavyweight yolov9 package is pulled in.
    import yolov9

    # Fetch the requested checkpoint and load the model.
    # NOTE(review): the model is loaded on "cpu" even though @spaces.GPU
    # reserves a GPU for this call — presumably intentional for a ZeroGPU
    # demo; confirm before changing.
    model_path = download_models(model_id)
    model = yolov9.load(model_path, device="cpu")

    # NMS parameters.
    model.conf = conf_threshold
    model.iou = iou_threshold

    # Run inference, then draw the detection boxes onto the image.
    results = model(img_path, size=image_size)
    output = results.render()

    return output[0]
41
+
42
+
43
def app():
    """Build the demo layout: controls in the left column, result image on the right."""
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                # Input image, passed to the model as a file path.
                input_image = gr.Image(type="filepath", label="Image")
                # Checkpoint picker; filenames match the merve/yolov9 repo.
                weights_choice = gr.Dropdown(
                    label="Model",
                    choices=[
                        "gelan-c.pt",
                        "gelan-e.pt",
                        "yolov9-c.pt",
                        "yolov9-e.pt",
                    ],
                    value="gelan-e.pt",
                )
                size_slider = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_slider = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                iou_slider = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                submit_btn = gr.Button(value="Submit")

            with gr.Column():
                # Annotated output rendered by yolov9_inference.
                result_image = gr.Image(type="numpy", label="Output")

        # Wire the button to the inference function.
        submit_btn.click(
            fn=yolov9_inference,
            inputs=[
                input_image,
                weights_choice,
                size_slider,
                conf_slider,
                iou_slider,
            ],
            outputs=[result_image],
        )
95
+
96
# Top-level Space entry point: build the page chrome, mount the demo UI
# defined by app(), and start the Gradio server when the script runs.
gradio_app = gr.Blocks()
with gradio_app:
    # Page title banner.
    gr.HTML(
        """
    <h1 style='text-align: center'>
    YOLOv9 Base Model
    </h1>
    """)
    # Empty subtitle placeholder (kept for layout spacing).
    gr.HTML(
        """
    <h3 style='text-align: center'>
    </h3>
    """)
    with gr.Row():
        with gr.Column():
            # Mount the input/output widgets built in app().
            app()

# debug=True surfaces tracebacks in the logs — useful for a demo Space.
gradio_app.launch(debug=True)
gitattributes (5) ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements (6).txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ yolov9pip==0.0.4
2
+ huggingface_hub