Initial Commit
- INSTALL.md +35 -0
- README.md +1 -13
- RESOURCES.md +7 -0
- examples/gradio/basic.py +15 -0
- examples/gradio/tabbed.py +40 -0
- examples/tracking/track_webcam.py +63 -0
- examples/tracking/track_youtube.py +17 -0
- examples/training/Images/Craig.jpg +0 -0
- examples/training/Images/WalterWhite.jpg +0 -0
- examples/training/train.py +11 -0
- examples/training/wandb_train.py +28 -0
- interface/defaults.py +3 -0
- interface/detect_interface.py +70 -0
- interface/detect_interface_methods.py +15 -0
- interface/resources_interface.py +18 -0
- interface/train_interface.py +58 -0
- interface/train_interface_methods.py +56 -0
- main_interface.py +32 -0
- requirements.txt +8 -0
INSTALL.md
ADDED
@@ -0,0 +1,35 @@
# Installation Guide

This project is built with Python 3.11.7. Try to use that version if possible.

## Guide for this Project

We are currently working on setting up the correct requirements.txt file for the entire interface. This will be updated soon!

### Current Method

1. Create a conda environment.
   - conda create -n yolo8inter python=3.11.7
2. Activate the conda environment.
   - conda activate yolo8inter
3. Install PyTorch. The command below is for Windows, Conda, Python, & CUDA 11.8. If you need a different build, go to the [PyTorch website](https://pytorch.org/get-started/locally/).
   - conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia
4. Install Ultralytics.
   - pip install ultralytics==8.0.186
5. Install the other requirements, such as Gradio. This list is in place but will be continually updated. A quick sanity check for the finished environment follows these steps.
   - pip install -r requirements.txt
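A minimal sanity-check sketch, assuming the conda environment above is active and the CUDA build of PyTorch was chosen:

```python
# Confirm that PyTorch, CUDA, and Ultralytics are all importable
import torch
import ultralytics

print("PyTorch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
print("Ultralytics version:", ultralytics.__version__)
```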
### Other Recommendations

1. When you run any of the examples, at least one weights file (.pt) will be downloaded. It is recommended to create a "weights" folder in the project directory to keep track of these.

## Directly from the Ultralytics GitHub

See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com) for full documentation on training, validation, prediction and deployment.

<details open>
<summary>Install</summary>

Pip install the ultralytics package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).

[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)

```bash
pip install ultralytics
```

For alternative installation methods including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart).

</details>
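To pair the install above with the usage half of the quickstart, a minimal prediction sketch (the bus image URL is the sample used throughout the Ultralytics docs):

```python
from ultralytics import YOLO

# Load a pretrained detection model and run it on a sample image
model = YOLO('yolov8n.pt')
results = model.predict('https://ultralytics.com/images/bus.jpg')
print(results[0].boxes)  # detected boxes, classes, and confidences
```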
README.md
CHANGED
@@ -1,13 +1 @@
- ---
- title: YOLOv8 Interface
- emoji: 🌍
- colorFrom: purple
- colorTo: blue
- sdk: gradio
- sdk_version: 4.14.0
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ YOLOv8 Interface
RESOURCES.md
ADDED
@@ -0,0 +1,7 @@
# Resources

## YOLOv8 Code & Documentation

YOLOv8 is made by a company called Ultralytics. The most important info is on their GitHub, website, and YouTube channel.

1. [Ultralytics (YOLOv8) GitHub](https://github.com/ultralytics/ultralytics)
2. [YOLOv8 Documentation Website](https://docs.ultralytics.com/)
3. [Ultralytics YouTube Channel](https://www.youtube.com/ultralytics)
4. Join the Ultralytics Discord server as well!
examples/gradio/basic.py
ADDED
@@ -0,0 +1,15 @@
import gradio as gr

def update(name):
    return f"Welcome to Gradio, {name}!"

with gr.Blocks() as demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")
        out = gr.Textbox()
    btn = gr.Button("Run")
    btn.click(fn=update, inputs=inp, outputs=out)

if __name__ == "__main__":
    # demo.queue().launch(share=True)
    demo.queue().launch()
examples/gradio/tabbed.py
ADDED
@@ -0,0 +1,40 @@
import gradio as gr

def welcome(name):
    return f"Welcome to Gradio, {name}!"

with gr.Blocks() as demo:
    gr.Markdown(
        """
    # Hello World!
    Start typing below to see the output.
    """)
    inp = gr.Textbox(placeholder="What is your name?")
    out = gr.Textbox()
    inp.change(welcome, inp, out)

tts_examples = [
    "I love learning machine learning",
    "How do you do?",
]

tts_demo = gr.load(
    "huggingface/facebook/fastspeech2-en-ljspeech",
    title=None,
    examples=tts_examples,
    description="Give me something to say!",
    cache_examples=False
)

stt_demo = gr.load(
    "huggingface/facebook/wav2vec2-base-960h",
    title=None,
    inputs="mic",
    description="Let me try to guess what you're saying!",
)

# Note: this TabbedInterface rebinds `demo`, replacing the Blocks demo defined
# above, so only the two tabs are launched
demo = gr.TabbedInterface([tts_demo, stt_demo], ["Text-to-speech", "Speech-to-text"])

if __name__ == "__main__":
    demo.launch()
examples/tracking/track_webcam.py
ADDED
@@ -0,0 +1,63 @@
## Tracking, done as a stream from the computer webcam
from collections import defaultdict

from ultralytics import YOLO
import cv2
import numpy as np

# Load an official or custom model (each assignment replaces the previous one;
# only the Pose model loaded last is actually used below)
model = YOLO('yolov8n.pt')       # Load an official Detect model
model = YOLO('yolov8n-seg.pt')   # Load an official Segment model
model = YOLO('yolov8n-pose.pt')  # Load an official Pose model

# Start capturing video from the webcam
cap = cv2.VideoCapture(0)
# Can be used for a video file as well
# cap = cv2.VideoCapture("path/to/video.mp4")

# Store the track history
track_history = defaultdict(lambda: [])

# Loop through the video frames
while cap.isOpened():
    # Read a frame from the video
    success, frame = cap.read()

    if success:
        # Run YOLOv8 tracking on the frame, persisting tracks between frames
        results = model.track(frame, persist=True, tracker="bytetrack.yaml")

        # Get the boxes and track IDs
        boxes = results[0].boxes.xywh.cpu()
        if results[0].boxes.id is not None:
            track_ids = results[0].boxes.id.int().cpu().tolist()
        else:
            track_ids = []

        # Visualize the results on the frame
        annotated_frame = results[0].plot()

        # Plot the tracks
        for box, track_id in zip(boxes, track_ids):
            x, y, w, h = box
            track = track_history[track_id]
            track.append((float(x), float(y)))  # x, y center point
            if len(track) > 30:  # retain the center points of the last 30 frames
                track.pop(0)

            # Draw the tracking lines
            points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
            cv2.polylines(annotated_frame, [points], isClosed=False, color=(230, 230, 230), thickness=10)

        # Display the annotated frame
        cv2.imshow("YOLOv8 Tracking", annotated_frame)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        # Break the loop if the end of the video is reached
        break

# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()
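The loop above only displays the annotated stream. If a saved copy is wanted, one possible extension (a sketch; "output.mp4" is an assumed destination, and some webcams report 0 for FPS, hence the fallback) writes each annotated frame with OpenCV's VideoWriter:

```python
# Before the loop: create a writer matching the capture's properties
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter("output.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

# Inside the loop, after annotated_frame is drawn:
writer.write(annotated_frame)

# After the loop, next to cap.release():
writer.release()
```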
examples/tracking/track_youtube.py
ADDED
@@ -0,0 +1,17 @@
## Runs, but the preview window can't be shown on Lambda; if run on a local machine,
## a new window is opened to show the tracking.
## Is effective, but has no saved output. Maybe use for the interface?

from ultralytics import YOLO

# Load an official or custom model (each assignment replaces the previous one;
# only the Pose model loaded last is actually used below)
model = YOLO('yolov8n.pt')       # Load an official Detect model
model = YOLO('yolov8n-seg.pt')   # Load an official Segment model
model = YOLO('yolov8n-pose.pt')  # Load an official Pose model

# Perform tracking with the model
## LNwODJXcvt4 is 6 minutes long; tracking runs in real time, so it takes the full length
# results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True)  # Tracking with default tracker
# results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml")  # ByteTrack tracker, shows in a new window
# results = model.track(source="https://youtu.be/LNwODJXcvt4", show=False, tracker="bytetrack.yaml")  # ByteTrack tracker, does not show
results = model.track(source="https://www.youtube.com/watch?v=BZP1rYjoBgI&ab_channel=UndoTube", show=True, tracker="bytetrack.yaml")  # Shorter video, 30s
print(results)
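Regarding the "no saved output" note above: `model.track` returns a list of `Results` objects, so the run can still be summarized afterwards. A minimal sketch that tallies which classes were seen across the processed frames:

```python
from collections import Counter

# Count detected class names across all frames in `results`
seen = Counter()
for r in results:
    if r.boxes is not None:
        for cls_id in r.boxes.cls:
            seen[r.names[int(cls_id)]] += 1
print(seen.most_common())
```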
examples/training/Images/Craig.jpg
ADDED
examples/training/Images/WalterWhite.jpg
ADDED
examples/training/train.py
ADDED
@@ -0,0 +1,11 @@
from ultralytics import YOLO

if __name__ == '__main__':
    # Load a model (each assignment replaces the previous one; the last is used)
    model = YOLO('yolov8n.yaml')  # build a new model from YAML
    model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
    model = YOLO('yolov8n.yaml').load('yolov8n.pt')  # build from YAML and transfer weights

    # Train the model
    # results = model.train(data='coco128.yaml', epochs=100, imgsz=640)  # train yolov8n on COCO128 for 100 epochs
    results = model.train(data='coco128.yaml', epochs=2, imgsz=640)  # train yolov8n on COCO128 for 2 epochs
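Once training finishes, the same `model` object can be validated and exported with the standard Ultralytics calls; a short follow-up sketch:

```python
    # Evaluate on the validation split defined in coco128.yaml
    metrics = model.val()

    # Export the trained weights to a deployable format, e.g. ONNX
    onnx_path = model.export(format='onnx')
```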
examples/training/wandb_train.py
ADDED
@@ -0,0 +1,28 @@
from ultralytics import YOLO
from wandb.integration.ultralytics import add_wandb_callback
import wandb

if __name__ == "__main__":
    wandb.login()
    # Step 1: Initialize a Weights & Biases run
    wandb.init(project="ultralytics", job_type="training")

    # Step 2: Define the YOLOv8 Model and Dataset
    model_name = "yolov8n"
    dataset_name = "coco128.yaml"
    model = YOLO(f"{model_name}.pt")

    # Step 3: Add W&B Callback for Ultralytics
    add_wandb_callback(model, enable_model_checkpointing=True)

    # Step 4: Train and Fine-Tune the Model
    model.train(project="ultralytics", data=dataset_name, epochs=2, imgsz=640)

    # Step 5: Validate the Model
    model.val()

    # Step 6: Perform Inference and Log Results
    model(["examples/training/Images/Craig.jpg", "examples/training/Images/WalterWhite.jpg"])

    # Step 7: Finalize the W&B Run
    wandb.finish()
interface/defaults.py
ADDED
@@ -0,0 +1,3 @@
import gradio as gr

# Theme shared by every sub-interface so the whole app looks consistent
shared_theme = gr.themes.Base()
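The shared theme currently uses Gradio's defaults. If the interfaces later need a common custom look, the theme constructor accepts theming parameters; a sketch with assumed (hypothetical) color choices:

```python
import gradio as gr

# A customized variant of the shared theme (example hues, not project decisions)
shared_theme = gr.themes.Base(primary_hue="blue", secondary_hue="gray")
```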
interface/detect_interface.py
ADDED
@@ -0,0 +1,70 @@
import gradio as gr
from interface.detect_interface_methods import interface_detect
from interface.defaults import shared_theme

def build_detect_interface():
    # Gradio Interface Code
    with gr.Blocks(theme=shared_theme) as demo:
        gr.Markdown(
            """
        # Image & Video Interface for YOLOv8
        Upload your own image or video and watch YOLOv8 try to guess what it is!
        """)
        # Row for input & output settings
        with gr.Row() as file_settings:
            # Allows a choice between uploading an image or a video
            file_type = gr.Radio(label="File Type", info="Choose 'Image' if you are uploading an image, choose 'Video' if you are uploading a video",
                                 choices=['Image', 'Video'], value='Image', show_label=True, interactive=True, visible=True)
        # Row for all inputs & outputs
        with gr.Row() as inputs_outputs:
            # Default input image: visible, upload from computer
            input_im = gr.Image(sources=['upload', 'webcam', 'clipboard'], type='numpy', label="Input Image",
                                show_download_button=True, show_share_button=True, interactive=True, visible=True)
            # Default boxed output image: visible
            output_box_im = gr.Image(type='numpy', label="Output Image",
                                     show_download_button=True, show_share_button=True, interactive=False, visible=True)
            # Default input video: not visible, upload from computer
            input_vid = gr.Video(sources=['upload', 'webcam'], label="Input Video",
                                 show_share_button=True, interactive=True, visible=False)
            # Default boxed output video: not visible
            output_box_vid = gr.Video(label="Output Video", show_share_button=True, visible=False)

        # List of components for clearing
        clear_list = [input_im, output_box_im, input_vid, output_box_vid]

        # Row for start & clear buttons
        with gr.Row() as buttons:
            start_but = gr.Button(value="Start")
            clear_but = gr.ClearButton(value='Clear All', components=clear_list,
                                       interactive=True, visible=True)

        update_list = [input_im, output_box_im, input_vid, output_box_vid]
        input_media = input_im
        output_media = output_box_im

        # Swaps which input/output components are visible when the file type changes.
        # Note: start_but is bound to the image components at build time, so the
        # video path still needs its own handler.
        def change_input_type(file_type, input_media):
            if file_type == 'Image':
                return {
                    input_im: gr.Image(visible=True),
                    output_box_im: gr.Image(visible=True),
                    input_vid: gr.Video(visible=False),
                    output_box_vid: gr.Video(visible=False)
                }
            elif file_type == 'Video':
                return {
                    input_im: gr.Image(visible=False),
                    output_box_im: gr.Image(visible=False),
                    input_vid: gr.Video(visible=True),
                    output_box_vid: gr.Video(visible=True)
                }

        # When the start button is clicked, run detection on the input
        start_but.click(interface_detect, inputs=[input_media], outputs=output_media)
        # When the file type setting is changed, the change_input_type method is called
        file_type.input(change_input_type, show_progress=True, inputs=[file_type, input_media], outputs=update_list)

    return demo
interface/detect_interface_methods.py
ADDED
@@ -0,0 +1,15 @@
import gradio as gr
from ultralytics import YOLO
import numpy as np

def interface_detect(source):
    # Load a pretrained YOLOv8n model (official Detect model)
    model = YOLO('yolov8n.pt')
    if isinstance(source, np.ndarray):
        # gr.Image with type='numpy' passes the image as an array
        results = model.predict(source)
        return results[0].plot()
    elif isinstance(source, str):
        # gr.Video passes the uploaded file's path as a string; handling to be added
        print("To be added")
    else:
        raise ValueError("Invalid source type")
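A possible implementation of the video branch above, assuming the video arrives as a file path and that writing an "output.mp4" next to the script is acceptable; `detect_on_video` is a hypothetical helper, not part of the current interface:

```python
import cv2
from ultralytics import YOLO

def detect_on_video(video_path, model, out_path="output.mp4"):
    """Sketch: run YOLOv8 frame-by-frame and write an annotated copy."""
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, size)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        writer.write(model.predict(frame)[0].plot())  # annotated frame
    cap.release()
    writer.release()
    return out_path
```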
interface/resources_interface.py
ADDED
@@ -0,0 +1,18 @@
import gradio as gr
from interface.defaults import shared_theme

def build_resources_interface():
    # Gradio Interface Code
    with gr.Blocks(theme=shared_theme) as demo:
        gr.Markdown(
            """
        # Helpful Resources for YOLOv8
        This page has a list of websites & guides to help you become a YOLOv8 Pro!
        ## YOLOv8 Code & Documentation
        YOLOv8 is made by a company called Ultralytics. The most important info is on their GitHub, website, and YouTube channel.
        1. [Ultralytics (YOLOv8) GitHub](https://github.com/ultralytics/ultralytics)
        2. [YOLOv8 Documentation Website](https://docs.ultralytics.com/)
        3. [Ultralytics YouTube Channel](https://www.youtube.com/ultralytics)
        4. Join the Ultralytics Discord server as well!
        """)
    return demo
interface/train_interface.py
ADDED
@@ -0,0 +1,58 @@
import gradio as gr
from interface.defaults import shared_theme
from interface.train_interface_methods import interface_train, interface_login

class TrainInterface():
    def __init__(self):
        self.demo = None
        self.api_key = None
        with gr.Blocks(theme=shared_theme) as demo:
            gr.Markdown(
                """
            # Training Interface for YOLOv8
            Train your own YOLOv8 model!
            """)

            with gr.Row() as finetune_row:
                is_finetune = gr.Checkbox(label="Finetune", info="Check this box if you want to finetune a model")
                is_official_pretrained = gr.Checkbox(label="Official", info="Check this box if you want to train an official model", visible=True, interactive=True, value=True)
                custom_pretrained = gr.File(label="Pretrained Model Weights", file_count='single', type='binary',
                                            file_types=['.pt'], visible=True, show_label=True, interactive=True)
                official_pretrained = gr.Dropdown(label="Pretrained Model", choices=["yolov8n.pt"], visible=True, interactive=True)
            with gr.Row() as dataset_row:
                is_official_dataset = gr.Checkbox(label="Official", info="Check this box if you want to use an official dataset", visible=True, interactive=True, value=True)
                custom_dataset = gr.File(label="Custom Dataset", file_count='single', type='binary',
                                         file_types=['.zip'], visible=True, show_label=True, interactive=True)
                official_dataset = gr.Dropdown(label="Dataset", choices=["coco128"], visible=True, interactive=True)

            # Row for start & clear buttons
            with gr.Row() as buttons:
                start_but = gr.Button(value="Start")
            with gr.Accordion("Logger Options") as login_accordion:
                use_logger = gr.Checkbox(label="Use Logger", info="Check this box if you want to use a logger", visible=True, interactive=True, value=False)
                logger = gr.Radio(choices=['WANDB', 'ClearML', 'Tensorboard'], value='WANDB', show_label=True, interactive=True, visible=True,
                                  label="Logger", info="Choose which logger to use")
                wandb_key = gr.Textbox(label="WANDB Key", placeholder="Enter WANDB Key", visible=True, interactive=True)
                login_but = gr.Button(value="Login")

            # Stores the key typed into the textbox so logger_login can use it
            def string_from_textbox(textbox):
                self.api_key = textbox

            wandb_key.change(fn=string_from_textbox, inputs=[wandb_key], outputs=[])

            def logger_login(use_logger, logger):
                if use_logger:
                    interface_login(logger, [self.api_key])
                else:
                    gr.Warning("Not using a logger, so there is no need to log in")

            start_but.click(fn=interface_train, inputs=[is_finetune, official_dataset], outputs=[])
            login_but.click(fn=logger_login, inputs=[use_logger, logger], outputs=[])
        self.demo = demo

    def get_interface(self):
        return self.demo

if __name__ == "__main__":
    demo = TrainInterface().get_interface()
    demo.queue().launch()
interface/train_interface_methods.py
ADDED
@@ -0,0 +1,56 @@
import gradio as gr
from ultralytics import YOLO
from wandb.integration.ultralytics import add_wandb_callback
import wandb

def interface_login(logger, args):
    if logger == 'WANDB':
        result = False
        wandb_key = args[0]
        if (wandb_key is not None) and isinstance(wandb_key, str):
            try:
                result = wandb.login(key=wandb_key, relogin=True, timeout=15)
            except Exception:
                gr.Warning("Issue with the WANDB key")
        else:
            gr.Warning("Issue with the WANDB key")
        if result:
            gr.Info("Logged in to WANDB")
        else:
            gr.Warning("Failed to log in to WANDB")
    elif logger == 'ClearML':
        pass
    elif logger == 'Tensorboard':
        pass

def interface_finetune():
    # Load a pretrained YOLOv8n model (official Detect model)
    model = YOLO('yolov8n.pt')
    return model

def interface_train(is_finetune=False, dataset=None, epochs=2, imgsz=640):
    model = YOLO('yolov8n.yaml')  # build a new model from YAML
    if is_finetune:
        model = interface_finetune()
    results = model.train(data=dataset, epochs=epochs, imgsz=imgsz)

def interface_train_wandb(project_name, model_name, dataset_name, epochs=2, imgsz=640):
    # Step 1: Initialize a Weights & Biases run
    wandb.init(project=project_name, job_type="training")

    # Step 2: Define the YOLOv8 Model
    model = YOLO(f"{model_name}.pt")

    # Step 3: Add W&B Callback for Ultralytics
    add_wandb_callback(model, enable_model_checkpointing=True)

    # Step 4: Train and Fine-Tune the Model
    model.train(project=project_name, data=dataset_name, epochs=epochs, imgsz=imgsz)

    # Step 5: Validate the Model
    model.val()

    # # Step 6: Perform Inference and Log Results
    # model(["Images/Craig.jpg", "Images/WalterWhite.jpg"])

    # Step 7: Finalize the W&B Run
    wandb.finish()
main_interface.py
ADDED
@@ -0,0 +1,32 @@
# Abstracted file for running the interface. Can be run from the command line or through a debugger.

from interface.detect_interface import build_detect_interface
from interface.train_interface import TrainInterface
from interface.resources_interface import build_resources_interface
import gradio as gr
from interface.defaults import shared_theme
import wandb

def build_main_interface():
    detect = build_detect_interface()
    train = TrainInterface().get_interface()
    resources = build_resources_interface()

    with gr.Blocks(title="YOLOv8 Interface", theme=shared_theme) as demo:
        gr.Markdown(
            """
        # YOLOv8 Interface
        Choose between the Detect, Train, and Resources tabs.
        """)
        gr.TabbedInterface(interface_list=[detect, train, resources],
                           tab_names=["Detect", "Train", "Resources"],
                           theme=shared_theme,
                           analytics_enabled=True)

    return demo

if __name__ == "__main__":
    # run_main_interface()
    demo = build_main_interface()
    demo.queue().launch()
    # Note: launch() blocks by default, so this line only runs after the server exits
    demo.integrate(wandb=wandb)
requirements.txt
ADDED
@@ -0,0 +1,8 @@
# Usage: pip install -r requirements.txt
# If issues: pip --no-cache-dir install -r requirements.txt
lapx
pafy
youtube_dl==2020.12.2
gradio
clearml>=1.2.0
wandb