Commit 1b15ea8 (parent: d087c42)
stphtan94117 committed: Upload 142 files
Changed files:

- yolov5/.github/ISSUE_TEMPLATE/config.yml +1 -1
- yolov5/.github/workflows/ci-testing.yml +6 -6
- yolov5/.github/workflows/codeql-analysis.yml +1 -1
- yolov5/.github/workflows/docker.yml +9 -7
- yolov5/.github/workflows/greetings.yml +1 -1
- yolov5/.github/workflows/links.yml +10 -5
- yolov5/.github/workflows/translate-readme.yml +2 -2
- yolov5/.pre-commit-config.yaml +6 -6
- yolov5/README.md +42 -61
- yolov5/README.zh-CN.md +46 -63
- yolov5/classify/predict.py +4 -3
- yolov5/classify/tutorial.ipynb +2 -2
- yolov5/detect.py +26 -2
- yolov5/export.py +3 -3
- yolov5/models/common.py +21 -9
- yolov5/models/experimental.py +2 -2
- yolov5/requirements.txt +6 -6
- yolov5/segment/predict.py +3 -2
- yolov5/segment/tutorial.ipynb +2 -2
- yolov5/tutorial.ipynb +10 -10
- yolov5/utils/__init__.py +1 -1
- yolov5/utils/dataloaders.py +1 -1
- yolov5/utils/docker/Dockerfile +3 -4
- yolov5/utils/docker/Dockerfile-arm64 +2 -3
- yolov5/utils/docker/Dockerfile-cpu +9 -9
- yolov5/utils/general.py +12 -2
- yolov5/utils/google_app_engine/additional_requirements.txt +2 -2
- yolov5/utils/loggers/clearml/clearml_utils.py +1 -2
- yolov5/utils/loggers/comet/README.md +1 -1
- yolov5/utils/loggers/comet/__init__.py +22 -13
- yolov5/utils/plots.py +4 -120
- yolov5/utils/segment/plots.py +1 -1
- yolov5/val.py +2 -0
yolov5/.github/ISSUE_TEMPLATE/config.yml
CHANGED
@@ -7,5 +7,5 @@ contact_links:
     url: https://community.ultralytics.com/
     about: Ask on Ultralytics Community Forum
   - name: 🎧 Discord
-    url: https://
+    url: https://ultralytics.com/discord
     about: Ask on Ultralytics Discord
yolov5/.github/workflows/ci-testing.yml
CHANGED
@@ -21,7 +21,7 @@ jobs:
         python-version: [ '3.10' ]  # requires python<=3.10
         model: [ yolov5n ]
     steps:
-      - uses: actions/checkout@
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
@@ -63,11 +63,11 @@ jobs:
             python-version: '3.9'
             model: yolov5n
           - os: ubuntu-latest
-            python-version: '3.8'  # torch 1.
+            python-version: '3.8'  # torch 1.8.0 requires python >=3.6, <=3.8
             model: yolov5n
-            torch: '1.
+            torch: '1.8.0'  # min torch version CI https://pypi.org/project/torchvision/
     steps:
-      - uses: actions/checkout@
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
@@ -75,8 +75,8 @@ jobs:
       - name: Install requirements
         run: |
           python -m pip install --upgrade pip wheel
-          if [ "${{ matrix.torch }}" == "1.
-          pip install -r requirements.txt torch==1.
+          if [ "${{ matrix.torch }}" == "1.8.0" ]; then
+            pip install -r requirements.txt torch==1.8.0 torchvision==0.9.0 --extra-index-url https://download.pytorch.org/whl/cpu
           else
             pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
           fi
yolov5/.github/workflows/codeql-analysis.yml
CHANGED
@@ -23,7 +23,7 @@ jobs:

     steps:
     - name: Checkout repository
-      uses: actions/checkout@
+      uses: actions/checkout@v4

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
yolov5/.github/workflows/docker.yml
CHANGED
@@ -15,22 +15,24 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repo
-        uses: actions/checkout@
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # copy full .git directory to access full git history in Docker images

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@
+        uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@
+        uses: docker/setup-buildx-action@v3

       - name: Login to Docker Hub
-        uses: docker/login-action@
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build and push arm64 image
-        uses: docker/build-push-action@
+        uses: docker/build-push-action@v5
         continue-on-error: true
         with:
           context: .
@@ -40,7 +42,7 @@ jobs:
           tags: ultralytics/yolov5:latest-arm64

       - name: Build and push CPU image
-        uses: docker/build-push-action@
+        uses: docker/build-push-action@v5
         continue-on-error: true
         with:
           context: .
@@ -49,7 +51,7 @@ jobs:
           tags: ultralytics/yolov5:latest-cpu

       - name: Build and push GPU image
-        uses: docker/build-push-action@
+        uses: docker/build-push-action@v5
         continue-on-error: true
         with:
           context: .
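Note: the image builds above can be approximated locally without the workflow; a minimal sketch, assuming Docker with Buildx is installed (Dockerfile path per this commit's utils/docker/ file list, tag per the workflow):

```bash
# Local equivalent of the CPU image step; the arm64/GPU steps use the sibling Dockerfiles
docker build -f utils/docker/Dockerfile-cpu -t ultralytics/yolov5:latest-cpu .
```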
yolov5/.github/workflows/greetings.yml
CHANGED
@@ -31,7 +31,7 @@ jobs:

           ## Requirements

-          [**Python>=3.
+          [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started:
           ```bash
           git clone https://github.com/ultralytics/yolov5  # clone
           cd yolov5
yolov5/.github/workflows/links.yml
CHANGED
@@ -1,6 +1,11 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-#
-#
+# Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee
+# Ignores the following status codes to reduce false positives:
+# - 403(OpenVINO, 'forbidden')
+# - 429(Instagram, 'too many requests')
+# - 500(Zenodo, 'cached')
+# - 502(Zenodo, 'bad gateway')
+# - 999(LinkedIn, 'unknown status code')

 name: Check Broken links

@@ -13,7 +18,7 @@ jobs:
   Links:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@
+      - uses: actions/checkout@v4

       - name: Download and install lychee
         run: |
@@ -28,7 +33,7 @@ jobs:
           timeout_minutes: 5
           retry_wait_seconds: 60
           max_attempts: 3
-          command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html'
+          command: lychee --accept 403,429,500,502,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html'

       - name: Test Markdown, HTML, YAML, Python and Notebook links with retry
         if: github.event_name == 'workflow_dispatch'
@@ -37,4 +42,4 @@ jobs:
           timeout_minutes: 5
           retry_wait_seconds: 60
           max_attempts: 3
-          command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'
+          command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'
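Note: the updated CI command can be reproduced locally to debug link failures before pushing; a sketch assuming lychee is on PATH and run from the repo root (the GitHub token is optional locally but avoids rate limits):

```bash
# Same accept list and exclusions as the Markdown/HTML step above
lychee --accept 403,429,500,502,999 --exclude-loopback \
  --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
  --exclude-path '**/ci.yaml' --exclude-mail './**/*.md' './**/*.html'
```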
yolov5/.github/workflows/translate-readme.yml
CHANGED
@@ -14,9 +14,9 @@ jobs:
   Translate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@
+      - uses: actions/checkout@v4
       - name: Setup Node.js
-        uses: actions/setup-node@
+        uses: actions/setup-node@v4
         with:
           node-version: 16
         # ISO Language Codes: https://cloud.google.com/translate/docs/languages
yolov5/.pre-commit-config.yaml
CHANGED
@@ -11,7 +11,7 @@ ci:

 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.
+    rev: v4.5.0
     hooks:
       - id: end-of-file-fixer
       - id: trailing-whitespace
@@ -22,7 +22,7 @@ repos:
       - id: detect-private-key

   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.
+    rev: v3.15.0
     hooks:
       - id: pyupgrade
         name: Upgrade code
@@ -34,13 +34,13 @@ repos:
         name: Sort imports

   - repo: https://github.com/google/yapf
-    rev: v0.40.
+    rev: v0.40.2
     hooks:
       - id: yapf
         name: YAPF formatting

   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.
+    rev: 0.7.17
     hooks:
       - id: mdformat
         name: MD formatting
@@ -50,13 +50,13 @@ repos:
   # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md"

   - repo: https://github.com/PyCQA/flake8
-    rev: 6.
+    rev: 6.1.0
     hooks:
       - id: flake8
         name: PEP8

   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.
+    rev: v2.2.6
     hooks:
       - id: codespell
         args:
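Note: the bumped hook revisions can be validated locally with pre-commit's standard entry point before pushing:

```bash
pip install pre-commit
pre-commit run --all-files  # runs every hook above at its pinned rev against the repo
```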
yolov5/README.md
CHANGED
@@ -1,7 +1,11 @@
 <div align="center">
   <p>
+    <a href="https://yolovision.ultralytics.com/" target="_blank">
+    <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-yolo-vision-2023.png"></a>
+    <!--
     <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
     <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
+    -->
   </p>

 [English](README.md) | [简体中文](README.zh-CN.md)
@@ -20,31 +24,24 @@
 YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

-We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://
+We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!

 To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).

 <div align="center">
-  <a href="https://github.com/ultralytics"
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%"
-  <a href="https://
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%"
-  <a href="https://
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://discord.gg/2wNGbc6g9X" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="2%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
 </div>

 </div>
@@ -52,10 +49,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens
 ## <div align="center">YOLOv8 🚀 NEW</div>

-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model
-released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**.
-YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of
-object detection, image segmentation and image classification tasks.
+We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks.

 See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:
@@ -78,8 +72,8 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentatio
 <summary>Install</summary>

 Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
-[**Python>=3.
-[**PyTorch>=1.
+[**Python>=3.8.0**](https://www.python.org/) environment, including
+[**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).

 ```bash
 git clone https://github.com/ultralytics/yolov5  # clone
@@ -92,8 +86,7 @@ pip install -r requirements.txt # install
 <details>
 <summary>Inference</summary>

-YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
+YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).

 ```python
 import torch
@@ -116,8 +109,7 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
 <details>
 <summary>Inference with detect.py</summary>

-`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
-the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
+`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.

 ```bash
 python detect.py --weights yolov5s.pt --source 0  # webcam
@@ -128,7 +120,7 @@ python detect.py --weights yolov5s.pt --source 0 #
                                            list.txt  # list of images
                                            list.streams  # list of streams
                                            'path/*.jpg'  # glob
-                                           'https://youtu.be/
+                                           'https://youtu.be/LNwODJXcvt4'  # YouTube
                                            'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
 ```

@@ -139,11 +131,7 @@ python detect.py --weights yolov5s.pt --source 0 #

 The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
 results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
-and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
-1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the
-largest `--batch-size` possible, or pass `--batch-size -1` for
-YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
+and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.

 ```bash
 python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
@@ -461,37 +449,30 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare

 ## <div align="center">License</div>

-
+Ultralytics offers two licensing options to accommodate diverse use cases:

-- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
-- **Enterprise License**:
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).

 ## <div align="center">Contact</div>

-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://
+For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions!

 <br>
 <div align="center">
-  <a href="https://github.com/ultralytics"
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%"
-  <a href="https://
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%"
-  <a href="https://
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://discord.gg/2wNGbc6g9X" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="3%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
 </div>

 [tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation
yolov5/README.zh-CN.md
CHANGED
@@ -1,7 +1,11 @@
 <div align="center">
   <p>
+    <a href="https://yolovision.ultralytics.com/" target="_blank">
+    <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-yolo-vision-2023.png"></a>
+    <!--
     <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
     <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
+    -->
   </p>

 [英文](README.md)|[简体中文](README.zh-CN.md)<br>
@@ -19,38 +23,30 @@
 YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表<a href="https://ultralytics.com"> Ultralytics </a>对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。

-我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 <a href="https://docs.ultralytics.com/">文档</a> 了解详细信息,在 <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> 上提交问题以获得支持,并加入我们的 <a href="https://
+我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 <a href="https://docs.ultralytics.com/yolov5/">文档</a> 了解详细信息,在 <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> 上提交问题以获得支持,并加入我们的 <a href="https://ultralytics.com/discord">Discord</a> 社区进行问题和讨论!

 如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格

 <div align="center">
-  <a href="https://github.com/ultralytics"
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%"
-  <a href="https://
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%"
-  <a href="https://
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://discord.gg/2wNGbc6g9X" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="2%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
 </div>
 </div>

 ## <div align="center">YOLOv8 🚀 新品</div>

-我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。
-YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。
+我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。

 请查看 [YOLOv8 文档](https://docs.ultralytics.com)了解详细信息,并开始使用:
@@ -67,12 +63,12 @@ pip install ultralytics

 ## <div align="center">文档</div>

-有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。
+有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com/yolov5/)。请参阅下面的快速入门示例。

 <details open>
 <summary>安装</summary>

-克隆 repo,并要求在 [**Python>=3.
+克隆 repo,并要求在 [**Python>=3.8.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 。

 ```bash
 git clone https://github.com/ultralytics/yolov5  # clone
@@ -85,8 +81,7 @@ pip install -r requirements.txt # install
 <details>
 <summary>推理</summary>

-使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。
+使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。

 ```python
 import torch
@@ -109,8 +104,7 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
 <details>
 <summary>使用 detect.py 推理</summary>

-`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从
-最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。
+`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。

 ```bash
 python detect.py --weights yolov5s.pt --source 0  # webcam
@@ -121,7 +115,7 @@ python detect.py --weights yolov5s.pt --source 0 #
                                            list.txt  # list of images
                                            list.streams  # list of streams
                                            'path/*.jpg'  # glob
-                                           'https://youtu.be/
+                                           'https://youtu.be/LNwODJXcvt4'  # YouTube
                                            'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
 ```

@@ -130,12 +124,8 @@ python detect.py --weights yolov5s.pt --source 0 #
 <details>
 <summary>训练</summary>

-下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。
-
-将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。
-YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。
-尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现
-YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。
+下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data)
+将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。 尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。

 ```bash
 python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
@@ -250,7 +240,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结

 </details>

-## <div align="center">实例分割模型 ⭐
+## <div align="center">实例分割模型 ⭐ 新</div>

 我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。

@@ -452,39 +442,32 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
 <a href="https://github.com/ultralytics/yolov5/graphs/contributors">
 <img src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" /></a>

-## <div align="center"
+## <div align="center">许可证</div>

-
+Ultralytics 提供两种许可证选项以适应各种使用场景:

-- **AGPL-3.0
--
+- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/licenses/)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件以了解更多细节。
+- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://ultralytics.com/license)与我们联系。

-## <div align="center"
+## <div align="center">联系方式</div>

-对于
+对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://ultralytics.com/discord) 社区进行问题和讨论!

 <br>
 <div align="center">
-  <a href="https://github.com/ultralytics"
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%"
-  <a href="https://
-
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-
-  <
-
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%"
-  <a href="https://
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://discord.gg/2wNGbc6g9X" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="3%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
 </div>

 [tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation
yolov5/classify/predict.py
CHANGED
@@ -11,7 +11,7 @@ Usage - sources:
     list.txt                        # list of images
     list.streams                    # list of streams
     'path/*.jpg'                    # glob
-    'https://youtu.be/
+    'https://youtu.be/LNwODJXcvt4'  # YouTube
     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

 Usage - formats:
@@ -43,12 +43,13 @@ if str(ROOT) not in sys.path:
     sys.path.append(str(ROOT))  # add ROOT to PATH
 ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

+from ultralytics.utils.plotting import Annotator
+
 from models.common import DetectMultiBackend
 from utils.augmentations import classify_transforms
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
 from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                            increment_path, print_args, strip_optimizer)
-from utils.plots import Annotator
 from utils.torch_utils import select_device, smart_inference_mode

@@ -144,7 +145,7 @@ def run(
         # Write results
         text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
         if save_img or view_img:  # Add bbox to image
-            annotator.text(
+            annotator.text([32, 32], text, txt_color=(255, 255, 255))
         if save_txt:  # Write to file
             with open(f'{txt_path}.txt', 'a') as f:
                 f.write(text + '\n')
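Note: the relocated `Annotator` import and the new positional `annotator.text(...)` call are exercised by any classification run; a minimal sketch (source glob illustrative; weights auto-download, per the script's own usage docstring):

```bash
# Top-5 labels are drawn onto each image via the ultralytics Annotator
python classify/predict.py --weights yolov5s-cls.pt --source 'path/*.jpg'
```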
yolov5/classify/tutorial.ipynb
CHANGED
@@ -87,7 +87,7 @@
 "                         screen  # screenshot\n",
 "                         path/  # directory\n",
 "                         'path/*.jpg'  # glob\n",
-"                         'https://youtu.be/
+"                         'https://youtu.be/LNwODJXcvt4'  # YouTube\n",
 "                         'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
 "```"
 ]
@@ -1445,7 +1445,7 @@
 "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
 "import torch\n",
 "\n",
-"model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # yolov5n - yolov5x6 or custom\n",
+"model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True)  # or yolov5n - yolov5x6 or custom\n",
 "im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, nparray, list\n",
 "results = model(im)  # inference\n",
 "results.print()  # or .show(), .save(), .crop(), .pandas(), etc."
yolov5/detect.py
CHANGED
@@ -11,7 +11,7 @@ Usage - sources:
|
|
11 |
list.txt # list of images
|
12 |
list.streams # list of streams
|
13 |
'path/*.jpg' # glob
|
14 |
-
'https://youtu.be/
|
15 |
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
|
16 |
|
17 |
Usage - formats:
|
@@ -29,6 +29,7 @@ Usage - formats:
|
|
29 |
"""
|
30 |
|
31 |
import argparse
|
|
|
32 |
import os
|
33 |
import platform
|
34 |
import sys
|
@@ -42,11 +43,12 @@ if str(ROOT) not in sys.path:
|
|
42 |
sys.path.append(str(ROOT)) # add ROOT to PATH
|
43 |
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
44 |
|
|
|
|
|
45 |
from models.common import DetectMultiBackend
|
46 |
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
|
47 |
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
|
48 |
increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
|
49 |
-
from utils.plots import Annotator, colors, save_one_box
|
50 |
from utils.torch_utils import select_device, smart_inference_mode
|
51 |
|
52 |
|
@@ -62,6 +64,7 @@ def run(
|
|
62 |
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
63 |
view_img=False, # show results
|
64 |
save_txt=False, # save results to *.txt
|
|
|
65 |
save_conf=False, # save confidences in --save-txt labels
|
66 |
save_crop=False, # save cropped prediction boxes
|
67 |
nosave=False, # do not save images/videos
|
@@ -134,6 +137,18 @@ def run(
|
|
134 |
# Second-stage classifier (optional)
|
135 |
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
|
136 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
137 |
# Process predictions
|
138 |
for i, det in enumerate(pred): # per image
|
139 |
seen += 1
|
@@ -161,6 +176,14 @@ def run(
|
|
161 |
|
162 |
# Write results
|
163 |
for *xyxy, conf, cls in reversed(det):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
164 |
if save_txt: # Write to file
|
165 |
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
|
166 |
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
|
@@ -228,6 +251,7 @@ def parse_opt():
|
                                      list.txt                        # list of images
                                      list.streams                    # list of streams
                                      'path/*.jpg'                    # glob
-                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                     'https://youtu.be/LNwODJXcvt4'  # YouTube
                                      'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

 Usage - formats:

 """

 import argparse
+import csv
 import os
 import platform
 import sys

 sys.path.append(str(ROOT))  # add ROOT to PATH
 ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

+from ultralytics.utils.plotting import Annotator, colors, save_one_box
+
 from models.common import DetectMultiBackend
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
 from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                            increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
-from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import select_device, smart_inference_mode

         device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
         view_img=False,  # show results
         save_txt=False,  # save results to *.txt
+        save_csv=False,  # save results in CSV format
         save_conf=False,  # save confidences in --save-txt labels
         save_crop=False,  # save cropped prediction boxes
         nosave=False,  # do not save images/videos

         # Second-stage classifier (optional)
         # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

+        # Define the path for the CSV file
+        csv_path = save_dir / 'predictions.csv'
+
+        # Create or append to the CSV file
+        def write_to_csv(image_name, prediction, confidence):
+            data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence}
+            with open(csv_path, mode='a', newline='') as f:
+                writer = csv.DictWriter(f, fieldnames=data.keys())
+                if not csv_path.is_file():
+                    writer.writeheader()
+                writer.writerow(data)
+
         # Process predictions
         for i, det in enumerate(pred):  # per image
             seen += 1

                 # Write results
                 for *xyxy, conf, cls in reversed(det):
+                    c = int(cls)  # integer class
+                    label = names[c] if hide_conf else f'{names[c]}'
+                    confidence = float(conf)
+                    confidence_str = f'{confidence:.2f}'
+
+                    if save_csv:
+                        write_to_csv(p.name, label, confidence_str)
+
                     if save_txt:  # Write to file
                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                         line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format

     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     parser.add_argument('--view-img', action='store_true', help='show results')
     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--save-csv', action='store_true', help='save results in CSV format')
     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
     parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
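The `--save-csv` addition appends one row per detection to a `predictions.csv` in the run directory. One caveat worth noting: `open(csv_path, mode='a')` creates the file before `csv_path.is_file()` is evaluated inside the `with` block, so as committed the header row appears never to be written. A minimal corrected sketch (not part of this commit) checks for the file before opening:

```python
import csv
from pathlib import Path


def write_to_csv(csv_path: Path, image_name: str, prediction: str, confidence: str):
    # Append one detection per row; decide on the header BEFORE open() creates the file
    data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence}
    write_header = not csv_path.is_file()
    with open(csv_path, mode='a', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=data.keys())
        if write_header:
            writer.writeheader()
        writer.writerow(data)


# Usage mirroring detect.py (values illustrative):
write_to_csv(Path('predictions.csv'), 'zidane.jpg', 'person', '0.87')
```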
yolov5/export.py
CHANGED
@@ -155,7 +155,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
     import onnx

     LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-    f = file.with_suffix('.onnx')
+    f = str(file.with_suffix('.onnx'))

     output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
     if dynamic:
@@ -207,7 +207,7 @@
 @try_export
 def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
-    check_requirements('openvino-dev>=2022.3')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+    check_requirements('openvino-dev>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
     import openvino.runtime as ov  # noqa
     from openvino.tools import mo  # noqa

@@ -216,7 +216,7 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')):
     f_onnx = file.with_suffix('.onnx')
     f_ov = str(Path(f) / file.with_suffix('.xml').name)
     if int8:
-        check_requirements('nncf')
+        check_requirements('nncf>=2.4.0')  # requires at least version 2.4.0 to use the post-training quantization
         import nncf
         import numpy as np
         from openvino.runtime import Core
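The OpenVINO export now requires `openvino-dev>=2023.0` and, for INT8, `nncf>=2.4.0` for post-training quantization. A hedged sketch of driving these paths through `export.run()` (the kwargs mirror the CLI flags `--include`, `--int8`, `--data`; weight/data values are illustrative):

```python
from export import run as export_model

# ONNX export: f = str(file.with_suffix('.onnx')) now returns a plain string path
export_model(weights='yolov5s.pt', include=('onnx',))

# FP32 OpenVINO IR (converted via ONNX + openvino.tools.mo)
export_model(weights='yolov5s.pt', include=('openvino',))

# INT8 OpenVINO IR via NNCF post-training quantization, calibrated on --data images
export_model(weights='yolov5s.pt', include=('openvino',), int8=True, data='data/coco128.yaml')
```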
yolov5/models/common.py
CHANGED
@@ -24,12 +24,24 @@ import torch.nn as nn
 from PIL import Image
 from torch.cuda import amp

+# Import 'ultralytics' package or install if if missing
+try:
+    import ultralytics
+
+    assert hasattr(ultralytics, '__version__')  # verify package is not directory
+except (ImportError, AssertionError):
+    import os
+
+    os.system('pip install -U ultralytics')
+    import ultralytics
+
+from ultralytics.utils.plotting import Annotator, colors, save_one_box
+
 from utils import TryExcept
 from utils.dataloaders import exif_transpose, letterbox
 from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
                            increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
                            xyxy2xywh, yaml_load)
-from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import copy_attr, smart_inference_mode

@@ -373,18 +385,18 @@ class DetectMultiBackend(nn.Module):
             stride, names = int(meta['stride']), eval(meta['names'])
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
-            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+            check_requirements('openvino>=2023.0')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
             from openvino.runtime import Core, Layout, get_batch
-            ie = Core()
+            core = Core()
             if not Path(w).is_file():  # if not *.xml
                 w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
-            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
-            if network.get_parameters()[0].get_layout().empty:
-                network.get_parameters()[0].set_layout(Layout('NCHW'))
-            batch_dim = get_batch(network)
+            ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))
+            if ov_model.get_parameters()[0].get_layout().empty:
+                ov_model.get_parameters()[0].set_layout(Layout('NCHW'))
+            batch_dim = get_batch(ov_model)
             if batch_dim.is_static:
                 batch_size = batch_dim.get_length()
-            executable_network = ie.compile_model(network, device_name='CPU')
+            ov_compiled_model = core.compile_model(ov_model, device_name='AUTO')  # AUTO selects best available device
             stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
@@ -524,7 +536,7 @@ class DetectMultiBackend(nn.Module):
             y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
         elif self.xml:  # OpenVINO
             im = im.cpu().numpy()  # FP32
-            y = list(self.executable_network([im]).values())
+            y = list(self.ov_compiled_model(im).values())
         elif self.engine:  # TensorRT
             if self.dynamic and im.shape != self.bindings['images'].shape:
                 i = self.model.get_binding_index('images')
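`DetectMultiBackend` now builds the OpenVINO pipeline with the 2023.x `Core` API (`read_model` + `compile_model` with `device_name='AUTO'`) and calls the compiled model directly on the input array. A self-contained sketch of the same pattern, assuming a previously exported `yolov5s_openvino_model/yolov5s.xml`:

```python
import numpy as np
from openvino.runtime import Core

core = Core()
ov_model = core.read_model('yolov5s_openvino_model/yolov5s.xml')  # .bin found alongside
compiled = core.compile_model(ov_model, device_name='AUTO')  # AUTO picks the best device
im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy NCHW input
y = list(compiled(im).values())  # same call pattern as DetectMultiBackend.forward
print([out.shape for out in y])
```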
yolov5/models/experimental.py
CHANGED
@@ -87,11 +87,11 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):

         model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode

-    # Module compatibility updates
+    # Module updates
     for m in model.modules():
         t = type(m)
         if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
-            m.inplace = inplace  # torch 1.7.0 compatibility
+            m.inplace = inplace
         if t is Detect and not isinstance(m.anchor_grid, list):
             delattr(m, 'anchor_grid')
             setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
yolov5/requirements.txt
CHANGED
@@ -4,18 +4,18 @@
 # Base ------------------------------------------------------------------------
 gitpython>=3.1.30
 matplotlib>=3.3
-numpy>=1.18.5
+numpy>=1.22.2
 opencv-python>=4.1.1
-Pillow>=9.4.0
+Pillow>=10.0.1
 psutil  # system resources
 PyYAML>=5.3.1
 requests>=2.23.0
 scipy>=1.4.1
 thop>=0.1.1  # FLOPs computation
-torch>=1.7.0  # see https://pytorch.org/get-started/locally (recommended)
-torchvision>=0.8.1
+torch>=1.8.0  # see https://pytorch.org/get-started/locally (recommended)
+torchvision>=0.9.0
 tqdm>=4.64.0
-ultralytics>=8.0.
+ultralytics>=8.0.147
 # protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012

 # Logging ---------------------------------------------------------------------
@@ -36,7 +36,7 @@ seaborn>=0.11.0
 # scikit-learn<=1.1.2  # CoreML quantization
 # tensorflow>=2.4.0  # TF exports (-cpu, -aarch64, -macos)
 # tensorflowjs>=3.9.0  # TF.js export
-# openvino-dev  # OpenVINO export
+# openvino-dev>=2023.0  # OpenVINO export

 # Deploy ----------------------------------------------------------------------
 setuptools>=65.5.1 # Snyk vulnerability fix
yolov5/segment/predict.py
CHANGED
@@ -11,7 +11,7 @@ Usage - sources:
                                      list.txt                        # list of images
                                      list.streams                    # list of streams
                                      'path/*.jpg'                    # glob
-                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                     'https://youtu.be/LNwODJXcvt4'  # YouTube
                                      'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

 Usage - formats:
@@ -42,12 +42,13 @@ if str(ROOT) not in sys.path:
     sys.path.append(str(ROOT))  # add ROOT to PATH
 ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

+from ultralytics.utils.plotting import Annotator, colors, save_one_box
+
 from models.common import DetectMultiBackend
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
 from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                            increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
                            strip_optimizer)
-from utils.plots import Annotator, colors, save_one_box
 from utils.segment.general import masks2segments, process_mask, process_mask_native
 from utils.torch_utils import select_device, smart_inference_mode
yolov5/segment/tutorial.ipynb
CHANGED
@@ -87,7 +87,7 @@
 "            screen  # screenshot\n",
 "            path/  # directory\n",
 "            'path/*.jpg'  # glob\n",
-"            'https://youtu.be/Zgi9g1ksQHc'  # YouTube\n",
+"            'https://youtu.be/LNwODJXcvt4'  # YouTube\n",
 "            'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
 "```"
 ]
@@ -558,7 +558,7 @@
 "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
 "import torch\n",
 "\n",
-"model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg')  # yolov5n - yolov5x6 or custom\n",
+"model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg', force_reload=True, trust_repo=True)  # or yolov5n - yolov5x6 or custom\n",
 "im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, nparray, list\n",
 "results = model(im)  # inference\n",
 "results.print()  # or .show(), .save(), .crop(), .pandas(), etc."
yolov5/tutorial.ipynb
CHANGED
@@ -31,7 +31,7 @@
 "  <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
 "<br>\n",
 "\n",
-"This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href=\"https://docs.ultralytics.com/yolov5\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/yolov5\">GitHub</a> for support, and join our <a href=\"https://
+"This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href=\"https://docs.ultralytics.com/yolov5\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/yolov5\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
 "\n",
 "</div>"
 ]
@@ -65,7 +65,7 @@
 "import utils\n",
 "display = utils.notebook_init()  # checks"
 ],
-"execution_count":
+"execution_count": null,
 "outputs": [
  {
   "output_type": "stream",
@@ -95,12 +95,12 @@
 "\n",
 "```shell\n",
 "python detect.py --source 0  # webcam\n",
-"                          img.jpg  # image
+"                          img.jpg  # image\n",
 "                          vid.mp4  # video\n",
 "                          screen  # screenshot\n",
 "                          path/  # directory\n",
 "                          'path/*.jpg'  # glob\n",
-"                          'https://youtu.be/Zgi9g1ksQHc'  # YouTube\n",
+"                          'https://youtu.be/LNwODJXcvt4'  # YouTube\n",
 "                          'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n",
 "```"
 ]
@@ -118,7 +118,7 @@
 "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
 "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)"
 ],
-"execution_count":
+"execution_count": null,
 "outputs": [
  {
   "output_type": "stream",
@@ -174,7 +174,7 @@
 "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')  # download (780M - 5000 images)\n",
 "!unzip -q tmp.zip -d ../datasets && rm tmp.zip  # unzip"
 ],
-"execution_count":
+"execution_count": null,
 "outputs": [
  {
   "output_type": "stream",
@@ -198,7 +198,7 @@
 "# Validate YOLOv5s on COCO val\n",
 "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half"
 ],
-"execution_count":
+"execution_count": null,
 "outputs": [
  {
   "output_type": "stream",
@@ -308,7 +308,7 @@
 "# Train YOLOv5s on COCO128 for 3 epochs\n",
 "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
 ],
-"execution_count":
+"execution_count": null,
 "outputs": [
  {
   "output_type": "stream",
@@ -539,7 +539,7 @@
 "\n",
 "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
 "\n",
-"This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices
+"This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
 "\n",
 "<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
 ]
@@ -593,7 +593,7 @@
 "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
 "import torch\n",
 "\n",
-"model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True)  # yolov5n - yolov5x6 or custom\n",
+"model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True)  # or yolov5n - yolov5x6 or custom\n",
 "im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, nparray, list\n",
 "results = model(im)  # inference\n",
 "results.print()  # or .show(), .save(), .crop(), .pandas(), etc."
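Both notebooks now pass `force_reload=True` and `trust_repo=True` to `torch.hub.load`: the former refreshes the cached `ultralytics/yolov5` checkout, the latter (available since PyTorch 1.12) suppresses the interactive prompt asking whether to trust the repository. Equivalent standalone usage:

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True)
results = model('https://ultralytics.com/images/zidane.jpg')  # file, URL, PIL, OpenCV, ndarray
results.print()
```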
yolov5/utils/__init__.py
CHANGED
@@ -54,7 +54,7 @@ def notebook_init(verbose=True):
     import os
     import shutil

-    from ultralytics.yolo.utils.checks import check_requirements
+    from ultralytics.utils.checks import check_requirements

     from utils.general import check_font, is_colab
     from utils.torch_utils import select_device  # imports
yolov5/utils/dataloaders.py
CHANGED
@@ -355,7 +355,7 @@ class LoadStreams:
             # Start thread to read frames from video stream
             st = f'{i + 1}/{n}: {s}... '
             if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video
-                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
+                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
                 check_requirements(('pafy', 'youtube_dl==2020.12.2'))
                 import pafy
                 s = pafy.new(s).getbest(preftype='mp4').url  # YouTube URL
yolov5/utils/docker/Dockerfile
CHANGED
@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt update
 RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1
+RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg
 # RUN alias python=python3

 # Security updates
@@ -24,14 +24,13 @@ RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app  (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
+COPY . /usr/src/app

 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
-    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3'
+    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0'
     # tensorflow tensorflowjs \

 # Set environment variables
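With `RUN git clone ...` replaced by `COPY . /usr/src/app` in all three Dockerfiles, the image is built from the local checkout rather than a fresh clone, so the build must run from the repository root with the repo as context, e.g. `docker build -f utils/docker/Dockerfile -t yolov5 .` (the tag name is illustrative). This also means local, uncommitted changes end up in the image, which appears to be the point of the change.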
yolov5/utils/docker/Dockerfile-arm64
CHANGED
@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
 ENV DEBIAN_FRONTEND noninteractive
 RUN apt update
 RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1
+RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev
 # RUN alias python=python3

 # Install pip packages
@@ -27,8 +27,7 @@ RUN mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app  (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
+COPY . /usr/src/app
 ENV DEBIAN_FRONTEND teletype
yolov5/utils/docker/Dockerfile-cpu
CHANGED
@@ -3,23 +3,25 @@
 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments

 # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
-FROM ubuntu:
+FROM ubuntu:mantic-20231011

 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

 # Install linux packages
-ENV DEBIAN_FRONTEND noninteractive
-RUN apt update
-RUN TZ=Etc/UTC apt install -y tzdata
-RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
+# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
+RUN apt update \
+    && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
 # RUN alias python=python3

+# Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error
+RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
+
 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
-    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \
+    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \
     # tensorflow tensorflowjs \
     --extra-index-url https://download.pytorch.org/whl/cpu

@@ -28,9 +30,7 @@ RUN mkdir -p /usr/src/app
 WORKDIR /usr/src/app

 # Copy contents
-# COPY . /usr/src/app  (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
-ENV DEBIAN_FRONTEND teletype
+COPY . /usr/src/app


 # Usage Examples -------------------------------------------------------------------------------------------------------
yolov5/utils/general.py
CHANGED
@@ -35,7 +35,17 @@ import pkg_resources as pkg
 import torch
 import torchvision
 import yaml

+# Import 'ultralytics' package or install if if missing
+try:
+    import ultralytics
+
+    assert hasattr(ultralytics, '__version__')  # verify package is not directory
+except (ImportError, AssertionError):
+    os.system('pip install -U ultralytics')
+    import ultralytics
+
+from ultralytics.utils.checks import check_requirements
+
 from utils import TryExcept, emojis
 from utils.downloads import curl_download, gsutil_getsize

@@ -371,7 +381,7 @@ def check_git_info(path='.'):
     return {'remote': None, 'branch': None, 'commit': None}


-def check_python(minimum='3.7.0'):
+def check_python(minimum='3.8.0'):
     # Check current python version vs. required python version
     check_version(platform.python_version(), minimum, name='Python ', hard=True)
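The `assert hasattr(ultralytics, '__version__')` guard matters because a stray local directory named `ultralytics/` is importable as a namespace package but lacks `__version__`; the assert then forces a reinstall. A sketch of the same idea using `subprocess` instead of the committed `os.system` call:

```python
import importlib
import subprocess
import sys


def ensure_ultralytics():
    try:
        ultralytics = importlib.import_module('ultralytics')
        assert hasattr(ultralytics, '__version__')  # namespace dirs lack __version__
        return ultralytics
    except (ImportError, AssertionError):
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', 'ultralytics'])
        return importlib.import_module('ultralytics')
```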
yolov5/utils/google_app_engine/additional_requirements.txt
CHANGED
@@ -1,5 +1,5 @@
 # add these requirements in your app on top of the existing ones
-pip==
+pip==23.3
 Flask==2.3.2
 gunicorn==19.10.0
-werkzeug>=
+werkzeug>=3.0.1  # not directly required, pinned by Snyk to avoid a vulnerability
yolov5/utils/loggers/clearml/clearml_utils.py
CHANGED
@@ -5,8 +5,7 @@ from pathlib import Path

 import numpy as np
 import yaml
-
-from utils.plots import Annotator, colors
+from ultralytics.utils.plotting import Annotator, colors

 try:
     import clearml
yolov5/utils/loggers/comet/README.md
CHANGED
@@ -59,7 +59,7 @@

 Or better yet, try it out yourself in this Colab Notebook

-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-training/yolov5/notebooks/Comet_and_YOLOv5.ipynb)

 # Log automatically
yolov5/utils/loggers/comet/__init__.py
CHANGED
@@ -42,7 +42,7 @@ COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')
 COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true'

 # Evaluation Settings
-COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true'
+COMET_LOG_CONFUSION_MATRIX = (os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true')
 COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true'
 COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100))

@@ -51,10 +51,10 @@ CONF_THRES = float(os.getenv('CONF_THRES', 0.001))
 IOU_THRES = float(os.getenv('IOU_THRES', 0.6))

 # Batch Logging Settings
-COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true'
+COMET_LOG_BATCH_METRICS = (os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true')
 COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1)
 COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1)
-COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true'
+COMET_LOG_PER_CLASS_METRICS = (os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true')

 RANK = int(os.getenv('RANK', -1))

@@ -137,7 +137,7 @@ class CometLogger:

         self.comet_log_predictions = COMET_LOG_PREDICTIONS
         if self.opt.bbox_interval == -1:
-            self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
+            self.comet_log_prediction_interval = (1 if self.opt.epochs < 10 else self.opt.epochs // 10)
         else:
             self.comet_log_prediction_interval = self.opt.bbox_interval

@@ -232,7 +232,8 @@ class CometLogger:
         with open(data_file) as f:
             data_config = yaml.safe_load(f)

-        if data_config['path'].startswith(COMET_PREFIX):
+        path = data_config.get('path')
+        if path and path.startswith(COMET_PREFIX):
             path = data_config['path'].replace(COMET_PREFIX, '')
             data_dict = self.download_dataset_artifact(path)

@@ -313,8 +314,16 @@ class CometLogger:
         image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])

         try:
-            artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split})
-            artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split})
+            artifact.add(
+                image_file,
+                logical_path=image_logical_path,
+                metadata={'split': split},
+            )
+            artifact.add(
+                label_file,
+                logical_path=label_logical_path,
+                metadata={'split': split},
+            )
         except ValueError as e:
             logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
             logger.error(f'COMET ERROR: {e}')

@@ -356,15 +365,14 @@ class CometLogger:
         data_dict['path'] = artifact_save_dir

         metadata_names = metadata.get('names')
-        if type(metadata_names) == dict:
+        if isinstance(metadata_names, dict):
             data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()}
-        elif type(metadata_names) == list:
+        elif isinstance(metadata_names, list):
             data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
         else:
             raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"

-        data_dict = self.update_data_paths(data_dict)
-        return data_dict
+        return self.update_data_paths(data_dict)

     def update_data_paths(self, data_dict):
         path = data_dict.get('path', '')

@@ -476,8 +484,9 @@ class CometLogger:
                     'f1': f1[i],
                     'true_positives': tp[i],
                     'false_positives': fp[i],
-                    'support': nt[c]},
-                    prefix=class_name)
+                    'support': nt[c], },
+                    prefix=class_name,
+                )

         if self.comet_log_confusion_matrix:
             epoch = self.experiment.curr_epoch
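All of the Comet toggles touched here are read from environment variables at import time, so they must be set before training starts. A sketch (values illustrative):

```python
import os

os.environ['COMET_LOG_CONFUSION_MATRIX'] = 'true'   # default 'true'
os.environ['COMET_LOG_BATCH_METRICS'] = 'false'     # default 'false'
os.environ['COMET_LOG_PER_CLASS_METRICS'] = 'true'  # default 'false'
os.environ['COMET_MAX_IMAGE_UPLOADS'] = '50'        # cap on logged prediction images

# then e.g.: python train.py --data coco128.yaml --weights yolov5s.pt --bbox_interval 2
```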
yolov5/utils/plots.py
CHANGED
@@ -8,7 +8,6 @@ import math
 import os
 from copy import copy
 from pathlib import Path
-from urllib.error import URLError

 import cv2
 import matplotlib
@@ -17,14 +16,13 @@ import numpy as np
 import pandas as pd
 import seaborn as sn
 import torch
-from PIL import Image, ImageDraw, ImageFont
+from PIL import Image, ImageDraw
 from scipy.ndimage.filters import gaussian_filter1d
+from ultralytics.utils.plotting import Annotator

 from utils import TryExcept, threaded
-from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
-                           is_ascii, xywh2xyxy, xyxy2xywh)
+from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
 from utils.metrics import fitness
-from utils.segment.general import scale_image

 # Settings
 RANK = int(os.getenv('RANK', -1))
@@ -53,120 +51,6 @@ class Colors:
 colors = Colors()  # create instance for 'from utils.plots import colors'


-def check_pil_font(font=FONT, size=10):
-    # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
-    font = Path(font)
-    font = font if font.exists() else (CONFIG_DIR / font.name)
-    try:
-        return ImageFont.truetype(str(font) if font.exists() else font.name, size)
-    except Exception:  # download if missing
-        try:
-            check_font(font)
-            return ImageFont.truetype(str(font), size)
-        except TypeError:
-            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374
-        except URLError:  # not online
-            return ImageFont.load_default()
-
-
-class Annotator:
-    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
-    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
-        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
-        non_ascii = not is_ascii(example)  # non-latin labels, i.e. asian, arabic, cyrillic
-        self.pil = pil or non_ascii
-        if self.pil:  # use PIL
-            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
-            self.draw = ImageDraw.Draw(self.im)
-            self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font,
-                                       size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
-        else:  # use cv2
-            self.im = im
-        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width
-
-    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
-        # Add one xyxy box to image with label
-        if self.pil or not is_ascii(label):
-            self.draw.rectangle(box, width=self.lw, outline=color)  # box
-            if label:
-                w, h = self.font.getsize(label)  # text width, height (WARNING: deprecated) in 9.2.0
-                # _, _, w, h = self.font.getbbox(label)  # text width, height (New)
-                outside = box[1] - h >= 0  # label fits outside box
-                self.draw.rectangle(
-                    (box[0], box[1] - h if outside else box[1], box[0] + w + 1,
-                     box[1] + 1 if outside else box[1] + h + 1),
-                    fill=color,
-                )
-                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
-                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
-        else:  # cv2
-            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
-            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
-            if label:
-                tf = max(self.lw - 1, 1)  # font thickness
-                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
-                outside = p1[1] - h >= 3
-                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
-                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
-                cv2.putText(self.im,
-                            label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
-                            0,
-                            self.lw / 3,
-                            txt_color,
-                            thickness=tf,
-                            lineType=cv2.LINE_AA)
-
-    def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
-        """Plot masks at once.
-        Args:
-            masks (tensor): predicted masks on cuda, shape: [n, h, w]
-            colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
-            im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1]
-            alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
-        """
-        if self.pil:
-            # convert to numpy first
-            self.im = np.asarray(self.im).copy()
-        if len(masks) == 0:
-            self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
-        colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
-        colors = colors[:, None, None]  # shape(n,1,1,3)
-        masks = masks.unsqueeze(3)  # shape(n,h,w,1)
-        masks_color = masks * (colors * alpha)  # shape(n,h,w,3)
-
-        inv_alph_masks = (1 - masks * alpha).cumprod(0)  # shape(n,h,w,1)
-        mcs = (masks_color * inv_alph_masks).sum(0) * 2  # mask color summand shape(n,h,w,3)
-
-        im_gpu = im_gpu.flip(dims=[0])  # flip channel
-        im_gpu = im_gpu.permute(1, 2, 0).contiguous()  # shape(h,w,3)
-        im_gpu = im_gpu * inv_alph_masks[-1] + mcs
-        im_mask = (im_gpu * 255).byte().cpu().numpy()
-        self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape)
-        if self.pil:
-            # convert im back to PIL and update draw
-            self.fromarray(self.im)
-
-    def rectangle(self, xy, fill=None, outline=None, width=1):
-        # Add rectangle to image (PIL-only)
-        self.draw.rectangle(xy, fill, outline, width)
-
-    def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
-        # Add text to image (PIL-only)
-        if anchor == 'bottom':  # start y from font bottom
-            w, h = self.font.getsize(text)  # text width, height
-            xy[1] += 1 - h
-        self.draw.text(xy, text, fill=txt_color, font=self.font)
-
-    def fromarray(self, im):
-        # Update self.im from a numpy array
-        self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
-        self.draw = ImageDraw.Draw(self.im)
-
-    def result(self):
-        # Return annotated image as array
-        return np.asarray(self.im)
-
-
 def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
     """
     x: Features to be visualized
@@ -266,7 +150,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
         x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
         annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
         if paths:
-            annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
+            annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
         if len(targets) > 0:
            ti = targets[targets[:, 0] == i]  # image targets
            boxes = xywh2xyxy(ti[:, 2:6]).T
yolov5/utils/segment/plots.py
CHANGED
@@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):
         x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
         annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
         if paths:
-            annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
+            annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
         if len(targets) > 0:
             idx = targets[:, 0] == i
             ti = targets[idx]  # image targets
yolov5/val.py
CHANGED
@@ -304,6 +304,8 @@ def run(
     if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))  # annotations
+       if not os.path.exists(anno_json):
+           anno_json = os.path.join(data['path'], 'annotations', 'instances_val2017.json')
        pred_json = str(save_dir / f'{w}_predictions.json')  # predictions
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
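The two added lines give `val.py` a fallback for the COCO annotation file: try the hard-coded `../datasets/coco` location first, then look under the dataset yaml's `path` key. The lookup order in isolation:

```python
import os
from pathlib import Path

data = {'path': '/data/coco'}  # as parsed from the dataset yaml; value illustrative
anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))
if not os.path.exists(anno_json):
    anno_json = os.path.join(data['path'], 'annotations', 'instances_val2017.json')
print(anno_json)
```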