stphtan94117 committed on
Commit d087c42
1 Parent(s): 6aac215

Upload 142 files

Files changed (45)
  1. ultralytics/yolov5/.github/ISSUE_TEMPLATE/config.yml +1 -1
  2. ultralytics/yolov5/.github/workflows/ci-testing.yml +7 -10
  3. ultralytics/yolov5/.github/workflows/codeql-analysis.yml +1 -1
  4. ultralytics/yolov5/.github/workflows/docker.yml +9 -7
  5. ultralytics/yolov5/.github/workflows/greetings.yml +1 -1
  6. ultralytics/yolov5/.github/workflows/links.yml +10 -5
  7. ultralytics/yolov5/.github/workflows/translate-readme.yml +2 -2
  8. ultralytics/yolov5/.pre-commit-config.yaml +16 -12
  9. ultralytics/yolov5/README.md +45 -62
  10. ultralytics/yolov5/README.zh-CN.md +50 -67
  11. ultralytics/yolov5/classify/predict.py +4 -3
  12. ultralytics/yolov5/classify/tutorial.ipynb +2 -2
  13. ultralytics/yolov5/data/Objects365.yaml +1 -1
  14. ultralytics/yolov5/detect.py +26 -2
  15. ultralytics/yolov5/export.py +59 -11
  16. ultralytics/yolov5/models/common.py +22 -10
  17. ultralytics/yolov5/models/experimental.py +2 -2
  18. ultralytics/yolov5/models/tf.py +2 -2
  19. ultralytics/yolov5/models/yolo.py +4 -4
  20. ultralytics/yolov5/requirements.txt +6 -6
  21. ultralytics/yolov5/segment/predict.py +3 -2
  22. ultralytics/yolov5/segment/train.py +1 -1
  23. ultralytics/yolov5/segment/tutorial.ipynb +3 -3
  24. ultralytics/yolov5/setup.cfg +8 -6
  25. ultralytics/yolov5/train.py +6 -1
  26. ultralytics/yolov5/tutorial.ipynb +11 -11
  27. ultralytics/yolov5/utils/__init__.py +5 -1
  28. ultralytics/yolov5/utils/augmentations.py +1 -1
  29. ultralytics/yolov5/utils/callbacks.py +1 -1
  30. ultralytics/yolov5/utils/dataloaders.py +1 -1
  31. ultralytics/yolov5/utils/docker/Dockerfile +3 -4
  32. ultralytics/yolov5/utils/docker/Dockerfile-arm64 +2 -3
  33. ultralytics/yolov5/utils/docker/Dockerfile-cpu +9 -9
  34. ultralytics/yolov5/utils/downloads.py +1 -2
  35. ultralytics/yolov5/utils/general.py +15 -5
  36. ultralytics/yolov5/utils/google_app_engine/additional_requirements.txt +2 -2
  37. ultralytics/yolov5/utils/loggers/__init__.py +4 -8
  38. ultralytics/yolov5/utils/loggers/clearml/clearml_utils.py +1 -2
  39. ultralytics/yolov5/utils/loggers/comet/README.md +1 -1
  40. ultralytics/yolov5/utils/loggers/comet/__init__.py +31 -21
  41. ultralytics/yolov5/utils/plots.py +4 -120
  42. ultralytics/yolov5/utils/segment/metrics.py +2 -2
  43. ultralytics/yolov5/utils/segment/plots.py +1 -1
  44. ultralytics/yolov5/utils/torch_utils.py +2 -2
  45. ultralytics/yolov5/val.py +2 -0
ultralytics/yolov5/.github/ISSUE_TEMPLATE/config.yml CHANGED
@@ -7,5 +7,5 @@ contact_links:
     url: https://community.ultralytics.com/
     about: Ask on Ultralytics Community Forum
   - name: 🎧 Discord
-    url: https://discord.gg/n6cFeSPZdD
+    url: https://ultralytics.com/discord
     about: Ask on Ultralytics Discord
ultralytics/yolov5/.github/workflows/ci-testing.yml CHANGED
@@ -21,7 +21,7 @@ jobs:
         python-version: [ '3.10' ] # requires python<=3.10
         model: [ yolov5n ]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
@@ -57,20 +57,17 @@ jobs:
         model: [ yolov5n ]
         include:
           - os: ubuntu-latest
-            python-version: '3.7' # '3.6.8' min
-            model: yolov5n
-          - os: ubuntu-latest
-            python-version: '3.8'
+            python-version: '3.8' # '3.6.8' min
             model: yolov5n
           - os: ubuntu-latest
             python-version: '3.9'
             model: yolov5n
           - os: ubuntu-latest
-            python-version: '3.8' # torch 1.7.0 requires python >=3.6, <=3.8
+            python-version: '3.8' # torch 1.8.0 requires python >=3.6, <=3.8
             model: yolov5n
-            torch: '1.7.0' # min torch version CI https://pypi.org/project/torchvision/
+            torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
@@ -78,8 +75,8 @@ jobs:
      - name: Install requirements
        run: |
          python -m pip install --upgrade pip wheel
-          if [ "${{ matrix.torch }}" == "1.7.0" ]; then
-            pip install -r requirements.txt torch==1.7.0 torchvision==0.8.1 --extra-index-url https://download.pytorch.org/whl/cpu
+          if [ "${{ matrix.torch }}" == "1.8.0" ]; then
+            pip install -r requirements.txt torch==1.8.0 torchvision==0.9.0 --extra-index-url https://download.pytorch.org/whl/cpu
          else
            pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
          fi
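
Editor's note: the torch and torchvision pins move together here because each torchvision release is built against exactly one torch release (0.8.1 pairs with 1.7.0, 0.9.0 with 1.8.0). A minimal sanity check for the new pair, assuming the CI environment installed above:

```python
# Sketch only: verify the pinned pair installed by the CI step above.
# A mismatched torch/torchvision combination typically fails or warns on import.
import torch
import torchvision

assert torch.__version__.startswith('1.8'), torch.__version__
assert torchvision.__version__.startswith('0.9'), torchvision.__version__
print(f'torch {torch.__version__} / torchvision {torchvision.__version__} pair OK')
```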
ultralytics/yolov5/.github/workflows/codeql-analysis.yml CHANGED
@@ -23,7 +23,7 @@ jobs:

    steps:
    - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
ultralytics/yolov5/.github/workflows/docker.yml CHANGED
@@ -15,22 +15,24 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0 # copy full .git directory to access full git history in Docker images

      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push arm64 image
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
        continue-on-error: true
        with:
          context: .
@@ -40,7 +42,7 @@ jobs:
          tags: ultralytics/yolov5:latest-arm64

      - name: Build and push CPU image
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
        continue-on-error: true
        with:
          context: .
@@ -49,7 +51,7 @@ jobs:
          tags: ultralytics/yolov5:latest-cpu

      - name: Build and push GPU image
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
        continue-on-error: true
        with:
          context: .
ultralytics/yolov5/.github/workflows/greetings.yml CHANGED
@@ -31,7 +31,7 @@ jobs:

        ## Requirements

-        [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started:
+        [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started:
        ```bash
        git clone https://github.com/ultralytics/yolov5 # clone
        cd yolov5
ultralytics/yolov5/.github/workflows/links.yml CHANGED
@@ -1,6 +1,11 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-# YOLO Continuous Integration (CI) GitHub Actions tests broken link checker
-# Accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter)
+# Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee
+# Ignores the following status codes to reduce false positives:
+# - 403(OpenVINO, 'forbidden')
+# - 429(Instagram, 'too many requests')
+# - 500(Zenodo, 'cached')
+# - 502(Zenodo, 'bad gateway')
+# - 999(LinkedIn, 'unknown status code')

 name: Check Broken links

@@ -13,7 +18,7 @@ jobs:
  Links:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Download and install lychee
        run: |
@@ -28,7 +33,7 @@ jobs:
          timeout_minutes: 5
          retry_wait_seconds: 60
          max_attempts: 3
-          command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html'
+          command: lychee --accept 403,429,500,502,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html'

      - name: Test Markdown, HTML, YAML, Python and Notebook links with retry
        if: github.event_name == 'workflow_dispatch'
@@ -37,4 +42,4 @@ jobs:
          timeout_minutes: 5
          retry_wait_seconds: 60
          max_attempts: 3
-          command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'
+          command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'
ultralytics/yolov5/.github/workflows/translate-readme.yml CHANGED
@@ -14,9 +14,9 @@ jobs:
  Translate:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - name: Setup Node.js
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
        with:
          node-version: 16
      # ISO Language Codes: https://cloud.google.com/translate/docs/languages
ultralytics/yolov5/.pre-commit-config.yaml CHANGED
@@ -11,22 +11,21 @@ ci:

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.5.0
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: check-case-conflict
-      - id: check-yaml
+      # - id: check-yaml
      - id: check-docstring-first
      - id: double-quote-string-fixer
      - id: detect-private-key

  - repo: https://github.com/asottile/pyupgrade
-    rev: v3.4.0
+    rev: v3.15.0
    hooks:
      - id: pyupgrade
        name: Upgrade code
-        args: [--py37-plus]

  - repo: https://github.com/PyCQA/isort
    rev: 5.12.0
@@ -35,13 +34,13 @@ repos:
        name: Sort imports

  - repo: https://github.com/google/yapf
-    rev: v0.33.0
+    rev: v0.40.2
    hooks:
      - id: yapf
        name: YAPF formatting

  - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.16
+    rev: 0.7.17
    hooks:
      - id: mdformat
        name: MD formatting
@@ -51,19 +50,24 @@ repos:
  # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md"

  - repo: https://github.com/PyCQA/flake8
-    rev: 6.0.0
+    rev: 6.1.0
    hooks:
      - id: flake8
        name: PEP8

  - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.4
+    rev: v2.2.6
    hooks:
      - id: codespell
        args:
          - --ignore-words-list=crate,nd,strack,dota

-  #- repo: https://github.com/asottile/yesqa
-  #  rev: v1.4.0
-  #  hooks:
-  #    - id: yesqa
+  # - repo: https://github.com/asottile/yesqa
+  #   rev: v1.4.0
+  #   hooks:
+  #     - id: yesqa
+
+  # - repo: https://github.com/asottile/dead
+  #   rev: v1.5.0
+  #   hooks:
+  #     - id: dead
ultralytics/yolov5/README.md CHANGED
@@ -1,7 +1,11 @@
 <div align="center">
  <p>
+    <a href="https://yolovision.ultralytics.com/" target="_blank">
+    <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-yolo-vision-2023.png"></a>
+    <!--
    <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
    <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
+    -->
  </p>

  [English](README.md) | [简体中文](README.zh-CN.md)
@@ -20,31 +24,24 @@

YOLOv5 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

-We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://discord.gg/n6cFeSPZdD">Discord</a> community for questions and discussions!
+We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href="https://docs.ultralytics.com/yolov5">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!

To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).

<div align="center">
-  <a href="https://github.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.linkedin.com/company/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://twitter.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://youtube.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://discord.gg/n6cFeSPZdD" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="2%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>

</div>
@@ -52,14 +49,13 @@ To request an Enterprise License please complete the form at [Ultralytics Licens

## <div align="center">YOLOv8 🚀 NEW</div>

-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model
-released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**.
-YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of
-object detection, image segmentation and image classification tasks.
+We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks.

See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:

-```commandline
+[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)
+
+```bash
pip install ultralytics
```
@@ -76,8 +72,8 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentatio
<summary>Install</summary>

Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
-[**Python>=3.7.0**](https://www.python.org/) environment, including
-[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
+[**Python>=3.8.0**](https://www.python.org/) environment, including
+[**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).

```bash
git clone https://github.com/ultralytics/yolov5 # clone
@@ -90,8 +86,7 @@ pip install -r requirements.txt # install
<details>
<summary>Inference</summary>

-YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
+YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).

```python
import torch
@@ -114,8 +109,7 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
<details>
<summary>Inference with detect.py</summary>

-`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
-the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
+`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.

```bash
python detect.py --weights yolov5s.pt --source 0 # webcam
@@ -126,7 +120,7 @@ python detect.py --weights yolov5s.pt --source 0 #
                          list.txt # list of images
                          list.streams # list of streams
                          'path/*.jpg' # glob
-                          'https://youtu.be/Zgi9g1ksQHc' # YouTube
+                          'https://youtu.be/LNwODJXcvt4' # YouTube
                          'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
```

@@ -137,11 +131,7 @@ python detect.py --weights yolov5s.pt --source 0 #

The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
-and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
-1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the
-largest `--batch-size` possible, or pass `--batch-size -1` for
-YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
+and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.

```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
@@ -459,37 +449,30 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare

## <div align="center">License</div>

-YOLOv5 is available under two different licenses:
+Ultralytics offers two licensing options to accommodate diverse use cases:

-- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
-- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).

## <div align="center">Contact</div>

-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions!
+For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions!

<br>
<div align="center">
-  <a href="https://github.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.linkedin.com/company/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://twitter.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://youtube.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://discord.gg/n6cFeSPZdD" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="3%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>

[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation
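
Editor's note: the Inference hunk's trailing context stops at `import torch`. For reference, the snippet it belongs to continues as below; this is the standard YOLOv5 Hub flow, and the same code appears in full in the classify/tutorial.ipynb hunk later in this commit.

```python
import torch

# Model downloads automatically from the latest YOLOv5 release
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # or yolov5n - yolov5x6, custom

im = 'https://ultralytics.com/images/zidane.jpg'  # file, Path, PIL.Image, OpenCV, ndarray, list
results = model(im)  # inference
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
```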
ultralytics/yolov5/README.zh-CN.md CHANGED
@@ -1,7 +1,11 @@
 <div align="center">
  <p>
+    <a href="https://yolovision.ultralytics.com/" target="_blank">
+    <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-yolo-vision-2023.png"></a>
+    <!--
    <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
    <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png"></a>
+    -->
  </p>

  [英文](README.md)|[简体中文](README.zh-CN.md)<br>
@@ -19,42 +23,34 @@

YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表<a href="https://ultralytics.com"> Ultralytics </a>对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。

-我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 <a href="https://docs.ultralytics.com/">文档</a> 了解详细信息,在 <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> 上提交问题以获得支持,并加入我们的 <a href="https://discord.gg/n6cFeSPZdD">Discord</a> 社区进行问题和讨论!
+我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 <a href="https://docs.ultralytics.com/yolov5/">文档</a> 了解详细信息,在 <a href="https://github.com/ultralytics/yolov5/issues/new/choose">GitHub</a> 上提交问题以获得支持,并加入我们的 <a href="https://ultralytics.com/discord">Discord</a> 社区进行问题和讨论!

如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格

<div align="center">
-  <a href="https://github.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.linkedin.com/company/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://twitter.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://youtube.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
-  <a href="https://discord.gg/n6cFeSPZdD" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="2%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="2%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="2%" alt="Ultralytics Discord"></a>
</div>
</div>

-## <div align="center">YOLOv8 🚀 NEW</div>
+## <div align="center">YOLOv8 🚀 新品</div>

-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model
-released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**.
-YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of
-object detection, image segmentation and image classification tasks.
+我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。

-See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with:
+请查看 [YOLOv8 文档](https://docs.ultralytics.com)了解详细信息,并开始使用:
+
+[![PyPI 版本](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![下载量](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)

```commandline
pip install ultralytics
@@ -67,12 +63,12 @@ pip install ultralytics

## <div align="center">文档</div>

-有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。
+有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com/yolov5/)。请参阅下面的快速入门示例。

<details open>
<summary>安装</summary>

-克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/) 。
+克隆 repo,并要求在 [**Python>=3.8.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 。

```bash
git clone https://github.com/ultralytics/yolov5 # clone
@@ -85,8 +81,7 @@ pip install -r requirements.txt # install
<details>
<summary>推理</summary>

-使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从
-YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。
+使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。

```python
import torch
@@ -109,8 +104,7 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
<details>
<summary>使用 detect.py 推理</summary>

-`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从
-最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。
+`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。

```bash
python detect.py --weights yolov5s.pt --source 0 # webcam
@@ -121,7 +115,7 @@ python detect.py --weights yolov5s.pt --source 0 #
                          list.txt # list of images
                          list.streams # list of streams
                          'path/*.jpg' # glob
-                          'https://youtu.be/Zgi9g1ksQHc' # YouTube
+                          'https://youtu.be/LNwODJXcvt4' # YouTube
                          'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
```

@@ -130,12 +124,8 @@ python detect.py --weights yolov5s.pt --source 0 #
<details>
<summary>训练</summary>

-下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。
-最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) [数据集](https://github.com/ultralytics/yolov5/tree/master/data)
-将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。
-YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。
-尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现
-YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。
+下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data)
+将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。 尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。

```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
@@ -250,7 +240,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结

</details>

-## <div align="center">实例分割模型 ⭐ 新</div>
+## <div align="center">实例分割模型 ⭐ 新</div>

我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。

@@ -452,39 +442,32 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
<a href="https://github.com/ultralytics/yolov5/graphs/contributors">
<img src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" /></a>

-## <div align="center">License</div>
+## <div align="center">许可证</div>

-YOLOv5 在两种不同的 License 下可用:
+Ultralytics 提供两种许可证选项以适应各种使用场景:

-- **AGPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。
-- **企业License**:在没有 AGPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license)
+- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/licenses/)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件以了解更多细节。
+- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://ultralytics.com/license)与我们联系。

-## <div align="center">联系我们</div>
+## <div align="center">联系方式</div>

-对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/n6cFeSPZdD) 社区进行问题和讨论!
+对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://ultralytics.com/discord) 社区进行问题和讨论!

<br>
<div align="center">
-  <a href="https://github.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.linkedin.com/company/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://twitter.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://youtube.com/ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.tiktok.com/@ultralytics" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="" /></a>
-  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="" />
-  <a href="https://discord.gg/n6cFeSPZdD" style="text-decoration:none;">
-    <img src="https://github.com/ultralytics/assets/blob/main/social/logo-social-discord.png" width="3%" alt="" /></a>
+  <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="3%" alt="Ultralytics GitHub"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.linkedin.com/company/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="3%" alt="Ultralytics LinkedIn"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://twitter.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="3%" alt="Ultralytics Twitter"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://youtube.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="3%" alt="Ultralytics YouTube"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="Ultralytics Instagram"></a>
+  <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%">
+  <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
</div>

[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation
ultralytics/yolov5/classify/predict.py CHANGED
@@ -11,7 +11,7 @@ Usage - sources:
        list.txt # list of images
        list.streams # list of streams
        'path/*.jpg' # glob
-        'https://youtu.be/Zgi9g1ksQHc' # YouTube
+        'https://youtu.be/LNwODJXcvt4' # YouTube
        'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream

Usage - formats:
@@ -43,12 +43,13 @@ if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative

+from ultralytics.utils.plotting import Annotator
+
from models.common import DetectMultiBackend
from utils.augmentations import classify_transforms
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, print_args, strip_optimizer)
-from utils.plots import Annotator
from utils.torch_utils import select_device, smart_inference_mode


@@ -144,7 +145,7 @@ def run(
            # Write results
            text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
            if save_img or view_img: # Add bbox to image
-                annotator.text((32, 32), text, txt_color=(255, 255, 255))
+                annotator.text([32, 32], text, txt_color=(255, 255, 255))
            if save_txt: # Write to file
                with open(f'{txt_path}.txt', 'a') as f:
                    f.write(text + '\n')
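
Editor's note: the two classify/predict.py changes travel together. `Annotator` now comes from the installed `ultralytics` package instead of the repo-local `utils.plots`, and the text anchor switches from a tuple to a list, matching the package's multiline text handling, which advances the y-coordinate in place as it draws each line. A minimal sketch of the new call, assuming the `ultralytics` package is installed (the image and labels below are placeholders):

```python
import numpy as np
from ultralytics.utils.plotting import Annotator  # replaces the repo-local utils.plots.Annotator

im = np.zeros((640, 640, 3), dtype=np.uint8)  # placeholder image
annotator = Annotator(im, example='yolov5', pil=True)  # PIL backend supports multiline text
annotator.text([32, 32], '0.92 dog\n0.05 cat', txt_color=(255, 255, 255))  # xy as a mutable list
result = annotator.result()  # annotated image returned as an ndarray
```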
ultralytics/yolov5/classify/tutorial.ipynb CHANGED
@@ -87,7 +87,7 @@
 "        screen # screenshot\n",
 "        path/ # directory\n",
 "        'path/*.jpg' # glob\n",
-"        'https://youtu.be/Zgi9g1ksQHc' # YouTube\n",
+"        'https://youtu.be/LNwODJXcvt4' # YouTube\n",
 "        'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
 "```"
 ]
@@ -1445,7 +1445,7 @@
 "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
 "import torch\n",
 "\n",
-"model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n",
+"model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n",
 "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
 "results = model(im) # inference\n",
 "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
ultralytics/yolov5/data/Objects365.yaml CHANGED
@@ -428,7 +428,7 @@ download: |
428
  path = Path(im["file_name"]) # image filename
429
  try:
430
  with open(labels / path.with_suffix('.txt').name, 'a') as file:
431
- annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
432
  for a in coco.loadAnns(annIds):
433
  x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
434
  xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
 
428
  path = Path(im["file_name"]) # image filename
429
  try:
430
  with open(labels / path.with_suffix('.txt').name, 'a') as file:
431
+ annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=False)
432
  for a in coco.loadAnns(annIds):
433
  x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
434
  xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
ultralytics/yolov5/detect.py CHANGED
@@ -11,7 +11,7 @@ Usage - sources:
11
  list.txt # list of images
12
  list.streams # list of streams
13
  'path/*.jpg' # glob
14
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
15
  'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
16
 
17
  Usage - formats:
@@ -29,6 +29,7 @@ Usage - formats:
29
  """
30
 
31
  import argparse
 
32
  import os
33
  import platform
34
  import sys
@@ -42,11 +43,12 @@ if str(ROOT) not in sys.path:
42
  sys.path.append(str(ROOT)) # add ROOT to PATH
43
  ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
44
 
45
  from models.common import DetectMultiBackend
46
  from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
47
  from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
48
  increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
49
- from utils.plots import Annotator, colors, save_one_box
50
  from utils.torch_utils import select_device, smart_inference_mode
51
 
52
 
@@ -62,6 +64,7 @@ def run(
62
  device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
63
  view_img=False, # show results
64
  save_txt=False, # save results to *.txt
 
65
  save_conf=False, # save confidences in --save-txt labels
66
  save_crop=False, # save cropped prediction boxes
67
  nosave=False, # do not save images/videos
@@ -134,6 +137,18 @@ def run(
134
  # Second-stage classifier (optional)
135
  # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
136
 
137
  # Process predictions
138
  for i, det in enumerate(pred): # per image
139
  seen += 1
@@ -161,6 +176,14 @@ def run(
161
 
162
  # Write results
163
  for *xyxy, conf, cls in reversed(det):
164
  if save_txt: # Write to file
165
  xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
166
  line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
@@ -228,6 +251,7 @@ def parse_opt():
228
  parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
229
  parser.add_argument('--view-img', action='store_true', help='show results')
230
  parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
 
231
  parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
232
  parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
233
  parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
 
11
  list.txt # list of images
12
  list.streams # list of streams
13
  'path/*.jpg' # glob
14
+ 'https://youtu.be/LNwODJXcvt4' # YouTube
15
  'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
16
 
17
  Usage - formats:
 
29
  """
30
 
31
  import argparse
32
+ import csv
33
  import os
34
  import platform
35
  import sys
 
43
  sys.path.append(str(ROOT)) # add ROOT to PATH
44
  ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
45
 
46
+ from ultralytics.utils.plotting import Annotator, colors, save_one_box
47
+
48
  from models.common import DetectMultiBackend
49
  from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
50
  from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
51
  increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
 
52
  from utils.torch_utils import select_device, smart_inference_mode
53
 
54
 
 
64
  device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
65
  view_img=False, # show results
66
  save_txt=False, # save results to *.txt
67
+ save_csv=False, # save results in CSV format
68
  save_conf=False, # save confidences in --save-txt labels
69
  save_crop=False, # save cropped prediction boxes
70
  nosave=False, # do not save images/videos
 
137
  # Second-stage classifier (optional)
138
  # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
139
 
140
+ # Define the path for the CSV file
141
+ csv_path = save_dir / 'predictions.csv'
142
+
143
+ # Create or append to the CSV file
144
+ def write_to_csv(image_name, prediction, confidence):
145
+ data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence}
146
+ with open(csv_path, mode='a', newline='') as f:
147
+ writer = csv.DictWriter(f, fieldnames=data.keys())
148
+ if not csv_path.is_file():
149
+ writer.writeheader()
150
+ writer.writerow(data)
151
+
152
  # Process predictions
153
  for i, det in enumerate(pred): # per image
154
  seen += 1
 
176
 
177
  # Write results
178
  for *xyxy, conf, cls in reversed(det):
179
+ c = int(cls) # integer class
180
+ label = names[c] if hide_conf else f'{names[c]}'
181
+ confidence = float(conf)
182
+ confidence_str = f'{confidence:.2f}'
183
+
184
+ if save_csv:
185
+ write_to_csv(p.name, label, confidence_str)
186
+
187
  if save_txt: # Write to file
188
  xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
189
  line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
 
251
  parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
252
  parser.add_argument('--view-img', action='store_true', help='show results')
253
  parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
254
+ parser.add_argument('--save-csv', action='store_true', help='save results in CSV format')
255
  parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
256
  parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
257
  parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
ultralytics/yolov5/export.py CHANGED
@@ -110,7 +110,7 @@ def export_formats():
110
  ['TensorFlow Lite', 'tflite', '.tflite', True, False],
111
  ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
112
  ['TensorFlow.js', 'tfjs', '_web_model', False, False],
113
- ['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
114
  return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
115
 
116
 
@@ -155,7 +155,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
155
  import onnx
156
 
157
  LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
158
- f = file.with_suffix('.onnx')
159
 
160
  output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
161
  if dynamic:
@@ -205,9 +205,9 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
205
 
206
 
207
  @try_export
208
- def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
209
  # YOLOv5 OpenVINO export
210
- check_requirements('openvino-dev>=2022.3') # requires openvino-dev: https://pypi.org/project/openvino-dev/
211
  import openvino.runtime as ov # noqa
212
  from openvino.tools import mo # noqa
213
 
@@ -215,8 +215,56 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
215
  f = str(file).replace(file.suffix, f'_openvino_model{os.sep}')
216
  f_onnx = file.with_suffix('.onnx')
217
  f_ov = str(Path(f) / file.with_suffix('.xml').name)
218
-
219
- ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half) # export
220
 
221
  ov.serialize(ov_model, f_ov) # save
222
  yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
@@ -453,7 +501,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
453
  '10',
454
  '--out_dir',
455
  str(file.parent),
456
- f_tfl,], check=True)
457
  return f, None
458
 
459
 
@@ -474,7 +522,7 @@ def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
474
  '--quantize_uint8' if int8 else '',
475
  '--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
476
  str(f_pb),
477
- str(f),]
478
  subprocess.run([arg for arg in args if arg], check=True)
479
 
480
  json = Path(f_json).read_text()
@@ -533,7 +581,7 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:
533
  batch_size, ch, h, w = list(im.shape) # BCHW
534
  t = time.time()
535
 
536
- # Output shapes
537
  spec = model.get_spec()
538
  out0, out1 = iter(spec.description.output)
539
  if platform.system() == 'Darwin':
@@ -723,7 +771,7 @@ def run(
723
  if onnx or xml: # OpenVINO requires ONNX
724
  f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
725
  if xml: # OpenVINO
726
- f[3], _ = export_openvino(file, metadata, half)
727
  if coreml: # CoreML
728
  f[4], ct_model = export_coreml(model, im, file, int8, half, nms)
729
  if nms:
@@ -783,7 +831,7 @@ def parse_opt(known=False):
783
  parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
784
  parser.add_argument('--keras', action='store_true', help='TF: use Keras')
785
  parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
786
- parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
787
  parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
788
  parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
789
  parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
 
110
  ['TensorFlow Lite', 'tflite', '.tflite', True, False],
111
  ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
112
  ['TensorFlow.js', 'tfjs', '_web_model', False, False],
113
+ ['PaddlePaddle', 'paddle', '_paddle_model', True, True], ]
114
  return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
115
 
116
 
 
155
  import onnx
156
 
157
  LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
158
+ f = str(file.with_suffix('.onnx'))
159
 
160
  output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
161
  if dynamic:
 
205
 
206
 
207
  @try_export
208
+ def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')):
209
  # YOLOv5 OpenVINO export
210
+ check_requirements('openvino-dev>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/
211
  import openvino.runtime as ov # noqa
212
  from openvino.tools import mo # noqa
213
 
 
215
  f = str(file).replace(file.suffix, f'_openvino_model{os.sep}')
216
  f_onnx = file.with_suffix('.onnx')
217
  f_ov = str(Path(f) / file.with_suffix('.xml').name)
218
+ if int8:
219
+ check_requirements('nncf>=2.4.0') # requires at least version 2.4.0 to use post-training quantization
220
+ import nncf
221
+ import numpy as np
222
+ from openvino.runtime import Core
223
+
224
+ from utils.dataloaders import create_dataloader
225
+ core = Core()
226
+ onnx_model = core.read_model(f_onnx) # export
227
+
228
+ def prepare_input_tensor(image: np.ndarray):
229
+ input_tensor = image.astype(np.float32) # uint8 to fp16/32
230
+ input_tensor /= 255.0 # 0 - 255 to 0.0 - 1.0
231
+
232
+ if input_tensor.ndim == 3:
233
+ input_tensor = np.expand_dims(input_tensor, 0)
234
+ return input_tensor
235
+
236
+ def gen_dataloader(yaml_path, task='train', imgsz=640, workers=4):
237
+ data_yaml = check_yaml(yaml_path)
238
+ data = check_dataset(data_yaml)
239
+ dataloader = create_dataloader(data[task],
240
+ imgsz=imgsz,
241
+ batch_size=1,
242
+ stride=32,
243
+ pad=0.5,
244
+ single_cls=False,
245
+ rect=False,
246
+ workers=workers)[0]
247
+ return dataloader
248
+
249
+ # noqa: F811
250
+
251
+ def transform_fn(data_item):
252
+ """
253
+ Quantization transform function. Extracts and preprocesses input data from a dataloader item for quantization.
254
+ Parameters:
255
+ data_item: Tuple with data item produced by DataLoader during iteration
256
+ Returns:
257
+ input_tensor: Input data for quantization
258
+ """
259
+ img = data_item[0].numpy()
260
+ input_tensor = prepare_input_tensor(img)
261
+ return input_tensor
262
+
263
+ ds = gen_dataloader(data)
264
+ quantization_dataset = nncf.Dataset(ds, transform_fn)
265
+ ov_model = nncf.quantize(onnx_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)
266
+ else:
267
+ ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half) # export
268
 
269
  ov.serialize(ov_model, f_ov) # save
270
  yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
 
501
  '10',
502
  '--out_dir',
503
  str(file.parent),
504
+ f_tfl, ], check=True)
505
  return f, None
506
 
507
 
 
522
  '--quantize_uint8' if int8 else '',
523
  '--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
524
  str(f_pb),
525
+ str(f), ]
526
  subprocess.run([arg for arg in args if arg], check=True)
527
 
528
  json = Path(f_json).read_text()
 
581
  batch_size, ch, h, w = list(im.shape) # BCHW
582
  t = time.time()
583
 
584
+ # YOLOv5 Output shapes
585
  spec = model.get_spec()
586
  out0, out1 = iter(spec.description.output)
587
  if platform.system() == 'Darwin':
 
771
  if onnx or xml: # OpenVINO requires ONNX
772
  f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
773
  if xml: # OpenVINO
774
+ f[3], _ = export_openvino(file, metadata, half, int8, data)
775
  if coreml: # CoreML
776
  f[4], ct_model = export_coreml(model, im, file, int8, half, nms)
777
  if nms:
 
831
  parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
832
  parser.add_argument('--keras', action='store_true', help='TF: use Keras')
833
  parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
834
+ parser.add_argument('--int8', action='store_true', help='CoreML/TF/OpenVINO INT8 quantization')
835
  parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
836
  parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
837
  parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
ultralytics/yolov5/models/common.py CHANGED
@@ -24,12 +24,24 @@ import torch.nn as nn
24
  from PIL import Image
25
  from torch.cuda import amp
26
 
27
  from utils import TryExcept
28
  from utils.dataloaders import exif_transpose, letterbox
29
  from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
30
  increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
31
  xyxy2xywh, yaml_load)
32
- from utils.plots import Annotator, colors, save_one_box
33
  from utils.torch_utils import copy_attr, smart_inference_mode
34
 
35
 
@@ -373,18 +385,18 @@ class DetectMultiBackend(nn.Module):
373
  stride, names = int(meta['stride']), eval(meta['names'])
374
  elif xml: # OpenVINO
375
  LOGGER.info(f'Loading {w} for OpenVINO inference...')
376
- check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/
377
  from openvino.runtime import Core, Layout, get_batch
378
- ie = Core()
379
  if not Path(w).is_file(): # if not *.xml
380
  w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
381
- network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
382
- if network.get_parameters()[0].get_layout().empty:
383
- network.get_parameters()[0].set_layout(Layout('NCHW'))
384
- batch_dim = get_batch(network)
385
  if batch_dim.is_static:
386
  batch_size = batch_dim.get_length()
387
- executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2
388
  stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
389
  elif engine: # TensorRT
390
  LOGGER.info(f'Loading {w} for TensorRT inference...')
@@ -524,7 +536,7 @@ class DetectMultiBackend(nn.Module):
524
  y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
525
  elif self.xml: # OpenVINO
526
  im = im.cpu().numpy() # FP32
527
- y = list(self.executable_network([im]).values())
528
  elif self.engine: # TensorRT
529
  if self.dynamic and im.shape != self.bindings['images'].shape:
530
  i = self.model.get_binding_index('images')
@@ -541,7 +553,7 @@ class DetectMultiBackend(nn.Module):
541
  elif self.coreml: # CoreML
542
  im = im.cpu().numpy()
543
  im = Image.fromarray((im[0] * 255).astype('uint8'))
544
- # im = im.resize((192, 320), Image.ANTIALIAS)
545
  y = self.model.predict({'image': im}) # coordinates are xywh normalized
546
  if 'confidence' in y:
547
  box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
 
24
  from PIL import Image
25
  from torch.cuda import amp
26
 
27
+ # Import 'ultralytics' package or install if missing
28
+ try:
29
+ import ultralytics
30
+
31
+ assert hasattr(ultralytics, '__version__') # verify package is not directory
32
+ except (ImportError, AssertionError):
33
+ import os
34
+
35
+ os.system('pip install -U ultralytics')
36
+ import ultralytics
37
+
38
+ from ultralytics.utils.plotting import Annotator, colors, save_one_box
39
+
40
  from utils import TryExcept
41
  from utils.dataloaders import exif_transpose, letterbox
42
  from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
43
  increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
44
  xyxy2xywh, yaml_load)
 
45
  from utils.torch_utils import copy_attr, smart_inference_mode
46
 
47
 
 
385
  stride, names = int(meta['stride']), eval(meta['names'])
386
  elif xml: # OpenVINO
387
  LOGGER.info(f'Loading {w} for OpenVINO inference...')
388
+ check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/
389
  from openvino.runtime import Core, Layout, get_batch
390
+ core = Core()
391
  if not Path(w).is_file(): # if not *.xml
392
  w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
393
+ ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))
394
+ if ov_model.get_parameters()[0].get_layout().empty:
395
+ ov_model.get_parameters()[0].set_layout(Layout('NCHW'))
396
+ batch_dim = get_batch(ov_model)
397
  if batch_dim.is_static:
398
  batch_size = batch_dim.get_length()
399
+ ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device
400
  stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
401
  elif engine: # TensorRT
402
  LOGGER.info(f'Loading {w} for TensorRT inference...')
 
536
  y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
537
  elif self.xml: # OpenVINO
538
  im = im.cpu().numpy() # FP32
539
+ y = list(self.ov_compiled_model(im).values())
540
  elif self.engine: # TensorRT
541
  if self.dynamic and im.shape != self.bindings['images'].shape:
542
  i = self.model.get_binding_index('images')
 
553
  elif self.coreml: # CoreML
554
  im = im.cpu().numpy()
555
  im = Image.fromarray((im[0] * 255).astype('uint8'))
556
+ # im = im.resize((192, 320), Image.BILINEAR)
557
  y = self.model.predict({'image': im}) # coordinates are xywh normalized
558
  if 'confidence' in y:
559
  box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
ultralytics/yolov5/models/experimental.py CHANGED
@@ -87,11 +87,11 @@ def attempt_load(weights, device=None, inplace=True, fuse=True):
87
 
88
  model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode
89
 
90
- # Module compatibility updates
91
  for m in model.modules():
92
  t = type(m)
93
  if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
94
- m.inplace = inplace # torch 1.7.0 compatibility
95
  if t is Detect and not isinstance(m.anchor_grid, list):
96
  delattr(m, 'anchor_grid')
97
  setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
 
87
 
88
  model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode
89
 
90
+ # Module updates
91
  for m in model.modules():
92
  t = type(m)
93
  if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
94
+ m.inplace = inplace
95
  if t is Detect and not isinstance(m.anchor_grid, list):
96
  delattr(m, 'anchor_grid')
97
  setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
ultralytics/yolov5/models/tf.py CHANGED
@@ -310,7 +310,7 @@ class TFDetect(keras.layers.Layer):
310
  y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
311
  z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
312
 
313
- return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)
314
 
315
  @staticmethod
316
  def _make_grid(nx=20, ny=20):
@@ -486,7 +486,7 @@ class TFModel:
486
  iou_thres,
487
  conf_thres,
488
  clip_boxes=False)
489
- return (nms,)
490
  return x # output [1,6300,85] = [xywh, conf, class0, class1, ...]
491
  # x = x[0] # [x(1,6300,85), ...] to x(6300,85)
492
  # xywh = x[..., :4] # x(6300,4) boxes
 
310
  y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
311
  z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
312
 
313
+ return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), )
314
 
315
  @staticmethod
316
  def _make_grid(nx=20, ny=20):
 
486
  iou_thres,
487
  conf_thres,
488
  clip_boxes=False)
489
+ return (nms, )
490
  return x # output [1,6300,85] = [xywh, conf, class0, class1, ...]
491
  # x = x[0] # [x(1,6300,85), ...] to x(6300,85)
492
  # xywh = x[..., :4] # x(6300,4) boxes
ultralytics/yolov5/models/yolo.py CHANGED
@@ -21,8 +21,8 @@ if str(ROOT) not in sys.path:
21
  if platform.system() != 'Windows':
22
  ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
23
 
24
- from models.common import *
25
- from models.experimental import *
26
  from utils.autoanchor import check_anchor_order
27
  from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
28
  from utils.plots import feature_visualization
@@ -76,7 +76,7 @@ class Detect(nn.Module):
76
  y = torch.cat((xy, wh, conf), 4)
77
  z.append(y.view(bs, self.na * nx * ny, self.no))
78
 
79
- return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
80
 
81
  def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
82
  d = self.anchors[i].device
@@ -126,7 +126,7 @@ class BaseModel(nn.Module):
126
 
127
  def _profile_one_layer(self, m, x, dt):
128
  c = m == self.model[-1] # is final layer, copy input as inplace fix
129
- o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
130
  t = time_sync()
131
  for _ in range(10):
132
  m(x.copy() if c else x)
 
21
  if platform.system() != 'Windows':
22
  ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
23
 
24
+ from models.common import * # noqa
25
+ from models.experimental import * # noqa
26
  from utils.autoanchor import check_anchor_order
27
  from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
28
  from utils.plots import feature_visualization
 
76
  y = torch.cat((xy, wh, conf), 4)
77
  z.append(y.view(bs, self.na * nx * ny, self.no))
78
 
79
+ return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x)
80
 
81
  def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
82
  d = self.anchors[i].device
 
126
 
127
  def _profile_one_layer(self, m, x, dt):
128
  c = m == self.model[-1] # is final layer, copy input as inplace fix
129
+ o = thop.profile(m, inputs=(x.copy() if c else x, ), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
130
  t = time_sync()
131
  for _ in range(10):
132
  m(x.copy() if c else x)
ultralytics/yolov5/requirements.txt CHANGED
@@ -4,18 +4,18 @@
4
  # Base ------------------------------------------------------------------------
5
  gitpython>=3.1.30
6
  matplotlib>=3.3
7
- numpy>=1.18.5
8
  opencv-python>=4.1.1
9
- Pillow>=7.1.2
10
  psutil # system resources
11
  PyYAML>=5.3.1
12
  requests>=2.23.0
13
  scipy>=1.4.1
14
  thop>=0.1.1 # FLOPs computation
15
- torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended)
16
- torchvision>=0.8.1
17
  tqdm>=4.64.0
18
- ultralytics>=8.0.111
19
  # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
20
 
21
  # Logging ---------------------------------------------------------------------
@@ -36,7 +36,7 @@ seaborn>=0.11.0
36
  # scikit-learn<=1.1.2 # CoreML quantization
37
  # tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos)
38
  # tensorflowjs>=3.9.0 # TF.js export
39
- # openvino-dev # OpenVINO export
40
 
41
  # Deploy ----------------------------------------------------------------------
42
  setuptools>=65.5.1 # Snyk vulnerability fix
 
4
  # Base ------------------------------------------------------------------------
5
  gitpython>=3.1.30
6
  matplotlib>=3.3
7
+ numpy>=1.22.2
8
  opencv-python>=4.1.1
9
+ Pillow>=10.0.1
10
  psutil # system resources
11
  PyYAML>=5.3.1
12
  requests>=2.23.0
13
  scipy>=1.4.1
14
  thop>=0.1.1 # FLOPs computation
15
+ torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended)
16
+ torchvision>=0.9.0
17
  tqdm>=4.64.0
18
+ ultralytics>=8.0.147
19
  # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
20
 
21
  # Logging ---------------------------------------------------------------------
 
36
  # scikit-learn<=1.1.2 # CoreML quantization
37
  # tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos)
38
  # tensorflowjs>=3.9.0 # TF.js export
39
+ # openvino-dev>=2023.0 # OpenVINO export
40
 
41
  # Deploy ----------------------------------------------------------------------
42
  setuptools>=65.5.1 # Snyk vulnerability fix
ultralytics/yolov5/segment/predict.py CHANGED
@@ -11,7 +11,7 @@ Usage - sources:
11
  list.txt # list of images
12
  list.streams # list of streams
13
  'path/*.jpg' # glob
14
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
15
  'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
16
 
17
  Usage - formats:
@@ -42,12 +42,13 @@ if str(ROOT) not in sys.path:
42
  sys.path.append(str(ROOT)) # add ROOT to PATH
43
  ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
44
 
45
  from models.common import DetectMultiBackend
46
  from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
47
  from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
48
  increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
49
  strip_optimizer)
50
- from utils.plots import Annotator, colors, save_one_box
51
  from utils.segment.general import masks2segments, process_mask, process_mask_native
52
  from utils.torch_utils import select_device, smart_inference_mode
53
 
 
11
  list.txt # list of images
12
  list.streams # list of streams
13
  'path/*.jpg' # glob
14
+ 'https://youtu.be/LNwODJXcvt4' # YouTube
15
  'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
16
 
17
  Usage - formats:
 
42
  sys.path.append(str(ROOT)) # add ROOT to PATH
43
  ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
44
 
45
+ from ultralytics.utils.plotting import Annotator, colors, save_one_box
46
+
47
  from models.common import DetectMultiBackend
48
  from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
49
  from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
50
  increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
51
  strip_optimizer)
 
52
  from utils.segment.general import masks2segments, process_mask, process_mask_native
53
  from utils.torch_utils import select_device, smart_inference_mode
54
 
ultralytics/yolov5/segment/train.py CHANGED
@@ -605,7 +605,7 @@ def main(opt, callbacks=Callbacks()):
605
  'gsutil',
606
  'cp',
607
  f'gs://{opt.bucket}/evolve.csv',
608
- str(evolve_csv),])
609
 
610
  for _ in range(opt.evolve): # generations to evolve
611
  if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
 
605
  'gsutil',
606
  'cp',
607
  f'gs://{opt.bucket}/evolve.csv',
608
+ str(evolve_csv), ])
609
 
610
  for _ in range(opt.evolve): # generations to evolve
611
  if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
ultralytics/yolov5/segment/tutorial.ipynb CHANGED
@@ -63,7 +63,7 @@
63
  "source": [
64
  "!git clone https://github.com/ultralytics/yolov5 # clone\n",
65
  "%cd yolov5\n",
66
- "%pip install -qr requirements.txt # install\n",
67
  "\n",
68
  "import torch\n",
69
  "import utils\n",
@@ -87,7 +87,7 @@
87
  " screen # screenshot\n",
88
  " path/ # directory\n",
89
  " 'path/*.jpg' # glob\n",
90
- " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n",
91
  " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
92
  "```"
93
  ]
@@ -558,7 +558,7 @@
558
  "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
559
  "import torch\n",
560
  "\n",
561
- "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n",
562
  "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
563
  "results = model(im) # inference\n",
564
  "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
 
63
  "source": [
64
  "!git clone https://github.com/ultralytics/yolov5 # clone\n",
65
  "%cd yolov5\n",
66
+ "%pip install -qr requirements.txt comet_ml # install\n",
67
  "\n",
68
  "import torch\n",
69
  "import utils\n",
 
87
  " screen # screenshot\n",
88
  " path/ # directory\n",
89
  " 'path/*.jpg' # glob\n",
90
+ " 'https://youtu.be/LNwODJXcvt4' # YouTube\n",
91
  " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
92
  "```"
93
  ]
 
558
  "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
559
  "import torch\n",
560
  "\n",
561
+ "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n",
562
  "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
563
  "results = model(im) # inference\n",
564
  "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
ultralytics/yolov5/setup.cfg CHANGED
@@ -3,7 +3,7 @@
3
  # Local usage: pip install pre-commit, pre-commit run --all-files
4
 
5
  [metadata]
6
- license_file = LICENSE
7
  description_file = README.md
8
 
9
  [tool:pytest]
@@ -25,17 +25,19 @@ verbose = 2
25
  # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
26
  format = pylint
27
  # see: https://www.flake8rules.com/
28
- ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403
29
  # E731: Do not assign a lambda expression, use a def
30
  # F405: name may be undefined, or defined from star imports: module
31
  # E402: module level import not at top of file
32
- # F401: module imported but unused
33
  # W504: line break after binary operator
34
- # E127: continuation line over-indented for visual indent
35
- # E231: missing whitespace after ‘,’, ‘;’, or ‘:’
36
  # E501: line too long
 
37
  # F403: ‘from module import *’ used; unable to detect undefined names
38
 
 
39
  [isort]
40
  # https://pycqa.github.io/isort/docs/configuration/options.html
41
  line_length = 120
@@ -48,7 +50,7 @@ spaces_before_comment = 2
48
  COLUMN_LIMIT = 120
49
  COALESCE_BRACKETS = True
50
  SPACES_AROUND_POWER_OPERATOR = True
51
- SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False
52
  SPLIT_BEFORE_CLOSING_BRACKET = False
53
  SPLIT_BEFORE_FIRST_ARGUMENT = False
54
  # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
 
3
  # Local usage: pip install pre-commit, pre-commit run --all-files
4
 
5
  [metadata]
6
+ license_files = LICENSE
7
  description_file = README.md
8
 
9
  [tool:pytest]
 
25
  # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
26
  format = pylint
27
  # see: https://www.flake8rules.com/
28
+ ignore = E731,F405,E402,W504,E501
29
  # E731: Do not assign a lambda expression, use a def
30
  # F405: name may be undefined, or defined from star imports: module
31
  # E402: module level import not at top of file
 
32
  # W504: line break after binary operator
 
 
33
  # E501: line too long
34
+ # removed:
35
+ # F401: module imported but unused
36
+ # E231: missing whitespace after ‘,’, ‘;’, or ‘:’
37
+ # E127: continuation line over-indented for visual indent
38
  # F403: ‘from module import *’ used; unable to detect undefined names
39
 
40
+
41
  [isort]
42
  # https://pycqa.github.io/isort/docs/configuration/options.html
43
  line_length = 120
 
50
  COLUMN_LIMIT = 120
51
  COALESCE_BRACKETS = True
52
  SPACES_AROUND_POWER_OPERATOR = True
53
+ SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
54
  SPLIT_BEFORE_CLOSING_BRACKET = False
55
  SPLIT_BEFORE_FIRST_ARGUMENT = False
56
  # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
ultralytics/yolov5/train.py CHANGED
@@ -26,6 +26,11 @@ from copy import deepcopy
26
  from datetime import datetime
27
  from pathlib import Path
28
 
29
  import numpy as np
30
  import torch
31
  import torch.distributed as dist
@@ -579,7 +584,7 @@ def main(opt, callbacks=Callbacks()):
579
  'gsutil',
580
  'cp',
581
  f'gs://{opt.bucket}/evolve.csv',
582
- str(evolve_csv),])
583
 
584
  for _ in range(opt.evolve): # generations to evolve
585
  if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
 
26
  from datetime import datetime
27
  from pathlib import Path
28
 
29
+ try:
30
+ import comet_ml # must be imported before torch (if installed)
31
+ except ImportError:
32
+ comet_ml = None
33
+
34
  import numpy as np
35
  import torch
36
  import torch.distributed as dist
 
584
  'gsutil',
585
  'cp',
586
  f'gs://{opt.bucket}/evolve.csv',
587
+ str(evolve_csv), ])
588
 
589
  for _ in range(opt.evolve): # generations to evolve
590
  if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
ultralytics/yolov5/tutorial.ipynb CHANGED
@@ -31,7 +31,7 @@
31
  " <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
32
  "<br>\n",
33
  "\n",
34
- "This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href=\"https://docs.ultralytics.com/yolov5\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/yolov5\">GitHub</a> for support, and join our <a href=\"https://discord.gg/n6cFeSPZdD\">Discord</a> community for questions and discussions!\n",
35
  "\n",
36
  "</div>"
37
  ]
@@ -59,13 +59,13 @@
59
  "source": [
60
  "!git clone https://github.com/ultralytics/yolov5 # clone\n",
61
  "%cd yolov5\n",
62
- "%pip install -qr requirements.txt # install\n",
63
  "\n",
64
  "import torch\n",
65
  "import utils\n",
66
  "display = utils.notebook_init() # checks"
67
  ],
68
- "execution_count": 1,
69
  "outputs": [
70
  {
71
  "output_type": "stream",
@@ -95,12 +95,12 @@
95
  "\n",
96
  "```shell\n",
97
  "python detect.py --source 0 # webcam\n",
98
- " img.jpg # image \n",
99
  " vid.mp4 # video\n",
100
  " screen # screenshot\n",
101
  " path/ # directory\n",
102
  " 'path/*.jpg' # glob\n",
103
- " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n",
104
  " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
105
  "```"
106
  ]
@@ -118,7 +118,7 @@
118
  "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
119
  "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)"
120
  ],
121
- "execution_count": 13,
122
  "outputs": [
123
  {
124
  "output_type": "stream",
@@ -174,7 +174,7 @@
174
  "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n",
175
  "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip"
176
  ],
177
- "execution_count": 3,
178
  "outputs": [
179
  {
180
  "output_type": "stream",
@@ -198,7 +198,7 @@
198
  "# Validate YOLOv5s on COCO val\n",
199
  "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half"
200
  ],
201
- "execution_count": 4,
202
  "outputs": [
203
  {
204
  "output_type": "stream",
@@ -308,7 +308,7 @@
308
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
309
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
310
  ],
311
- "execution_count": 5,
312
  "outputs": [
313
  {
314
  "output_type": "stream",
@@ -539,7 +539,7 @@
539
  "\n",
540
  "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
541
  "\n",
542
- "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n",
543
  "\n",
544
  "<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
545
  ]
@@ -593,7 +593,7 @@
593
  "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
594
  "import torch\n",
595
  "\n",
596
- "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # yolov5n - yolov5x6 or custom\n",
597
  "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
598
  "results = model(im) # inference\n",
599
  "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
 
31
  " <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
32
  "<br>\n",
33
  "\n",
34
+ "This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 <a href=\"https://docs.ultralytics.com/yolov5\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/yolov5\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
35
  "\n",
36
  "</div>"
37
  ]
 
59
  "source": [
60
  "!git clone https://github.com/ultralytics/yolov5 # clone\n",
61
  "%cd yolov5\n",
62
+ "%pip install -qr requirements.txt comet_ml # install\n",
63
  "\n",
64
  "import torch\n",
65
  "import utils\n",
66
  "display = utils.notebook_init() # checks"
67
  ],
68
+ "execution_count": null,
69
  "outputs": [
70
  {
71
  "output_type": "stream",
 
95
  "\n",
96
  "```shell\n",
97
  "python detect.py --source 0 # webcam\n",
98
+ " img.jpg # image\n",
99
  " vid.mp4 # video\n",
100
  " screen # screenshot\n",
101
  " path/ # directory\n",
102
  " 'path/*.jpg' # glob\n",
103
+ " 'https://youtu.be/LNwODJXcvt4' # YouTube\n",
104
  " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
105
  "```"
106
  ]
 
118
  "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
119
  "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)"
120
  ],
121
+ "execution_count": null,
122
  "outputs": [
123
  {
124
  "output_type": "stream",
 
174
  "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n",
175
  "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip"
176
  ],
177
+ "execution_count": null,
178
  "outputs": [
179
  {
180
  "output_type": "stream",
 
198
  "# Validate YOLOv5s on COCO val\n",
199
  "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half"
200
  ],
201
+ "execution_count": null,
202
  "outputs": [
203
  {
204
  "output_type": "stream",
 
308
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
309
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
310
  ],
311
+ "execution_count": null,
312
  "outputs": [
313
  {
314
  "output_type": "stream",
 
539
  "\n",
540
  "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
541
  "\n",
542
+ "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
543
  "\n",
544
  "<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
545
  ]
 
593
  "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
594
  "import torch\n",
595
  "\n",
596
+ "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n",
597
  "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
598
  "results = model(im) # inference\n",
599
  "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
ultralytics/yolov5/utils/__init__.py CHANGED
@@ -54,13 +54,17 @@ def notebook_init(verbose=True):
54
  import os
55
  import shutil
56
 
57
- from utils.general import check_font, check_requirements, is_colab
58
  from utils.torch_utils import select_device # imports
59
 
60
  check_font()
61
 
62
  import psutil
63
 
64
  if is_colab():
65
  shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
66
 
 
54
  import os
55
  import shutil
56
 
57
+ from ultralytics.utils.checks import check_requirements
58
+
59
+ from utils.general import check_font, is_colab
60
  from utils.torch_utils import select_device # imports
61
 
62
  check_font()
63
 
64
  import psutil
65
 
66
+ if check_requirements('wandb', install=False):
67
+ os.system('pip uninstall -y wandb') # uninstall wandb to avoid an unexpected account-creation prompt that hangs indefinitely
68
  if is_colab():
69
  shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
70
 
ultralytics/yolov5/utils/augmentations.py CHANGED
@@ -330,7 +330,7 @@ def classify_albumentations(
330
  if vflip > 0:
331
  T += [A.VerticalFlip(p=vflip)]
332
  if jitter > 0:
333
- color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, satuaration, 0 hue
334
  T += [A.ColorJitter(*color_jitter, 0)]
335
  else: # Use fixed crop for eval set (reproducibility)
336
  T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
 
330
  if vflip > 0:
331
  T += [A.VerticalFlip(p=vflip)]
332
  if jitter > 0:
333
+ color_jitter = (float(jitter), ) * 3 # repeat value for brightness, contrast, saturation, 0 hue
334
  T += [A.ColorJitter(*color_jitter, 0)]
335
  else: # Use fixed crop for eval set (reproducibility)
336
  T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
ultralytics/yolov5/utils/callbacks.py CHANGED
@@ -32,7 +32,7 @@ class Callbacks:
32
  'on_model_save': [],
33
  'on_train_end': [],
34
  'on_params_update': [],
35
- 'teardown': [],}
36
  self.stop_training = False # set True to interrupt training
37
 
38
  def register_action(self, hook, name='', callback=None):
 
32
  'on_model_save': [],
33
  'on_train_end': [],
34
  'on_params_update': [],
35
+ 'teardown': [], }
36
  self.stop_training = False # set True to interrupt training
37
 
38
  def register_action(self, hook, name='', callback=None):
ultralytics/yolov5/utils/dataloaders.py CHANGED
@@ -355,7 +355,7 @@ class LoadStreams:
355
  # Start thread to read frames from video stream
356
  st = f'{i + 1}/{n}: {s}... '
357
  if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video
358
- # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
359
  check_requirements(('pafy', 'youtube_dl==2020.12.2'))
360
  import pafy
361
  s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL
 
355
  # Start thread to read frames from video stream
356
  st = f'{i + 1}/{n}: {s}... '
357
  if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video
358
+ # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
359
  check_requirements(('pafy', 'youtube_dl==2020.12.2'))
360
  import pafy
361
  s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL
ultralytics/yolov5/utils/docker/Dockerfile CHANGED
@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria
12
  ENV DEBIAN_FRONTEND noninteractive
13
  RUN apt update
14
  RUN TZ=Etc/UTC apt install -y tzdata
15
- RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
16
  # RUN alias python=python3
17
 
18
  # Security updates
@@ -24,14 +24,13 @@ RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
24
  WORKDIR /usr/src/app
25
 
26
  # Copy contents
27
- # COPY . /usr/src/app (issues as not a .git directory)
28
- RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
29
 
30
  # Install pip packages
31
  COPY requirements.txt .
32
  RUN python3 -m pip install --upgrade pip wheel
33
  RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
34
- coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3'
35
  # tensorflow tensorflowjs \
36
 
37
  # Set environment variables
 
12
  ENV DEBIAN_FRONTEND noninteractive
13
  RUN apt update
14
  RUN TZ=Etc/UTC apt install -y tzdata
15
+ RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg
16
  # RUN alias python=python3
17
 
18
  # Security updates
 
24
  WORKDIR /usr/src/app
25
 
26
  # Copy contents
27
+ COPY . /usr/src/app
 
28
 
29
  # Install pip packages
30
  COPY requirements.txt .
31
  RUN python3 -m pip install --upgrade pip wheel
32
  RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
33
+ coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0'
34
  # tensorflow tensorflowjs \
35
 
36
  # Set environment variables
ultralytics/yolov5/utils/docker/Dockerfile-arm64 CHANGED
@@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria
12
  ENV DEBIAN_FRONTEND noninteractive
13
  RUN apt update
14
  RUN TZ=Etc/UTC apt install -y tzdata
15
- RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
16
  # RUN alias python=python3
17
 
18
  # Install pip packages
@@ -27,8 +27,7 @@ RUN mkdir -p /usr/src/app
27
  WORKDIR /usr/src/app
28
 
29
  # Copy contents
30
- # COPY . /usr/src/app (issues as not a .git directory)
31
- RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
32
  ENV DEBIAN_FRONTEND teletype
33
 
34
 
 
12
  ENV DEBIAN_FRONTEND noninteractive
13
  RUN apt update
14
  RUN TZ=Etc/UTC apt install -y tzdata
15
+ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev
16
  # RUN alias python=python3
17
 
18
  # Install pip packages
 
27
  WORKDIR /usr/src/app
28
 
29
  # Copy contents
30
+ COPY . /usr/src/app
 
31
  ENV DEBIAN_FRONTEND teletype
32
 
33
 
ultralytics/yolov5/utils/docker/Dockerfile-cpu CHANGED
@@ -3,23 +3,25 @@
3
  # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments
4
 
5
  # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6
- FROM ubuntu:22.10
7
 
8
  # Downloads to user config dir
9
  ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10
 
11
  # Install linux packages
12
- ENV DEBIAN_FRONTEND noninteractive
13
- RUN apt update
14
- RUN TZ=Etc/UTC apt install -y tzdata
15
- RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
16
  # RUN alias python=python3
17
 
 
 
 
18
  # Install pip packages
19
  COPY requirements.txt .
20
  RUN python3 -m pip install --upgrade pip wheel
21
  RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
22
- coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \
23
  # tensorflow tensorflowjs \
24
  --extra-index-url https://download.pytorch.org/whl/cpu
25
 
@@ -28,9 +30,7 @@ RUN mkdir -p /usr/src/app
28
  WORKDIR /usr/src/app
29
 
30
  # Copy contents
31
- # COPY . /usr/src/app (issues as not a .git directory)
32
- RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
33
- ENV DEBIAN_FRONTEND teletype
34
 
35
 
36
  # Usage Examples -------------------------------------------------------------------------------------------------------
 
3
  # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments
4
 
5
  # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6
+ FROM ubuntu:mantic-20231011
7
 
8
  # Downloads to user config dir
9
  ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10
 
11
  # Install linux packages
12
+ # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
13
+ RUN apt update \
14
+ && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
 
15
  # RUN alias python=python3
16
 
17
+ # Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' to avoid the 'externally-managed-environment' Ubuntu nightly error
18
+ RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
19
+
20
  # Install pip packages
21
  COPY requirements.txt .
22
  RUN python3 -m pip install --upgrade pip wheel
23
  RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
24
+ coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \
25
  # tensorflow tensorflowjs \
26
  --extra-index-url https://download.pytorch.org/whl/cpu
27
 
 
30
  WORKDIR /usr/src/app
31
 
32
  # Copy contents
33
+ COPY . /usr/src/app
 
 
34
 
35
 
36
  # Usage Examples -------------------------------------------------------------------------------------------------------
ultralytics/yolov5/utils/downloads.py CHANGED
@@ -4,7 +4,6 @@ Download utils
4
  """
5
 
6
  import logging
7
- import os
8
  import subprocess
9
  import urllib
10
  from pathlib import Path
@@ -53,7 +52,7 @@ def curl_download(url, filename, *, silent: bool = False) -> bool:
53
  '--retry',
54
  '9',
55
  '-C',
56
- '-',])
57
  return proc.returncode == 0
58
 
59
 
 
4
  """
5
 
6
  import logging
 
7
  import subprocess
8
  import urllib
9
  from pathlib import Path
 
52
  '--retry',
53
  '9',
54
  '-C',
55
+ '-', ])
56
  return proc.returncode == 0
57
 
58
 
ultralytics/yolov5/utils/general.py CHANGED
@@ -35,7 +35,17 @@ import pkg_resources as pkg
35
  import torch
36
  import torchvision
37
  import yaml
38
- from ultralytics.yolo.utils.checks import check_requirements
39
 
40
  from utils import TryExcept, emojis
41
  from utils.downloads import curl_download, gsutil_getsize
@@ -139,12 +149,12 @@ def set_logging(name=LOGGING_NAME, verbose=True):
139
  name: {
140
  'class': 'logging.StreamHandler',
141
  'formatter': name,
142
- 'level': level,}},
143
  'loggers': {
144
  name: {
145
  'level': level,
146
  'handlers': [name],
147
- 'propagate': False,}}})
148
 
149
 
150
  set_logging(LOGGING_NAME) # run before defining LOGGER
@@ -371,7 +381,7 @@ def check_git_info(path='.'):
371
  return {'remote': None, 'branch': None, 'commit': None}
372
 
373
 
374
- def check_python(minimum='3.7.0'):
375
  # Check current python version vs. required python version
376
  check_version(platform.python_version(), minimum, name='Python ', hard=True)
377
 
@@ -416,7 +426,7 @@ def check_imshow(warn=False):
416
  return False
417
 
418
 
419
- def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
420
  # Check file(s) for acceptable suffix
421
  if file and suffix:
422
  if isinstance(suffix, str):
 
35
  import torch
36
  import torchvision
37
  import yaml
38
+
39
+ # Import 'ultralytics' package or install if missing
40
+ try:
41
+ import ultralytics
42
+
43
+ assert hasattr(ultralytics, '__version__') # verify package is not directory
44
+ except (ImportError, AssertionError):
45
+ os.system('pip install -U ultralytics')
46
+ import ultralytics
47
+
48
+ from ultralytics.utils.checks import check_requirements
49
 
50
  from utils import TryExcept, emojis
51
  from utils.downloads import curl_download, gsutil_getsize
 
149
  name: {
150
  'class': 'logging.StreamHandler',
151
  'formatter': name,
152
+ 'level': level, }},
153
  'loggers': {
154
  name: {
155
  'level': level,
156
  'handlers': [name],
157
+ 'propagate': False, }}})
158
 
159
 
160
  set_logging(LOGGING_NAME) # run before defining LOGGER
 
381
  return {'remote': None, 'branch': None, 'commit': None}
382
 
383
 
384
+ def check_python(minimum='3.8.0'):
385
  # Check current python version vs. required python version
386
  check_version(platform.python_version(), minimum, name='Python ', hard=True)
387
 
 
426
  return False
427
 
428
 
429
+ def check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):
430
  # Check file(s) for acceptable suffix
431
  if file and suffix:
432
  if isinstance(suffix, str):
ultralytics/yolov5/utils/google_app_engine/additional_requirements.txt CHANGED
@@ -1,5 +1,5 @@
1
  # add these requirements in your app on top of the existing ones
2
- pip==21.1
3
  Flask==2.3.2
4
  gunicorn==19.10.0
5
- werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability
 
1
  # add these requirements in your app on top of the existing ones
2
+ pip==23.3
3
  Flask==2.3.2
4
  gunicorn==19.10.0
5
+ werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
ultralytics/yolov5/utils/loggers/__init__.py CHANGED
@@ -46,15 +46,15 @@ except (ImportError, AssertionError):
46
  clearml = None
47
 
48
  try:
49
- if RANK not in [0, -1]:
50
- comet_ml = None
51
- else:
52
  import comet_ml
53
 
54
  assert hasattr(comet_ml, '__version__') # verify package import not local dir
55
  from utils.loggers.comet import CometLogger
56
 
57
- except (ModuleNotFoundError, ImportError, AssertionError):
58
  comet_ml = None
59
 
60
 
@@ -88,10 +88,6 @@ class Loggers():
88
  self.csv = True # always log to csv
89
 
90
  # Messages
91
- if not clearml:
92
- prefix = colorstr('ClearML: ')
93
- s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML"
94
- self.logger.info(s)
95
  if not comet_ml:
96
  prefix = colorstr('Comet: ')
97
  s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet"
 
46
  clearml = None
47
 
48
  try:
49
+ if RANK in {0, -1}:
50
  import comet_ml
51
 
52
  assert hasattr(comet_ml, '__version__') # verify package import not local dir
53
  from utils.loggers.comet import CometLogger
54
 
55
+ else:
56
+ comet_ml = None
57
+ except (ImportError, AssertionError):
58
  comet_ml = None
59
 
60
 
 
88
  self.csv = True # always log to csv
89
 
90
  # Messages
91
  if not comet_ml:
92
  prefix = colorstr('Comet: ')
93
  s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet"
ultralytics/yolov5/utils/loggers/clearml/clearml_utils.py CHANGED
@@ -5,8 +5,7 @@ from pathlib import Path
5
 
6
  import numpy as np
7
  import yaml
8
-
9
- from utils.plots import Annotator, colors
10
 
11
  try:
12
  import clearml
 
5
 
6
  import numpy as np
7
  import yaml
8
+ from ultralytics.utils.plotting import Annotator, colors
 
9
 
10
  try:
11
  import clearml
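Note: `Annotator` and `colors` now come from the `ultralytics` package rather than the vendored `utils.plots`. A hedged usage sketch of the relocated API (keyword arguments may vary across `ultralytics` versions):

    import numpy as np
    from ultralytics.utils.plotting import Annotator, colors

    im = np.zeros((640, 640, 3), dtype=np.uint8)  # dummy BGR image
    annotator = Annotator(im, line_width=2)
    annotator.box_label([30, 30, 200, 200], 'person', color=colors(0, True))
    annotated = annotator.result()  # annotated image back as a numpy array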
ultralytics/yolov5/utils/loggers/comet/README.md CHANGED
@@ -59,7 +59,7 @@ Check out an example of a [completed run here](https://www.comet.com/examples/co
59
 
60
  Or better yet, try it out yourself in this Colab Notebook
61
 
62
- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
63
 
64
  # Log automatically
65
 
 
59
 
60
  Or better yet, try it out yourself in this Colab Notebook
61
 
62
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-training/yolov5/notebooks/Comet_and_YOLOv5.ipynb)
63
 
64
  # Log automatically
65
 
ultralytics/yolov5/utils/loggers/comet/__init__.py CHANGED
@@ -18,7 +18,7 @@ try:
18
  # Project Configuration
19
  config = comet_ml.config.get_config()
20
  COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5')
21
- except (ModuleNotFoundError, ImportError):
22
  comet_ml = None
23
  COMET_PROJECT_NAME = None
24
 
@@ -42,7 +42,7 @@ COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')
42
  COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true'
43
 
44
  # Evaluation Settings
45
- COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true'
46
  COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true'
47
  COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100))
48
 
@@ -51,10 +51,10 @@ CONF_THRES = float(os.getenv('CONF_THRES', 0.001))
51
  IOU_THRES = float(os.getenv('IOU_THRES', 0.6))
52
 
53
  # Batch Logging Settings
54
- COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true'
55
  COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1)
56
  COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1)
57
- COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true'
58
 
59
  RANK = int(os.getenv('RANK', -1))
60
 
@@ -82,7 +82,7 @@ class CometLogger:
82
  self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
83
 
84
  # Dataset Artifact Settings
85
- self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET
86
  self.resume = self.opt.resume
87
 
88
  # Default parameters to pass to Experiment objects
@@ -90,9 +90,10 @@ class CometLogger:
90
  'log_code': False,
91
  'log_env_gpu': True,
92
  'log_env_cpu': True,
93
- 'project_name': COMET_PROJECT_NAME,}
94
  self.default_experiment_kwargs.update(experiment_kwargs)
95
  self.experiment = self._get_experiment(self.comet_mode, run_id)
 
96
 
97
  self.data_dict = self.check_dataset(self.opt.data)
98
  self.class_names = self.data_dict['names']
@@ -136,7 +137,7 @@ class CometLogger:
136
 
137
  self.comet_log_predictions = COMET_LOG_PREDICTIONS
138
  if self.opt.bbox_interval == -1:
139
- self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10
140
  else:
141
  self.comet_log_prediction_interval = self.opt.bbox_interval
142
 
@@ -152,7 +153,7 @@ class CometLogger:
152
  'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS,
153
  'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS,
154
  'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX,
155
- 'comet_model_name': COMET_MODEL_NAME,})
156
 
157
  # Check if running the Experiment with the Comet Optimizer
158
  if hasattr(self.opt, 'comet_optimizer_id'):
@@ -169,7 +170,7 @@ class CometLogger:
169
  **self.default_experiment_kwargs,
170
  )
171
 
172
- return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,)
173
 
174
  else:
175
  try:
@@ -213,7 +214,7 @@ class CometLogger:
213
  'fitness_score': fitness_score[-1],
214
  'epochs_trained': epoch + 1,
215
  'save_period': opt.save_period,
216
- 'total_epochs': opt.epochs,}
217
 
218
  model_files = glob.glob(f'{path}/*.pt')
219
  for model_path in model_files:
@@ -231,7 +232,8 @@ class CometLogger:
231
  with open(data_file) as f:
232
  data_config = yaml.safe_load(f)
233
 
234
- if data_config['path'].startswith(COMET_PREFIX):
 
235
  path = data_config['path'].replace(COMET_PREFIX, '')
236
  data_dict = self.download_dataset_artifact(path)
237
 
@@ -269,7 +271,7 @@ class CometLogger:
269
  'x': xyxy[0],
270
  'y': xyxy[1],
271
  'x2': xyxy[2],
272
- 'y2': xyxy[3]},})
273
  for *xyxy, conf, cls in filtered_detections.tolist():
274
  metadata.append({
275
  'label': f'{self.class_names[int(cls)]}',
@@ -278,7 +280,7 @@ class CometLogger:
278
  'x': xyxy[0],
279
  'y': xyxy[1],
280
  'x2': xyxy[2],
281
- 'y2': xyxy[3]},})
282
 
283
  self.metadata_dict[image_name] = metadata
284
  self.logged_images_count += 1
@@ -312,8 +314,16 @@ class CometLogger:
312
  image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
313
 
314
  try:
315
- artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split})
316
- artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split})
 
317
  except ValueError as e:
318
  logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
319
  logger.error(f'COMET ERROR: {e}')
@@ -355,15 +365,14 @@ class CometLogger:
355
  data_dict['path'] = artifact_save_dir
356
 
357
  metadata_names = metadata.get('names')
358
- if type(metadata_names) == dict:
359
  data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()}
360
- elif type(metadata_names) == list:
361
  data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
362
  else:
363
  raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
364
 
365
- data_dict = self.update_data_paths(data_dict)
366
- return data_dict
367
 
368
  def update_data_paths(self, data_dict):
369
  path = data_dict.get('path', '')
@@ -475,8 +484,9 @@ class CometLogger:
475
  'f1': f1[i],
476
  'true_positives': tp[i],
477
  'false_positives': fp[i],
478
- 'support': nt[c]},
479
- prefix=class_name)
 
480
 
481
  if self.comet_log_confusion_matrix:
482
  epoch = self.experiment.curr_epoch
 
18
  # Project Configuration
19
  config = comet_ml.config.get_config()
20
  COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5')
21
+ except ImportError:
22
  comet_ml = None
23
  COMET_PROJECT_NAME = None
24
 
 
42
  COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true'
43
 
44
  # Evaluation Settings
45
+ COMET_LOG_CONFUSION_MATRIX = (os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true')
46
  COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true'
47
  COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100))
48
 
 
51
  IOU_THRES = float(os.getenv('IOU_THRES', 0.6))
52
 
53
  # Batch Logging Settings
54
+ COMET_LOG_BATCH_METRICS = (os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true')
55
  COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1)
56
  COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1)
57
+ COMET_LOG_PER_CLASS_METRICS = (os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true')
58
 
59
  RANK = int(os.getenv('RANK', -1))
60
 
 
82
  self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
83
 
84
  # Dataset Artifact Settings
85
+ self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET
86
  self.resume = self.opt.resume
87
 
88
  # Default parameters to pass to Experiment objects
 
90
  'log_code': False,
91
  'log_env_gpu': True,
92
  'log_env_cpu': True,
93
+ 'project_name': COMET_PROJECT_NAME, }
94
  self.default_experiment_kwargs.update(experiment_kwargs)
95
  self.experiment = self._get_experiment(self.comet_mode, run_id)
96
+ self.experiment.set_name(self.opt.name)
97
 
98
  self.data_dict = self.check_dataset(self.opt.data)
99
  self.class_names = self.data_dict['names']
 
137
 
138
  self.comet_log_predictions = COMET_LOG_PREDICTIONS
139
  if self.opt.bbox_interval == -1:
140
+ self.comet_log_prediction_interval = (1 if self.opt.epochs < 10 else self.opt.epochs // 10)
141
  else:
142
  self.comet_log_prediction_interval = self.opt.bbox_interval
143
 
 
153
  'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS,
154
  'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS,
155
  'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX,
156
+ 'comet_model_name': COMET_MODEL_NAME, })
157
 
158
  # Check if running the Experiment with the Comet Optimizer
159
  if hasattr(self.opt, 'comet_optimizer_id'):
 
170
  **self.default_experiment_kwargs,
171
  )
172
 
173
+ return comet_ml.OfflineExperiment(**self.default_experiment_kwargs, )
174
 
175
  else:
176
  try:
 
214
  'fitness_score': fitness_score[-1],
215
  'epochs_trained': epoch + 1,
216
  'save_period': opt.save_period,
217
+ 'total_epochs': opt.epochs, }
218
 
219
  model_files = glob.glob(f'{path}/*.pt')
220
  for model_path in model_files:
 
232
  with open(data_file) as f:
233
  data_config = yaml.safe_load(f)
234
 
235
+ path = data_config.get('path')
236
+ if path and path.startswith(COMET_PREFIX):
237
  path = data_config['path'].replace(COMET_PREFIX, '')
238
  data_dict = self.download_dataset_artifact(path)
239
 
 
271
  'x': xyxy[0],
272
  'y': xyxy[1],
273
  'x2': xyxy[2],
274
+ 'y2': xyxy[3]}, })
275
  for *xyxy, conf, cls in filtered_detections.tolist():
276
  metadata.append({
277
  'label': f'{self.class_names[int(cls)]}',
 
280
  'x': xyxy[0],
281
  'y': xyxy[1],
282
  'x2': xyxy[2],
283
+ 'y2': xyxy[3]}, })
284
 
285
  self.metadata_dict[image_name] = metadata
286
  self.logged_images_count += 1
 
314
  image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file])
315
 
316
  try:
317
+ artifact.add(
318
+ image_file,
319
+ logical_path=image_logical_path,
320
+ metadata={'split': split},
321
+ )
322
+ artifact.add(
323
+ label_file,
324
+ logical_path=label_logical_path,
325
+ metadata={'split': split},
326
+ )
327
  except ValueError as e:
328
  logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.')
329
  logger.error(f'COMET ERROR: {e}')
 
365
  data_dict['path'] = artifact_save_dir
366
 
367
  metadata_names = metadata.get('names')
368
+ if isinstance(metadata_names, dict):
369
  data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()}
370
+ elif isinstance(metadata_names, list):
371
  data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
372
  else:
373
  raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
374
 
375
+ return self.update_data_paths(data_dict)
 
376
 
377
  def update_data_paths(self, data_dict):
378
  path = data_dict.get('path', '')
 
484
  'f1': f1[i],
485
  'true_positives': tp[i],
486
  'false_positives': fp[i],
487
+ 'support': nt[c], },
488
+ prefix=class_name,
489
+ )
490
 
491
  if self.comet_log_confusion_matrix:
492
  epoch = self.experiment.curr_epoch
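Note: the COMET_* toggles in this file all follow one env-var convention: read a string, lower-case it, compare to 'true'. A small sketch with a hypothetical `env_flag` helper (not in the diff) that captures the pattern:

    import os

    def env_flag(name, default='false'):
        # unset -> default; only a case-insensitive 'true' enables the flag
        return os.getenv(name, default).lower() == 'true'

    COMET_LOG_CONFUSION_MATRIX = env_flag('COMET_LOG_CONFUSION_MATRIX', 'true')
    COMET_LOG_BATCH_METRICS = env_flag('COMET_LOG_BATCH_METRICS')  # default off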
ultralytics/yolov5/utils/plots.py CHANGED
@@ -8,7 +8,6 @@ import math
8
  import os
9
  from copy import copy
10
  from pathlib import Path
11
- from urllib.error import URLError
12
 
13
  import cv2
14
  import matplotlib
@@ -17,14 +16,13 @@ import numpy as np
17
  import pandas as pd
18
  import seaborn as sn
19
  import torch
20
- from PIL import Image, ImageDraw, ImageFont
21
  from scipy.ndimage.filters import gaussian_filter1d
 
22
 
23
  from utils import TryExcept, threaded
24
- from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
25
- is_ascii, xywh2xyxy, xyxy2xywh)
26
  from utils.metrics import fitness
27
- from utils.segment.general import scale_image
28
 
29
  # Settings
30
  RANK = int(os.getenv('RANK', -1))
@@ -53,120 +51,6 @@ class Colors:
53
  colors = Colors() # create instance for 'from utils.plots import colors'
54
 
55
 
56
- def check_pil_font(font=FONT, size=10):
57
- # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
58
- font = Path(font)
59
- font = font if font.exists() else (CONFIG_DIR / font.name)
60
- try:
61
- return ImageFont.truetype(str(font) if font.exists() else font.name, size)
62
- except Exception: # download if missing
63
- try:
64
- check_font(font)
65
- return ImageFont.truetype(str(font), size)
66
- except TypeError:
67
- check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374
68
- except URLError: # not online
69
- return ImageFont.load_default()
70
-
71
-
72
- class Annotator:
73
- # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
74
- def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
75
- assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
76
- non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic
77
- self.pil = pil or non_ascii
78
- if self.pil: # use PIL
79
- self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
80
- self.draw = ImageDraw.Draw(self.im)
81
- self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font,
82
- size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
83
- else: # use cv2
84
- self.im = im
85
- self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
86
-
87
- def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
88
- # Add one xyxy box to image with label
89
- if self.pil or not is_ascii(label):
90
- self.draw.rectangle(box, width=self.lw, outline=color) # box
91
- if label:
92
- w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0
93
- # _, _, w, h = self.font.getbbox(label) # text width, height (New)
94
- outside = box[1] - h >= 0 # label fits outside box
95
- self.draw.rectangle(
96
- (box[0], box[1] - h if outside else box[1], box[0] + w + 1,
97
- box[1] + 1 if outside else box[1] + h + 1),
98
- fill=color,
99
- )
100
- # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
101
- self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
102
- else: # cv2
103
- p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
104
- cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
105
- if label:
106
- tf = max(self.lw - 1, 1) # font thickness
107
- w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
108
- outside = p1[1] - h >= 3
109
- p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
110
- cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
111
- cv2.putText(self.im,
112
- label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
113
- 0,
114
- self.lw / 3,
115
- txt_color,
116
- thickness=tf,
117
- lineType=cv2.LINE_AA)
118
-
119
- def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
120
- """Plot masks at once.
121
- Args:
122
- masks (tensor): predicted masks on cuda, shape: [n, h, w]
123
- colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
124
- im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1]
125
- alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
126
- """
127
- if self.pil:
128
- # convert to numpy first
129
- self.im = np.asarray(self.im).copy()
130
- if len(masks) == 0:
131
- self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
132
- colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
133
- colors = colors[:, None, None] # shape(n,1,1,3)
134
- masks = masks.unsqueeze(3) # shape(n,h,w,1)
135
- masks_color = masks * (colors * alpha) # shape(n,h,w,3)
136
-
137
- inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
138
- mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3)
139
-
140
- im_gpu = im_gpu.flip(dims=[0]) # flip channel
141
- im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
142
- im_gpu = im_gpu * inv_alph_masks[-1] + mcs
143
- im_mask = (im_gpu * 255).byte().cpu().numpy()
144
- self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape)
145
- if self.pil:
146
- # convert im back to PIL and update draw
147
- self.fromarray(self.im)
148
-
149
- def rectangle(self, xy, fill=None, outline=None, width=1):
150
- # Add rectangle to image (PIL-only)
151
- self.draw.rectangle(xy, fill, outline, width)
152
-
153
- def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
154
- # Add text to image (PIL-only)
155
- if anchor == 'bottom': # start y from font bottom
156
- w, h = self.font.getsize(text) # text width, height
157
- xy[1] += 1 - h
158
- self.draw.text(xy, text, fill=txt_color, font=self.font)
159
-
160
- def fromarray(self, im):
161
- # Update self.im from a numpy array
162
- self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
163
- self.draw = ImageDraw.Draw(self.im)
164
-
165
- def result(self):
166
- # Return annotated image as array
167
- return np.asarray(self.im)
168
-
169
-
170
  def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
171
  """
172
  x: Features to be visualized
@@ -266,7 +150,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
266
  x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
267
  annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
268
  if paths:
269
- annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
270
  if len(targets) > 0:
271
  ti = targets[targets[:, 0] == i] # image targets
272
  boxes = xywh2xyxy(ti[:, 2:6]).T
 
8
  import os
9
  from copy import copy
10
  from pathlib import Path
 
11
 
12
  import cv2
13
  import matplotlib
 
16
  import pandas as pd
17
  import seaborn as sn
18
  import torch
19
+ from PIL import Image, ImageDraw
20
  from scipy.ndimage.filters import gaussian_filter1d
21
+ from ultralytics.utils.plotting import Annotator
22
 
23
  from utils import TryExcept, threaded
24
+ from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
 
25
  from utils.metrics import fitness
 
26
 
27
  # Settings
28
  RANK = int(os.getenv('RANK', -1))
 
51
  colors = Colors() # create instance for 'from utils.plots import colors'
52
 
53
 
54
  def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
55
  """
56
  x: Features to be visualized
 
150
  x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
151
  annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
152
  if paths:
153
+ annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
154
  if len(targets) > 0:
155
  ti = targets[targets[:, 0] == i] # image targets
156
  boxes = xywh2xyxy(ti[:, 2:6]).T
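Note: the vendored `check_pil_font` and `Annotator` implementations are deleted in favor of `ultralytics.utils.plotting.Annotator`, and the `annotator.text(...)` call sites switch from a tuple to a list. The removed code itself suggests why a mutable sequence is safer: `text()` does `xy[1] += 1 - h` for bottom-anchored labels, which would fail on a tuple. A minimal sketch against the upstream class (pil=True may fetch a font on first use):

    import numpy as np
    from ultralytics.utils.plotting import Annotator

    annotator = Annotator(np.zeros((320, 320, 3), dtype=np.uint8), pil=True)
    # pass xy as a list so text() can adjust xy[1] in place if needed
    annotator.text([5, 5], 'mosaic_0.jpg', txt_color=(220, 220, 220))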
ultralytics/yolov5/utils/segment/metrics.py CHANGED
@@ -196,7 +196,7 @@ KEYS = [
196
  'val/cls_loss',
197
  'x/lr0',
198
  'x/lr1',
199
- 'x/lr2',]
200
 
201
  BEST_KEYS = [
202
  'best/epoch',
@@ -207,4 +207,4 @@ BEST_KEYS = [
207
  'best/precision(M)',
208
  'best/recall(M)',
209
  'best/mAP_0.5(M)',
210
- 'best/mAP_0.5:0.95(M)',]
 
196
  'val/cls_loss',
197
  'x/lr0',
198
  'x/lr1',
199
+ 'x/lr2', ]
200
 
201
  BEST_KEYS = [
202
  'best/epoch',
 
207
  'best/precision(M)',
208
  'best/recall(M)',
209
  'best/mAP_0.5(M)',
210
+ 'best/mAP_0.5:0.95(M)', ]
ultralytics/yolov5/utils/segment/plots.py CHANGED
@@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg'
54
  x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
55
  annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
56
  if paths:
57
- annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
58
  if len(targets) > 0:
59
  idx = targets[:, 0] == i
60
  ti = targets[idx] # image targets
 
54
  x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
55
  annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
56
  if paths:
57
+ annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
58
  if len(targets) > 0:
59
  idx = targets[:, 0] == i
60
  ti = targets[idx] # image targets
ultralytics/yolov5/utils/torch_utils.py CHANGED
@@ -170,7 +170,7 @@ def profile(input, ops, n=10, device=None):
170
  m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
171
  tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward
172
  try:
173
- flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs
174
  except Exception:
175
  flops = 0
176
 
@@ -284,7 +284,7 @@ def model_info(model, verbose=False, imgsz=640):
284
  p = next(model.parameters())
285
  stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride
286
  im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format
287
- flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
288
  imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float
289
  fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs
290
  except Exception:
 
170
  m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
171
  tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward
172
  try:
173
+ flops = thop.profile(m, inputs=(x, ), verbose=False)[0] / 1E9 * 2 # GFLOPs
174
  except Exception:
175
  flops = 0
176
 
 
284
  p = next(model.parameters())
285
  stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride
286
  im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format
287
+ flops = thop.profile(deepcopy(model), inputs=(im, ), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
288
  imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float
289
  fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs
290
  except Exception:
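Note: the `inputs=(x, )` changes here are formatting only; `thop.profile` takes a tuple of example inputs and returns (MACs, params). A standalone sketch of the GFLOPs computation used above:

    import torch
    import thop  # pip install thop

    model = torch.nn.Conv2d(3, 16, kernel_size=3)
    x = torch.empty(1, 3, 64, 64)
    macs = thop.profile(model, inputs=(x, ), verbose=False)[0]
    flops = macs / 1E9 * 2  # thop counts MACs; x2 approximates FLOPs, /1E9 -> G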
ultralytics/yolov5/val.py CHANGED
@@ -304,6 +304,8 @@ def run(
304
  if save_json and len(jdict):
305
  w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
306
  anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations
307
  pred_json = str(save_dir / f'{w}_predictions.json') # predictions
308
  LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
309
  with open(pred_json, 'w') as f:
 
304
  if save_json and len(jdict):
305
  w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
306
  anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations
307
+ if not os.path.exists(anno_json):
308
+ anno_json = os.path.join(data['path'], 'annotations', 'instances_val2017.json')
309
  pred_json = str(save_dir / f'{w}_predictions.json') # predictions
310
  LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
311
  with open(pred_json, 'w') as f: