Upload 12 files
#1 by wangfangyuan - opened
- README.md +4 -5
- onnx_eval.py +6 -2
- onnx_inference.py +7 -5
- utils.py +3 -2
- yolov8m_qat.onnx +2 -2
README.md
CHANGED
@@ -1,7 +1,6 @@
 ---
 license: apache-2.0
 tags:
-- RyzenAI
 - object-detection
 - vision
 - YOLO
@@ -43,7 +42,7 @@ You can use the raw model for object detection. See the [model hub](https://hugg
 
 The dataset MSCOCO2017 contains 118287 images for training and 5000 images for validation.
 
-Download COCO dataset and create directories in your code like this:
+Download COCO dataset and create directories like this:
 ```plain
 └── datasets
     └── coco
@@ -62,7 +61,7 @@ Download COCO dataset and create directories in your code like this:
         └── val2017.txt
 ```
 1. put the val2017 image folder under the images directory or use a softlink
-2. the labels folder and val2017.txt above are generate by **general_json2yolo.py
+2. the labels folder and val2017.txt above are generated by **general_json2yolo.py**; you need to put these files into the datasets/coco folder
 3. modify the coco.yaml like this:
 ```markdown
 path: /path/to/your/datasets/coco # dataset root dir
@@ -115,9 +114,9 @@ for batch in dataset:
 
 ### Performance
 
-|Metric |
+|Metric |Quantized onnx|
 | :----: | :----: |
-
+|AP0.50:0.95|48.4|
 
 
 ```bibtex
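For reference, val2017.txt in this layout is, by the usual ultralytics convention, just a plain list of image paths relative to the dataset root. general_json2yolo.py produces it together with the labels folder; the sketch below only illustrates the assumed file format (the root path and .jpg extension are placeholders):

```python
# Illustrative only: general_json2yolo.py is the documented way to produce
# labels/ and val2017.txt. This sketch just shows the assumed format of the
# list file: one image path per line, relative to the datasets/coco root.
from pathlib import Path

coco_root = Path("datasets/coco")            # adjust to your dataset root
image_dir = coco_root / "images" / "val2017"

with open(coco_root / "val2017.txt", "w") as f:
    for img in sorted(image_dir.glob("*.jpg")):
        # e.g. ./images/val2017/000000000139.jpg
        f.write(f"./{img.relative_to(coco_root)}\n")
```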
onnx_eval.py
CHANGED
@@ -78,8 +78,10 @@ class DetectionValidator:
             batch = self.preprocess(batch)
 
             # inference
-            outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: batch["img"].cpu().numpy()})
-            outputs = [torch.tensor(item).to(self.device) for item in outputs]
+            # outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: batch["img"].cpu().numpy()})
+            outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: batch["img"].permute(0, 2, 3, 1).cpu().numpy()})
+            # outputs = [torch.tensor(item).to(self.device) for item in outputs]
+            outputs = [torch.tensor(item).permute(0, 3, 1, 2).to(self.device) for item in outputs]
             preds = post_process(outputs)
 
             # pre-process predictions
@@ -95,6 +97,7 @@ class DetectionValidator:
         return stats
 
     def get_dataloader(self, dataset_path, batch_size):
+        # TODO: manage splits differently
         # calculate stride - check if model is initialized
         return build_dataloader(self.args, batch_size, img_path=dataset_path, stride=32, names=self.data['names'], mode="val")[0]
 
@@ -178,6 +181,7 @@ class DetectionValidator:
                                 ratio_pad=batch["ratio_pad"][si])  # native-space labels
             labelsn = torch.cat((cls, tbox), 1)  # native-space labels
             correct_bboxes = self._process_batch(predn, labelsn)
+            # TODO: maybe remove these `self.` arguments as they already are member variable
             self.stats.append((correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1)))  # (conf, pcls, tcls)
 
         # Save
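The substance of this change is a data-layout swap: the quantized model evidently takes NHWC input and returns NHWC feature maps, so the validator permutes the NCHW batch before calling onnxruntime and permutes the outputs back before post_process. A minimal standalone sketch of that round trip, assuming a plain CPU onnxruntime session and a 640x640 input size (both are assumptions, not taken from this diff):

```python
import onnxruntime as ort
import torch

session = ort.InferenceSession("yolov8m_qat.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
print(session.get_inputs()[0].shape)  # expected to report an NHWC shape, e.g. [1, 640, 640, 3]

batch = torch.rand(1, 3, 640, 640)               # NCHW, as produced by the dataloader
nhwc = batch.permute(0, 2, 3, 1).cpu().numpy()   # NCHW -> NHWC for the quantized model
outputs = session.run(None, {input_name: nhwc})
# outputs come back as NHWC feature maps; restore NCHW before post-processing
outputs = [torch.tensor(o).permute(0, 3, 1, 2) for o in outputs]
```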
onnx_inference.py
CHANGED
@@ -78,21 +78,21 @@ def make_parser():
         "--model",
         type=str,
         default="./yolov8m_qat.onnx",
-        help="
+        help="Input your onnx model.",
     )
     parser.add_argument(
         "-i",
         "--image_path",
         type=str,
         default='./demo.jpg',
-        help="
+        help="Path to your input image.",
     )
     parser.add_argument(
         "-o",
         "--output_path",
         type=str,
         default='./demo_infer.jpg',
-        help="
+        help="Path to your output directory.",
     )
     parser.add_argument(
         "--ipu", action='store_true', help='flag for ryzen ai'
@@ -133,8 +133,10 @@ if __name__ == '__main__':
     im = preprocess(im)
     if len(im.shape) == 3:
         im = im[None]
-    outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: im.cpu().numpy()})
-    outputs = [torch.tensor(item) for item in outputs]
+    # outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: im.cpu().numpy()})
+    # outputs = [torch.tensor(item) for item in outputs]
+    outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: im.permute(0, 2, 3, 1).cpu().numpy()})
+    outputs = [torch.tensor(item).permute(0, 3, 1, 2) for item in outputs]
     preds = post_process(outputs)
     preds = non_max_suppression(
         preds, 0.25, 0.7, agnostic=False, max_det=300, classes=None
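The parser above also exposes an --ipu flag ("flag for ryzen ai"). The session-creation code is not part of this diff, but in the typical Ryzen AI flow that flag switches onnxruntime to the Vitis AI execution provider; a hedged sketch, where the provider options and the vaip_config.json path are assumptions rather than code from this repo:

```python
import onnxruntime as ort

def make_session(model_path: str, use_ipu: bool, provider_config: str = "vaip_config.json"):
    """Build an onnxruntime session; with --ipu, route to the Vitis AI EP (Ryzen AI)."""
    if use_ipu:
        # Assumption: standard Ryzen AI setup, where the Vitis AI execution provider
        # is configured via a vaip_config.json shipped with the Ryzen AI software.
        return ort.InferenceSession(
            model_path,
            providers=["VitisAIExecutionProvider"],
            provider_options=[{"config_file": provider_config}],
        )
    return ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])

# e.g. onnx_model = make_session(args.model, args.ipu)
```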
utils.py
CHANGED
@@ -851,7 +851,7 @@ def build_dataloader(cfg, batch, img_path, stride=32, rect=False, names=None, ra
         imgsz=cfg.imgsz,
         batch_size=batch,
         augment=mode == "train",  # augmentation
-        hyp=cfg,
+        hyp=cfg,  # TODO: probably add a get_hyps_from_cfg function
         rect=cfg.rect or rect,  # rectangular batches
         cache=cfg.cache or None,
         single_cls=cfg.single_cls or False,
@@ -1170,6 +1170,7 @@ class Bboxes:
         assert bboxes.shape[1] == 4
         self.bboxes = bboxes
         self.format = format
+        # self.normalized = normalized
 
     def convert(self, format):
         assert format in _formats
@@ -1576,7 +1577,7 @@ class YOLODataset(BaseDataset):
             lb["segments"] = []
         return labels
 
-
+    # TODO: use hyp config to set all these augmentations
     def build_transforms(self, hyp=None):
         transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)])
         transforms.append(
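The new TODO next to hyp=cfg suggests splitting the augmentation hyperparameters out of the full config instead of passing cfg wholesale. A hypothetical get_hyps_from_cfg along those lines (the function and its key set are only illustrative; the repo currently just passes cfg):

```python
from types import SimpleNamespace

def get_hyps_from_cfg(cfg):
    """Hypothetical helper hinted at by the TODO: collect only the augmentation
    hyperparameters from the full config instead of passing cfg wholesale."""
    hyp_keys = ("mosaic", "mixup", "copy_paste", "degrees", "translate", "scale",
                "shear", "perspective", "fliplr", "flipud",
                "hsv_h", "hsv_s", "hsv_v")  # illustrative key set
    return SimpleNamespace(**{k: getattr(cfg, k) for k in hyp_keys if hasattr(cfg, k)})
```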
yolov8m_qat.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:119038397368b01fee9ad8adcc62061babcf2e2dd417be1946d5bfccb07eb65f
+size 103874987
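Since the .onnx entry is a Git LFS pointer, a quick sanity check after downloading yolov8m_qat.onnx is to compare the local file's size and SHA-256 against the pointer; a minimal sketch:

```python
import hashlib
import os

path = "yolov8m_qat.onnx"
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

print("size  :", os.path.getsize(path))  # should match the pointer's `size`
print("sha256:", sha256.hexdigest())     # should match the pointer's `oid sha256:`
```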