onescotch committed
Commit 0515b76 • Parent(s): 010a8bc
update config for new version mmdet
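Not part of the commit itself: a minimal sanity-check sketch for the migrated config, assuming the Space environment has MMDetection 3.x and MMEngine installed. Only the config path comes from this repository; everything else is illustrative.

# Sanity-check sketch (assumption: mmdet>=3.0 and mmengine are installed).
# Loads the migrated config and builds the detector; this fails if the model
# section still uses 2.x-style keys such as `pretrained`.
from mmengine.config import Config
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules

register_all_modules()  # register MMDetection components into the default scope
cfg = Config.fromfile(
    'pretrained_models/mmdet/mmdet_faster_rcnn_r50_fpn_coco.py')
model = MODELS.build(cfg.model)
print(type(model).__name__)  # expected: FasterRCNN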
pretrained_models/mmdet/mmdet_faster_rcnn_r50_fpn_coco.py CHANGED
@@ -26,7 +26,12 @@ total_epochs = 12
 
 model = dict(
     type='FasterRCNN',
-
+    data_preprocessor=dict(
+        type='DetDataPreprocessor',
+        mean=[123.675, 116.28, 103.53],
+        std=[58.395, 57.12, 57.375],
+        bgr_to_rgb=True,
+        pad_size_divisor=32),
     backbone=dict(
         type='ResNet',
         depth=50,
@@ -35,7 +40,8 @@ model = dict(
         frozen_stages=1,
         norm_cfg=dict(type='BN', requires_grad=True),
         norm_eval=True,
-        style='pytorch'
+        style='pytorch',
+        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
     neck=dict(
         type='FPN',
         in_channels=[256, 512, 1024, 2048],
@@ -134,49 +140,59 @@ model = dict(
 
 dataset_type = 'CocoDataset'
 data_root = 'data/coco/'
-
-
+backend_args = None
+
 train_pipeline = [
-    dict(type='LoadImageFromFile'),
+    dict(type='LoadImageFromFile', backend_args=backend_args),
     dict(type='LoadAnnotations', with_bbox=True),
-    dict(type='Resize',
-    dict(type='RandomFlip',
-    dict(type='
-    dict(type='Pad', size_divisor=32),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', prob=0.5),
+    dict(type='PackDetInputs')
 ]
 test_pipeline = [
-    dict(type='LoadImageFromFile'),
+    dict(type='LoadImageFromFile', backend_args=backend_args),
+    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
+    # If you don't have a gt annotation, delete the pipeline
+    dict(type='LoadAnnotations', with_bbox=True),
     dict(
-        type='
-
-
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='Pad', size_divisor=32),
-            dict(type='DefaultFormatBundle'),
-            dict(type='Collect', keys=['img']),
-        ])
+        type='PackDetInputs',
+        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+                   'scale_factor'))
 ]
-
-
-
-
+train_dataloader = dict(
+    batch_size=2,
+    num_workers=2,
+    persistent_workers=True,
+    sampler=dict(type='DefaultSampler', shuffle=True),
+    batch_sampler=dict(type='AspectRatioBatchSampler'),
+    dataset=dict(
         type=dataset_type,
-
-
-
-
+        data_root=data_root,
+        ann_file='annotations/instances_train2017.json',
+        data_prefix=dict(img='train2017/'),
+        filter_cfg=dict(filter_empty_gt=True, min_size=32),
+        pipeline=train_pipeline,
+        backend_args=backend_args))
+val_dataloader = dict(
+    batch_size=1,
+    num_workers=2,
+    persistent_workers=True,
+    drop_last=False,
+    sampler=dict(type='DefaultSampler', shuffle=False),
+    dataset=dict(
         type=dataset_type,
-
-
-
-
-
-
-
-
-
+        data_root=data_root,
+        ann_file='annotations/instances_val2017.json',
+        data_prefix=dict(img='val2017/'),
+        test_mode=True,
+        pipeline=test_pipeline,
+        backend_args=backend_args))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(
+    type='CocoMetric',
+    ann_file=data_root + 'annotations/instances_val2017.json',
+    metric='bbox',
+    format_only=False,
+    backend_args=backend_args)
+test_evaluator = val_evaluator
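Usage sketch, not taken from the repository: with a matching checkpoint, the updated config can be driven through the MMDetection 3.x inference API. The checkpoint filename and test image below are placeholders, not files tracked in this Space.

# Usage sketch (placeholder paths): run the updated config through the
# MMDetection 3.x high-level inference API.
from mmdet.apis import init_detector, inference_detector

config_file = 'pretrained_models/mmdet/mmdet_faster_rcnn_r50_fpn_coco.py'
checkpoint_file = 'faster_rcnn_r50_fpn_coco.pth'  # placeholder checkpoint name
model = init_detector(config_file, checkpoint_file, device='cpu')
result = inference_detector(model, 'demo.jpg')  # placeholder image path
# mmdet 3.x returns a DetDataSample; predicted boxes live in pred_instances
print(result.pred_instances.bboxes.shape)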