# MMDetection (MMEngine-style) training config: Faster R-CNN with a
# ResNet-101 + FPN backbone, fine-tuned on a single 'waste' class with the
# first two backbone stages frozen.
num_batch_size = 4
num_epochs = 15
num_frozen_stages = 2
auto_scale_lr = dict(base_batch_size=2, enable=False)
backend_args = None
data_root = 'C:/vs_code_workspaces/mmdetection/mmdetection/data/ins/v9'
dataset_type = 'CocoDataset'
default_hooks = dict(
    checkpoint=dict(interval=1, type='CheckpointHook'),
    logger=dict(interval=50, type='LoggerHook'),
    param_scheduler=dict(type='ParamSchedulerHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    timer=dict(type='IterTimerHook'),
    visualization=dict(type='DetVisualizationHook'))
default_scope = 'mmdet'
env_cfg = dict(
    cudnn_benchmark=False,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
launcher = 'none'
# Initial weights: checkpoint from an earlier fine-tuning run (epoch 9).
load_from = 'C:/vs_code_workspaces/mmdetection/mmdetection/ins_development/resources/add300_frozen2/epoch_9.pth'
log_level = 'INFO'
log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50)
metainfo = dict(classes=('waste', ), palette=[(220, 20, 60), ])
model = dict(
    backbone=dict(
        depth=101,
        frozen_stages=num_frozen_stages,
        init_cfg=dict(
            checkpoint='C:/Users/INS/.cache/torch/hub/checkpoints/resnet101-63fe2227.pth',
            type='Pretrained'),
        norm_cfg=dict(requires_grad=True, type='BN'),
        norm_eval=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        style='pytorch',
        type='ResNet'),
    data_preprocessor=dict(
        bgr_to_rgb=True,
        mean=[123.675, 116.28, 103.53],
        pad_size_divisor=32,
        std=[58.395, 57.12, 57.375],
        type='DetDataPreprocessor'),
    neck=dict(
        in_channels=[256, 512, 1024, 2048],
        num_outs=5,
        out_channels=256,
        type='FPN'),
    roi_head=dict(
        bbox_head=dict(
            bbox_coder=dict(
                target_means=[0.0, 0.0, 0.0, 0.0],
                target_stds=[0.1, 0.1, 0.2, 0.2],
                type='DeltaXYWHBBoxCoder'),
            fc_out_channels=1024,
            in_channels=256,
            loss_bbox=dict(loss_weight=1.0, type='L1Loss'),
            loss_cls=dict(
                loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=False),
            num_classes=1,
            reg_class_agnostic=False,
            roi_feat_size=7,
            type='Shared2FCBBoxHead'),
        bbox_roi_extractor=dict(
            featmap_strides=[4, 8, 16, 32],
            out_channels=256,
            roi_layer=dict(output_size=7, sampling_ratio=0, type='RoIAlign'),
            type='SingleRoIExtractor'),
        type='StandardRoIHead'),
    rpn_head=dict(
        anchor_generator=dict(
            ratios=[0.5, 1.0, 2.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64],
            type='AnchorGenerator'),
        bbox_coder=dict(
            target_means=[0.0, 0.0, 0.0, 0.0],
            target_stds=[1.0, 1.0, 1.0, 1.0],
            type='DeltaXYWHBBoxCoder'),
        feat_channels=256,
        in_channels=256,
        loss_bbox=dict(loss_weight=1.0, type='L1Loss'),
        loss_cls=dict(
            loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=True),
        type='RPNHead'),
    test_cfg=dict(
        rcnn=dict(
            max_per_img=100,
            nms=dict(iou_threshold=0.5, type='nms'),
            score_thr=0.05),
        rpn=dict(
            max_per_img=1000,
            min_bbox_size=0,
            nms=dict(iou_threshold=0.7, type='nms'),
            nms_pre=1000)),
    train_cfg=dict(
        rcnn=dict(
            assigner=dict(
                ignore_iof_thr=-1,
                match_low_quality=False,
                min_pos_iou=0.5,
                neg_iou_thr=0.5,
                pos_iou_thr=0.5,
                type='MaxIoUAssigner'),
            debug=False,
            pos_weight=-1,
            sampler=dict(
                add_gt_as_proposals=True,
                neg_pos_ub=-1,
                num=512,
                pos_fraction=0.25,
                type='RandomSampler')),
        rpn=dict(
            allowed_border=-1,
            assigner=dict(
                ignore_iof_thr=-1,
                match_low_quality=True,
                min_pos_iou=0.3,
                neg_iou_thr=0.3,
                pos_iou_thr=0.7,
                type='MaxIoUAssigner'),
            debug=False,
            pos_weight=-1,
            sampler=dict(
                add_gt_as_proposals=False,
                neg_pos_ub=-1,
                num=256,
                pos_fraction=0.5,
                type='RandomSampler')),
        rpn_proposal=dict(
            max_per_img=1000,
            min_bbox_size=0,
            nms=dict(iou_threshold=0.7, type='nms'),
            nms_pre=2000)),
    type='FasterRCNN')
optim_wrapper = dict(
    optimizer=dict(lr=0.02, momentum=0.9, type='SGD', weight_decay=0.0001),
    type='OptimWrapper')
# Warmup for the first 500 iterations, then step decay at epochs 8 and 11.
param_scheduler = [
    dict(begin=0, by_epoch=False, end=500, start_factor=0.001, type='LinearLR'),
    dict(
        begin=0,
        by_epoch=True,
        end=12,
        gamma=0.1,
        milestones=[8, 11],
        type='MultiStepLR'),
]
resume = False
test_cfg = dict(type='TestLoop')
test_dataloader = dict(
    batch_size=num_batch_size,
    dataset=dict(
        ann_file='test/annotations_coco.json',
        backend_args=None,
        data_prefix=dict(img='test/'),
        data_root=data_root,
        metainfo=dict(classes=('waste', ), palette=[(220, 20, 60), ]),
        pipeline=[
            dict(backend_args=None, type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(1280, 1280), type='Resize'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                           'scale_factor'),
                type='PackDetInputs'),
        ],
        test_mode=True,
        type='CocoDataset'),
    drop_last=False,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
test_evaluator = dict(
    ann_file='data/ins_annotated_v9/test/annotations_coco.json',
    backend_args=None,
    format_only=False,
    metric='bbox',
    type='CocoMetric')
test_pipeline = [
    dict(backend_args=None, type='LoadImageFromFile'),
    dict(keep_ratio=True, scale=(1280, 1280), type='Resize'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'),
        type='PackDetInputs'),
]
train_cfg = dict(
    max_epochs=num_epochs, type='EpochBasedTrainLoop', val_interval=1)
train_dataloader = dict(
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    batch_size=num_batch_size,
    dataset=dict(
        ann_file='train/annotations_coco.json',
        backend_args=None,
        data_prefix=dict(img='train/'),
        data_root=data_root,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        metainfo=dict(classes=('waste', ), palette=[(220, 20, 60), ]),
        pipeline=[
            dict(backend_args=None, type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(keep_ratio=True, scale=(1280, 1280), type='Resize'),
            dict(prob=0.5, type='RandomFlip'),
            dict(type='PackDetInputs'),
        ],
        type='CocoDataset'),
    num_workers=2,
    persistent_workers=True,
    sampler=dict(shuffle=True, type='DefaultSampler'))
train_pipeline = [
    dict(backend_args=None, type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(keep_ratio=True, scale=(1280, 1280), type='Resize'),
    dict(prob=0.5, type='RandomFlip'),
    dict(type='PackDetInputs'),
]
val_cfg = dict(type='ValLoop')
val_dataloader = dict(
    batch_size=num_batch_size,
    dataset=dict(
        ann_file='valid/annotations_coco.json',
        backend_args=None,
        data_prefix=dict(img='valid/'),
        data_root=data_root,
        metainfo=dict(classes=('waste', ), palette=[(220, 20, 60), ]),
        pipeline=[
            dict(backend_args=None, type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(1280, 1280), type='Resize'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                           'scale_factor'),
                type='PackDetInputs'),
        ],
        test_mode=True,
        type='CocoDataset'),
    drop_last=False,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(
    ann_file='data/ins_annotated_v9/valid/annotations_coco.json',
    backend_args=None,
    format_only=False,
    metric='bbox',
    type='CocoMetric')
val_pipeline = [
    dict(backend_args=None, type='LoadImageFromFile'),
    dict(keep_ratio=True, scale=(1280, 1280), type='Resize'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'),
        type='PackDetInputs'),
]
vis_backends = [dict(type='LocalVisBackend'), ]
visualizer = dict(
    name='visualizer',
    type='DetLocalVisualizer',
    vis_backends=[dict(type='LocalVisBackend'), ])
work_dir = './ins_development/training/ins_annotated_v9/pretrained/add300/faster/2frozen/e9\\'
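# A minimal usage sketch, assuming the config above is saved as
# 'faster_rcnn_r101_waste.py' (hypothetical filename, not from the source):
# it can be loaded and trained with MMEngine's Runner. Kept as comments so
# the config file itself stays free of imports.
#
#     from mmengine.config import Config
#     from mmengine.runner import Runner
#
#     cfg = Config.fromfile('faster_rcnn_r101_waste.py')
#     runner = Runner.from_cfg(cfg)  # uses work_dir, load_from, etc. from the config
#     runner.train()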