
Convert the KITTI dataset to the middle (format-neutral) data format and load it into memory

# The original KITTI dataset has 9 classes: 'Car Van Truck Pedestrian Person_sitting Cyclist Tram Misc DontCare'
CLASSES = ('Car', 'Truck', 'Pedestrian', 'Cyclist')
cat2label = {k:i for i, k in enumerate(CLASSES)}
print(cat2label)
cat2label['Car']

# {'Car': 0, 'Truck': 1, 'Pedestrian': 2, 'Cyclist': 3}
# 0
# cat2label is used later to fill the 'labels' field of data_anno
image_list = mmcv.list_from_file('/content/kitti_tiny/train.txt')

lines = mmcv.list_from_file('/content/kitti_tiny/training/label_2/000064.txt')
#print(lines)
content = [line.strip().split(' ') for line in lines]
bbox_names = [x[0] for x in content]
#print(bbox_names)
# read the bounding boxes
bboxes = [ [float(info) for info in x[4:8]] for x in content]
print(bboxes)

# [[657.65, 179.93, 709.86, 219.92], [731.51, 180.39, 882.28, 275.8], [715.18, 175.63, 762.77, 203.9], [816.58, 59.74, 1112.51, 266.07], [626.78, 174.27, 647.77, 192.18], [546.19, 168.97, 554.01, 177.09]]
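For reference, each line of a KITTI label_2 file carries 15 whitespace-separated fields: type, truncated, occluded, alpha, the 2D bbox (left, top, right, bottom), 3D dimensions, 3D location, and rotation_y. That is why the slice x[4:8] above yields the 2D box. A minimal sketch with a made-up sample line:

# Fields 4-7 of a KITTI label line are the 2D bbox: left, top, right, bottom.
sample = 'Car 0.00 0 -1.58 657.65 179.93 709.86 219.92 1.43 1.63 3.40 2.45 1.59 13.37 -1.41'  # hypothetical line
fields = sample.split(' ')
name, bbox = fields[0], [float(v) for v in fields[4:8]]
print(name, bbox)  # Car [657.65, 179.93, 709.86, 219.92]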
import copy
import os.path as osp
import cv2

import mmcv
import numpy as np

from mmdet.datasets.builder import DATASETS
from mmdet.datasets.custom import CustomDataset

# The decorator below is required. Without force=True, @DATASETS.register_module() cannot re-register a dataset that is already registered (e.g., when re-running this cell).
@DATASETS.register_module(force=True)
class KittyTinyDataset(CustomDataset):
  CLASSES = ('Car', 'Truck', 'Pedestrian', 'Cyclist')
  # No __init__ defined here: CustomDataset's __init__ is used as-is.
  ##### self.data_root: /content/kitti_tiny/ self.ann_file: /content/kitti_tiny/train.txt self.img_prefix: /content/kitti_tiny/training/image_2
  #### ann_file: /content/kitti_tiny/train.txt
  # A text file listing every annotation file's stem is passed to __init__(self, ann_file); that self.ann_file is then handed to load_annotations() as its argument.
  def load_annotations(self, ann_file):
    print('##### self.data_root:', self.data_root, 'self.ann_file:', self.ann_file, 'self.img_prefix:', self.img_prefix)
    print('#### ann_file:', ann_file)
    cat2label = {k:i for i, k in enumerate(self.CLASSES)}
    image_list = mmcv.list_from_file(self.ann_file)
    # list that will hold the format-neutral (middle format) data
    data_infos = []
    
    for image_id in image_list: # 000000
      filename = '{0:}/{1:}.jpeg'.format(self.img_prefix, image_id)
      # Get the original image's width and height by loading the image directly.
      image = cv2.imread(filename)
      height, width = image.shape[:2]
      # Dict holding this image's annotation info. The 'filename' key holds only the file name (directory excluded).
      data_info = {'filename': str(image_id) + '.jpeg',
                   'width': width, 'height': height}
      # Convert the prefix to the sub-directory holding the annotation files.
      label_prefix = self.img_prefix.replace('image_2', 'label_2')
      # Read the annotation file line by line into a list.
      lines = mmcv.list_from_file(osp.join(label_prefix, str(image_id)+'.txt'))

      # Split each line on whitespace; content is a list of lists.
      # The ann info ultimately becomes numpy arrays, but lists are easier for text processing, so convert to lists first.
      content = [line.strip().split(' ') for line in lines]
      # Object class names are collected in bbox_names.
      bbox_names = [x[0] for x in content]
      # Collect the bbox coordinates.
      bboxes = [ [float(info) for info in x[4:8]] for x in content]

      # Filter out objects whose class is not one of ours; 'DontCare' objects are stored separately as ignore.
      gt_bboxes = []
      gt_labels = []
      gt_bboxes_ignore = []
      gt_labels_ignore = []
	  
      # loop over the parsed annotation lines
      for bbox_name, bbox in zip(bbox_names, bboxes):
        # If bbox_name is one of our classes, append to gt_bboxes/gt_labels; otherwise to gt_bboxes_ignore/gt_labels_ignore.
        if bbox_name in cat2label:
          gt_bboxes.append(bbox)
          # gt_labels stores the class id
          gt_labels.append(cat2label[bbox_name])
        else:
          gt_bboxes_ignore.append(bbox)
          gt_labels_ignore.append(-1)
      # Dict holding this image's annotations; every value is an np.array.
      data_anno = {
          'bboxes': np.array(gt_bboxes, dtype=np.float32).reshape(-1, 4),
          'labels': np.array(gt_labels, dtype=np.int64),  # np.long is deprecated/removed in recent NumPy
          'bboxes_ignore': np.array(gt_bboxes_ignore, dtype=np.float32).reshape(-1, 4),
          'labels_ignore': np.array(gt_labels_ignore, dtype=np.int64)
      }
      # Store data_anno under the 'ann' key of data_info (the image's meta dict).
      data_info.update(ann=data_anno)
      # Append data_info to data_infos, which aggregates info across all annotation files.
      data_infos.append(data_info)

    return data_infos
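
Before wiring the dataset into a config, load_annotations() can be sanity-checked by instantiating the class directly. A minimal sketch (the empty pipeline exists only to satisfy CustomDataset's constructor; paths assume the kitti_tiny layout above):

# Smoke test: build the dataset and peek at the middle-format records.
train_ds = KittyTinyDataset(data_root='/content/kitti_tiny/',
                            ann_file='train.txt',
                            img_prefix='training/image_2',
                            pipeline=[])
print(len(train_ds.data_infos))        # number of images listed in train.txt
print(train_ds.data_infos[0]['ann'])   # middle-format annotations of the first image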

 

### Set up the config and download the pretrained model
config_file = '/content/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
checkpoint_file = '/content/mmdetection/checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
!mkdir -p /content/mmdetection/checkpoints
!wget -O /content/mmdetection/checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
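
It is worth verifying the download before training; a quick check (the size figure is approximate):

import os.path as osp
ckpt = '/content/mmdetection/checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
assert osp.exists(ckpt), 'checkpoint download failed'
print('%.1f MB' % (osp.getsize(ckpt) / 1024**2))  # roughly 160 MB for the R50-FPN checkpoint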

 

from mmcv import Config

cfg = Config.fromfile(config_file)
print(cfg.pretty_text)

model = dict(
    type='FasterRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0.0, 0.0, 0.0, 0.0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0.0, 0.0, 0.0, 0.0],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='Normalize',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(
                type='Normalize',
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_train2017.json',
        img_prefix='data/coco/train2017/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
            dict(type='RandomFlip', flip_ratio=0.5),
            dict(
                type='Normalize',
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
        ]),
    val=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_val2017.json',
        img_prefix='data/coco/val2017/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(1333, 800),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(
                        type='Normalize',
                        mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True),
                    dict(type='Pad', size_divisor=32),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img'])
                ])
        ]),
    test=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_val2017.json',
        img_prefix='data/coco/val2017/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(1333, 800),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(
                        type='Normalize',
                        mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True),
                    dict(type='Pad', size_divisor=32),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img'])
                ])
        ]))
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]

 

from mmdet.apis import set_random_seed

# Update the dataset-related config parameters.
cfg.dataset_type = 'KittyTinyDataset'
cfg.data_root = '/content/kitti_tiny/'

# Update type, data_root, ann_file and img_prefix for the train, val and test datasets.
cfg.data.train.type = 'KittyTinyDataset'
cfg.data.train.data_root = '/content/kitti_tiny/'
cfg.data.train.ann_file = 'train.txt'
cfg.data.train.img_prefix = 'training/image_2'

cfg.data.val.type = 'KittyTinyDataset'
cfg.data.val.data_root = '/content/kitti_tiny/'
cfg.data.val.ann_file = 'val.txt'
cfg.data.val.img_prefix = 'training/image_2'

cfg.data.test.type = 'KittyTinyDataset'
cfg.data.test.data_root = '/content/kitti_tiny/'
cfg.data.test.ann_file = 'val.txt'
cfg.data.test.img_prefix = 'training/image_2'

# Update the number of classes.
cfg.model.roi_head.bbox_head.num_classes = 4
# Pretrained model weights to fine-tune from
cfg.load_from = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'

# Directory where trained weights and logs are saved.
cfg.work_dir = './tutorial_exps'

# Scale down the learning rate: the default 0.02 assumes 8 GPUs, and we train on a single GPU.
cfg.optimizer.lr = 0.02 / 8

cfg.lr_config.warmup = None
cfg.log_config.interval = 10

# Set explicitly due to a bug where the policy value disappears whenever the config is re-processed.
cfg.lr_config.policy = 'step'

# Change the evaluation metric since we use customized dataset.
cfg.evaluation.metric = 'mAP'
# We can set the evaluation interval to reduce the evaluation times
cfg.evaluation.interval = 12
# We can set the checkpoint saving interval to reduce the storage cost
cfg.checkpoint_config.interval = 12

# Set seed thus the results are more reproducible
cfg.seed = 0
set_random_seed(0, deterministic=False)
cfg.gpu_ids = range(1)


# We can initialize the logger for training and have a look
# at the final config used for training
print(f'Config:\n{cfg.pretty_text}')



Config:
model = dict(
    type='FasterRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        pretrained='torchvision://resnet50'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0.0, 0.0, 0.0, 0.0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=4,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0.0, 0.0, 0.0, 0.0],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        train_cfg=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False),
        test_cfg=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100),
        pretrained='torchvision://resnet50'),
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
dataset_type = 'KittyTinyDataset'
data_root = '/content/kitti_tiny/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='Normalize',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(
                type='Normalize',
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='KittyTinyDataset',
        ann_file='train.txt',
        img_prefix='training/image_2',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
            dict(type='RandomFlip', flip_ratio=0.5),
            dict(
                type='Normalize',
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
        ],
        data_root='/content/kitti_tiny/'),
    val=dict(
        type='KittyTinyDataset',
        ann_file='val.txt',
        img_prefix='training/image_2',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(1333, 800),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(
                        type='Normalize',
                        mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True),
                    dict(type='Pad', size_divisor=32),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img'])
                ])
        ],
        data_root='/content/kitti_tiny/'),
    test=dict(
        type='KittyTinyDataset',
        ann_file='val.txt',
        img_prefix='training/image_2',
        pipeline=[
            dict(type='LoadImageFromWebcam'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(1333, 800),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(
                        type='Normalize',
                        mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True),
                    dict(type='Pad', size_divisor=32),
                    dict(type='DefaultFormatBundle'),
                    dict(type='Collect', keys=['img'])
                ])
        ],
        data_root='/content/kitti_tiny/'))
evaluation = dict(interval=12, metric='mAP', by_epoch=True)
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None, type='OptimizerHook')
lr_config = dict(
    warmup=None,
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11],
    type='StepLrUpdaterHook',
    policy='step')
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=12, type='CheckpointHook')
log_config = dict(interval=10, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
resume_from = None
workflow = [('train', 1)]
work_dir = './tutorial_exps'
seed = 0
gpu_ids = range(0, 1)
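
If you want to reuse this modified config later (for example with tools/train.py), it can be persisted to a file. A minimal sketch using mmcv's Config.dump; the file name is an arbitrary choice:

import os.path as osp
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# Write the fully resolved config next to the training outputs.
cfg.dump(osp.join(cfg.work_dir, 'faster_rcnn_r50_fpn_1x_kitti_tiny.py'))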

Train the model according to the Dataset, Model, learning-rate schedule, and Pipeline settings defined in the Config.

  • Build the train dataset and run training with it.
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.apis import train_detector

# Build the train dataset.
datasets = [build_dataset(cfg.data.train)]
datasets

# [
#  KittyTinyDataset Train dataset with number of images 50, and instance counts: 
#  +----------+-------+-----------+-------+----------------+-------+-------------+-------+---------------+-------+
#  | category | count | category  | count | category       | count | category    | count | category      | count |
#  +----------+-------+-----------+-------+----------------+-------+-------------+-------+---------------+-------+
#  | 0 [Car]  | 147   | 1 [Truck] | 7     | 2 [Pedestrian] | 23    | 3 [Cyclist] | 7     | -1 background | 0     |
#  +----------+-------+-----------+-------+----------------+-------+-------------+-------+---------------+-------+]
datasets[0].CLASSES
# ('Car', 'Truck', 'Pedestrian', 'Cyclist')
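
To double-check the middle format the dataset produced, data_infos (populated by load_annotations() in CustomDataset) can be inspected directly; a quick look, not required for training:

info = datasets[0].data_infos[0]
print(info['filename'], info['width'], info['height'])
print(info['ann']['bboxes'].shape, info['ann']['labels'])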
model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
model.CLASSES = datasets[0].CLASSES

# /usr/local/lib/python3.7/dist-packages/mmdet-2.12.0-py3.7.egg/mmdet/models/backbones/resnet.py:400: UserWarning: DeprecationWarning: pretrained is a deprecated, please use "init_cfg" instead
#  warnings.warn('DeprecationWarning: pretrained is a deprecated, '
# Note: cfg.load_from points to the pretrained model with a relative path: 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
# so we must %cd into mmdetection first, as below.
 
%cd mmdetection 

mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# The number of epochs is set by the runner config; default is 12.
train_detector(model, datasets, cfg, distributed=False, validate=True)



/content/mmdetection
2021-05-27 02:01:03,488 - mmdet - INFO - load checkpoint from checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
2021-05-27 02:01:03,489 - mmdet - INFO - Use load_from_local loader
##### self.data_root: /content/kitti_tiny/ self.ann_file: /content/kitti_tiny/val.txt self.img_prefix: /content/kitti_tiny/training/image_2
#### ann_file: /content/kitti_tiny/val.txt
2021-05-27 02:01:03,630 - mmdet - WARNING - The model and loaded state dict do not match exactly

size mismatch for roi_head.bbox_head.fc_cls.weight: copying a param with shape torch.Size([81, 1024]) from checkpoint, the shape in current model is torch.Size([5, 1024]).
size mismatch for roi_head.bbox_head.fc_cls.bias: copying a param with shape torch.Size([81]) from checkpoint, the shape in current model is torch.Size([5]).
size mismatch for roi_head.bbox_head.fc_reg.weight: copying a param with shape torch.Size([320, 1024]) from checkpoint, the shape in current model is torch.Size([16, 1024]).
size mismatch for roi_head.bbox_head.fc_reg.bias: copying a param with shape torch.Size([320]) from checkpoint, the shape in current model is torch.Size([16]).
2021-05-27 02:01:03,639 - mmdet - INFO - Start running, host: root@d7fd648a5af0, work_dir: /content/mmdetection/tutorial_exps
2021-05-27 02:01:03,640 - mmdet - INFO - workflow: [('train', 1)], max: 12 epochs
2021-05-27 02:01:08,457 - mmdet - INFO - Epoch [1][10/25]	lr: 2.500e-03, eta: 0:02:18, time: 0.476, data_time: 0.229, memory: 2226, loss_rpn_cls: 0.0275, loss_rpn_bbox: 0.0179, loss_cls: 0.7554, acc: 73.3594, loss_bbox: 0.4131, loss: 1.2139
2021-05-27 02:01:10,877 - mmdet - INFO - Epoch [1][20/25]	lr: 2.500e-03, eta: 0:01:40, time: 0.242, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0178, loss_rpn_bbox: 0.0128, loss_cls: 0.2395, acc: 92.2559, loss_bbox: 0.3446, loss: 0.6147
2021-05-27 02:01:16,800 - mmdet - INFO - Epoch [2][10/25]	lr: 2.500e-03, eta: 0:01:29, time: 0.464, data_time: 0.228, memory: 2226, loss_rpn_cls: 0.0151, loss_rpn_bbox: 0.0156, loss_cls: 0.2097, acc: 93.5547, loss_bbox: 0.3205, loss: 0.5609
2021-05-27 02:01:19,231 - mmdet - INFO - Epoch [2][20/25]	lr: 2.500e-03, eta: 0:01:20, time: 0.243, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0133, loss_rpn_bbox: 0.0131, loss_cls: 0.1617, acc: 94.0332, loss_bbox: 0.2242, loss: 0.4123
2021-05-27 02:01:25,113 - mmdet - INFO - Epoch [3][10/25]	lr: 2.500e-03, eta: 0:01:15, time: 0.462, data_time: 0.227, memory: 2226, loss_rpn_cls: 0.0048, loss_rpn_bbox: 0.0117, loss_cls: 0.1387, acc: 95.1465, loss_bbox: 0.1990, loss: 0.3543
2021-05-27 02:01:27,568 - mmdet - INFO - Epoch [3][20/25]	lr: 2.500e-03, eta: 0:01:10, time: 0.246, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0069, loss_rpn_bbox: 0.0125, loss_cls: 0.1741, acc: 93.4863, loss_bbox: 0.2716, loss: 0.4651
2021-05-27 02:01:33,510 - mmdet - INFO - Epoch [4][10/25]	lr: 2.500e-03, eta: 0:01:05, time: 0.466, data_time: 0.225, memory: 2226, loss_rpn_cls: 0.0071, loss_rpn_bbox: 0.0151, loss_cls: 0.1503, acc: 94.2578, loss_bbox: 0.2479, loss: 0.4204
2021-05-27 02:01:35,960 - mmdet - INFO - Epoch [4][20/25]	lr: 2.500e-03, eta: 0:01:01, time: 0.245, data_time: 0.013, memory: 2226, loss_rpn_cls: 0.0028, loss_rpn_bbox: 0.0123, loss_cls: 0.1339, acc: 95.1172, loss_bbox: 0.2092, loss: 0.3582
2021-05-27 02:01:41,874 - mmdet - INFO - Epoch [5][10/25]	lr: 2.500e-03, eta: 0:00:57, time: 0.466, data_time: 0.226, memory: 2226, loss_rpn_cls: 0.0038, loss_rpn_bbox: 0.0100, loss_cls: 0.1278, acc: 94.9512, loss_bbox: 0.2143, loss: 0.3559
2021-05-27 02:01:44,334 - mmdet - INFO - Epoch [5][20/25]	lr: 2.500e-03, eta: 0:00:53, time: 0.246, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0071, loss_rpn_bbox: 0.0119, loss_cls: 0.1317, acc: 94.7852, loss_bbox: 0.2035, loss: 0.3542
2021-05-27 02:01:50,296 - mmdet - INFO - Epoch [6][10/25]	lr: 2.500e-03, eta: 0:00:49, time: 0.467, data_time: 0.228, memory: 2226, loss_rpn_cls: 0.0014, loss_rpn_bbox: 0.0094, loss_cls: 0.1039, acc: 96.0059, loss_bbox: 0.1820, loss: 0.2968
2021-05-27 02:01:52,762 - mmdet - INFO - Epoch [6][20/25]	lr: 2.500e-03, eta: 0:00:45, time: 0.247, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0039, loss_rpn_bbox: 0.0104, loss_cls: 0.1045, acc: 96.0840, loss_bbox: 0.1877, loss: 0.3065
2021-05-27 02:01:58,690 - mmdet - INFO - Epoch [7][10/25]	lr: 2.500e-03, eta: 0:00:41, time: 0.465, data_time: 0.225, memory: 2226, loss_rpn_cls: 0.0019, loss_rpn_bbox: 0.0106, loss_cls: 0.1018, acc: 96.2402, loss_bbox: 0.1835, loss: 0.2978
2021-05-27 02:02:01,159 - mmdet - INFO - Epoch [7][20/25]	lr: 2.500e-03, eta: 0:00:38, time: 0.247, data_time: 0.011, memory: 2226, loss_rpn_cls: 0.0039, loss_rpn_bbox: 0.0125, loss_cls: 0.0970, acc: 96.2500, loss_bbox: 0.1859, loss: 0.2993
2021-05-27 02:02:07,094 - mmdet - INFO - Epoch [8][10/25]	lr: 2.500e-03, eta: 0:00:33, time: 0.464, data_time: 0.225, memory: 2226, loss_rpn_cls: 0.0029, loss_rpn_bbox: 0.0088, loss_cls: 0.0875, acc: 96.6309, loss_bbox: 0.1584, loss: 0.2575
2021-05-27 02:02:09,550 - mmdet - INFO - Epoch [8][20/25]	lr: 2.500e-03, eta: 0:00:30, time: 0.246, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0020, loss_rpn_bbox: 0.0089, loss_cls: 0.0976, acc: 96.4160, loss_bbox: 0.1765, loss: 0.2850
2021-05-27 02:02:15,504 - mmdet - INFO - Epoch [9][10/25]	lr: 2.500e-04, eta: 0:00:26, time: 0.466, data_time: 0.225, memory: 2226, loss_rpn_cls: 0.0029, loss_rpn_bbox: 0.0093, loss_cls: 0.0829, acc: 96.8457, loss_bbox: 0.1472, loss: 0.2423
2021-05-27 02:02:17,968 - mmdet - INFO - Epoch [9][20/25]	lr: 2.500e-04, eta: 0:00:23, time: 0.246, data_time: 0.013, memory: 2226, loss_rpn_cls: 0.0012, loss_rpn_bbox: 0.0071, loss_cls: 0.0760, acc: 97.2168, loss_bbox: 0.1358, loss: 0.2201
2021-05-27 02:02:23,924 - mmdet - INFO - Epoch [10][10/25]	lr: 2.500e-04, eta: 0:00:19, time: 0.466, data_time: 0.226, memory: 2226, loss_rpn_cls: 0.0027, loss_rpn_bbox: 0.0095, loss_cls: 0.0899, acc: 96.4160, loss_bbox: 0.1554, loss: 0.2575
2021-05-27 02:02:26,391 - mmdet - INFO - Epoch [10][20/25]	lr: 2.500e-04, eta: 0:00:15, time: 0.247, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0013, loss_rpn_bbox: 0.0064, loss_cls: 0.0731, acc: 97.2168, loss_bbox: 0.1324, loss: 0.2132
2021-05-27 02:02:32,334 - mmdet - INFO - Epoch [11][10/25]	lr: 2.500e-04, eta: 0:00:11, time: 0.465, data_time: 0.225, memory: 2226, loss_rpn_cls: 0.0037, loss_rpn_bbox: 0.0084, loss_cls: 0.0911, acc: 96.3965, loss_bbox: 0.1458, loss: 0.2490
2021-05-27 02:02:34,811 - mmdet - INFO - Epoch [11][20/25]	lr: 2.500e-04, eta: 0:00:08, time: 0.248, data_time: 0.012, memory: 2226, loss_rpn_cls: 0.0012, loss_rpn_bbox: 0.0087, loss_cls: 0.0703, acc: 97.3047, loss_bbox: 0.1367, loss: 0.2168
2021-05-27 02:02:40,779 - mmdet - INFO - Epoch [12][10/25]	lr: 2.500e-05, eta: 0:00:04, time: 0.469, data_time: 0.226, memory: 2226, loss_rpn_cls: 0.0016, loss_rpn_bbox: 0.0065, loss_cls: 0.0630, acc: 97.5586, loss_bbox: 0.1168, loss: 0.1879
2021-05-27 02:02:43,234 - mmdet - INFO - Epoch [12][20/25]	lr: 2.500e-05, eta: 0:00:01, time: 0.245, data_time: 0.013, memory: 2226, loss_rpn_cls: 0.0016, loss_rpn_bbox: 0.0070, loss_cls: 0.0700, acc: 97.2949, loss_bbox: 0.1068, loss: 0.1853
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 25/25, 13.7 task/s, elapsed: 2s, ETA:     0s
---------------iou_thr: 0.5---------------
2021-05-27 02:02:46,577 - mmdet - INFO - 
+------------+-----+------+--------+-------+
| class      | gts | dets | recall | ap    |
+------------+-----+------+--------+-------+
| Car        | 62  | 145  | 0.935  | 0.829 |
| Truck      | 3   | 16   | 1.000  | 0.600 |
| Pedestrian | 13  | 49   | 0.923  | 0.781 |
| Cyclist    | 7   | 51   | 0.714  | 0.180 |
+------------+-----+------+--------+-------+
| mAP        |     |      |        | 0.598 |
+------------+-----+------+--------+-------+
2021-05-27 02:02:46,581 - mmdet - INFO - Saving checkpoint at 12 epochs
2021-05-27 02:02:47,549 - mmdet - INFO - Epoch(val) [12][25]	AP50: 0.5980, mAP: 0.5975

Run inference with the trained model.

from mmdet.apis import inference_detector, init_detector, show_result_pyplot

# Use a BGR image (cv2.imread returns BGR, which is what inference_detector expects for ndarray input)
img = cv2.imread('/content/kitti_tiny/training/image_2/000068.jpeg')

model.cfg = cfg

result = inference_detector(model, img)
show_result_pyplot(model, img, result)
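
Instead of show_result_pyplot, the raw result can be post-processed directly: for a Faster R-CNN it is a list with one (N, 5) array per class, each row in [x1, y1, x2, y2, score] order. A minimal sketch filtering detections by score (the 0.5 threshold is an arbitrary choice):

score_thr = 0.5  # arbitrary confidence threshold
for class_id, class_bboxes in enumerate(result):
    for x1, y1, x2, y2, score in class_bboxes:
        if score < score_thr:
            continue
        print(model.CLASSES[class_id], round(float(score), 3),
              [int(x1), int(y1), int(x2), int(y2)])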

 

 
