mmdetection v1.0.0: inference code, modifying the config (images per batch, learning-rate decay, etc.), enabling evaluation during training, and training on grayscale images

Inference code

import os

import cv2
import mmcv
import numpy as np
import pycocotools.mask as maskUtils

from mmdet.apis import init_detector, inference_detector, show_result

config_file = 'configs/mask_rcnn_r50_fpn_1x.py'
checkpoint_file = 'checkpoint/epoch_200.pth'
model = init_detector(config_file, checkpoint_file, device='cuda:0')

# Collect all images under the test directory.
path = "data/box_for_label"
imgs = [os.path.join(path, name) for name in os.listdir(path)]

score_thr = 0.3
for lab, img_path in enumerate(imgs):
    result = inference_detector(model, img_path)

    img = mmcv.imread(img_path)
    img = img.copy()
    oriimg = img.copy()

    # result is (bbox_result, segm_result) for models with a mask head,
    # otherwise just bbox_result.
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)

    # Draw segmentation masks whose score exceeds the threshold.
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        np.random.seed(42)
        color_masks = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
        for i in inds:
            i = int(i)
            mask = maskUtils.decode(segms[i]).astype(bool)
            img[mask] = img[mask] * 0.5 + color_masks * 0.5

    # Stitch the original and the rendered image side by side.
    h1, w1 = oriimg.shape[:2]
    h2, w2 = img.shape[:2]
    vis = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
    vis[:h1, :w1, :] = oriimg
    vis[:h2, w1:w1 + w2, :] = img

    out_file = 'result_{}.jpg'.format(lab)
    cv2.imwrite(out_file, vis)
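
As an aside, the show_result helper imported above can render boxes and masks in one call. A minimal sketch, assuming the v1.0.0 signature show_result(img, result, class_names, score_thr, wait_time, show, out_file); verify it against your checkout:

# Sketch: let mmdet draw the detections itself (mmdetection v1.0.0 API assumed).
for lab, img_path in enumerate(imgs):
    result = inference_detector(model, img_path)
    show_result(img_path, result, model.CLASSES, score_thr=score_thr,
                show=False, out_file='show_result_{}.jpg'.format(lab))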

 

Training on grayscale images

If you want to train on grayscale images in this version, you should modify:

mmdetection/mmdet/datasets/pipelines/loading.py

@PIPELINES.register_module
class LoadImageFromFile(object):

    def __init__(self, to_float32=False, color_type='color'):
        self.to_float32 = to_float32
        self.color_type = color_type

    def __call__(self, results):
        if results['img_prefix'] is not None:
            filename = osp.join(results['img_prefix'],
                                results['img_info']['filename'])
        else:
            filename = results['img_info']['filename']
        img = mmcv.imread(filename, self.color_type)
        if self.to_float32:
            img = img.astype(np.float32)
        results['filename'] = filename
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results

    def __repr__(self):
        return '{} (to_float32={}, color_type={})'.format(
            self.__class__.__name__, self.to_float32, self.color_type)

 

Immediately after the line img = mmcv.imread(filename, self.color_type), insert the following:

import cv2
# Collapse to a single channel, then replicate back to 3 channels so the
# network input shape (H, W, 3) and the normalization config stay unchanged.
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
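
Putting it together, the patched __call__ would look roughly like this (the method shown above with the two conversion lines inserted; remember to import cv2 at the top of loading.py):

    def __call__(self, results):
        if results['img_prefix'] is not None:
            filename = osp.join(results['img_prefix'],
                                results['img_info']['filename'])
        else:
            filename = results['img_info']['filename']
        img = mmcv.imread(filename, self.color_type)
        # Grayscale training: drop color information but keep a 3-channel image.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if self.to_float32:
            img = img.astype(np.float32)
        results['filename'] = filename
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results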



Enabling evaluation during training

Pass the --validate flag to train.py, for example via a dist_train.sh launcher like this:

#!/usr/bin/env bash

PYTHON=/home/apple/anaconda3/envs/py36_mmdetection/bin/python

CONFIG=$1
GPUS=$2
GPUNAME=$3
PORT=${PORT:-29500}

$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
    $(dirname "$0")/train.py $CONFIG --gpus $GPUNAME --validate --launcher pytorch ${@:4}
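
For example, assuming the script above is saved as tools/dist_train.sh and made executable, a 2-GPU run with validation enabled could be launched like this (any argument after the third is forwarded to train.py):

./tools/dist_train.sh configs/mask_rcnn_r50_fpn_1x.py 2 2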

To change how often evaluation runs during training, edit the evaluation interval in:

configs/mask_rcnn_r50_fpn_1x.py

# yapf:enable
evaluation = dict(interval=50)



Modifying the config: images per batch, learning-rate decay, etc.

In configs/mask_rcnn_r50_fpn_1x.py, modify:

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

..............................

data = dict(
    imgs_per_gpu=4,     # images per GPU per iteration; effective batch size = imgs_per_gpu * number of GPUs
    workers_per_gpu=2,  # dataloader worker processes per GPU
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
..............................

# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[40, 80, 120])  # epochs at which the learning rate is multiplied by 0.1
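
Note that imgs_per_gpu times the number of GPUs is the effective batch size, and the mmdetection docs recommend scaling the base learning rate linearly with it (the stock configs assume 8 GPUs x 2 images per GPU, i.e. batch size 16 at lr=0.02). A minimal sketch of that rule of thumb; the helper below is illustrative, not part of the config:

# Linear scaling rule (sketch): scale the base lr with the effective batch size.
BASE_LR = 0.02    # default for mask_rcnn_r50_fpn_1x (8 GPUs x 2 imgs per GPU)
BASE_BATCH = 16

def scaled_lr(imgs_per_gpu, num_gpus):
    """Return a learning rate scaled linearly with the effective batch size."""
    return BASE_LR * (imgs_per_gpu * num_gpus) / BASE_BATCH

print(scaled_lr(4, 2))  # e.g. 4 imgs/GPU on 2 GPUs -> 0.01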

With imgs_per_gpu=4, nvidia-smi shows the card nearly full:

52%   71C    P2   185W / 250W |  10227MiB / 11019MiB |     72%      Default

GPU memory is just about fully used; any larger batch would not fit.

https://github.com/open-mmlab/mmdetection/tree/v1.0.0



 

 

 
