Faster RCNN minibatch.py

minibatch.py 的功能是: Compute minibatch blobs for training a Fast R-CNN network. 與roidb不同的是, minibatch中存儲的並不是完整的整張圖像,而是從圖像經過轉換後得到的四維blob以及從圖像中截取的proposals,以及與之對應的labels等

def get_minibatch(roidb, num_classes) 該函數的功能是“Given a roidb, construct a minibatch sampled from it” , 即從roidb中構造出一個minibatch來用於訓練

def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it.

    Args:
        roidb: list of image roidb entries for this batch (the training
            loop passes cfg.TRAIN.IMS_PER_BATCH entries); each entry is a
            dict describing one image.
        num_classes: number of object classes K, used to size the
            bbox-regression blobs (4*K columns).

    Returns:
        blobs: dict with 'data' (N x C x H x W image blob) plus either
            'gt_boxes' and 'im_info' (RPN case) or 'rois', 'labels' and,
            when cfg.TRAIN.BBOX_REG, 'bbox_targets',
            'bbox_inside_weights', 'bbox_outside_weights' (non-RPN case).
    """
    num_images = len(roidb)

    # Sample a random scale index per image to inject multi-scale
    # information into training.
    random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                                    size=num_images)
    assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
        'num_images ({}) must divide BATCH_SIZE ({})'. \
        format(num_images, cfg.TRAIN.BATCH_SIZE)
    # Use integer arithmetic: the original "/" plus np.round produced
    # floats, which break slicing and npr.choice(size=...) on Python 3 /
    # modern numpy. "//" is equivalent here since the assert above
    # guarantees exact division.
    rois_per_image = cfg.TRAIN.BATCH_SIZE // num_images
    fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))

    # Get the input image blob, formatted for caffe (N x C x H x W), along
    # with the resize factor applied to each image.
    im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

    blobs = {'data': im_blob}

    if cfg.TRAIN.HAS_RPN:
        assert len(im_scales) == 1, "Single batch only"
        assert len(roidb) == 1, "Single batch only"
        # gt boxes: (x1, y1, x2, y2, cls); keep only non-background entries.
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
        # Scale coordinates into the resized image's frame.
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
        blobs['gt_boxes'] = gt_boxes
        # im_info rows are (blob height, blob width, image scale).
        blobs['im_info'] = np.array(
            [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
            dtype=np.float32)
    else:  # not using RPN
        # Build the RoI and label blobs incrementally; rows for each image
        # are appended below via vstack/hstack.
        rois_blob = np.zeros((0, 5), dtype=np.float32)
        labels_blob = np.zeros((0), dtype=np.float32)

        bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
        bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
        for im_i in range(num_images):
            # Randomly sample a mix of foreground and background RoIs for
            # this image: coordinates, class labels, regression targets and
            # the corresponding inside weights.
            labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
                = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
                               num_classes)

            # _sample_rois returns RoIs in original-image coordinates, so
            # project them into the resized image here.
            rois = _project_im_rois(im_rois, im_scales[im_i])

            # Prepend the batch index so each row records which image in
            # the blob it belongs to: (batch_ind, x1, y1, x2, y2).
            batch_ind = im_i * np.ones((rois.shape[0], 1))
            rois_blob_this_image = np.hstack((batch_ind, rois))
            rois_blob = np.vstack((rois_blob, rois_blob_this_image))

            # Append this image's labels (kept as a flat 1-D array), bbox
            # targets and loss weights.
            labels_blob = np.hstack((labels_blob, labels))
            bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
            bbox_inside_blob = np.vstack((bbox_inside_blob,
                                          bbox_inside_weights))

        blobs['rois'] = rois_blob
        blobs['labels'] = labels_blob

        if cfg.TRAIN.BBOX_REG:
            blobs['bbox_targets'] = bbox_targets_blob
            blobs['bbox_inside_weights'] = bbox_inside_blob
            # Outside weights are 1.0 exactly where an inside weight is set.
            blobs['bbox_outside_weights'] = \
                np.array(bbox_inside_blob > 0).astype(np.float32)

    return blobs

def _get_image_blob(roidb, scale_inds)
該函數返回的是多張圖像的blob 和 多張圖像各自對應的scale
minibatch.py 中的_get_image_blob函數與generate.py 中的_get_image_blob 函數功能一樣,只不過處理的是單張圖像還是多張圖像的問題。

def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.

    Args:
        roidb: list of image roidb entries; each must provide 'image'
            (a path on disk readable by cv2.imread) and 'flipped' (bool).
        scale_inds: per-image indices into cfg.TRAIN.SCALES selecting the
            target size for each image.

    Returns:
        blob: N x C x H x W ndarray of the preprocessed images.
        im_scales: list of the resize factor applied to each image.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        im = cv2.imread(roidb[i]['image'])

        # When the imdb was prepared, flipped entries only had their box
        # coordinates mirrored, so the pixels must be flipped here.
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]

        # BUG FIX: target_size was referenced without being defined
        # (NameError). It is the scale chosen for this image by the caller.
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        # prep_im_for_blob subtracts the pixel means and resizes the image,
        # returning the processed image and the scale factor used.
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)

    # Convert the list of preprocessed images into a single caffe-style
    # N x C x H x W blob.
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales

def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes)
在 def get_minibatch(roidb, num_classes) 中調用此函數,傳進來的實參爲單張圖像的roidb ,該函數主要功能是隨機組合sample of RoIs, 來生成前景樣本和背景樣本。

def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
    """Generate a random sample of RoIs comprising foreground and background
    examples.

    Args:
        roidb: a single image's roidb entry, providing 'max_classes',
            'max_overlaps', 'boxes' and 'bbox_targets'.
        fg_rois_per_image: desired number of foreground RoIs.
        rois_per_image: total number of RoIs to sample for this image.
        num_classes: number of classes K (sizes the 4*K regression blobs).

    Returns:
        labels, overlaps, rois, bbox_targets, bbox_inside_weights for the
        sampled RoIs (foreground rows first, then background).
    """
    # label = class the RoI has max overlap with
    labels = roidb['max_classes']
    overlaps = roidb['max_overlaps']
    rois = roidb['boxes']

    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # Guard against images with fewer than fg_rois_per_image foreground
    # RoIs. Cast to int: np.minimum can yield a float (e.g. when
    # fg_rois_per_image came from np.round), which breaks npr.choice's
    # size argument and the slice below on Python 3 / modern numpy.
    fg_rois_per_this_image = int(np.minimum(fg_rois_per_image, fg_inds.size))
    # Sample foreground regions without replacement
    if fg_inds.size > 0:
        fg_inds = npr.choice(
                fg_inds, size=fg_rois_per_this_image, replace=False)

    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # Fill the rest of the batch with background RoIs (guarding against
    # there being fewer than desired).
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = int(np.minimum(bg_rois_per_this_image,
                                            bg_inds.size))
    # Sample background regions without replacement (the original comment
    # here incorrectly said "foreground").
    if bg_inds.size > 0:
        bg_inds = npr.choice(
                bg_inds, size=bg_rois_per_this_image, replace=False)

    # The indices that we're selecting (both fg and bg); foregrounds come
    # first, so backgrounds occupy the tail.
    keep_inds = np.append(fg_inds, bg_inds)
    # Fancy indexing copies, so the in-place clamp below does not mutate
    # the arrays stored in the roidb.
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[fg_rois_per_this_image:] = 0
    overlaps = overlaps[keep_inds]
    rois = rois[keep_inds]

    # Expand the compact per-RoI regression targets into the N x 4K form
    # used by the network; the matching inside weights are produced with
    # them.
    bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
            roidb['bbox_targets'][keep_inds, :], num_classes)

    return labels, overlaps, rois, bbox_targets, bbox_inside_weights

def _get_bbox_regression_labels(bbox_target_data, num_classes):
該函數主要是獲取bbox_target_data中迴歸目標的4個座標編碼作爲bbox_targets,同時生成bbox_inside_weights,它們都是N * 4K 的ndarray,N表示keep_inds的size,也就是minibatch中樣本的個數。
該函數在def _sample_rois 中得以調用

def _get_bbox_regression_labels(bbox_target_data, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 4-of-4*K representation
    used by the network (i.e. only one class has non-zero targets). The
    loss weights are similarly expanded.

    Args:
        bbox_target_data: N x 5 float array whose rows are
            (cls, tx, ty, tw, th).
        num_classes: number of classes K.

    Returns:
        bbox_targets (ndarray): N x 4K blob of regression targets
        bbox_inside_weights (ndarray): N x 4K blob of loss weights
    """
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
    bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    # Background rows (cls == 0) keep all-zero targets and weights.
    inds = np.where(clss > 0)[0]
    for ind in inds:
        # clss is a float column, so cast: float slice bounds (4 * cls)
        # raise on Python 3 / modern numpy.
        cls = int(clss[ind])
        start = 4 * cls
        end = start + 4
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
    return bbox_targets, bbox_inside_weights

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章