Faster R-CNN layer.py

def setup(self, bottom, top): called when the RoIDataLayer is created; it initializes self._name_to_top_map (a mapping from blob name to blob index). Since _caffe.cpp contains .def("setup", &Layer<Dtype>::LayerSetUp), this setup(self, bottom, top) ultimately corresponds to the underlying Layer::LayerSetUp, with bottom and top corresponding to const vector<Blob<Dtype>*>& bottom and const vector<Blob<Dtype>*>& top respectively.
Recall from src/caffe/net.cpp that Caffe only calls Layer::SetUp to set up a layer after "Creating Layer", AppendTop and AppendBottom have completed.
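The param_str that setup() parses comes from the python_param block of the training prototxt; in stage1_rpn_train.pt it is param_str: "'num_classes': 21" (20 VOC foreground classes plus background). A minimal sketch of what the YAML parsing below yields:

import yaml

# What setup() receives as self.param_str_ for the VOC models:
layer_params = yaml.load("'num_classes': 21")
print layer_params['num_classes']   # 21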

def setup(self, bottom, top):
        """Setup the RoIDataLayer."""
        # print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Creating layer input-data'
        # parse the layer parameter string, which must be valid YAML
        layer_params = yaml.load(self.param_str_)

        # Parse the python_param parameters of this Python layer from the prototxt
        self._num_classes = layer_params['num_classes']

        self._name_to_top_map = {}
        # print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ _name_to_top_map'
        # data blob: holds a batch of N images, each with 3 channels
        idx = 0

        # Set the shape of top[0] ('data') so that forward propagation works even
        # though the image shapes in each minibatch differ across iterations. The
        # 'Top shape' lines printed in the training log are also produced from what
        # is set here, but the top blobs still get reshaped again on every forward.
        top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 3,
            max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
        self._name_to_top_map['data'] = idx
        idx += 1

        # cfg.TRAIN.HAS_RPN is True when training the RPN
        if cfg.TRAIN.HAS_RPN:
            top[idx].reshape(1, 3)
            self._name_to_top_map['im_info'] = idx
            idx += 1

            top[idx].reshape(1, 4)
            self._name_to_top_map['gt_boxes'] = idx
            idx += 1
        else: # not using RPN
            # rois blob: holds R regions of interest, each is a 5-tuple
            # (n, x1, y1, x2, y2) specifying an image batch index n and a
            # rectangle (x1, y1, x2, y2)
            top[idx].reshape(1, 5)
            self._name_to_top_map['rois'] = idx
            idx += 1

            # labels blob: R categorical labels in [0, ..., K] for K foreground
            # classes plus background
            top[idx].reshape(1)
            self._name_to_top_map['labels'] = idx
            idx += 1

            # e.g. cfg.TRAIN.BBOX_REG is True when training Fast R-CNN
            if cfg.TRAIN.BBOX_REG:
                # bbox_targets blob: R bounding-box regression targets with 4
                # targets per class
                top[idx].reshape(1, self._num_classes * 4)
                self._name_to_top_map['bbox_targets'] = idx
                idx += 1

                # bbox_inside_weights blob: At most 4 targets per roi are active;
                # this binary vector specifies the subset of active targets.
                # bbox_inside_weights and bbox_outside_weights are consumed by the
                # SmoothL1Loss layer.
                top[idx].reshape(1, self._num_classes * 4)
                self._name_to_top_map['bbox_inside_weights'] = idx
                idx += 1

                top[idx].reshape(1, self._num_classes * 4)
                self._name_to_top_map['bbox_outside_weights'] = idx
                idx += 1

        print 'RoiDataLayer: name_to_top:', self._name_to_top_map
        assert len(top) == len(self._name_to_top_map)
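For concreteness, reading the branches above off directly, the finished map looks like this under the two training configurations:

# RPN training (cfg.TRAIN.HAS_RPN == True):
#   self._name_to_top_map == {'data': 0, 'im_info': 1, 'gt_boxes': 2}
# Fast R-CNN training (HAS_RPN == False, BBOX_REG == True):
#   self._name_to_top_map == {'data': 0, 'rois': 1, 'labels': 2,
#                             'bbox_targets': 3, 'bbox_inside_weights': 4,
#                             'bbox_outside_weights': 5}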

def _shuffle_roidb_inds(self): randomly permutes the order of the training roidb.

def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        if cfg.TRAIN.ASPECT_GROUPING:
            # Group images with similar aspect ratios together (really just two
            # cases: landscape or portrait). This helps speed, presumably because
            # images batched together then need less padding to a common size.
            widths = np.array([r['width'] for r in self._roidb])
            heights = np.array([r['height'] for r in self._roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            inds = np.hstack((
                np.random.permutation(horz_inds),
                np.random.permutation(vert_inds)))
            inds = np.reshape(inds, (-1, 2))
            # np.random.permutation returns a shuffled copy with no repeated
            # elements (like np.random.choice with replace=False over all indices)
            row_perm = np.random.permutation(np.arange(inds.shape[0]))
            inds = np.reshape(inds[row_perm, :], (-1,))
            self._perm = inds
        else:
            self._perm = np.random.permutation(np.arange(len(self._roidb)))
        # index of the next image to process within self._perm
        self._cur = 0
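A toy run of the aspect-grouping branch, with six hypothetical images, shows that after the pair-reshape trick each consecutive pair of indices (one minibatch when cfg.TRAIN.IMS_PER_BATCH == 2) shares an orientation. (In the real pipeline horizontal flipping doubles the roidb, so the landscape and portrait groups both have even size and pairs never mix orientations.)

import numpy as np

# Hypothetical sizes: indices 0, 2, 4, 5 are landscape; 1 and 3 are portrait.
roidb = [{'width': w, 'height': h} for (w, h) in
         [(500, 375), (375, 500), (640, 480), (480, 640), (500, 333), (600, 400)]]
widths = np.array([r['width'] for r in roidb])
heights = np.array([r['height'] for r in roidb])
horz_inds = np.where(widths >= heights)[0]   # [0 2 4 5]
vert_inds = np.where(widths < heights)[0]    # [1 3]
inds = np.hstack((np.random.permutation(horz_inds),
                  np.random.permutation(vert_inds)))
# Pair consecutive indices, shuffle the pairs, then flatten back out.
inds = np.reshape(inds, (-1, 2))
inds = np.reshape(inds[np.random.permutation(np.arange(inds.shape[0])), :], (-1,))
print inds   # e.g. [4 0 1 3 5 2] -- every consecutive pair is same-orientation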

def _get_next_minibatch_inds(self): why check "self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb)"? Because training iterates over the whole training set several times (multiple epochs); when the current permutation is about to run out, the roidb indices are reshuffled and the cursor reset.

def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()

        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
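A toy walk-through of the wrap-around logic, with hypothetical numbers (5 images, IMS_PER_BATCH == 2). Note that with >= the last index of an odd-length permutation is skipped each epoch; this is harmless in practice since flipping makes the roidb length even:

import numpy as np

roidb_len, ims_per_batch = 5, 2   # hypothetical sizes
perm = np.random.permutation(np.arange(roidb_len))
cur = 0
for step in xrange(4):
    if cur + ims_per_batch >= roidb_len:   # epoch exhausted: reshuffle
        perm = np.random.permutation(np.arange(roidb_len))
        cur = 0
    print step, perm[cur:cur + ims_per_batch]
    cur += ims_per_batch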

def _get_next_minibatch(self): returns the blobs for the next minibatch, either from the prefetch queue or by building them synchronously.

def _get_next_minibatch(self):
        """Return the blobs to be used for the next minibatch.

        If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
        separate process and made available through self._blob_queue.
        """
        if cfg.TRAIN.USE_PREFETCH:
            return self._blob_queue.get()
        else:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            # calls get_minibatch from minibatch.py
            return get_minibatch(minibatch_db, self._num_classes)
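When cfg.TRAIN.USE_PREFETCH is on, self._blob_queue is filled by the BlobFetcher process (defined later in the same layer.py and started in set_roidb below). A condensed sketch of its structure:

from multiprocessing import Process

class BlobFetcher(Process):
    """Condensed sketch of the prefetch process from layer.py."""
    def __init__(self, queue, roidb, num_classes):
        super(BlobFetcher, self).__init__()
        self._queue = queue
        self._roidb = roidb
        self._num_classes = num_classes
        self._perm = None
        self._cur = 0
        self._shuffle_roidb_inds()

    # _shuffle_roidb_inds / _get_next_minibatch_inds duplicate the layer's
    # methods shown above, so the child process keeps its own _perm / _cur.

    def run(self):
        while True:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            # Same helper as the synchronous path; blocks when the queue is full.
            self._queue.put(get_minibatch(minibatch_db, self._num_classes))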

def forward(self, bottom, top): the forward pass. For this layer, forward only needs to copy data; in each training stage, data is copied according to the network structure defined by that stage's prototxt.
One thing to keep in mind: the Forward function of the Layer template class calls Reshape() again before dispatching to forward_cpu / forward_gpu, so it does not matter that the shapes of the images (or features) in each minibatch differ across iterations; the Reshape in SetUp only sets the initial shape of the top blobs.

def forward(self, bottom, top):
        """Get blobs and copy them into this layer's top blob vector."""
        blobs = self._get_next_minibatch()

        # 1. In stage1_rpn_train.pt this layer has 3 top blobs: 'data', 'im_info',
        #    'gt_boxes'.
        # 2. In stage1_fast_rcnn_train.pt it has 6 top blobs: 'data', 'rois',
        #    'labels', 'bbox_targets', 'bbox_inside_weights',
        #    'bbox_outside_weights'.
        for blob_name, blob in blobs.iteritems():
            top_ind = self._name_to_top_map[blob_name]
            # Reshape the net's input blobs (calls caffe.Blob's reshape method).
            # The top blobs must be reshaped on every forward pass because every
            # iteration fetches a new minibatch via _get_next_minibatch; when
            # training Fast R-CNN, the shapes of data, rois, labels, bbox_targets,
            # etc. change from minibatch to minibatch.
            top[top_ind].reshape(*(blob.shape))
            # Copy data into net's input blobs
            top[top_ind].data[...] = blob.astype(np.float32, copy=False)
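For reference, during RPN training the blobs dict produced by get_minibatch contains the following (shapes as built in minibatch.py; H and W vary per image, G is the number of ground-truth boxes):

# blobs['data']     : (1, 3, H, W) float32 resized, mean-subtracted image
# blobs['im_info']  : (1, 3)       [height, width, im_scale]
# blobs['gt_boxes'] : (G, 5)       [x1, y1, x2, y2, class]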

def backward(self, top, propagate_down, bottom): a data layer has nothing to back-propagate, so this is a no-op.

def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass

def reshape(self, bottom, top): a no-op, since the top blobs are reshaped during forward.

def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass

def set_roidb(self, roidb): does two things: 1. hands the roidb to the RoIDataLayer; 2. shuffles it (and, if prefetching is enabled, starts the BlobFetcher process).

def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        # In self._roidb = roidb, self is the RoIDataLayer instance (not a
        # pascal_voc or imdb instance). The roidb on the right-hand side was built
        # when the imdb / pascal_voc instance was created, and is passed in via
        # self.solver.net.layers[0].set_roidb(roidb) inside SolverWrapper's
        # __init__ method.
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exits
            def cleanup():
                print 'Terminating BlobFetcher'
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
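The call site that hands the roidb to this layer is in lib/fast_rcnn/train.py; condensed from SolverWrapper.__init__:

# Condensed from SolverWrapper.__init__ in lib/fast_rcnn/train.py:
self.solver = caffe.SGDSolver(solver_prototxt)
# ... snapshot / pretrained-model handling omitted ...
# layers[0] is the Python 'input-data' layer, i.e. this RoIDataLayer, so the
# line below invokes the set_roidb shown above.
self.solver.net.layers[0].set_roidb(roidb)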