FCN Model Implementation: PyTorch + Pretrained VGG16

The FCN network closely follows VGG16, but replaces the fully connected layers at the end with convolutional layers. For the full architecture and details, see the paper:

https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf

 

Below is a detailed walkthrough of implementing FCN in PyTorch.

This post is based on https://zhuanlan.zhihu.com/p/32506912, but modifies part of the code, adds many new comments, and updates everything to PyTorch 1.x.

First, reading the images:

import os
import random
import datetime

import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils import data
from torch.utils.data import DataLoader
from torchvision import models
from torchvision import transforms as tfs
from PIL import Image

# Root directory of the VOC dataset used here
voc_root = '/media/cyq/CU/Ubuntu system files/VOCdevkit/VOC2012'



# Read the lists of image and label file paths for the train/val split
def read_images(root=voc_root, train=True):
    txt_fname = root + '/ImageSets/Segmentation/' + ('train.txt' if train else 'val.txt')
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    data = [os.path.join(root, 'JPEGImages', i+'.jpg') for i in images]
    label = [os.path.join(root, 'SegmentationClass', i+'.png') for i in images]
    return data, label
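
As a quick sanity check (a minimal sketch; the exact paths depend on your local voc_root), you can verify that the image and label lists line up:

imgs, labels = read_images(train=True)
print(len(imgs), 'training images')
print(imgs[0])    # a .jpg under JPEGImages
print(labels[0])  # the matching .png under SegmentationClass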

Crop the input images so that they all have the same size, which makes batching for training straightforward:

# Crop the image and its label with the same randomly chosen box
def rand_crop(data, label, height, width):
    '''
    data is PIL.Image object
    label is PIL.Image object
    '''
    x = random.uniform(0,data.size[0]-width)
    x = int(x)
    y = random.uniform(0,data.size[1]-height)
    y = int(y)

    box = (x,y,x+width,y+height)
    data = data.crop(box)
    label = label.crop(box)
    return data, label
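
A quick usage sketch, reusing imgs and labels from the check above, showing that the image and its label are cropped with the same box:

img = Image.open(imgs[0])
lbl = Image.open(labels[0]).convert('RGB')
img_c, lbl_c = rand_crop(img, lbl, 320, 480)
print(img_c.size, lbl_c.size)  # both (480, 320); PIL reports (width, height)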

Mapping between label colors and class indices:

# The 21 VOC classes
classes = ['background','aeroplane','bicycle','bird','boat',
           'bottle','bus','car','cat','chair','cow','diningtable',
           'dog','horse','motorbike','person','potted plant',
           'sheep','sofa','train','tv/monitor']

# The RGB color assigned to each class
colormap = [[0,0,0],[128,0,0],[0,128,0], [128,128,0], [0,0,128],
            [128,0,128],[0,128,128],[128,128,128],[64,0,0],[192,0,0],
            [64,128,0],[192,128,0],[64,0,128],[192,0,128],
            [64,128,128],[192,128,128],[0,64,0],[128,64,0],
            [0,192,0],[128,192,0],[0,64,128]]



# Map each color in a label image to a class index in 0-20
cm2lbl = np.zeros(256**3)  # one entry for every possible RGB value (256^3)
for i, cm in enumerate(colormap):
    cm2lbl[(cm[0]*256 + cm[1])*256 + cm[2]] = i  # build the color -> index lookup

def image2label(im):
    data = np.array(im, dtype='int32')
    idx = (data[:, :, 0] * 256 + data[:, :, 1]) * 256 + data[:, :, 2]
    return np.array(cm2lbl[idx], dtype='int64')  # look up the label matrix
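
For visualizing predictions later, the inverse mapping is handy as well. This small helper is my addition, not part of the original post; it turns a matrix of class indices back into an RGB image using the same colormap:

# Inverse of image2label: map class indices back to their RGB colors
def label2image(pred):
    cmap = np.array(colormap, dtype='uint8')
    return cmap[pred]  # (h, w) index array -> (h, w, 3) uint8 image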

Building the dataset:

def img_transforms(im, label, crop_size):
    im, label = rand_crop(im, label, *crop_size)
    im_tfs = tfs.Compose([
        tfs.ToTensor(),
        tfs.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    im = im_tfs(im)
    label = image2label(label)
    label = torch.from_numpy(label)
    return im, label


class VOCSegDataset(data.Dataset):
    '''
    voc dataset
    '''

    def __init__(self, train, crop_size, transforms):
        self.crop_size = crop_size
        self.transforms = transforms
        data_list, label_list = read_images(train=train)
        self.data_list = self._filter(data_list)
        self.label_list = self._filter(label_list)
        print('Read ' + str(len(self.data_list)) + ' images')

    def _filter(self, images):  # filter out images smaller than the crop size
        return [im for im in images if (Image.open(im).size[1] >= self.crop_size[0] and
                                        Image.open(im).size[0] >= self.crop_size[1])]

    def __getitem__(self, idx):
        img = self.data_list[idx]
        label = self.label_list[idx]
        img = Image.open(img)
        label = Image.open(label).convert('RGB')  # palette PNG -> RGB for the colormap lookup
        img, label = self.transforms(img, label, self.crop_size)
        return img, label

    def __len__(self):
        return len(self.data_list)




# Instantiate the datasets
input_shape = (320, 480)
voc_train = VOCSegDataset(True, input_shape, img_transforms)
voc_test = VOCSegDataset(False, input_shape, img_transforms)


train_data = DataLoader(voc_train, 4, shuffle=True, num_workers=4)
valid_data = DataLoader(voc_test, 4, num_workers=4)
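
A quick check that the pipeline produces correctly shaped batches (a sketch; the shapes follow from the batch size of 4 and the 320x480 crop):

im_batch, label_batch = next(iter(train_data))
print(im_batch.shape)     # torch.Size([4, 3, 320, 480])
print(label_batch.shape)  # torch.Size([4, 320, 480]), values in 0-20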

Model definition (FCN-32s in this post):

# VGG16 pretrained on ImageNet
pretrained_net = models.vgg16(pretrained=True)
num_classes = len(classes)


class fcn(nn.Module):
    def __init__(self, num_classes):
        super(fcn, self).__init__()
        # Reuse VGG16's convolutional layers as the feature extractor
        self.features = pretrained_net.features
        # Replace the fully connected layers with 1x1 convolutions
        self.conv1 = nn.Conv2d(512, 4096, 1)
        self.conv2 = nn.Conv2d(4096, num_classes, 1)

        self.relu = nn.ReLU(inplace=True)
        # Upsampling layers; only the 32x one is used in FCN-32s
        self.upsample2x = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.upsample8x = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=False)
        self.upsample32x = nn.Upsample(scale_factor=32, mode='bilinear', align_corners=False)


    def forward(self, x):
        s = self.features(x)
        s = self.conv1(s)
        s = self.relu(s)
        s = self.conv2(s)
        s = self.relu(s)
        s = self.upsample32x(s)
        return s


# Create the model and move it to the GPU
net = fcn(num_classes)
net.cuda()
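
Note that nn.Upsample is fixed bilinear interpolation, whereas the paper uses learnable deconvolution (transposed convolution) layers initialized to perform bilinear upsampling. Below is a sketch of that alternative; the bilinear_kernel helper is written here for illustration and is not part of the post's code:

# Build a (in, out, k, k) weight tensor that performs bilinear upsampling
def bilinear_kernel(in_channels, out_channels, kernel_size):
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype='float32')
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.from_numpy(weight)

# Learnable 32x upsampling, initialized as bilinear interpolation:
# output size = (in - 1) * 32 - 2 * 16 + 64 = 32 * in
upsample32x = nn.ConvTranspose2d(num_classes, num_classes, 64, stride=32, padding=16, bias=False)
upsample32x.weight.data = bilinear_kernel(num_classes, num_classes, 64)
# To try it, swap it into the model: net.upsample32x = upsample32x.cuda()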

Loss and optimizer setup; there is still plenty of room for tuning here:

# log_softmax is applied manually in the training loop below, so NLLLoss is
# the matching criterion (CrossEntropyLoss would apply log-softmax twice)
criterion = nn.NLLLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=1e-2, weight_decay=1e-4)
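
One easy tuning direction (a hypothetical sketch, not what was actually trained here): use a smaller learning rate for the pretrained VGG features than for the freshly initialized score layers, and add momentum as the paper does:

optimizer = torch.optim.SGD([
    {'params': net.features.parameters(), 'lr': 1e-3},  # pretrained backbone
    {'params': net.conv1.parameters()},                 # new layers use the default lr
    {'params': net.conv2.parameters()},
], lr=1e-2, momentum=0.9, weight_decay=1e-4)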

Accuracy computation; plain per-pixel accuracy is used here:

def acc_simu(label_true, label_pred, num_images):
    # Total pixel count of the whole split; summing the per-batch results
    # over an epoch therefore yields the overall pixel accuracy
    total = num_images * label_true.shape[1] * label_true.shape[2]
    correct = (label_true == label_pred).sum()
    return 100. * correct / total
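
Per-pixel accuracy is dominated by the large background class, so it overstates segmentation quality; the headline numbers in the paper are mean IoU. A minimal sketch of that metric over one batch (my addition, not in the original post):

def mean_iou(label_true, label_pred, n_classes=21):
    # Confusion matrix over flattened (b, h, w) index arrays
    hist = np.bincount(n_classes * label_true.reshape(-1) + label_pred.reshape(-1),
                       minlength=n_classes ** 2).reshape(n_classes, n_classes)
    inter = np.diag(hist)
    union = hist.sum(0) + hist.sum(1) - inter
    with np.errstate(divide='ignore', invalid='ignore'):
        iou = inter / union  # NaN for classes absent from both label and prediction
    return 100. * np.nanmean(iou)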

Model training:

for e in range(80):
 
    train_loss = 0
    train_acc = 0
    # record the elapsed time per epoch
    prev_time = datetime.datetime.now()
    net = net.train()
    for data in train_data:
        im = data[0].cuda()
        label = data[1].cuda()
        # forward
        out = net(im)
        out = F.log_softmax(out, dim=1)  # (b, n, h, w)
        loss = criterion(out, label)
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

        label_pred = out.max(dim=1)[1].data.cpu().numpy()
        label_true = label.data.cpu().numpy()
        acc = acc_simu(label_true, label_pred, len(voc_train))
        train_acc += acc
        print(train_acc, '%')  # running pixel accuracy over the epoch so far


    net = net.eval()
    eval_loss = 0
    eval_acc = 0

    for data in valid_data:
        im = data[0].cuda()
        label = data[1].cuda()
        # forward
        with torch.no_grad():
            out = net(im)
            out = F.log_softmax(out, dim=1)
        loss = criterion(out, label)
        eval_loss += loss.item()

        label_pred = out.max(dim=1)[1].data.cpu().numpy()
        label_true = label.data.cpu().numpy()
        acc = acc_simu(label_true, label_pred, len(voc_test))
        eval_acc += acc


    cur_time = datetime.datetime.now()
    h, remainder = divmod((cur_time - prev_time).seconds, 3600)
    m, s = divmod(remainder, 60)
    epoch_str = ('Epoch: {}, Train Loss: {:.5f}, Train Acc: {:.5f}, \
Valid Loss: {:.5f}, Valid Acc: {:.5f} '.format(
        e, train_loss / len(train_data), train_acc,
           eval_loss / len(valid_data), eval_acc))
    time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
    print(epoch_str+time_str)
    torch.save(net, 'model.pkl')
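
After training, inference on a single image might look like the following sketch. It reuses label2image from earlier; note that torch.save(net, ...) pickles the whole model, which works but is more fragile than saving net.state_dict():

# Hypothetical inference sketch: segment one validation image and colorize it
net = torch.load('model.pkl')
net.eval()
im, _ = voc_test[0]                    # one preprocessed (3, 320, 480) image tensor
with torch.no_grad():
    out = net(im.unsqueeze(0).cuda())  # add a batch dim -> (1, 21, 320, 480)
pred = out.max(dim=1)[1].squeeze(0).cpu().numpy()  # (320, 480) class indices
Image.fromarray(label2image(pred)).save('pred.png')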

Results

Thanks to the pretrained VGG16 backbone, training converges with little effort: after 10 epochs the training pixel accuracy reached 90.38% and the test set reached 81.77%.

I then adjusted the learning rate and trained for another five epochs; training accuracy rose to 93.29%, yet the test set still only reached 81.94%, which leaves a gap to the 89.1% reported in the paper.

Below are a few example segmentation outputs from the model:

[example segmentation images]

As the examples show, the objects are roughly separated, but the error rate along object contours is still high, and on images with more complex content the discrepancies can be considerable.

 

The code in this post can be copied into a Python file in order and run as-is.

 
