PaddlePaddle implementation of GoogLeNet-InceptionV3

GoogLeNet Inception v3: network structure, with PyTorch, TensorFlow, Keras, and PaddlePaddle implementations for ImageNet-style recognition; this post covers the PaddlePaddle version.

 

Environment

python3.6, keras2.2.4, tensorflow-gpu 1.12.0; the script below additionally requires paddlepaddle (the 1.x fluid API)

 

Code

# -*- coding: utf-8 -*-
# @Time : 2020/2/21 13:53
# @Author : Zhao HL
# @File : InceptionV3-paddle.py
import os, sys
from PIL import Image
import numpy as np
import pandas as pd
import paddle
from paddle import fluid
from paddle.fluid.layers import data, conv2d, pool2d, flatten, fc, cross_entropy, accuracy, mean, concat, dropout,batch_norm,softmax
from my_utils import process_show, draw_loss_acc

# region parameters
# region paths
Data_path = "./data/"
Data_csv_path = "./data/split.txt"
Model_path = 'model/'
Model_file_tf = "model/InceptionV3_tf.ckpt"
Model_file_keras = "model/InceptionV3_keras.h5"
Model_file_torch = "model/InceptionV3_torch.pth"
Model_file_paddle = "model/InceptionV3_paddle.model"
# endregion

# region image parameter
Img_size = 299
Img_chs = 3
Label_size = 1
Label_class = ['agricultural',
               'airplane',
               'baseballdiamond',
               'beach',
               'buildings',
               'chaparral',
               'denseresidential',
               'forest',
               'freeway',
               'golfcourse',
               'harbor',
               'intersection',
               'mediumresidential',
               'mobilehomepark',
               'overpass',
               'parkinglot',
               'river',
               'runway',
               'sparseresidential',
               'storagetanks',
               'tenniscourt']
Labels_nums = len(Label_class)
# endregion

# region net parameter
Conv1_chs = 32
Conv2_chs = 32
Conv3_chs = 64
Conv4_chs = 80
Conv5_chs = 192
Conv6_chs = 288
Icp3a_size = (288, 64, 64, 96, 48, 64, 64)
Icp3b_size = (288, 64, 64, 96, 48, 64, 64)
Icp3c_size = (288, 0, 192, 384, 64, 96, 288)
Icp5a_size = (768, 192, 160, 192, 160, 192, 192)
Icp5b_size = (768, 192, 160, 192, 160, 192, 192)
Icp5c_size = (768, 192, 160, 192, 160, 192, 192)
Icp5d_size = (768, 192, 160, 192, 160, 192, 192)
Icp5e_size = (768, 0, 192, 320, 192, 192, 768)
Icp2a_size = (1280, 320, 384, 384, 448, 384, 192)
Icp2b_size = (2048, 320, 384, 384, 448, 384, 192)
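# Each inception size tuple is
# (input_chs, con1_chs, con31_chs, con3_chs, con51_chs, con5_chs, pool1_chs).
# A block's output channels are the concatenated branch widths, e.g. Icp3a:
# 64 (1x1) + 96 (3x3) + 64 (double 3x3) + 64 (pool proj) = 288, which matches
# Icp3b's input. The downsampling blocks (Icp3c, Icp5e) drop the 1x1 branch
# and project the pooled input with a 1x1 conv, e.g. Icp3c: 384 + 96 + 288 = 768.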
# endregion

# region hyperparameter
Learning_rate = 0.045
Batch_size = 1
Buffer_size = 256
Infer_size = 1
Epochs = 20
Train_num = 1470
Train_batch_num = Train_num // Batch_size
Val_num = 210
Val_batch_num = Val_num // Batch_size
Test_num = 420
Test_batch_num = Test_num // Batch_size
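# note: 0.045 is the starting learning rate from the InceptionV3 paper, which
# used RMSProp with decay; the Adam optimizer below applies it without decay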
# endregion
place = fluid.CUDAPlace(0) if fluid.cuda_places() else fluid.CPUPlace()
# endregion


class MyDataset:
    def __init__(self, root_path, batch_size, files_list=None):
        self.root_path = root_path
        self.files_list = files_list if files_list else os.listdir(root_path)
        self.size = len(self.files_list)
        self.batch_size = batch_size

    def __len__(self):
        return self.size

    def dataset_reader(self):
        files_list = self.files_list

        def reader():
            np.random.shuffle(files_list)
            for file_name in files_list:
                label_str = os.path.basename(file_name)[:-6]  # strip index + extension, e.g. 'agricultural00.tif' -> 'agricultural'
                label = Label_class.index(label_str)
                img = Image.open(os.path.join(self.root_path, file_name))
                yield img, label

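        # xmap_readers applies self.transform to the raw samples in 2 worker
        # threads with a queue of Buffer_size; paddle.batch then groups the
        # transformed samples into batches of batch_size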
        return paddle.batch(paddle.reader.xmap_readers(self.transform, reader, 2, Buffer_size),
                            batch_size=self.batch_size)

    def transform(self, sample):
        def Normalize(image, means, stds):
            for band in range(len(means)):
                image[:, :, band] = image[:, :, band] / 255.0
                image[:, :, band] = (image[:, :, band] - means[band]) / stds[band]
            image = np.transpose(image, [2, 0, 1])  # HWC -> CHW
            return image

        image, label = sample
        image = image.resize((Img_size, Img_size), Image.ANTIALIAS)
        image = Normalize(np.array(image).astype(np.float32), [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        return image, label


class InceptionV3:
    def __init__(self, structShow=False):
        self.structShow = structShow
        self.image = data(shape=[Img_chs, Img_size, Img_size], dtype='float32', name='image')
        self.label = data(shape=[Label_size], dtype='int64', name='label')
        self.predict = self.get_Net()

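    # Module A (35x35 grid): four parallel branches -- 1x1, 1x1 -> 3x3,
    # 1x1 -> 3x3 -> 3x3 (a factorized 5x5), and pool -> 1x1 projection.
    # With downsample=True the 1x1 branch is dropped and the final 3x3 convs
    # and the pool use stride 2 / VALID padding to halve the grid (35 -> 17).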
    def InceptionV3_ModelA(self, input, model_size, downsample=False):
        input_chs, con1_chs, con31_chs, con3_chs, con51_chs, con5_chs, pool1_chs = model_size
        stride = 2 if downsample else 1
        padding = 'VALID' if downsample else 'SAME'

        if not downsample:
            conv1 = conv2d(input, con1_chs, filter_size=1, padding='SAME', act='relu')
            conv1 = batch_norm(conv1)

        conv31 = conv2d(input, con31_chs, filter_size=1, padding='SAME', act='relu')
        conv31 = batch_norm(conv31)
        conv3 = conv2d(conv31, con3_chs, filter_size=3, stride=stride, padding=padding, act='relu')
        conv3 = batch_norm(conv3)

        conv51 = conv2d(input, con51_chs, filter_size=1, padding='SAME', act='relu')
        conv51 = batch_norm(conv51)
        conv5 = conv2d(conv51, con5_chs, filter_size=3, padding='SAME', act='relu')
        conv5 = batch_norm(conv5)
        conv5 = conv2d(conv5, con5_chs, filter_size=3, stride=stride, padding=padding, act='relu')
        conv5 = batch_norm(conv5)

        pool1 = pool2d(input, pool_size=3, pool_stride=stride, pool_padding=padding, pool_type='max')
        convp = conv2d(pool1, pool1_chs, filter_size=1, padding='SAME', act='relu')
        convp = batch_norm(convp)

        if downsample:
            return concat([conv3, conv5, convp], axis=1)
        return concat([conv1, conv3, conv5, convp], axis=1)

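    # Module B (17x17 grid): the nxn convolutions are factorized into
    # asymmetric 1x7 and 7x1 pairs; the downsample variant (Icp5e) instead
    # finishes each conv branch with a stride-2 3x3 conv to reduce the grid
    # from 17x17 to 8x8.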
    def InceptionV3_ModelB(self, input, model_size, downsample=False):
        input_chs, con1_chs, con31_chs, con3_chs, con51_chs, con5_chs, pool1_chs = model_size
        stride = 2 if downsample else 1
        padding = 'VALID' if downsample else 'SAME'

        pool1 = pool2d(input, pool_size=3, pool_stride=stride, pool_padding=padding, pool_type='max')
        convp = conv2d(pool1, pool1_chs, filter_size=1, padding='SAME', act='relu')
        convp = batch_norm(convp)

        if downsample:
            conv31 = conv2d(input, con31_chs, filter_size=1, padding='SAME', act='relu')
            conv31 = batch_norm(conv31)
            conv3 = conv2d(conv31, con3_chs, filter_size=3, stride=stride, padding=padding, act='relu')
            conv3 = batch_norm(conv3)

            conv51 = conv2d(input, con51_chs, filter_size=1, padding='SAME', act='relu')
            conv51 = batch_norm(conv51)
            conv5 = conv2d(conv51, con5_chs, filter_size=(1, 7), padding='SAME', act='relu')
            conv5 = batch_norm(conv5)
            conv5 = conv2d(conv5, con5_chs, filter_size=(7, 1), padding='SAME', act='relu')
            conv5 = batch_norm(conv5)
            conv5 = conv2d(conv5, con5_chs, filter_size=3, stride=stride, padding=padding, act='relu')
            conv5 = batch_norm(conv5)
        else:
            conv1 = conv2d(input, con1_chs, filter_size=1, padding='SAME', act='relu')
            conv1 = batch_norm(conv1)

            conv31 = conv2d(input, con31_chs, filter_size=1, padding='SAME', act='relu')
            conv31 = batch_norm(conv31)
            conv3 = conv2d(conv31, con3_chs, filter_size=(1, 7), stride=stride, padding=padding, act='relu')
            conv3 = batch_norm(conv3)
            conv3 = conv2d(conv3, con3_chs, filter_size=(7, 1), stride=stride, padding=padding, act='relu')
            conv3 = batch_norm(conv3)

            conv51 = conv2d(input, con51_chs, filter_size=1, padding='SAME', act='relu')
            conv51 = batch_norm(conv51)
            conv5 = conv2d(conv51, con5_chs, filter_size=(1, 7), padding='SAME', act='relu')
            conv5 = batch_norm(conv5)
            conv5 = conv2d(conv5, con5_chs, filter_size=(7, 1), padding='SAME', act='relu')
            conv5 = batch_norm(conv5)
            conv5 = conv2d(conv5, con5_chs, filter_size=(1, 7), padding='SAME', act='relu')
            conv5 = batch_norm(conv5)
            conv5 = conv2d(conv5, con5_chs, filter_size=(7, 1), padding='SAME', act='relu')
            conv5 = batch_norm(conv5)

        if downsample:
            return concat([conv3, conv5, convp], axis=1)
        return concat([conv1, conv3, conv5, convp], axis=1)

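    # Module C (8x8 grid): expanded filter bank -- the last convolution of
    # each conv branch is split into parallel 1x3 and 3x1 convs whose outputs
    # are concatenated, widening the block rather than deepening it.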
    def InceptionV3_ModelC(self, input, model_size):
        input_chs, con1_chs, con31_chs, con3_chs, con51_chs, con5_chs, pool1_chs = model_size


        pool1 = pool2d(input, pool_size=3, pool_stride=1, pool_padding='SAME', pool_type='max')
        convp = conv2d(pool1, pool1_chs, filter_size=1, padding='SAME', act='relu')
        convp = batch_norm(convp)

        conv1 = conv2d(input, con1_chs, filter_size=1, padding='SAME', act='relu')
        conv1 = batch_norm(conv1)

        conv30 = conv2d(input, con31_chs, filter_size=1, padding='SAME', act='relu')
        conv30 = batch_norm(conv30)
        conv31 = conv2d(conv30, con3_chs, filter_size=(1, 3), stride=1, padding='SAME', act='relu')
        conv31 = batch_norm(conv31)
        conv32 = conv2d(conv30, con3_chs, filter_size=(3, 1), stride=1, padding='SAME', act='relu')
        conv32 = batch_norm(conv32)
        conv3 = concat([conv31,conv32],axis=1)

        conv50 = conv2d(input, con51_chs, filter_size=1, padding='SAME', act='relu')
        conv50 = batch_norm(conv50)
        conv50 = conv2d(conv50, con51_chs, filter_size=3, padding='SAME', act='relu')
        conv50 = batch_norm(conv50)
        conv51 = conv2d(conv50, con5_chs, filter_size=(1, 3), padding='SAME', act='relu')
        conv51 = batch_norm(conv51)
        conv52 = conv2d(conv50, con5_chs, filter_size=(3, 1), padding='SAME', act='relu')
        conv52 = batch_norm(conv52)
        conv5 = concat([conv51, conv52], axis=1)

        return concat([conv1, conv3, conv5, convp], axis=1)

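    # auxiliary classifier on the 17x17 feature map (after inception5d);
    # note: the paper's aux head uses 5x5 average pooling, while this version
    # uses max pooling with the same size/stride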
    def InceptionV3_AuxOut(self, input, name=None):
        pool = pool2d(input, pool_size=5, pool_stride=3, pool_type='max', pool_padding='VALID')
        conv1 = conv2d(pool, 128, filter_size=1, padding='SAME', act='relu')
        conv1 = batch_norm(conv1)
        conv2 = conv2d(conv1, 128, filter_size=1, padding='SAME', act='relu')
        conv2 = batch_norm(conv2)

        flat = flatten(conv2, axis=1)
        dp = dropout(flat, 0.3)
        output = fc(dp, Labels_nums,  act='softmax',name=name)
        return output

    def get_Net(self):
        # region conv pool
        conv1 = conv2d(self.image, Conv1_chs, filter_size=3, stride=2, padding='VALID', act='relu')
        conv1 = batch_norm(conv1)
        conv2 = conv2d(conv1, Conv2_chs, filter_size=3, padding='VALID', act='relu')
        conv2 = batch_norm(conv2)
        conv3 = conv2d(conv2, Conv3_chs, filter_size=3, padding='SAME', act='relu')
        conv3 = batch_norm(conv3)
        pool1 = pool2d(conv3, pool_size=3, pool_stride=2, pool_type='max', pool_padding='SAME')

        conv4 = conv2d(pool1, Conv4_chs, filter_size=3, padding='VALID', act='relu')
        conv4 = batch_norm(conv4)
        conv5 = conv2d(conv4, Conv5_chs, filter_size=3, stride=2, padding='VALID', act='relu')
        conv5 = batch_norm(conv5)
        conv6 = conv2d(conv5, Conv6_chs, filter_size=3, stride=1, padding='SAME', act='relu')
        conv6 = batch_norm(conv6)
        # endregion

        # region inception3
        inception3a = self.InceptionV3_ModelA(conv6, Icp3a_size)

        inception3b = self.InceptionV3_ModelA(inception3a, Icp3b_size)

        inception3c = self.InceptionV3_ModelA(inception3b, Icp3c_size, downsample=True)
        # endregion

        # region inception5
        inception5a = self.InceptionV3_ModelB(inception3c, Icp5a_size)

        inception5b = self.InceptionV3_ModelB(inception5a, Icp5b_size)

        inception5c = self.InceptionV3_ModelB(inception5b, Icp5c_size)

        inception5d = self.InceptionV3_ModelB(inception5c, Icp5d_size)
        auxout = self.InceptionV3_AuxOut(inception5d, 'auxout')

        inception5e = self.InceptionV3_ModelB(inception5d, Icp5e_size,downsample=True)
        # endregion

        # region inception2
        inception2a = self.InceptionV3_ModelC(inception5e, Icp2a_size)

        inception2b = self.InceptionV3_ModelC(inception2a, Icp2b_size)
        # endregion

        # region output
        pool = pool2d(inception2b, pool_size=8, pool_stride=1, pool_type='max', pool_padding='VALID')
        flat = flatten(pool, axis=1)
        dp = dropout(flat, 0.4)
        output = fc(dp, Labels_nums, act='softmax',name='output')
        # endregion

        if self.structShow:
            print(conv1.name, conv1.shape)
            print(conv2.name, conv2.shape)
            print(conv3.name, conv3.shape)
            print(pool1.name, pool1.shape)
            print(conv4.name, conv4.shape)
            print(conv5.name, conv5.shape)
            print(conv6.name, conv6.shape)

            print(inception3a.name, inception3a.shape)
            print(inception3b.name, inception3b.shape)
            print(inception3c.name, inception3c.shape)

            print(inception5a.name, inception5a.shape)
            print(inception5b.name, inception5b.shape)
            print(inception5c.name, inception5c.shape)
            print(inception5d.name, inception5d.shape)
            print(inception5e.name, inception5e.shape)

            print(inception2a.name, inception2a.shape)
            print(inception2b.name, inception2b.shape)
            print(pool.name, pool.shape)

            print(flat.name, flat.shape)
            print(output.name, output.shape)

        # return both the main and the auxiliary classifier output; at
        # inference time only the main output is saved (see train() below)
        return [output, auxout]


def train():
    net = InceptionV3(structShow=True)
    image, label, [predict, predict1] = net.image, net.label, net.predict
    feeder = fluid.DataFeeder(place=place, feed_list=[image, label])

    df = pd.read_csv(Data_csv_path, header=0, index_col=0)
    train_list = df[df['split'] == 'train']['filename'].tolist()
    val_list = df[df['split'] == 'val']['filename'].tolist()

    train_reader = MyDataset(Data_path, batch_size=Batch_size, files_list=train_list).dataset_reader()
    val_reader = MyDataset(Data_path, batch_size=Batch_size, files_list=val_list).dataset_reader()

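    # total loss: weighted sum of the main classifier loss and the
    # auxiliary classifier loss (0.7 / 0.3)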
    loss = cross_entropy(input=predict, label=label)
    loss1 = cross_entropy(input=predict1, label=label)
    loss_mean = mean(loss)
    loss1_mean = mean(loss1)
    loss_total = loss_mean * 0.7 + loss1_mean * 0.3

    acc = accuracy(input=predict, label=label,k=1)
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=Learning_rate)
    optimizer.minimize(loss_total)

    val_program = fluid.default_main_program().clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    train_losses = np.ones(Epochs)
    train_accs = np.ones(Epochs)
    val_losses = np.ones(Epochs)
    val_accs = np.ones(Epochs)
    best_loss = float("inf")
    best_loss_epoch = 0
    for epoch in range(Epochs):
        print('Epoch %d/%d:' % (epoch + 1, Epochs))
        train_sum_loss = 0
        train_sum_acc = 0
        val_sum_loss = 0
        val_sum_acc = 0
        for batch_num, data in enumerate(train_reader()):
            train_loss, train_acc = exe.run(program=fluid.default_main_program(),  # run the main training program
                                            feed=feeder.feed(data),  # feed a training batch into the model
                                            fetch_list=[loss_total, acc])  # fetch loss and accuracy
            train_sum_loss += train_loss[0]
            train_sum_acc += train_acc[0]
            process_show(batch_num + 1, Train_num / Batch_size, train_acc, train_loss, prefix='train:')

        for batch_num, data in enumerate(val_reader()):
            val_loss, val_acc = exe.run(program=val_program,  # run the test-mode clone of the program
                                        feed=feeder.feed(data),  # feed a validation batch
                                        fetch_list=[loss_total, acc])  # fetch loss and accuracy
            val_sum_loss += val_loss[0]
            val_sum_acc += val_acc[0]
            process_show(batch_num + 1, Val_num / Batch_size, val_acc, val_loss, prefix='val:')

        train_sum_loss /= (Train_num // Batch_size)
        train_sum_acc /= (Train_num // Batch_size)
        val_sum_loss /= (Val_num // Batch_size)
        val_sum_acc /= (Val_num // Batch_size)

        train_losses[epoch] = train_sum_loss
        train_accs[epoch] = train_sum_acc
        val_losses[epoch] = val_sum_loss
        val_accs[epoch] = val_sum_acc
        print('average summary:\ntrain acc %.4f, loss %.4f ; val acc %.4f, loss %.4f'
              % (train_sum_acc, train_sum_loss, val_sum_acc, val_sum_loss))

        if val_sum_loss < best_loss:
            print('val_loss improve from %.4f to %.4f, model save to %s ! \n' % (
                best_loss, val_sum_loss, Model_file_paddle))
            best_loss = val_sum_loss
            best_loss_epoch = epoch + 1
            fluid.io.save_inference_model(Model_file_paddle,  # directory to save the inference model to
                                          ['image'],  # variable names fed at inference time
                                          [predict],  # variables returned by the inference model
                                          exe)  # executor that saves the inference model
        else:
            print('val_loss do not improve from %.4f \n' % (best_loss))
    print('best loss %.4f at epoch %d \n' % (best_loss, best_loss_epoch))
    draw_loss_acc(train_losses, train_accs, 'train')
    draw_loss_acc(val_losses, val_accs, 'val')


if __name__ == '__main__':
    train()
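For completeness, a minimal inference sketch (not part of the original script): `infer` is a hypothetical helper that assumes the model directory saved by train() above. fluid.io.load_inference_model returns the inference program, the feed variable names, and the fetch targets; the preprocessing must match the training transform.

def infer(img_path):
    exe = fluid.Executor(place)
    # load the inference model saved by train()
    program, feed_names, fetch_targets = fluid.io.load_inference_model(Model_file_paddle, exe)
    img = Image.open(img_path).resize((Img_size, Img_size), Image.ANTIALIAS)
    img = np.array(img).astype(np.float32) / 255.0
    img = (img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])  # same normalization as training
    img = np.transpose(img, [2, 0, 1])[np.newaxis, :].astype(np.float32)  # HWC -> NCHW
    result = exe.run(program, feed={feed_names[0]: img}, fetch_list=fetch_targets)
    return Label_class[int(np.argmax(result[0]))]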

my_utils.py
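Helper module used above: console progress bar (process_show), dataset statistics (dataInfo_show), class-dictionary parsing (get_cls_dic), train/val/test splitting (dataset_divide), and loss/accuracy curve plotting (draw_loss_acc).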

# -*- coding: utf-8 -*- 
# @Time : 2020/1/21 11:39 
# @Author : Zhao HL
# @File : my_utils.py
import sys,os,random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
def process_show(num, nums, train_acc, train_loss, prefix='', suffix=''):
    rate = num / nums
    ratenum = int(round(rate, 2) * 100)
    bar = '\r%s batch %3d/%d: train accuracy %.4f, train loss %.4f [%s%s]%.1f%% %s; ' % (
        prefix, num, nums, train_acc, train_loss, '#' * (ratenum//2), '_' * (50 - ratenum//2), ratenum, suffix)
    sys.stdout.write(bar)
    sys.stdout.flush()
    if num >= nums:
        print()

def dataInfo_show(data_path,csv_pth,cls_dic_path,shapesShow=True,classesShow=True):
    cls_dict = get_cls_dic(cls_dic_path)
    if classesShow:
        print('\n'+'*'*50)
        df = pd.read_csv(csv_pth)
        labels = df['label'].unique()
        label_cls = {label:cls_dict[label] for label in labels}
        print(label_cls)
        cls_count = df['label'].value_counts()
        cls_count = {cls_dict[k]:v for k,v in cls_count.items()}
        for k,v in cls_count.items():
            print(k,v)

    if shapesShow:
        print('\n'+'*'*50)
        shapes = []
        for filename in os.listdir(data_path):
            img = Image.open(os.path.join(data_path, filename))
            img = np.array(img)
            shapes.append(img.shape)
        shapes = pd.Series(shapes)
        print(shapes.value_counts())

def get_cls_dic(cls_dic_path):
    # read the class-label dictionary, taking only the information before the first comma
    cls_df = pd.read_csv(cls_dic_path)
    cls_df['cls'] = cls_df['info'].apply(lambda x:x[:9]).tolist()
    cls_df['label'] = cls_df['info'].apply(lambda x: x[10:]).tolist()
    cls_df = cls_df.drop(columns=['info','other'])

    cls_dict = cls_df.set_index('cls').T.to_dict('list')
    cls_dict = {k:v[0] for k,v in cls_dict.items()}
    return cls_dict

def dataset_divide(csv_pth):
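    # randomly assign each filename to train/val/test with a 70/10/20 split,
    # matching Train_num/Val_num/Test_num = 1470/210/420 above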
    cls_df = pd.read_csv(csv_pth, header=0,index_col=0)
    cls_df.insert(1,'split',None)
    filenames = list(cls_df['filename'])
    random.shuffle(filenames)
    train_num,train_val_num = int(len(filenames)*0.7),int(len(filenames)*0.8)
    train_names = filenames[:train_num]
    val_names = filenames[train_num:train_val_num]
    test_names = filenames[train_val_num:]
    cls_df.loc[cls_df['filename'].isin(train_names),'split'] = 'train'
    cls_df.loc[cls_df['filename'].isin(val_names), 'split'] = 'val'
    cls_df.loc[cls_df['filename'].isin(test_names), 'split'] = 'test'
    cls_df.to_csv(csv_pth)

def draw_loss_acc(loss, acc, type='', save_path=None):
    assert len(acc) == len(loss)
    x = [epoch for epoch in range(len(acc))]
    plt.subplot(2, 1, 1)
    plt.plot(x, acc, 'o-')
    plt.title(type + '  accuracy vs. epochs')
    plt.ylabel('accuracy')
    plt.subplot(2, 1, 2)
    plt.plot(x, loss, '.-')
    plt.xlabel(type + '  loss vs. epochs')
    plt.ylabel('loss')
    if save_path:  # save before show(); show() clears the current figure
        plt.savefig(os.path.join(save_path, type + "_acc_loss.png"))
    plt.show()


if __name__ == '__main__':
    pass

 
