Keras implementation of GoogLeNet-InceptionV1

The GoogLeNet Inception v1 architecture, with pytorch, tensorflow, keras, and paddle implementations for ImageNet-style recognition

 

Environment

python3.6, keras2.2.4, tensorflow-gpu 1.12.0

 

Code

# -*- coding: utf-8 -*- 
# @Time : 2020/2/3 9:56 
# @Author : Zhao HL
# @File : InceptionV1-keras.py
import os
import keras
from keras.utils import Sequence
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from keras.callbacks import *
import numpy as np
import pandas as pd
from PIL import Image
from my_utils import draw_loss_acc, dataInfo_show, dataset_divide

# region parameters
# region paths
Data_path = "./data/"
Data_csv_path = "./data/split.txt"
Model_path = 'model/'
Model_file_tf = "model/InceptionV1_tf.ckpt"
Model_file_keras = "model/InceptionV1_keras.h5"
Model_file_torch = "model/InceptionV1_torch.pth"
Model_file_paddle = "model/InceptionV1_paddle.model"
# endregion

# region image parameter
Img_size = 224
Img_chs = 3
Label_size = 1
Label_class = ['agricultural',
               'airplane',
               'baseballdiamond',
               'beach',
               'buildings',
               'chaparral',
               'denseresidential',
               'forest',
               'freeway',
               'golfcourse',
               'harbor',
               'intersection',
               'mediumresidential',
               'mobilehomepark',
               'overpass',
               'parkinglot',
               'river',
               'runway',
               'sparseresidential',
               'storagetanks',
               'tenniscourt']
Labels_nums = len(Label_class)
# endregion

# region net parameter
Conv1_kernel_size = 7
Conv1_chs = 64
Conv21_kernel_size = 1
Conv21_chs = 64
Conv2_kernel_size = 3
Conv2_chs = 192
Icp3a_size = (64, 96, 128, 16, 32, 32)
Icp3b_size = (128, 128, 192, 32, 96, 64)
Icp4a_size = (192, 96, 208, 16, 48, 64)
Icp4b_size = (160, 112, 224, 24, 64, 64)
Icp4c_size = (128, 128, 256, 24, 64, 64)
Icp4d_size = (112, 144, 288, 32, 64, 64)
Icp4e_size = (256, 160, 320, 32, 128, 128)
Icp5a_size = (256, 160, 320, 32, 128, 128)
Icp5b_size = (384, 192, 384, 48, 128, 128)
Out_chs1 = 128
Out_chs2 = 1024
# endregion

# region hyperparameter
Learning_rate = 1e-3
Batch_size = 2
Buffer_size = 256
Infer_size = 1
Epochs = 5
Train_num = 1470
Train_batch_num = Train_num // Batch_size
Val_num = 210
Val_batch_num = Val_num // Batch_size
Test_num = 420
Test_batch_num = Test_num // Batch_size


# endregion

# endregion
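# The data pipeline: MyDataset subclasses keras.utils.Sequence, so each
# __getitem__ returns one whole batch, and the target dict keys ('input',
# 'output', 'output1', 'output2') must match the layer names in the Model,
# because the network has one input and three heads (main + two auxiliary).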
class MyDataset(Sequence):
    def __init__(self, root_path, batch_size, files_list=None,shuffle=True):
        self.shuffle = shuffle
        self.root_path = root_path
        self.batch_size = batch_size
        self.files_list = files_list if files_list else os.listdir(root_path)
        self.size = len(self.files_list)
        self.list_shuffle()

    def __len__(self):
        # a keras.utils.Sequence reports the number of batches per epoch
        return self.size // self.batch_size

    def __getitem__(self, batch_index):
        images, labels = [], []
        if batch_index >= self.size // self.batch_size:
            batch_index = batch_index%(self.size // self.batch_size)
        start_index = batch_index * self.batch_size
        end_index = (batch_index + 1) * self.batch_size
        for index in range(start_index, end_index):
            # filenames look like 'agricultural00.tif'; dropping the last 6
            # characters ('00.tif') leaves the class name
            label_str = os.path.basename(self.files_list[index])[:-6]
            label = Label_class.index(label_str)
            img = Image.open(os.path.join(self.root_path, self.files_list[index]))
            img, label = self.transform(img, label)
            images.append(img)
            labels.append(label)
        images = np.array(images)
        labels = np.array(labels)

        return ({'input': images}, {'output': labels,'output1':labels,'output2':labels})

    def transform(self, image, label):
        def Normalize(image, means, stds):
            for band in range(len(means)):
                image[:, :, band] = image[:, :, band] / 255.0
                image[:, :, band] = (image[:, :, band] - means[band]) / stds[band]
            return image

        def ToOnehot(labels):
            labels = np.eye(Labels_nums)[labels].reshape(Labels_nums)
            return labels

        image = image.resize((Img_size, Img_size), Image.ANTIALIAS)
        image = Normalize(np.array(image).astype(np.float32), [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        label = ToOnehot(label)
        return (image, label)

    def list_shuffle(self):
        if self.shuffle:
            np.random.shuffle(self.files_list)

class InceptionV1:
    def __init__(self, structShow=False):
        self.structShow = structShow

    def InceptionV1_Model(self, input, model_size):
        con11_chs, con31_chs, con3_chs, con51_chs, con5_chs, pool1_chs = model_size

        conv11 = Conv2D(con11_chs, 1, padding='SAME', activation='relu', kernel_initializer='he_normal')(input)

        conv31 = Conv2D(con31_chs, 1, padding='SAME', activation='relu', kernel_initializer='he_normal')(input)
        conv3 = Conv2D(con3_chs, 3, padding='SAME', activation='relu', kernel_initializer='he_normal')(conv31)

        conv51 = Conv2D(con51_chs, 1, padding='SAME', activation='relu', kernel_initializer='he_normal')(input)
        conv5 = Conv2D(con5_chs, 5, padding='SAME', activation='relu', kernel_initializer='he_normal')(conv51)

        pool1 = MaxPooling2D(pool_size=3, strides=1, padding='SAME')(input)
        conv1 = Conv2D(pool1_chs, 1, padding='SAME', activation='relu', kernel_initializer='he_normal')(pool1)

        output = concatenate([conv11, conv3, conv5, conv1], axis=3)
        return output
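    # Channel bookkeeping, e.g. inception 3a with Icp3a_size = (64, 96, 128, 16, 32, 32):
    # the concatenated output depth is 64 + 128 + 32 + 32 = 256; the 96- and
    # 16-channel 1x1 convolutions only reduce depth ahead of the costly 3x3 and
    # 5x5 convolutions and do not appear in the output depth.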

    def InceptionV1_Out(self, input, name=None):
        pool = AvgPool2D(pool_size=5, strides=3, padding='VALID')(input)
        conv = Conv2D(Out_chs1, 1, padding='SAME', activation='relu', kernel_initializer='he_normal')(pool)

        flat = Flatten()(conv)
        dropout = Dropout(0.3)(flat)
        # softmax is needed here because the compiled loss is categorical_crossentropy
        output = Dense(Labels_nums, activation='softmax', name=name)(dropout)

        return output
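    # Auxiliary heads as in the paper: small classifiers attached to intermediate
    # activations help gradients reach the early layers during training; at
    # inference time only the main 'output' head is normally used.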

    def getNet(self):
        input = Input(shape=(Img_size, Img_size, Img_chs),name='input')

        # region conv pool
        conv1 = Conv2D(Conv1_chs, kernel_size=Conv1_kernel_size, padding='SAME', activation='relu', strides=2,
                       kernel_initializer='he_normal')(input)
        pool1 = MaxPooling2D(pool_size=3, strides=2, padding='SAME')(conv1)

        conv21 = Conv2D(Conv21_chs, kernel_size=Conv21_kernel_size, padding='SAME', activation='relu',
                        kernel_initializer='he_normal')(pool1)
        conv2 = Conv2D(Conv2_chs, kernel_size=Conv2_kernel_size, padding='SAME', activation='relu',
                       kernel_initializer='he_normal')(conv21)
        pool2 = MaxPooling2D(pool_size=3, strides=2, padding='SAME')(conv2)
        # endregion

        # region inception3
        inception3a = self.InceptionV1_Model(pool2,  Icp3a_size)

        inception3b = self.InceptionV1_Model(inception3a,  Icp3b_size)
        pool3 = MaxPooling2D(pool_size=3, strides=2, padding='SAME')(inception3b)
        # endregion

        # region inception4
        inception4a = self.InceptionV1_Model(pool3,  Icp4a_size)
        output1 = self.InceptionV1_Out(inception4a, 'output1')

        inception4b = self.InceptionV1_Model(inception4a,  Icp4b_size)

        inception4c = self.InceptionV1_Model(inception4b,  Icp4c_size)

        inception4d = self.InceptionV1_Model(inception4c,  Icp4d_size)
        output2 = self.InceptionV1_Out(inception4d, 'output2')

        inception4e = self.InceptionV1_Model(inception4d,  Icp4e_size)
        pool4 = MaxPooling2D(pool_size=3, strides=2, padding='SAME')(inception4e)
        # endregion

        # region inception5
        inception5a = self.InceptionV1_Model(pool4,  Icp5a_size)

        inception5b = self.InceptionV1_Model(inception5a,  Icp5b_size)
        # the paper ends with 7x7 average pooling (VALID), giving 1x1x1024 (= Out_chs2)
        pool5 = AvgPool2D(pool_size=7, strides=1, padding='VALID')(inception5b)
        # endregion

        # region output
        flat = Flatten()(pool5)
        dropout = Dropout(0.4)(flat)
        output = Dense(Labels_nums, activation='softmax', name='output')(dropout)
        # endregion

        model = Model(inputs=input, outputs=[output,output1,output2])
        model.compile(Adam(lr=Learning_rate), loss='categorical_crossentropy', metrics=['accuracy'],
                      loss_weights=[0.6, 0.2, 0.2]
                      )
        if self.structShow:
            model.summary()
        return model
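# Note on the compile() call above: the total loss is the weighted sum
# 0.6*L_output + 0.2*L_output1 + 0.2*L_output2, following the order of the
# outputs list; for reference, the GoogLeNet paper weights each auxiliary
# classifier's loss by 0.3.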


def train():
    df = pd.read_csv(Data_csv_path, header=0, index_col=0)
    train_list = df[df['split'] == 'train']['filename'].tolist()
    val_list = df[df['split'] == 'val']['filename'].tolist()
    train_dataset = MyDataset(Data_path, batch_size=Batch_size, files_list=train_list)
    val_dataset = MyDataset(Data_path, batch_size=Batch_size, files_list=val_list)
    net = InceptionV1(structShow=True)
    model = net.getNet()
    # if os.path.exists(Model_file_keras):
    #     model = load_model(Model_file_keras)
    # else:
    #     model = net.getNet()

    model_checkpoint = ModelCheckpoint(Model_file_keras, monitor='val_loss', save_best_only=True)
    history = model.fit_generator(train_dataset,
                                  steps_per_epoch=train_dataset.size//train_dataset.batch_size,
                                  epochs=Epochs,
                                  use_multiprocessing=True,
                                  validation_data=val_dataset,
                                  validation_steps=val_dataset.size//val_dataset.batch_size,
                                  shuffle=True,
                                  callbacks=[model_checkpoint]
                                  )

    print(history.history.keys())
    train_losses = history.history['loss']
    train_accs = history.history['output_acc']
    train_accs1 = history.history['output1_acc']
    train_accs2 = history.history['output2_acc']
    val_losses = history.history['val_loss']
    val_accs = history.history['val_output_acc']
    val_accs1 = history.history['val_output1_acc']
    val_accs2 = history.history['val_output2_acc']

    draw_loss_acc(train_losses, train_accs, 'train')
    draw_loss_acc(train_accs1, train_accs2, 'train')
    draw_loss_acc(val_losses, val_accs, 'val')
    draw_loss_acc(val_accs1, val_accs2, 'val')

    print('best loss %.4f at epoch %d \n' % (min(val_losses), int(np.argmin(np.array(val_losses)))))


if __name__ == '__main__':
    pass
    # dataset_divide(r'E:\_Python\01_deeplearning\04_GoogLeNet\Inception1\data\split.txt')
    train()
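The script above only trains. Below is a minimal inference sketch (an addition, not from the original post): it assumes the checkpoint written by ModelCheckpoint and a placeholder image path, mirrors the preprocessing in MyDataset.transform, and reads the main 'output' head from the three predictions.

def infer(img_path):
    # the full model was saved (save_weights_only defaults to False), so load_model works
    model = load_model(Model_file_keras)
    img = Image.open(img_path).resize((Img_size, Img_size), Image.ANTIALIAS)
    img = np.array(img).astype(np.float32) / 255.0
    img = (img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    img = np.expand_dims(img, axis=0)  # (1, Img_size, Img_size, Img_chs)
    preds = model.predict(img)[0]      # predict() returns [output, output1, output2]
    cls = int(np.argmax(preds))
    print('%s -> %s' % (img_path, Label_class[cls]))
    return cls

# usage, with a placeholder file name:
# infer('./data/agricultural00.tif')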

my_utils.py

# -*- coding: utf-8 -*- 
# @Time : 2020/1/21 11:39 
# @Author : Zhao HL
# @File : my_utils.py
import sys,os,random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
def process_show(num, nums, train_acc, train_loss, prefix='', suffix=''):
    rate = num / nums
    ratenum = int(round(rate, 2) * 100)
    bar = '\r%s batch %3d/%d:train accuracy %.4f, train loss %00.4f [%s%s]%.1f%% %s; ' % (
        prefix, num, nums, train_acc, train_loss, '#' * (ratenum//2), '_' * (50 - ratenum//2), ratenum, suffix)
    sys.stdout.write(bar)
    sys.stdout.flush()
    if num >= nums:
        print()

def dataInfo_show(data_path,csv_pth,cls_dic_path,shapesShow=True,classesShow=True):
    cls_dict = get_cls_dic(cls_dic_path)
    if classesShow:
        print('\n'+'*'*50)
        df = pd.read_csv(csv_pth)
        labels = df['label'].unique()
        label_cls = {label:cls_dict[label] for label in labels}
        print(label_cls)
        cls_count = df['label'].value_counts()
        cls_count = {cls_dict[k]:v for k,v in cls_count.items()}
        for k,v in cls_count.items():
            print(k,v)

    if shapesShow:
        print('\n'+'*'*50)
        shapes = []
        for filename in os.listdir(data_path):
            img = Image.open(os.path.join(data_path, filename))
            img = np.array(img)
            shapes.append(img.shape)
        shapes = pd.Series(shapes)
        print(shapes.value_counts())

def get_cls_dic(cls_dic_path):
    # read the class-label dictionary; keep only the text before the first comma
    cls_df = pd.read_csv(cls_dic_path)
    cls_df['cls'] = cls_df['info'].apply(lambda x:x[:9]).tolist()
    cls_df['label'] = cls_df['info'].apply(lambda x: x[10:]).tolist()
    cls_df = cls_df.drop(columns=['info','other'])

    cls_dict = cls_df.set_index('cls').T.to_dict('list')
    cls_dict = {k:v[0] for k,v in cls_dict.items()}
    return cls_dict

def dataset_divide(csv_pth):
    cls_df = pd.read_csv(csv_pth, header=0,index_col=0)
    cls_df.insert(1,'split',None)
    filenames = list(cls_df['filename'])
    random.shuffle(filenames)
    train_num,train_val_num = int(len(filenames)*0.7),int(len(filenames)*0.8)
    train_names = filenames[:train_num]
    val_names = filenames[train_num:train_val_num]
    test_names = filenames[train_val_num:]
    cls_df.loc[cls_df['filename'].isin(train_names),'split'] = 'train'
    cls_df.loc[cls_df['filename'].isin(val_names), 'split'] = 'val'
    cls_df.loc[cls_df['filename'].isin(test_names), 'split'] = 'test'
    cls_df.to_csv(csv_pth)
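# Example: the 21 class names in the training script match the UC Merced land-use
# dataset (21 classes x 100 images = 2100 files); the 0.7/0.8 cut points then give
# 1470 train / 210 val / 420 test, i.e. Train_num, Val_num and Test_num.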

def draw_loss_acc(loss,acc,type='',save_path=None):
    assert len(acc) == len(loss)
    x = [epoch for epoch in range(len(acc))]
    plt.subplot(2, 1, 1)
    plt.plot(x, acc, 'o-')
    plt.title(type + '  accuracy vs. epochs')
    plt.ylabel('accuracy')
    plt.subplot(2, 1, 2)
    plt.plot(x, loss, '.-')
    plt.xlabel(type + '  loss vs. epochs')
    plt.ylabel('loss')
    # save before show(): show() clears the figure, so saving afterwards writes a blank image
    if save_path:
        plt.savefig(os.path.join(save_path, type + "_acc_loss.png"))
    plt.show()


if __name__ == '__main__':
    pass

 
