Transfer Learning for Convolutional Neural Networks (how to freeze layers and fine-tune)

When doing transfer learning with pre-trained weights, it is usually enough to train only the final fully connected layer; depending on the results, you can also unfreeze a few more layers further back. The code is explained below:

import keras
print("KERAS version:{}".format(keras.__version__))#版本号,如果版本不对造成错误及时调整
import os 
import numpy as np 
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import optimizers, layers
from keras.applications import VGG16
#from keras.layers import Dense, MaxPooling2D
#from DropBlock import DropBlock2D, DropBlock1D
from matplotlib import pyplot as plt 
import h5py
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#from keras.layers.noise import GaussianNoise
#from keras.layers.normalization import BatchNormalization
# early stopping
#from keras.callbacks import EarlyStopping
from keras import regularizers

base_dir = ''        # path to the dataset root
train_dir = ''       # path to the training set

#test_dir = ''       # path to the test set
validation_dir = ''  # path to the validation set

keras.preprocessing.image.ImageDataGenerator is an image generator: it produces batches of data on the fly, which helps prevent overfitting and improves generalization. When there is not enough data, the generator makes up for it. As the Keras documentation puts it: it generates batches of tensor image data with real-time data augmentation, and the data is looped over (in batches) indefinitely.

train_datagen = ImageDataGenerator(
      rescale=1./255,          # scale pixel values to [0, 1]
      rotation_range=40,       # range (in degrees) for random rotations
      width_shift_range=0.2,   # if <1, fraction of total width; if >=1, shift in pixels
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')
#validation dataset preprocessing: only rescaling, no augmentation
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(224, 224),
        batch_size=64,
        class_mode='categorical')

validation_generator = val_datagen.flow_from_directory(
        validation_dir,
        target_size=(224, 224),
        batch_size=16,
        class_mode='categorical')
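
To sanity-check the augmentation described above, here is a minimal sketch that pulls one batch from train_generator and saves a few augmented samples to disk (the output file names are only for illustration):

# pull one augmented batch and inspect it
x_batch, y_batch = next(train_generator)
print('batch shape: {}, labels shape: {}'.format(x_batch.shape, y_batch.shape))
print(train_generator.class_indices)            # mapping from class name to label index
for i in range(min(5, len(x_batch))):
    img = array_to_img(x_batch[i])              # convert the float array back to a PIL image
    img.save('aug_sample_{}.png'.format(i))     # open these files to verify the augmentation looks sensible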

Build the network:

base_conv = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3)) # pre-trained VGG16 convolutional base, without the fully connected top
base_conv.trainable = True


model = models.Sequential()
model.add(base_conv)
'''
for layer in model.layers[:-3]:
        print(layer.trainable)
        layer.trainable=False
for layer in base_conv.layers:
        print('{}: {}'.format(layer.name, layer.trainable)) # check whether the layers are frozen (for some reason this loop did not actually freeze the model for me)
'''
trainable_layers = ['dense3']  # layer names that should stay trainable; every other layer in base_conv gets frozen

#layer.trainable = True if layer.name in trainable_layers else False
for layer in base_conv.layers:
    if layer.name in trainable_layers:
        layer.trainable = True
    else:
        layer.trainable = False

for layer in base_conv.layers:
    print('{}: {}'.format(layer.name, layer.trainable)) # verify which layers are frozen and which are trainable


#model = models.Sequential()
#model.add(base_conv)
'''
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D(pool_size=(2, 2),strides=(2,2)))
#optional: depending on your setup, rebuild only the layers you want to unfreeze
'''

model.add(layers.Flatten())

#model.add(layers.Dense(4096, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.Dense(4096, activation='relu'))
#model.add(layers.Dropout(0.5))
classes = 2 # example value; set this to the number of categories in your dataset
model.add(layers.Dense(classes, activation='softmax', name='prediction')) # for more than 2 classes use softmax (sigmoid for binary); remember to set classes to the number of categories
#kernel_regularizer=regularizers.l2(0.01)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=1e-5),
              metrics=['acc'])
model.summary()
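
As mentioned at the top, if freezing everything except the new head is not enough, you can unfreeze a few more layers further back. A minimal sketch of that second fine-tuning stage, assuming the model above has already been trained with the base frozen (the learning rate and epoch counts are only illustrative):

# stage 2: unfreeze the last convolutional block of VGG16 and keep training with a smaller learning rate
for layer in base_conv.layers:
    layer.trainable = layer.name.startswith('block5')

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=1e-6),   # lower learning rate so the pre-trained weights are not destroyed
              metrics=['acc'])

history_finetune = model.fit_generator(
      train_generator,
      steps_per_epoch=50,
      epochs=50,
      validation_data=validation_generator,
      validation_steps=10,
      verbose=2)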

Train the model, save it, and plot the training curves:

#early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
history = model.fit_generator(
      train_generator,
      steps_per_epoch=50,
      epochs=500,
      validation_data=validation_generator,
      validation_steps=10,
      verbose=2, shuffle=False)
      #callbacks=[early_stopping]
#early_stopping = EarlyStopping(monitor='val_loss', patience=50, verbose=2)
model.save('vgg_cell.h5') # file name for the saved model, in HDF5 format
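
The early-stopping callback that is commented out above can be wired in as below; a ModelCheckpoint is added as well so that the best weights are kept. This is only a sketch: the patience value and the 'vgg_cell_best.h5' file name are illustrative.

from keras.callbacks import EarlyStopping, ModelCheckpoint

early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
checkpoint = ModelCheckpoint('vgg_cell_best.h5', monitor='val_loss',
                             save_best_only=True, verbose=1)   # keep only the weights with the lowest val_loss

history = model.fit_generator(
      train_generator,
      steps_per_epoch=50,
      epochs=500,
      validation_data=validation_generator,
      validation_steps=10,
      verbose=2,
      callbacks=[early_stopping, checkpoint])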
'''
# print the test accuracy (requires a test_generator built the same way as the generators above)
test_loss, test_acc = model.evaluate_generator(test_generator, steps=5)
print('Test Acc is: {}'.format(test_acc))
'''

# collect the training curves
acc = history.history['acc']
tloss = history.history['loss']          # training loss
val_acc = history.history['val_acc']
val_loss = history.history['val_loss']
epochs = range(len(acc))
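
The block above only collects the history values; a minimal sketch of the actual accuracy and loss plots, using the variables defined above:

plt.figure()
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, tloss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()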

 
