Transfer Learning with Convolutional Neural Networks (how to freeze layers and fine-tune)

When doing transfer learning on top of already trained weights, it is usually enough to train only the final fully connected layer; depending on the results, you can also unfreeze a few more layers toward the front. The code is explained step by step below.
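Before going through the full script, here is a minimal sketch of the pattern, assuming a VGG16 base with ImageNet weights; the input shape and the two-class head are placeholders, not values from the post:

from keras.applications import VGG16
from keras import models, layers

base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base.trainable = False  # freeze the entire convolutional base

model = models.Sequential()
model.add(base)
model.add(layers.Flatten())
model.add(layers.Dense(2, activation='softmax'))  # only this new head gets trained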

import keras
print("KERAS version:{}".format(keras.__version__))#版本號,如果版本不對造成錯誤及時調整
import os 
import numpy as np 
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import optimizers, layers
from keras.applications import VGG16
#from keras.layers import Dense, MaxPooling2D
#from DropBlock import DropBlock2D, DropBlock1D
from matplotlib import pyplot as plt 
import h5py
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#from keras.layers.noise import GaussianNoise
#from keras.layers.normalization import BatchNormalization
# early stoppping
#from keras.callbacks import EarlyStopping
from keras import regularizers

base_dir = '' #path to datasets
train_dir ='' #path to train datasets

#test_dir ='' # path to test datasets
validation_dir = ''  # path to validation datasets

keras.preprocessing.image.ImageDataGenerator is an image generator that produces batches of augmented data on the fly, which helps prevent overfitting and improves generalization; when there is not enough data, the generator makes up for it. As the Keras documentation puts it: it generates batches of tensor image data with real-time data augmentation, and the data is looped over (in batches) indefinitely.

train_datagen = ImageDataGenerator(
      rescale=1./255,  # scale pixel values to [0, 1]
      rotation_range=40,  # range in degrees for random rotations
      width_shift_range=0.2,  # a float < 1 is a fraction of total width; values > 1 are interpreted as pixels
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')
# validation data: only rescale, no augmentation
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(224, 224),
        batch_size=64,
        class_mode='categorical')

validation_generator = val_datagen.flow_from_directory(
        validation_dir,
        target_size=(224, 224),
        batch_size=16,
        class_mode='categorical')
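As a quick sanity check (an addition, not part of the original script), you can pull one batch from the generator and inspect the shapes and the folder-to-class mapping:

x_batch, y_batch = next(train_generator)
print(x_batch.shape)                   # e.g. (64, 224, 224, 3)
print(y_batch.shape)                   # e.g. (64, number_of_classes), one-hot labels
print(train_generator.class_indices)   # mapping from subfolder name to class index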

Build the network:

# base_conv is assumed here to be a pre-trained VGG16 convolutional base;
# swap in whatever base model you actually use.
base_conv = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base_conv.trainable = True


model = models.Sequential()
model.add(base_conv)
'''
for layer in model.layers[:-3]:
        print(layer.trainable)
        layer.trainable=False
for layer in base_conv.layers:
        print('{}: {}'.format(layer.name, layer.trainable)) # check whether the layers are unfrozen (for some reason this did not actually freeze the model for me)
'''
trainable_layers = ['dense3']  # layers named here stay trainable; every other layer is frozen

# equivalent one-liner: layer.trainable = True if layer.name in trainable_layers else False
for layer in base_conv.layers:
    if layer.name in trainable_layers:
        layer.trainable = True
    else:
        layer.trainable = False

for layer in base_conv.layers:
    print('{}: {}'.format(layer.name, layer.trainable)) # check which layers are trainable
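Another optional check (an addition to the original script): count the trainable weight tensors in the base; the number should drop after freezing.

print('trainable weight tensors in base_conv:', len(base_conv.trainable_weights))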


#model = models.Sequential()
#model.add(base_conv)
'''
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D(pool_size=(2, 2),strides=(2,2)))
# optional: depending on your setup, rebuild only the layers you intend to unfreeze
'''

model.add(layers.Flatten())

#model.add(layers.Dense(4096, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.Dense(4096, activation='relu'))
#model.add(layers.Dropout(0.5))
model.add(layers.Dense(classes, activation='softmax', name='prediction'))  # classes = the number of categories in your dataset; use softmax for more than 2 classes
#kernel_regularizer=regularizers.l2(0.01)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=1e-5),
              metrics=['acc'])
model.summary()
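The EarlyStopping import near the top is commented out; if you want to use it, one possible wiring (together with ModelCheckpoint, the filename is only an example) looks like this:

from keras.callbacks import EarlyStopping, ModelCheckpoint

early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
checkpoint = ModelCheckpoint('vgg_cell_best.h5', monitor='val_loss',
                             save_best_only=True, verbose=1)
# then pass callbacks=[early_stopping, checkpoint] to model.fit_generator(...) below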

Train the model, save it, and plot the training curves:

#early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=2)
history = model.fit_generator(
      train_generator,
      steps_per_epoch=50,
      epochs=500,
      validation_data=validation_generator,
      validation_steps=10,
      verbose=2, shuffle=False)
      # callbacks=[early_stopping]  (uncomment together with the EarlyStopping setup to enable early stopping)
#early_stopping = EarlyStopping(monitor='val_loss', patience=50, verbose=2)
model.save('vgg_cell.h5')  # filename for the saved model, in h5 format
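To use the saved model later, a sketch for reloading it and classifying a single image could look like the following ('some_image.jpg' is a placeholder path, not from the post):

from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
import numpy as np

restored = load_model('vgg_cell.h5')
img = load_img('some_image.jpg', target_size=(224, 224))
x = img_to_array(img) / 255.0     # same rescaling as the generators
x = np.expand_dims(x, axis=0)     # add the batch dimension
print(restored.predict(x))        # per-class probabilities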
'''
# print the test accuracy (requires a test_generator built from test_dir, like the generators above)
test_loss, test_acc = model.evaluate_generator(test_generator, steps=5)
print('Test Acc is: {}'.format(test_acc))
'''

# collect the training curves from the History object
acc = history.history['acc']
loss = history.history['loss']
val_acc = history.history['val_acc']
val_loss = history.history['val_loss']
epochs = range(len(acc))
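The post stops before the actual plotting calls; a minimal sketch using the variables collected above (matplotlib was already imported as plt):

plt.plot(epochs, acc, 'b', label='train acc')
plt.plot(epochs, val_acc, 'r', label='val acc')
plt.title('Accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'b', label='train loss')
plt.plot(epochs, val_loss, 'r', label='val loss')
plt.title('Loss')
plt.legend()
plt.show()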

 
