TensorFlow-2.x-10-AlexNet的實現

        AlexNet可以說是使得深度學習大火的一個網絡模型,它構建了多層CNN,增大感受野,加入LRN等方法,使得網絡擁有極高的特徵提取能力,但受限於當時設備的性能,原論文使用了2塊GPU來提升訓練效率。相關結構及原理可參考以下這篇文章:

深入理解AlexNet網絡

        本章使用tf-2.x來實現AlexNet,由於設備的原因,修改了全連接層神經元個數、輸出層個數。又因爲LRN對模型的提升作用不大,且容易造成模型訓練難以收斂等因素,所以並沒有使用LRN。

1、讀取數據集並Resize

        下面的代碼隨機從數據集中選取batch_size個訓練數據和測試數據,並將圖片大小resize成224*224大小。

class Data_load():
    """Load Fashion-MNIST and serve random batches resized to 224x224.

    Images are scaled to [0, 1] and expanded to NHWC layout so they can
    be fed straight into a Conv2D stack; labels stay as integer class ids.
    """

    def __init__(self):
        fashion_mnist = tf.keras.datasets.fashion_mnist
        (self.train_images, self.train_labels), (self.test_images, self.test_labels)\
            = fashion_mnist.load_data()
        # Add a trailing channel axis -> [n, h, w, 1] and normalize pixels to [0, 1].
        self.train_images = np.expand_dims(
            self.train_images.astype(np.float32) / 255.0, axis=-1)
        self.test_images = np.expand_dims(
            self.test_images.astype(np.float32) / 255.0, axis=-1)
        # Integer labels, as expected by sparse categorical cross-entropy.
        self.train_labels = self.train_labels.astype(np.int32)
        self.test_labels = self.test_labels.astype(np.int32)
        # Sample counts for the train / test splits.
        self.num_train = self.train_images.shape[0]
        self.num_test = self.test_images.shape[0]

    def get_train_batch(self, batch_size):
        """Return a random training batch, padded/resized to 224x224."""
        idx = np.random.randint(0, self.train_images.shape[0], batch_size)
        batch = tf.image.resize_with_pad(self.train_images[idx], 224, 224)
        return batch.numpy(), self.train_labels[idx]

    def get_test_batch(self, batch_size):
        """Return a random test batch, padded/resized to 224x224."""
        idx = np.random.randint(0, self.test_images.shape[0], batch_size)
        batch = tf.image.resize_with_pad(self.test_images[idx], 224, 224)
        return batch.numpy(), self.test_labels[idx]

2、定義模型

根據AlexNet的網絡結構,定義相關模型結構:

def AlexNet():
    """Build a slimmed-down AlexNet for 224x224 single-channel input.

    LRN is omitted and the dense layers are smaller than the paper's
    4096 units to keep training cheap on modest hardware.

    Returns:
        An uncompiled tf.keras.Sequential model with 10 output classes.
    """
    net = tf.keras.Sequential()
    # Conv1: large 11x11 receptive field with stride 4, as in the paper.
    net.add(tf.keras.layers.Conv2D(96, kernel_size=11, strides=(4, 4),
                                   padding="same", activation="relu"))
    net.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2))
    net.add(tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'))
    net.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2))
    net.add(tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'))
    net.add(tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'))
    net.add(tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'))
    net.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2))
    net.add(tf.keras.layers.Flatten())
    # Dense layers shrunk (originally larger) to make training easier.
    net.add(tf.keras.layers.Dense(512, activation='relu'))
    net.add(tf.keras.layers.Dropout(0.5))
    net.add(tf.keras.layers.Dense(256, activation='relu'))
    net.add(tf.keras.layers.Dropout(0.5))
    # BUG FIX: use softmax (not sigmoid) so the outputs form a probability
    # distribution over the 10 classes, matching the
    # sparse_categorical_crossentropy loss used at compile time.
    net.add(tf.keras.layers.Dense(10, activation='softmax'))

    return net

3、訓練

訓練比較簡單,利用keras方法即可:

def train(num_epoches, net):
    """Compile `net` and train it on random Fashion-MNIST batches with SGD.

    Args:
        num_epoches: Number of passes, each of `num_train // batch_size` steps.
        net: A tf.keras model; it is compiled in place here.

    NOTE: relies on the module-level `data_load` and `batch_size` globals.
    """
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
    net.compile(optimizer=optimizer,
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

    num_iter = data_load.num_train // batch_size
    for epoch in range(num_epoches):
        for step in range(num_iter):
            x_batch, y_batch = data_load.get_train_batch(batch_size)
            test_x_batch, test_y_batch = data_load.get_test_batch(batch_size)
            # train_on_batch performs exactly one gradient step on the whole
            # 64-sample batch; calling net.fit here instead would pay per-call
            # setup costs and silently split the batch into 32-sample
            # minibatches (Keras' default batch_size).
            loss, acc = net.train_on_batch(x_batch, y_batch)
            val_loss, val_acc = net.test_on_batch(test_x_batch, test_y_batch)
        print(f"epoch {epoch + 1}/{num_epoches}: "
              f"loss={loss:.4f} acc={acc:.4f} "
              f"val_loss={val_loss:.4f} val_acc={val_acc:.4f}")
train(5,net)

訓練結果:
在這裏插入圖片描述

附上所有源碼:

import tensorflow as tf
from tensorflow.keras.datasets import fashion_mnist
from tensorflow import data as tfdata
import numpy as np

# Set the GPU memory policy to "allocate only as needed" (memory growth),
# so TensorFlow does not grab all GPU memory up front.
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)

# 1、讀取數據
'''由於AlexNet的網絡輸入爲224*224,而Fashion-MNIST的圖片只有28*28,所以我們定義一個類,
來讀取數據集並將圖片resize到224*224的大小'''

class Data_load():
    """Load Fashion-MNIST and serve random batches resized to 224x224.

    Images are scaled to [0, 1] and expanded to NHWC layout so they can
    be fed straight into a Conv2D stack; labels stay as integer class ids.
    """

    def __init__(self):
        fashion_mnist = tf.keras.datasets.fashion_mnist
        (self.train_images, self.train_labels), (self.test_images, self.test_labels)\
            = fashion_mnist.load_data()
        # Add a trailing channel axis -> [n, h, w, 1] and normalize pixels to [0, 1].
        self.train_images = np.expand_dims(
            self.train_images.astype(np.float32) / 255.0, axis=-1)
        self.test_images = np.expand_dims(
            self.test_images.astype(np.float32) / 255.0, axis=-1)
        # Integer labels, as expected by sparse categorical cross-entropy.
        self.train_labels = self.train_labels.astype(np.int32)
        self.test_labels = self.test_labels.astype(np.int32)
        # Sample counts for the train / test splits.
        self.num_train = self.train_images.shape[0]
        self.num_test = self.test_images.shape[0]

    def get_train_batch(self, batch_size):
        """Return a random training batch, padded/resized to 224x224."""
        idx = np.random.randint(0, self.train_images.shape[0], batch_size)
        batch = tf.image.resize_with_pad(self.train_images[idx], 224, 224)
        return batch.numpy(), self.train_labels[idx]

    def get_test_batch(self, batch_size):
        """Return a random test batch, padded/resized to 224x224."""
        idx = np.random.randint(0, self.test_images.shape[0], batch_size)
        batch = tf.image.resize_with_pad(self.test_images[idx], 224, 224)
        return batch.numpy(), self.test_labels[idx]


batch_size=64
data_load=Data_load()
# Sanity check: draw one batch and confirm the shapes printed below are
# [64, 224, 224, 1] for images and [64] for labels.
x_train_batch,y_train_batch=data_load.get_train_batch(batch_size)
print("x_batch shape:",x_train_batch.shape,"y_batch shape:", y_train_batch.shape)

# 2、定義模型
def AlexNet():
    """Build a slimmed-down AlexNet for 224x224 single-channel input.

    LRN is omitted and the dense layers are smaller than the paper's
    4096 units to keep training cheap on modest hardware.

    Returns:
        An uncompiled tf.keras.Sequential model with 10 output classes.
    """
    net = tf.keras.Sequential()
    # Conv1: large 11x11 receptive field with stride 4, as in the paper.
    net.add(tf.keras.layers.Conv2D(96, kernel_size=11, strides=(4, 4),
                                   padding="same", activation="relu"))
    net.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2))
    net.add(tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'))
    net.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2))
    net.add(tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'))
    net.add(tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'))
    net.add(tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'))
    net.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2))
    net.add(tf.keras.layers.Flatten())
    # Dense layers shrunk (originally larger) to make training easier.
    net.add(tf.keras.layers.Dense(512, activation='relu'))
    net.add(tf.keras.layers.Dropout(0.5))
    net.add(tf.keras.layers.Dense(256, activation='relu'))
    net.add(tf.keras.layers.Dropout(0.5))
    # BUG FIX: use softmax (not sigmoid) so the outputs form a probability
    # distribution over the 10 classes, matching the
    # sparse_categorical_crossentropy loss used at compile time.
    net.add(tf.keras.layers.Dense(10, activation='softmax'))

    return net
net=AlexNet()
# Inspect the architecture: push one dummy 224x224 single-channel image
# through each layer and print the resulting output shape.
X = tf.random.uniform((1,224,224,1))
for layer in net.layers:
    X = layer(X)
    print(layer.name, 'output shape\t', X.shape)


def train(num_epoches, net):
    """Compile `net` and train it on random Fashion-MNIST batches with SGD.

    Args:
        num_epoches: Number of passes, each of `num_train // batch_size` steps.
        net: A tf.keras model; it is compiled in place here.

    NOTE: relies on the module-level `data_load` and `batch_size` globals.
    """
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
    net.compile(optimizer=optimizer,
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

    num_iter = data_load.num_train // batch_size
    for epoch in range(num_epoches):
        for step in range(num_iter):
            x_batch, y_batch = data_load.get_train_batch(batch_size)
            test_x_batch, test_y_batch = data_load.get_test_batch(batch_size)
            # train_on_batch performs exactly one gradient step on the whole
            # 64-sample batch; calling net.fit here instead would pay per-call
            # setup costs and silently split the batch into 32-sample
            # minibatches (Keras' default batch_size).
            loss, acc = net.train_on_batch(x_batch, y_batch)
            val_loss, val_acc = net.test_on_batch(test_x_batch, test_y_batch)
        print(f"epoch {epoch + 1}/{num_epoches}: "
              f"loss={loss:.4f} acc={acc:.4f} "
              f"val_loss={val_loss:.4f} val_acc={val_acc:.4f}")
train(5,net)


發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章