ResNet18-TensorFlow

1. ResNet code

# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 19:38:01 2020

@author: HongyongHan
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers,Sequential

class BasicBlock(layers.Layer):
    # filter_num: the number of output channels of each convolution
    # strides=1 means the block does not downsample the feature map
    def __init__(self,filter_num,strides=1):
        # call the parent class initializer
        super(BasicBlock,self).__init__()
        # filter_num: number of output channels; (3,3): kernel size
        # padding='same': with strides=1 the output size equals the input size.
        # With strides>=2, 'same' pads the input so the output size is
        # ceil(input_size/strides), e.g. a 32x32 input with strides=2
        # gives a 16x16 output


        self.conv1=layers.Conv2D(filter_num,(3,3),strides=strides,padding='same')
        self.bn1=layers.BatchNormalization()
        # non-linear activation
        self.relu=layers.Activation('relu')

        # the second conv always uses strides=1, so it preserves the shape
        self.conv2=layers.Conv2D(filter_num,(3,3),strides=1,padding='same')
        self.bn2=layers.BatchNormalization()

        if strides != 1:
            # downsample the shortcut branch with a 1x1 conv so its shape
            # matches the residual branch
            self.downsample=Sequential()
            self.downsample.add(layers.Conv2D(filter_num,(1,1),strides=strides))
        else:
            # identity mapping: the shapes already match
            self.downsample=lambda x:x



    def call(self,inputs,training=None):
        # inputs: [b,h,w,c]
        out=self.conv1(inputs)
        # pass the training flag so BatchNormalization uses batch statistics
        # during training and moving statistics at inference time
        out=self.bn1(out,training=training)
        out=self.relu(out)

        out=self.conv2(out)
        out=self.bn2(out,training=training)

        identity=self.downsample(inputs)
        output=layers.add([out,identity])
        # final activation, using the functional API
        output=tf.nn.relu(output)

        return output
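
# In short, each BasicBlock computes relu(F(x) + shortcut(x)), where F is
# conv-bn-relu-conv-bn and the shortcut is a strided 1x1 conv when
# strides != 1, otherwise the identity mapping.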


class ResNet(keras.Model):
    def __init__(self,layer_dims,num_classes=100):
        # layer_dims: for resnet18 this is [2,2,2,2], i.e. four res blocks,
        # and each entry gives the number of BasicBlocks in that res block
        # (build_resblock below uses these counts)
        # num_classes=100: the number of output classes
        super(ResNet, self).__init__()

        # the stem: preprocessing layers applied before the res blocks
        self.stem=Sequential([layers.Conv2D(64,(3,3),strides=(1,1)),
                              layers.BatchNormalization(),
                              layers.Activation('relu'),
                              layers.MaxPool2D(pool_size=(2,2),strides=(1,1),padding='same')
                              ])
        # create the 4 res blocks; the block counts come from layer_dims,
        # e.g. [2,2,2,2] for resnet18.
        # strides=2 halves the feature map at each later stage
        self.layer1=self.build_resblock(64,layer_dims[0])
        self.layer2=self.build_resblock(128,layer_dims[1],strides=2)
        self.layer3=self.build_resblock(256,layer_dims[2],strides=2)
        self.layer4=self.build_resblock(512,layer_dims[3],strides=2)


        # out: [b,h,w,512]
        # h and w are not known statically here, so use adaptive pooling:
        # GlobalAveragePooling2D averages each channel over its full h x w
        # extent, e.g. [b,4,4,512] becomes [b,512] regardless of spatial size
        self.avgpool=layers.GlobalAveragePooling2D()
        # fully connected classification layer producing num_classes logits
        self.fc=layers.Dense(num_classes)



    def call(self,inputs,training=None):
        # forward pass; propagate the training flag for BatchNormalization
        x = self.stem(inputs,training=training)
        x = self.layer1(x,training=training)
        x = self.layer2(x,training=training)
        x = self.layer3(x,training=training)
        x = self.layer4(x,training=training)
        # already of shape [b,512] after global average pooling, no reshape needed
        x=self.avgpool(x)
        # output: [b,num_classes], i.e. [b,100] by default
        x=self.fc(x)

        return x



    def build_resblock(self,filter_num,blocks,strides=1):
        res_blocks=Sequential()
        # the first BasicBlock may downsample (strides can be 2)
        res_blocks.add(BasicBlock(filter_num,strides))
        # the remaining blocks always use strides=1, so only the first
        # block in a stage downsamples and the rest keep the shape
        for _ in range(1,blocks):
            res_blocks.add(BasicBlock(filter_num,strides=1))
        return res_blocks

def resnet18():
    return ResNet([2,2,2,2])

def resnet34():
    return ResNet([3,4,6,3])
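
To check the shapes end to end, the model can be exercised with a dummy batch. A minimal sketch, assuming the code above is saved as ResNet.py (the filename the training script below imports from):

import tensorflow as tf
from ResNet import resnet18

model = resnet18()
model.build(input_shape=(None, 32, 32, 3))
model.summary()

# a dummy batch of 4 CIFAR-sized images
x = tf.random.normal([4, 32, 32, 3])
logits = model(x, training=False)
print(logits.shape)   # expected: (4, 100)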

2. resnet18_train

# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 19:38:40 2020

@author: HongyongHan
"""

import tensorflow as tf
from tensorflow.keras import layers,optimizers,datasets,Sequential
import os
from ResNet import resnet18
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2345)



# preprocessing function
def preprocess(x,y):
    # scale pixel values from [0,255] to [-1,1]
    x=2 * tf.cast(x,dtype=tf.float32)/255.-1
    y=tf.cast(y,dtype=tf.int32)
    return x,y
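
# sanity check of the mapping above: 2*0/255-1 = -1.0 and 2*255/255-1 = 1.0,
# so the preprocessed pixel values land exactly in [-1, 1]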

(x,y),(x_test,y_test)=datasets.cifar100.load_data()
# y is loaded with shape [N,1]; squeeze away the extra axis to get [N]
y=tf.squeeze(y,axis=1)
y_test=tf.squeeze(y_test,axis=1)
print(x.shape,y.shape,x_test.shape,y_test.shape)

train_db=tf.data.Dataset.from_tensor_slices((x,y))
train_db=train_db.shuffle(1000).map(preprocess).batch(64)

test_db=tf.data.Dataset.from_tensor_slices((x_test,y_test))
test_db=test_db.map(preprocess).batch(64)

sample=next(iter(train_db))
# print one batch to verify the shapes and the preprocessed value range
print('sample',sample[0].shape,sample[1].shape,
      tf.reduce_min(sample[0]),tf.reduce_max(sample[0]))

def main():
    # the model maps [b,32,32,3] inputs to [b,100] class logits
    model=resnet18()
    model.build(input_shape=(None,32,32,3))
    optimizer=optimizers.Adam(learning_rate=1e-3)
    for epoch in range(50):
        for step,(x,y) in enumerate(train_db):
            # record the forward pass so gradients can be taken
            with tf.GradientTape() as tape:
                #[b,32,32,3] => [b,100]
                logits=model(x,training=True)
                #[b] => [b,100]
                y_onehot=tf.one_hot(y,depth=100)
                #compute loss
                loss=tf.losses.categorical_crossentropy(y_onehot,logits,from_logits=True)
                # average the per-sample losses over the batch
                loss=tf.reduce_mean(loss)
            # compute the gradients
            grads=tape.gradient(loss,model.trainable_variables)
            # pass (gradient, variable) pairs to the optimizer to update weights
            optimizer.apply_gradients(zip(grads,model.trainable_variables))

            if step % 100 == 0:
                print(epoch,step,'loss:',float(loss))
        total_num=0
        total_correct=0
        for x,y in test_db:
            # run the model in inference mode
            logits=model(x,training=False)
            prob=tf.nn.softmax(logits,axis=1)
            pred=tf.argmax(prob,axis=1)
            pred=tf.cast(pred,dtype=tf.int32)
            correct=tf.cast(tf.equal(pred,y),dtype=tf.int32)
            correct=tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)
        acc=total_correct / total_num
        print(epoch,'acc:',acc)

if __name__ == '__main__':
    main()
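
After training, the weights can be persisted and reloaded for later inference. A minimal sketch, not part of the original script; the checkpoint path resnet18_cifar100.ckpt is an arbitrary choice:

# inside or after main(), once training has finished:
model.save_weights('resnet18_cifar100.ckpt')

# later, rebuild the architecture and restore the weights
model2 = resnet18()
model2.build(input_shape=(None, 32, 32, 3))
model2.load_weights('resnet18_cifar100.ckpt')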



 
