[深度之眼] TensorFlow 2.0 Project Course: Cat vs. Dog Image Classification

Cats vs. Dogs dataset official website
Cat example: [image]
Dog example: [image]
Training set: 11,500 cat images and 11,500 dog images
Validation set: 1,000 images of each class
Difficulties: image sizes are not uniform, and the labels are not pre-paired with the images (they have to be built from the directory structure)
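The code below assumes the images are organized on disk as follows (my reconstruction from the path variables used later; the cat.0.jpg-style names come from the original dataset files):

./cat_and_dog/datasets_drop/
├── train/
│   ├── cats/   (cat.0.jpg, cat.1.jpg, ...)
│   └── dogs/   (dog.0.jpg, dog.1.jpg, ...)
└── valid/
    ├── cats/
    └── dogs/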
First, import the required packages:

import tensorflow as tf
import os
import matplotlib.pyplot as plt
print(tf.__version__)

Because the official dataset is too large for my laptop to handle, I manually trimmed it to one tenth of its original size. If you can run on a GPU, use the full dataset; your accuracy should be much higher than mine.
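For reference, the trimming can be done with a small script like the one below (a hypothetical sketch; the source and destination paths are assumptions, and it simply keeps every 10th file):

import os
import shutil

def subsample(src_dir, dst_dir, keep_every=10):
    # copy every keep_every-th file from src_dir into dst_dir
    os.makedirs(dst_dir, exist_ok=True)
    for i, name in enumerate(sorted(os.listdir(src_dir))):
        if i % keep_every == 0:
            shutil.copy(os.path.join(src_dir, name), os.path.join(dst_dir, name))

# e.g. subsample('./cat_and_dog/datasets/train/cats', './cat_and_dog/datasets_drop/train/cats')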
Load the dataset, then check its structure and display one of the images:

data_dir = './cat_and_dog/datasets_drop'
train_cats_dir = data_dir + '/train/cats/'
train_dogs_dir = data_dir + '/train/dogs/'
test_cats_dir = data_dir + '/valid/cats/'
test_dogs_dir = data_dir + '/valid/dogs/'

print(len(os.listdir(train_cats_dir)))  # number of cat images in the training set

# Build the training dataset
train_cat_filenames = tf.constant([train_cats_dir + filename for filename in os.listdir(train_cats_dir)])
train_dog_filenames = tf.constant([train_dogs_dir + filename for filename in os.listdir(train_dogs_dir)])
train_filenames = tf.concat([train_cat_filenames, train_dog_filenames], axis=-1)

# labels: cat = 0, dog = 1
train_labels = tf.concat([
    tf.zeros(train_cat_filenames.shape, dtype=tf.int32),
    tf.ones(train_dog_filenames.shape, dtype=tf.int32)],
    axis=-1)

print(train_filenames)
print(train_labels)

def _decode_and_resize(filename, label):
    image_string = tf.io.read_file(filename)                        # read the raw file
    image_decoded = tf.image.decode_jpeg(image_string, channels=3)  # decode JPEG; force 3 channels (a few images are grayscale)
    image_resized = tf.image.resize(image_decoded, [256, 256]) / 255.0  # resize and scale to [0, 1]
    return image_resized, label

img, label = _decode_and_resize(
    tf.constant('./cat_and_dog/datasets_drop/train/cats/cat.0.jpg'),
    tf.constant(0))
plt.imshow(img.numpy())
plt.show()
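A caveat: the raw Kaggle dogs-vs-cats data is known to contain a few corrupt or non-JPEG files, which would crash decode_jpeg mid-training. If that happens, one option is to pre-filter the file lists before building the dataset (a sketch of my own, not from the original post):

def is_valid_jpeg(path):
    # returns False for files that TensorFlow cannot decode as JPEG
    try:
        tf.image.decode_jpeg(tf.io.read_file(path))
        return True
    except tf.errors.InvalidArgumentError:
        return False

# e.g. keep only decodable files:
# filenames = [p for p in filenames if is_valid_jpeg(p)]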

Build the training and validation datasets:

batch_size = 32
train_dataset = tf.data.Dataset.from_tensor_slices((train_filenames, train_labels))
# each element is a (filename, label) pair
train_dataset = train_dataset.map(
    map_func=_decode_and_resize,
    num_parallel_calls=tf.data.experimental.AUTOTUNE)

for img, label in train_dataset.take(1):  # inspect one decoded example
    print(img, label)

# fill a buffer with the first buffer_size elements and sample from it at
# random; sampled elements are replaced with subsequent data
train_dataset = train_dataset.shuffle(buffer_size=23000)
train_dataset = train_dataset.repeat(count=3)
train_dataset = train_dataset.batch(batch_size)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
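# Note: buffer_size=23000 matches the full dataset (2 x 11,500 images). After
# trimming to one tenth, a buffer of ~2,300 already covers every element; it
# could also be sized automatically (my suggestion, not in the original):
#   train_dataset = train_dataset.shuffle(buffer_size=len(train_filenames))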

# Build the test (validation) dataset
test_cat_filenames = tf.constant([test_cats_dir + filename for filename in os.listdir(test_cats_dir)])
test_dog_filenames = tf.constant([test_dogs_dir + filename for filename in os.listdir(test_dogs_dir)])
test_filenames = tf.concat([test_cat_filenames, test_dog_filenames], axis=-1)
test_labels = tf.concat([
    tf.zeros(test_cat_filenames.shape, dtype=tf.int32),
    tf.ones(test_dog_filenames.shape, dtype=tf.int32)],
    axis=-1)

test_dataset = tf.data.Dataset.from_tensor_slices((test_filenames, test_labels))
test_dataset = test_dataset.map(_decode_and_resize)
test_dataset = test_dataset.batch(batch_size)

Build a simple CNN model:

class CNNModel(tf.keras.models.Model):
    def __init__(self):
        super(CNNModel, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
        self.maxpool1 = tf.keras.layers.MaxPooling2D()
        self.conv2 = tf.keras.layers.Conv2D(32, 5, activation='relu')
        self.maxpool2 = tf.keras.layers.MaxPooling2D()
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(64, activation='relu')
        self.d2 = tf.keras.layers.Dense(2, activation='softmax')  # 2-way softmax; a 1-unit sigmoid also works for binary classification (see the sketch below)

    def call(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.flatten(x)
        x = self.d1(x)
        x = self.d2(x)
        return x
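As the comment on the output layer hints, a binary classifier can also end in a single sigmoid unit instead of a 2-way softmax. A minimal sketch of that variant (not used in the rest of this post; the loss and metric must change along with it):

# variant output head: one sigmoid unit giving the probability of "dog"
binary_head = tf.keras.layers.Dense(1, activation='sigmoid')
# pair it with binary cross-entropy and a binary accuracy metric
binary_loss = tf.keras.losses.BinaryCrossentropy()
binary_accuracy = tf.keras.metrics.BinaryAccuracy()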

Train the model, reporting the evaluation metrics in real time:

learning_rate = 0.001
model = CNNModel()

loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
# the labels are integer class indices, not one-hot encoded
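# For reference (hypothetical values, not from the original script):
#   loss_object(tf.constant([0, 1]), tf.constant([[0.9, 0.1], [0.2, 0.8]]))
# SparseCategoricalCrossentropy takes integer class indices directly, while
# CategoricalCrossentropy would need one-hot labels such as [[1, 0], [0, 1]].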


optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)


train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')


@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(labels, predictions)

@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)

EPOCHS = 10
for epoch in range(EPOCHS):
    # reset the metrics at the start of each epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()

    for images, labels in train_dataset:
        train_step(images, labels)

    for test_images, test_labels in test_dataset:
        test_step(test_images, test_labels)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100
                         ))
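As an aside, the same training can be written with Keras' built-in loop instead of a custom one. A minimal sketch assuming the datasets defined above (note that .repeat(count=3) should then be dropped, or steps_per_epoch passed, so that fit's epochs behave as expected):

model = CNNModel()
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
model.fit(train_dataset, validation_data=test_dataset, epochs=10)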

That completes the full code. Running it produces:
[image: per-epoch loss/accuracy log]
The main reasons for the low validation accuracy are:
1. The training set is too small (trimmed down to fit my machine).
2. The network structure is simple; newer architectures such as YOLO could be tried. A simple data-augmentation sketch follows below.
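One standard remedy for the small training set is data augmentation. A minimal sketch that could be added to the input pipeline (random flips plus brightness jitter; my addition, not from the original post):

def _augment(image, label):
    # random horizontal flip and slight brightness jitter, clipped back to [0, 1]
    image = tf.image.random_flip_left_right(image)
    image = tf.clip_by_value(tf.image.random_brightness(image, max_delta=0.1), 0.0, 1.0)
    return image, label

# apply after the decode/resize map, before shuffle/batch:
# train_dataset = train_dataset.map(_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)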
