Training a CNN model on your own dataset with TensorFlow

Reposted from: https://blog.csdn.net/m0_37167788/article/details/79066487

While training my own model with TensorFlow recently, I found that the examples on the official TensorFlow site all use data that already comes in a processed format, such as MNIST, so I got stuck at the very first step: feeding in my own data. After some searching online I found several solutions; only one is described here (there are also plenty of tutorials for the TFRecord format, which will not be covered).
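
The approach below assumes the images are organized into one sub-folder per class under the dataset root; read_img then uses each folder's enumeration index as the class label. A hypothetical layout (the class names are just examples):

../dataset/classify/
    cat/
        001.jpg
        002.jpg
    dog/
        001.jpg
        002.jpg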

 

import os
import glob
import time
import numpy as np
import tensorflow as tf
from skimage import io, transform

# os.environ["TF_CPP_MIN_LOG_LEVEL"] = '1'
# Filter out INFO messages ('0', the default, shows everything)
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
# Also filter out WARNING messages, leaving only errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
# Filter out INFO, WARNING, and ERROR messages


# Read the images
def read_img(path, w, h):
    # Each sub-directory of `path` is one class; its index becomes the label
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    # print(cate)

    imgs = []
    labels = []

    print('Start reading the images ...')

    for index, folder in enumerate(cate):
        # print(index, folder)
        for im in glob.glob(folder + '/*.jpg'):
            # print('Reading the image: %s' % im)
            img = io.imread(im)
            # skimage's resize returns floats scaled to [0, 1]
            img = transform.resize(img, (w, h))
            imgs.append(img)
            labels.append(index)

    print('Finished reading ...')

    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)


# Shuffle the samples
def messUpOrder(data, label):
    num_example = data.shape[0]
    arr = np.arange(num_example)
    np.random.shuffle(arr)
    data = data[arr]
    label = label[arr]

    return data, label


# Split all the data into a training set and a validation set
def segmentation(data, label, ratio=0.8):
    num_example = data.shape[0]
    s = int(num_example * ratio)
    x_train = data[:s]
    y_train = label[:s]
    x_val = data[s:]
    y_val = label[s:]

    return x_train, y_train, x_val, y_val


# Build the network
def buildCNN(w, h, c):
    # Placeholders
    x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
    y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

    # First convolutional layer + pooling layer (100 -> 50)
    conv1 = tf.layers.conv2d(
        inputs=x,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Second convolutional layer + pooling layer (50 -> 25)
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Third convolutional layer + pooling layer (25 -> 12)
    conv3 = tf.layers.conv2d(
        inputs=pool2,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

    # Fourth convolutional layer + pooling layer (12 -> 6)
    conv4 = tf.layers.conv2d(
        inputs=pool3,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

    # Flatten: after four poolings the 100x100 input is down to 6x6 with 128 channels
    re1 = tf.reshape(pool4, [-1, 6 * 6 * 128])

    # Fully connected layers
    dense1 = tf.layers.dense(inputs=re1,
                             units=1024,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    dense2 = tf.layers.dense(inputs=dense1,
                             units=512,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    logits = tf.layers.dense(inputs=dense2,
                             units=20,  # number of output classes; adjust to your dataset
                             activation=None,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

    return logits, x, y_
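
The hard-coded 6 * 6 * 128 in the reshape above follows directly from the input size: each max_pooling2d with pool size 2 and stride 2 halves the spatial dimensions (flooring odd sizes under the default 'valid' padding), so a 100x100 input shrinks 100 -> 50 -> 25 -> 12 -> 6 over the four pooling layers. A quick standalone check:

size = 100
for _ in range(4):
    size //= 2  # 25 // 2 floors to 12, just like pooling with 'valid' padding
print(size * size * 128)  # 4608 == 6 * 6 * 128, matching the reshape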


# Return the loss, the training op, the per-example correctness, and the accuracy
def accCNN(logits, y_):
    loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return loss, train_op, correct_prediction, acc
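
One caveat: the L2 penalties attached via kernel_regularizer in buildCNN are only collected under tf.GraphKeys.REGULARIZATION_LOSSES; tf.losses.sparse_softmax_cross_entropy does not fold them into the returned loss. A minimal sketch of how accCNN could include them, if that is the intent:

reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = loss + tf.add_n(reg_losses)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(total_loss)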


# Define a generator that yields the data in mini-batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]
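
Note that minibatches silently drops a final batch smaller than batch_size. A quick toy check (separate from the training flow):

X_demo = np.arange(10).reshape(10, 1)
y_demo = np.arange(10)
for xb, yb in minibatches(X_demo, y_demo, batch_size=4):
    print(yb)  # prints [0 1 2 3] and [4 5 6 7]; the last two samples are dropped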


def runable(x_train, y_train, train_op, loss, acc, x, y_, x_val, y_val):
    # Train and validate; n_epoch can be set larger for better results
    n_epoch = 50
    batch_size = 64
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epoch):
        # training
        train_loss, train_acc, n_batch = 0, 0, 0
        for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
            _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
            train_loss += err
            train_acc += ac
            n_batch += 1
        print("train loss: %f" % (train_loss / n_batch))
        print("train acc: %f" % (train_acc / n_batch))

        # validation
        val_loss, val_acc, n_batch = 0, 0, 0
        for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
            err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
            val_loss += err
            val_acc += ac
            n_batch += 1
        print("validation loss: %f" % (val_loss / n_batch))
        print("validation acc: %f" % (val_acc / n_batch))
        print('*' * 50)

    sess.close()
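
As written, the trained weights are discarded when the session closes. To persist them, a tf.train.Saver can be created and invoked just before sess.close() (the checkpoint path below is a hypothetical example):

saver = tf.train.Saver()
saver.save(sess, './model/cnn_model')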


if __name__ == '__main__':
    imgpath = '../dataset/classify/'

    w = 100
    h = 100
    c = 3

    ratio = 0.8  # fraction of the data used for training

    data, label = read_img(path=imgpath, w=w, h=h)

    data, label = messUpOrder(data=data, label=label)

    x_train, y_train, x_val, y_val = segmentation(data=data, label=label, ratio=ratio)

    logits, x, y_ = buildCNN(w=w, h=h, c=c)

    loss, train_op, correct_prediction, acc = accCNN(logits=logits, y_=y_)

    runable(x_train=x_train, y_train=y_train, train_op=train_op, loss=loss,
            acc=acc, x=x, y_=y_, x_val=x_val, y_val=y_val)
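
For completeness, a sketch of single-image inference after training. It assumes runable is modified to return the open session instead of closing it, and 'test.jpg' is a stand-in for a real image path:

img = transform.resize(io.imread('test.jpg'), (w, h))
pred = sess.run(tf.argmax(logits, 1), feed_dict={x: [img]})
print('Predicted class index: %d' % pred[0])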

 
