TensorFlow Model Persistence

Model Persistence


To make the code more readable and extensible, it should be split into modules by function, with reusable code abstracted into library functions.

The previously monolithic MNIST code can therefore be divided into three modules:

  • inference
  • train
  • eval

The folder layout is as follows:

mnist/
    data/
        ......
    best/
        inference.py
        train.py
        eval.py

Complete Code


The implementation follows the book 《TensorFlow:實戰Google深度學習框架》.

First comes inference.py , the module that implements the forward-propagation pass used by both training and evaluation:

import tensorflow as tf

# Parameters describing the network structure
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


# Create a weights variable and add its regularization loss to the custom
# 'losses' collection (train.py later sums this collection into the total loss)
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        'weights',
        shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


# Forward propagation
def inference(input_tensor, regularizer):
    # Declare the hidden layer's variables and compute its forward pass
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            'biases', [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # Declare the output layer's variables and compute its forward pass
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            'biases', [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2
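
Before moving on to training, inference.py can be sanity-checked on its own by building the graph and running the forward pass on random input. The snippet below is a minimal sketch, not part of the original article, and assumes it is run from the same directory as inference.py:

import numpy as np
import tensorflow as tf

import inference

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, inference.INPUT_NODE])
    y = inference.inference(x, None)  # no regularizer needed for a pure forward pass
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        logits = sess.run(
            y, {x: np.random.rand(2, inference.INPUT_NODE).astype(np.float32)})
        print(logits.shape)  # expected: (2, 10)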

Next comes train.py , the module that trains the model:

import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import inference

# Optimizer hyperparameters
LEARNING_RATE_BASE = 0.8  # base learning rate
LEARNING_RATE_DECAY = 0.99  # decay rate of the learning rate
REGULARIZATION_RATE = 0.0001  # weight of the regularization term in the loss
MOVING_AVERAGE_DECAY = 0.99  # decay rate for the moving averages

# Training parameters
BATCH_SIZE = 100  # number of images in one training batch
TRAINING_STEPS = 30000  # number of training steps

# Path and file name for saving the model
MODEL_SAVE_PATH = 'model/'
MODEL_NAME = 'mnist.ckpt'


def train(mnist):
    # Make sure the checkpoint directory exists before training
    # (tf.train.Saver.save does not create it)
    if not os.path.exists(MODEL_SAVE_PATH):
        os.makedirs(MODEL_SAVE_PATH)

    # Build the model
    x = tf.placeholder(
        tf.float32, [None, inference.INPUT_NODE], name='x-input')  # input layer
    y_ = tf.placeholder(
        tf.float32, [None, inference.OUTPUT_NODE], name='y-input')  # labels
    regularizer = tf.contrib.layers.l2_regularizer(
        REGULARIZATION_RATE)  # L2 regularization function
    y = inference.inference(x, regularizer)  # output layer

    # Variable that counts training steps; marked non-trainable
    global_step = tf.Variable(0, trainable=False)

    # Set up the moving averages
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)  # define the moving-average object
    variable_averages_op = variable_averages.apply(
        tf.trainable_variables())  # maintain averages of all trainable variables

    # Exponentially decaying learning rate:
    # lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY^(global_step / decay_steps)
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)

    # Minimize the loss function
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))  # cross entropy of each image
    cross_entropy_mean = tf.reduce_mean(cross_entropy)  # mean over the batch
    loss = cross_entropy_mean + tf.add_n(
        tf.get_collection('losses'))  # total loss = cross entropy + regularization
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)  # minimize() also increments global_step

    # Run backpropagation and the moving-average update as one training op
    # (equivalent to tf.group(train_step, variable_averages_op))
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Create the Saver that persists the model
    saver = tf.train.Saver()
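    # Note (not in the original code): by default tf.train.Saver keeps only
    # the five most recent checkpoints; pass max_to_keep, e.g.
    # tf.train.Saver(max_to_keep=10), to retain more of them.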

    # Start training
    with tf.Session() as sess:
        # Initialize all variables
        tf.global_variables_initializer().run()

        # Training loop
        for i in range(TRAINING_STEPS):
            # Fetch the batch for this step
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={
                    x: xs,
                    y_: ys
                })

            # Save the model every 1000 steps
            if i % 1000 == 0:
                # Report the training progress
                print('After %d training steps, loss is %g.' % (step,
                                                                loss_value))

                # Save the current model; passing global_step appends the
                # step count to the file name, e.g. mnist.ckpt-1000
                saver.save(
                    sess,
                    os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                    global_step=global_step)


# Program entry point
def main(argv=None):
    mnist = input_data.read_data_sets('../data/', one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()
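
Once a few checkpoints have been written, the files under model/ can be inspected directly. The snippet below is a sketch, not part of the original article; tf.train.latest_checkpoint returns the path of the newest checkpoint, and tf.train.NewCheckpointReader lists the variables it stores:

import tensorflow as tf

# Path of the newest checkpoint under MODEL_SAVE_PATH,
# e.g. 'model/mnist.ckpt-29001'
ckpt_path = tf.train.latest_checkpoint('model/')
print(ckpt_path)

# Every variable saved in the checkpoint, with its shape; the moving
# averages show up as extra '.../ExponentialMovingAverage' shadow variables
reader = tf.train.NewCheckpointReader(ckpt_path)
for name, shape in reader.get_variable_to_shape_map().items():
    print(name, shape)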

Finally comes eval.py , which can run alongside training, periodically loading the latest saved model and evaluating it:

import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import inference
import train

# Every 10 seconds, load the latest model and evaluate its accuracy
# on the validation set
EVAL_INTERVAL_SECS = 10


def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output placeholders
        x = tf.placeholder(
            tf.float32, [None, inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(
            tf.float32, [None, inference.OUTPUT_NODE], name='y-input')
        y = inference.inference(x, None)

        # Validation set
        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        # Compute the accuracy of the model
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model through variable renaming, so the moving-average
        # (shadow) values are restored into the weights
        variable_averages = tf.train.ExponentialMovingAverage(
            train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Check the accuracy every EVAL_INTERVAL_SECS seconds
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)

                    # Recover the saved step count from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split('/')[
                        -1].split('-')[-1]

                    # Evaluate and report the result
                    accuracy_score = sess.run(
                        accuracy, feed_dict=validate_feed)
                    print(
                        'After %s training steps, validation accuracy = %g' %
                        (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)


def main(argv=None):
    mnist = input_data.read_data_sets('../data/', one_hot=True)
    evaluate(mnist)


if __name__ == '__main__':
    tf.app.run()
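
To see what "loading the model through variable renaming" actually does, it helps to print the map that variables_to_restore() builds: each shadow-variable name maintained by the moving average is mapped back to the corresponding graph variable, so restoring writes the averaged values into the weights. A minimal sketch, not part of the original article:

import tensorflow as tf

import inference
import train

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, inference.INPUT_NODE])
    inference.inference(x, None)
    ema = tf.train.ExponentialMovingAverage(train.MOVING_AVERAGE_DECAY)
    # Prints pairs such as:
    #   layer1/weights/ExponentialMovingAverage -> layer1/weights
    for shadow_name, var in ema.variables_to_restore().items():
        print(shadow_name, '->', var.op.name)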

Results


Training the model with train.py produces the following output:

$ python train.py

Extracting ../data/train-images-idx3-ubyte.gz
Extracting ../data/train-labels-idx1-ubyte.gz
Extracting ../data/t10k-images-idx3-ubyte.gz
Extracting ../data/t10k-labels-idx1-ubyte.gz

After 1 training steps, loss is 2.75381.
After 1001 training steps, loss is 0.26364.
After 2001 training steps, loss is 0.160792.
After 3001 training steps, loss is 0.144208.
After 4001 training steps, loss is 0.120926.
After 5001 training steps, loss is 0.10708.
After 6001 training steps, loss is 0.102106.
......
After 22001 training steps, loss is 0.0399828.
After 23001 training steps, loss is 0.0408827.
After 24001 training steps, loss is 0.0355409.
After 25001 training steps, loss is 0.0378072.
After 26001 training steps, loss is 0.0352473.
After 27001 training steps, loss is 0.0357247.
After 28001 training steps, loss is 0.0318179.
After 29001 training steps, loss is 0.0417907.

Evaluating the model with eval.py produces the following output:

$ python eval.py

Extracting ../data/train-images-idx3-ubyte.gz
Extracting ../data/train-labels-idx1-ubyte.gz
Extracting ../data/t10k-images-idx3-ubyte.gz
Extracting ../data/t10k-labels-idx1-ubyte.gz

After 26001 training steps, validation accuracy = 0.983
After 28001 training steps, validation accuracy = 0.985
After 29001 training steps, validation accuracy = 0.986
......

Reposted from: https://blog.csdn.net/white_idiot/article/details/78777022
