TensorFlow learning: tf.train.Supervisor() and tf.train.Saver()

Original article: https://blog.csdn.net/ei1990/article/details/77948001

1. tf.train.Supervisor()

import tensorflow as tf
import numpy as np
import os

log_path = 'ckptdir/'
log_name = 'liner.ckpt'
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1 + 0.3

w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))
y = w*x_data + b

loss = tf.reduce_mean(tf.square(y-y_data))
train = tf.train.AdamOptimizer(0.5).minimize(loss)

tf.summary.scalar('loss', loss)

saver = tf.train.Saver()  # not strictly needed: it is replaced by the Supervisor's own saver below
init = tf.global_variables_initializer()
merged = tf.summary.merge_all()

sv = tf.train.Supervisor(logdir=log_path, init_op=init)  # logdir is where checkpoints and summaries are saved
saver = sv.saver   # use the Supervisor's built-in saver

with sv.managed_session() as sess:  # automatically looks for a checkpoint in logdir; if none is found, runs init_op
#    sess.run(init)
#    if len(os.listdir(log_path)) != 0:
#        saver.restore(sess, os.path.join(log_path, log_name))
    for step in range(201):
        sess.run(train)
        if step%50 == 0:
            print(step, sess.run(w), sess.run(b))
            merged_summary = sess.run(merged)
            sv.summary_computed(sess, merged_summary, global_step=step)
    saver.save(sess, os.path.join(log_path, 'liner.ckpt'))

As the code above shows, the Supervisor takes care of several things for us:
(1) It automatically restores from a checkpoint, or initializes the variables if no checkpoint exists (a re-run sketch is shown below).
(2) It carries its own Saver, which can be used to save checkpoints.
(3) It provides summary_computed for writing summaries.
As a result, we no longer need to:
(1) manually initialize variables or load them from a checkpoint;
(2) create our own Saver; the one inside sv is sufficient;
(3) create a summary writer.
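
A minimal re-run sketch (assuming the ckptdir/ directory written by the example above still exists): managed_session finds the latest checkpoint in logdir, restores w, b, and the optimizer slots automatically, and training continues from the saved values with no explicit restore call.

import tensorflow as tf
import numpy as np
import os

log_path = 'ckptdir/'
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1 + 0.3

# Rebuild the variables in the same order so their names match the checkpoint.
w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))
loss = tf.reduce_mean(tf.square(w*x_data + b - y_data))
train = tf.train.AdamOptimizer(0.5).minimize(loss)

sv = tf.train.Supervisor(logdir=log_path, init_op=tf.global_variables_initializer())
with sv.managed_session() as sess:
    print('restored w, b:', sess.run(w), sess.run(b))  # values from the previous run
    for step in range(100):
        if sv.should_stop():  # the Supervisor also coordinates a clean shutdown
            break
        sess.run(train)
    sv.saver.save(sess, os.path.join(log_path, 'liner.ckpt'))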
2. tf.train.Saver()

import tensorflow as tf
import numpy as np
import os

log_path = 'ckptdir'
log_name = 'liner.ckpt'
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1 + 0.3

w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))
y = w*x_data + b

loss = tf.reduce_mean(tf.square(y-y_data))
train = tf.train.AdamOptimizer(0.5).minimize(loss)
tf.summary.scalar('loss', loss)

saver = tf.train.Saver()
init = tf.global_variables_initializer()
merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(init)
    print("loading model from checkpoint")
    checkpoint = tf.train.latest_checkpoint(os.path.join(log_path, log_name))
    restore_saver.restore(sess, checkpoint)
    #if len(os.listdir(log_path)) != 0:
    #    saver.restore(sess, os.path.join(log_path, log_name))
    for step in range(201):
        sess.run(train)
        if step % 50 == 0:
            print(step, sess.run(w), sess.run(b))
    summary_writer = tf.summary.FileWriter(log_path, sess.graph)
    summary_all = sess.run(merged)
    summary_writer.add_summary(summary_all)
    summary_writer.close()

    saver.save(sess, os.path.join(log_path, 'liner.ckpt'))
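
As a follow-up, here is a minimal sketch of loading the saved liner.ckpt back in a fresh script with a plain tf.train.Saver (assuming the variables are rebuilt in the same order so their names match the entries in the checkpoint):

import tensorflow as tf
import numpy as np
import os

log_path = 'ckptdir'
x_data = np.random.rand(100).astype(np.float32)

# Rebuild the same variables; Saver matches them to the checkpoint by name.
w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))
y = w*x_data + b

saver = tf.train.Saver()
with tf.Session() as sess:
    checkpoint = tf.train.latest_checkpoint(log_path)  # newest checkpoint prefix in the directory, or None
    if checkpoint:
        saver.restore(sess, checkpoint)  # restore assigns every saved variable, so no init is needed
        print('restored w, b:', sess.run(w), sess.run(b))
    else:
        print('no checkpoint found in', log_path)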