How to load and test a saved TensorFlow model

When a model is built with TensorFlow, it is saved as follows:

saver = tf.train.Saver()
saver.save(sess, model_path + model_name)
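
For reference, a slightly fuller save call is sketched below; max_to_keep, os.path.join and the global_step counter are illustrative additions (step is an assumed training-step variable, not defined in the original script):

import os

saver = tf.train.Saver(max_to_keep=5)  # keep only the 5 most recent checkpoints
# Passing global_step appends the step number to the checkpoint filename,
# e.g. alexnet201809101818-1000; os.path.join avoids path-separator mistakes.
saver.save(sess, os.path.join(model_path, model_name), global_step=step)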

This produces, under model_path, three files named after model_name plus a checkpoint file. For example, with model_name = alexnet201809101818, the following four files are created:
checkpoint
alexnet201809101818.data-00000-of-00001
alexnet201809101818.index
alexnet201809101818.meta
.data-00000-of-00001 and .index store all the variable values (weights, biases, gradients, etc.).
.meta stores the graph structure.
checkpoint is a plain-text file that records the most recently saved checkpoint along with the list of other checkpoints.
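To see which variables a checkpoint actually stores, TF 1.x ships an inspection helper; a minimal sketch, assuming the example file name above (the exact keyword arguments can differ slightly between TF versions):

from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

# Prints every variable stored in the .data/.index pair of the checkpoint.
# The path is the example checkpoint name from above (without any extension).
print_tensors_in_checkpoint_file('/opt/Data/lixiang/alexnet/model/alexnet201809101818',
                                 tensor_name='', all_tensors=True)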
Loading a model requires two things: the graph structure and the variable values. The graph can be obtained either by rebuilding the network by hand or by loading the .meta file directly.

Rebuilding the network by hand

The network structure is the same AlexNet as in https://blog.csdn.net/sjtuxx_lee/article/details/82594265, and it is rebuilt with the same code.

import tensorflow as tf
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical
import os
import cv2
from tqdm import tqdm
import random

os.environ['CUDA_VISIBLE_DEVICES']='0'

log_dir = '/opt/Data/lixiang/alexnet/log'
model_path = '/opt/Data/lixiang/alexnet/model'

n_output = 10
lr = 0.00001
dropout_rate = 0.75
epochs = 20
test_step = 10
batch_size = 32
image_size = 224

def load_mnist(image_size):
    (x_train,y_train),(x_test,y_test) = mnist.load_data()
    train_image = [cv2.cvtColor(cv2.resize(img,(image_size,image_size)),cv2.COLOR_GRAY2BGR) for img in x_train]
    test_image = [cv2.cvtColor(cv2.resize(img,(image_size,image_size)),cv2.COLOR_GRAY2BGR) for img in x_test]    
    train_image = np.asarray(train_image)
    test_image = np.asarray(test_image)
    train_label = to_categorical(y_train)
    test_label = to_categorical(y_test)
    print('finish loading data!')
    return train_image, train_label, test_image, test_label

def get_batch(image, label, batch_size, now_batch, total_batch):
    if now_batch < total_batch:
        image_batch = image[now_batch*batch_size:(now_batch+1)*batch_size]
        label_batch = label[now_batch*batch_size:(now_batch+1)*batch_size]
    else:
        image_batch = image[now_batch*batch_size:]
        label_batch = label[now_batch*batch_size:]
    return image_batch, label_batch

def shuffle_set(train_image, train_label, test_image, test_label):
    # list() is required: in Python 3, random.shuffle cannot shuffle a range object
    train_row = list(range(len(train_label)))
    random.shuffle(train_row)
    train_image = train_image[train_row]
    train_label = train_label[train_row]

    test_row = list(range(len(test_label)))
    random.shuffle(test_row)
    test_image = test_image[test_row]
    test_label = test_label[test_row]
    return train_image, train_label, test_image, test_label

def print_layer(layer):
    print(layer.op.name + ':' + str(layer.get_shape().as_list()))

# define layers
def conv(x, kernel, strides, b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, kernel, strides, padding = 'SAME'), b))

def max_pooling(x, kernel, strides):
    return tf.nn.max_pool(x, kernel, strides, padding = 'VALID')

def fc(x, w, b):
    return tf.nn.relu(tf.add(tf.matmul(x,w),b))

# define variables
# Note: the 'd1'/'d2'/'d3' entries below are not used by alexnet(); they appear
# to be leftovers from the training script and are kept unchanged so that the
# graph's variable set still matches the saved checkpoint.
weights = {
        'wc1':tf.Variable(tf.random_normal([11,11,3,96], dtype=tf.float32, stddev=0.1), name='weights1'),
        'wc2':tf.Variable(tf.random_normal([5,5,96,256], dtype=tf.float32, stddev=0.1), name='weights2'),
        'wc3':tf.Variable(tf.random_normal([3,3,256,384], dtype=tf.float32, stddev=0.1), name='weights3'),
        'wc4':tf.Variable(tf.random_normal([3,3,384,384], dtype=tf.float32, stddev=0.1), name='weights4'),
        'wc5':tf.Variable(tf.random_normal([3,3,384,256], dtype=tf.float32, stddev=0.1), name='weights5'),
        'wd1':tf.Variable(tf.random_normal([6*6*256, 4096], dtype=tf.float32, stddev=0.1), name='weights_fc1'),
        'wd2':tf.Variable(tf.random_normal([4096, 1000], dtype=tf.float32, stddev=0.1), name='weights_fc2'),
        'wd3':tf.Variable(tf.random_normal([1000, n_output], dtype=tf.float32, stddev=0.1), name='weights_fc3'),
        'd1':tf.Variable(tf.random_normal([28*28*3, 1000], dtype=tf.float32, stddev=0.1), name='weights_fc1'),
        'd2':tf.Variable(tf.random_normal([1000, 1000], dtype=tf.float32, stddev=0.1), name='weights_fc2'),
        'd3':tf.Variable(tf.random_normal([1000, n_output], dtype=tf.float32, stddev=0.1), name='weights_fc3'),
        }

bias = {
        'bc1':tf.Variable(tf.random_normal([96]), name='bias1'),
        'bc2':tf.Variable(tf.random_normal([256]), name='bias2'),
        'bc3':tf.Variable(tf.random_normal([384]), name='bias3'),
        'bc4':tf.Variable(tf.random_normal([384]), name='bias4'),
        'bc5':tf.Variable(tf.random_normal([256]), name='bias5'),
        'bd1':tf.Variable(tf.random_normal([4096]), name='bias_fc1'),
        'bd2':tf.Variable(tf.random_normal([1000]), name='bias_fc2'),
        'bd3':tf.Variable(tf.random_normal([n_output]), name='bias_fc3'),
        'd1':tf.Variable(tf.random_normal([1000]), name='bias_fc1'),
        'd2':tf.Variable(tf.random_normal([1000]), name='bias_fc2'),
        'd3':tf.Variable(tf.random_normal([n_output]), name='bias_fc3'),
        }

strides = {
        'sc1':[1,4,4,1],
        'sc2':[1,1,1,1],
        'sc3':[1,1,1,1],
        'sc4':[1,1,1,1],
        'sc5':[1,1,1,1],
        'sp1':[1,2,2,1],
        'sp2':[1,2,2,1],
        'sp3':[1,2,2,1]
        }

pooling_size = {
        'kp1':[1,3,3,1],
        'kp2':[1,3,3,1],
        'kp3':[1,3,3,1]
        }

#build model
def alexnet(inputs, weights, bias, strides, pooling_size, keep_prob):
    with tf.name_scope('conv1'):
        conv1 = conv(inputs, weights['wc1'], strides['sc1'], bias['bc1'])
        print_layer(conv1)

    with tf.name_scope('pool1'):
        pool1 = max_pooling(conv1, pooling_size['kp1'], strides['sp1'])
        print_layer(pool1)

    with tf.name_scope('conv2'):
        conv2 = conv(pool1, weights['wc2'], strides['sc2'], bias['bc2'])
        print_layer(conv2)

    with tf.name_scope('pool2'):
        pool2 = max_pooling(conv2, pooling_size['kp2'], strides['sp2'])
        print_layer(pool2)

    with tf.name_scope('conv3'):
        conv3 = conv(pool2, weights['wc3'], strides['sc3'], bias['bc3'])
        print_layer(conv3)

    with tf.name_scope('conv4'):
        conv4 = conv(conv3, weights['wc4'], strides['sc4'], bias['bc4'])
        print_layer(conv4)

    with tf.name_scope('conv5'):
        conv5 = conv(conv4, weights['wc5'], strides['sc5'], bias['bc5'])
        print_layer(conv5)

    with tf.name_scope('pool3'):
        pool3 = max_pooling(conv5, pooling_size['kp3'], strides['sp3'])
        print_layer(pool3)

    flatten = tf.reshape(pool3, [-1,6*6*256])

    with tf.name_scope('fc1'):
        fc1 = fc(flatten, weights['wd1'], bias['bd1'])
        fc1_drop = tf.nn.dropout(fc1, keep_prob)
        print_layer(fc1_drop)

    with tf.name_scope('fc2'):
        fc2 = fc(fc1_drop, weights['wd2'], bias['bd2'])
        fc2_drop = tf.nn.dropout(fc2, keep_prob)
        print_layer(fc2_drop)

    with tf.name_scope('fc3'):
        outputs = tf.matmul(fc2_drop, weights['wd3']) + bias['bd3']
        print_layer(outputs)

    return outputs

# No name= is given to these placeholders, so TensorFlow auto-names them
# 'Placeholder', 'Placeholder_1' and 'Placeholder_2' (relied upon in the .meta approach below)
x = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
y = tf.placeholder(tf.float32, [None, n_output])
keep_prob = tf.placeholder(tf.float32)

pred = alexnet(x, weights, bias, strides, pooling_size, keep_prob)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = pred))
tf.summary.scalar('loss', loss)
train_step = tf.train.AdamOptimizer(learning_rate = lr).minimize(loss)
correct = tf.equal(tf.argmax(y,1), tf.argmax(pred,1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.summary.scalar('accuracy', accuracy)

init = tf.global_variables_initializer()
merge = tf.summary.merge_all()
saver = tf.train.Saver()

Note that because only one graph is built here, all ops and variables are automatically added to this default graph, so nothing needs to be configured explicitly. Once the graph is built, open a new session and load the variable values into it.

train_image, train_label, test_image, test_label = load_mnist(image_size)

with tf.Session() as sess:
    sess.run(init)  # initialize variables; restore() below overwrites them with the saved values
    test_total_batch = int(len(test_label)/batch_size)
    ckpt = tf.train.latest_checkpoint(model_path)  # locate the latest checkpoint (the variable values)
    saver.restore(sess, ckpt)  # load them into the current session
    print('finish loading model!')

    test_writer = tf.summary.FileWriter(log_dir + '/restore')
    test_accuracy_list = []
    test_loss_list = []
    # test
    for j in tqdm(range(test_total_batch)):
        x_test_batch, y_test_batch = get_batch(test_image, test_label, batch_size, j, test_total_batch)
        # keep_prob is fed dropout_rate (0.75) here, matching the original; feeding 1.0 would disable dropout at test time
        summary, test_accuracy, test_loss = sess.run([merge, accuracy, loss], feed_dict = {x:x_test_batch, y:y_test_batch, keep_prob:dropout_rate})
        test_accuracy_list.append(test_accuracy)
        test_loss_list.append(test_loss)
    test_writer.add_summary(summary, j)  # executes after the loop, so only the last batch's summary is written
    print('test_acc:'+ str(np.mean(test_accuracy_list)))
    print('test_loss:'+ str(np.mean(test_loss_list)))

Test results:
(screenshot: printed test accuracy and loss)

Building the graph from the .meta file

The graph structure was already saved along with the model, so there is no need to rebuild it; importing the saved .meta file is enough.

import tensorflow as tf
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical
import os
import cv2
from tqdm import tqdm

os.environ['CUDA_VISIBLE_DEVICES']='0'

log_dir = '/opt/Data/lixiang/alexnet/log'
model_path = '/opt/Data/lixiang/alexnet/model'

batch_size = 32
image_size = 224
dropout_rate = 0.75  # keep probability fed to the restored dropout placeholder below

def load_mnist(image_size):
    (x_train,y_train),(x_test,y_test) = mnist.load_data()
    train_image = [cv2.cvtColor(cv2.resize(img,(image_size,image_size)),cv2.COLOR_GRAY2BGR) for img in x_train]
    test_image = [cv2.cvtColor(cv2.resize(img,(image_size,image_size)),cv2.COLOR_GRAY2BGR) for img in x_test]    
    train_image = np.asarray(train_image)
    test_image = np.asarray(test_image)
    train_label = to_categorical(y_train)
    test_label = to_categorical(y_test)
    print('finish loading data!')
    return train_image, train_label, test_image, test_label

def get_batch(image, label, batch_size, now_batch, total_batch):
    if now_batch < total_batch:
        image_batch = image[now_batch*batch_size:(now_batch+1)*batch_size]
        label_batch = label[now_batch*batch_size:(now_batch+1)*batch_size]
    else:
        image_batch = image[now_batch*batch_size:]
        label_batch = label[now_batch*batch_size:]
    return image_batch, label_batch

saver = tf.train.import_meta_graph(model_path + '/alexnet201809101818.meta')  # load the graph structure
graph = tf.get_default_graph()  # get the current graph so saved tensors can be looked up
tensor_name_list = [tensor.name for tensor in graph.as_graph_def().node]  # names of all nodes in the graph

x = graph.get_tensor_by_name('Placeholder:0')  # input placeholder (no name was set when saving, so TF auto-named it 'Placeholder')
y = graph.get_tensor_by_name('Placeholder_1:0')  # label placeholder
keep_prob = graph.get_tensor_by_name('Placeholder_2:0')  # dropout keep probability

pred = graph.get_tensor_by_name('fc3/add:0')  # network output (logits)
# define the evaluation metrics
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = pred))
correct = tf.equal(tf.argmax(y,1), tf.argmax(pred,1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

A few points in this code deserve attention:
1. After the graph is loaded, intermediate tensors are retrieved by name, so tensor_name_list = [tensor.name for tensor in graph.as_graph_def().node] is used to obtain the full list of node names.
2. When testing with a restored model, the input and output placeholders must not be redefined, because newly created placeholders are not connected to the loaded graph and cannot pass data into it; instead, retrieve the loaded graph's placeholders with graph.get_tensor_by_name(). It is therefore advisable to give placeholders explicit names when saving a model, which makes them easy to look up on restore (see the sketch below); note that the ':0' in 'Placeholder:0' must not be omitted.
3. The network's output tensor is likewise obtained with graph.get_tensor_by_name(); if its name is unknown, look it up in tensor_name_list, again appending ':0'.
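As a hedged sketch of the naming advice in point 2 (the names 'x', 'y' and 'keep_prob' are illustrative choices, not names used by the script above):

# At save time: give each placeholder an explicit name.
x = tf.placeholder(tf.float32, [None, image_size, image_size, 3], name='x')
y = tf.placeholder(tf.float32, [None, n_output], name='y')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

# At restore time: fetch them by those names. ':0' selects the first output
# of the op named 'x', which is the tensor that feed_dict needs.
x = graph.get_tensor_by_name('x:0')
y = graph.get_tensor_by_name('y:0')
keep_prob = graph.get_tensor_by_name('keep_prob:0')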
(screenshots: excerpts of the printed tensor_name_list)
Intermediate-layer tensor names can be read from tensor_name_list.
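When the exact output tensor name is not known in advance, the list can simply be searched; a minimal sketch, filtering on the 'fc3' name scope defined earlier:

# Print only the node names under the fc3 scope to locate the logits tensor.
for name in tensor_name_list:
    if 'fc3' in name:
        print(name)  # e.g. fc3/MatMul and fc3/add -> fetch 'fc3/add:0'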

Once the model is restored, testing can proceed:

train_image, train_label, test_image, test_label = load_mnist(image_size)

with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint(model_path))  # load the variable values
    print('finish loading model!')   
    # test
    test_total_batch = int(len(test_label)/batch_size)
    test_accuracy_list = []
    test_loss_list = []
    for j in tqdm(range(test_total_batch)):
        x_test_batch, y_test_batch = get_batch(test_image, test_label, batch_size, j, test_total_batch)
        test_accuracy,test_loss = sess.run([accuracy,loss], feed_dict = {x:x_test_batch, y:y_test_batch, keep_prob:dropout_rate})
        test_accuracy_list.append(test_accuracy)
        test_loss_list.append(test_loss)
    print('test_acc:'+ str(np.mean(test_accuracy_list)))
    print('test_loss:'+ str(np.mean(test_loss_list)))

Test results:
(screenshot: printed test accuracy and loss)
The results match those of the first method.
