Understanding TensorFlow Model Saving and Loading in Depth (Using Inception V3 as an Example)

1. Prerequisites

(1) tensor: a symbolic handle that points to the output of an operation.

(2) operation: a symbolic computation step, i.e. a node in the graph.

(3) graph: the dataflow graph describing the model structure; tensors and operations are both objects that live in a graph (see the short sketch after the image-reading example below).

(4) tf.gfile.FastGFile(path, decodestyle)

         Purpose: reads a file (here, an image) from disk.

         Parameters: (1) path: path to the image file; (2) decodestyle: the mode used to open the file ('r' for text mode, 'rb' for binary mode; image bytes should be read with 'rb').

import matplotlib.pyplot as plt 
import tensorflow as tf 

# tf.gfile.FastGFile(): read the raw image bytes
image_jpg = tf.gfile.FastGFile('dog.jpg','rb').read()  
image_png = tf.gfile.FastGFile('lizard.png','rb').read()  

with tf.Session() as sess:  

    image_jpg = tf.image.decode_jpeg(image_jpg)  # decode the JPEG bytes
    print(sess.run(image_jpg))  # print the decoded image (a 3-D array of shape [height, width, 3])
    image_jpg = tf.image.convert_image_dtype(image_jpg, dtype=tf.uint8)  # convert the image dtype

    image_png = tf.image.decode_png(image_png)  # decode the PNG bytes
    print(sess.run(image_png))
    image_png = tf.image.convert_image_dtype(image_png, dtype=tf.uint8)

    plt.figure(1)
    plt.imshow(image_jpg.eval())
    plt.figure(2)
    plt.imshow(image_png.eval())
    plt.show()
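To make the terms in (1)-(3) concrete, here is a minimal sketch (the names a, b and c are only illustrative and not part of any model in this article):

import tensorflow as tf

g = tf.Graph()                        # a graph holds operations and tensors
with g.as_default():
    a = tf.constant(2.0, name='a')    # the node 'a' is an operation; a is the tensor 'a:0'
    b = tf.constant(3.0, name='b')
    c = tf.add(a, b, name='c')        # the node 'c' produces the tensor 'c:0'

print([op.name for op in g.get_operations()])  # ['a', 'b', 'c']
print(g.get_tensor_by_name('c:0'))             # a symbolic handle, no value yet

with tf.Session(graph=g) as sess:
    print(sess.run(c))                # only now is the value 5.0 computed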

2. Image Classification with Inception V3

# coding: utf-8
import tensorflow as tf
import os
import numpy as np
import re
from PIL import Image
import matplotlib.pyplot as plt

class NodeLookup(object):
    def __init__(self):
        label_lookup_path = 'inception_model/imagenet_2012_challenge_label_map_proto.pbtxt'
        uid_lookup_path = 'inception_model/imagenet_synset_to_human_label_map.txt'
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        # Load the file that maps the class string n******** to a human-readable class name
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        # Read the file line by line
        for line in proto_as_ascii_lines:
            # Strip the trailing newline
            line=line.strip('\n')
            # Split on '\t'
            parsed_items = line.split('\t')
            # Class ID string (n********)
            uid = parsed_items[0]
            # Human-readable class name
            human_string = parsed_items[1]
            # Map the class ID string n******** to its class name
            uid_to_human[uid] = human_string

        # Load the file that maps class IDs 1-1000 to the class strings n********
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        node_id_to_uid = {}
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                # Class ID (1-1000)
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                # Class string n********
                target_class_string = line.split(': ')[1]
                # Map the class ID (1-1000) to the class string n******** (strip quotes and newline)
                node_id_to_uid[target_class] = target_class_string[1:-2]

        # Build the mapping from class IDs 1-1000 to class names
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            # Look up the class name
            name = uid_to_human[val]
            # Map the class ID (1-1000) to the class name
            node_id_to_name[key] = name
        return node_id_to_name

    # Given a class ID (1-1000), return the class name
    def id_to_string(self, node_id):
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]

# Load Google's pretrained model (classify_image_graph_def.pb) into the default graph
with tf.gfile.FastGFile('inception_model/classify_image_graph_def.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    # Walk the image directory
    for root,dirs,files in os.walk('images/'):
        for file in files:
            # Load the raw image bytes
            image_data = tf.gfile.FastGFile(os.path.join(root,file), 'rb').read()
            predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)  # flatten the result to a 1-D array

            # Print the image path and file name
            image_path = os.path.join(root,file)
            print(image_path)
            # Display the image
            img=Image.open(image_path)
            plt.imshow(img)
            plt.axis('off')
            plt.show()

            # Indices of the top-5 predictions, highest score first
            top_k = predictions.argsort()[-5:][::-1]
            node_lookup = NodeLookup()
            for node_id in top_k:
                # Class name
                human_string = node_lookup.id_to_string(node_id)
                # Confidence score for this class
                score = predictions[node_id]
                print('%s (score = %.5f)' % (human_string, score))
            print()

To run this, create an images/ folder and put a few pictures into it; the classification output is not shown here.

3. Inspecting the Model

# coding: utf-8
import tensorflow as tf
import os
import tarfile
import requests

# Download URL for the pretrained Inception model
inception_pretrain_model_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'

# Directory where the model archive is stored
inception_pretrain_model_dir = "inception_model"
if not os.path.exists(inception_pretrain_model_dir):
    os.makedirs(inception_pretrain_model_dir)

# Derive the file name and the local file path
filename = inception_pretrain_model_url.split('/')[-1]
filepath = os.path.join(inception_pretrain_model_dir, filename)

# Download the model if it is not already present
if not os.path.exists(filepath):
    print("download: ", filename)
    r = requests.get(inception_pretrain_model_url, stream=True)
    with open(filepath, 'wb') as f:
        # Download in chunks to avoid running out of memory
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)

print("finish: ", filename)
# Extract the archive
tarfile.open(filepath, 'r:gz').extractall(inception_pretrain_model_dir)

# Directory where the graph summary for TensorBoard is written
log_dir = 'inception_log'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# classify_image_graph_def.pb is Google's pretrained model
inception_graph_def_file = os.path.join(inception_pretrain_model_dir, 'classify_image_graph_def.pb')

with tf.Session() as sess:
    # Load Google's pretrained model into the default graph
    with tf.gfile.FastGFile(inception_graph_def_file, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    # Write the graph structure so it can be inspected in TensorBoard
    print('save sess.graph in', log_dir)
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    writer.close()

Run TensorBoard from the command line (adjust the log directory path to your own):

tensorboard --logdir=/home/hadoop/PycharmProject/tensorflow/inception_log

and open the URL it prints to view the graph in TensorBoard.

In the graph view you can see the model structure. Besides the Inception module groups, the data input node (operation) is worth noting: it is through

predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})

that we feed the image into the graph. The nodes that follow expand the dimensions and convert the image data into the input format Inception V3 expects.
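If you are unsure which node names the pretrained graph exposes, you can simply list its operations. The snippet below is only a quick check, assuming the graph has already been imported into the default graph as in the script above:

with tf.Session() as sess:
    names = [op.name for op in sess.graph.get_operations()]
    print([n for n in names if n.startswith('DecodeJpeg')])  # e.g. DecodeJpeg/contents, DecodeJpeg
    print('softmax' in names)                                # True if the output node is present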


4. Saving and Loading Models

First loading approach:

The training script below builds a simple softmax classifier on MNIST, names the tensors it will need later, adds the prediction to a 'pred_network' collection, and writes the variables to a checkpoint with tf.train.Saver:

#coding=utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size

x=tf.placeholder(tf.float32,[None,784], name='input_x')
y=tf.placeholder(tf.float32,[None,10], name='input_y')

W = tf.Variable(tf.zeros([784,10]), name='w1')
b = tf.Variable(tf.zeros([10]), name='b1')
logits = tf.matmul(x,W)+b
prediction = tf.nn.softmax(logits,name='op')

# Note: the cross-entropy op expects raw logits, not the softmax output
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

saver = tf.train.Saver()
tf.add_to_collection('pred_network', prediction)

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
    saver.save(sess, 'net/my_net.ckpt')
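Note that saver.save does not write a single my_net.ckpt file; it writes several files (checkpoint, .data, .index, .meta) into the net/ directory. A quick way to verify what was written, assuming the net/ path used above:

import os
print(sorted(os.listdir('net')))
# typically: checkpoint, my_net.ckpt.data-00000-of-00001, my_net.ckpt.index, my_net.ckpt.meta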
The restore script rebuilds the same graph, shows the accuracy with freshly initialized (zero) variables, and then restores the checkpoint:

#coding=utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size=100
n_batch=mnist.train.num_examples//batch_size

# placeholder and variable names must match the training graph that was saved
x=tf.placeholder(tf.float32,[None,784], name='input_x')
y=tf.placeholder(tf.float32,[None,10], name='input_y')

W = tf.Variable(tf.zeros([784,10]), name='w1')
b = tf.Variable(tf.zeros([10]), name='b1')
logits = tf.matmul(x,W)+b
prediction = tf.nn.softmax(logits,name='op')

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

saver=tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    print('Accuracy with all parameters initialized to zero:')
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    print('Accuracy after restoring the trained model:')
    saver.restore(sess,'net/my_net.ckpt')
    #print(mnist.test.images.shape[0])
    print(sess.run(accuracy,feed_dict={x: mnist.test.images, y: mnist.test.labels}))

Second loading approach:

The code above stays the same; only the session block changes, saving a checkpoint after each epoch and passing a global_step:

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
        saver.save(sess, './my-model/my_model', global_step=n_batch*epoch)
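Because global_step is passed, the step count is appended to the checkpoint name; with the standard MNIST split (55,000 training images, batch_size=100) the last save lands at step 550*10 = 5500, which is why the restore script refers to my_model-5500. By default tf.train.Saver keeps only the 5 most recent checkpoints. Instead of hard-coding the step, you can also look up the newest checkpoint; a small sketch:

ckpt_path = tf.train.latest_checkpoint('./my-model')
print(ckpt_path)  # e.g. ./my-model/my_model-5500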
The restore script below does not rebuild the graph by hand; it loads the graph definition from the .meta file with import_meta_graph and then fetches the prediction tensor from the collection saved earlier:

#coding=utf-8
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

with tf.Session() as sess:
  new_saver = tf.train.import_meta_graph('my-model/my_model-5500.meta')
  new_saver.restore(sess, 'my-model/my_model-5500')
  # tf.get_collection() returns a list; we only need its first element here
  predict = tf.get_collection('pred_network')[0]

  graph = tf.get_default_graph()

  # The prediction tensor depends on placeholders, so sess.run() must feed the actual samples into them;
  # the placeholders are looked up through the graph's get_operation_by_name method.
  input_x = graph.get_operation_by_name('input_x').outputs[0]
  #keep_prob = graph.get_operation_by_name('keep_prob').outputs[0]  # only needed if the model has a dropout placeholder

  # Run the prediction on a single test image
  X = mnist.test.images[0]
  X = X.reshape((1,784))
  print(sess.run(predict, feed_dict={input_x: X}))
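This works because the training script called tf.add_to_collection('pred_network', prediction) before saving: import_meta_graph rebuilds the graph from the .meta file, so the collection entries and tensor names come back with it. Since the softmax output was also given the name 'op', an equivalent alternative (assuming that name, as in the training code above) is to fetch the tensor by name instead of through the collection:

predict = graph.get_tensor_by_name('op:0')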
