TensorFlow is one of the most widely used deep-learning frameworks, but it is not especially friendly to embedded deployment, because there is no tool that exports a trained model directly to such targets. This post takes a model trained in TensorFlow and converts it so that it can run as C code, which makes embedded deployment much easier. The C-side deployment is based on ARM's CMSIS-NN software; see https://github.com/ARM-software/CMSIS_5/tree/develop/CMSIS/NN for details. MNIST is used as the running example: the MNIST model is trained in TensorFlow, and the trained model is then deployed in C. The TensorFlow implementation consists of two files, listed below: first the data-loading module input_data.py, then the training and weight-export script.
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 11:40:21 2019
@author: Administrator
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tensorflow.python.platform
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
    """Download the data from Yann's website, unless it's already here."""
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not os.path.exists(filepath):
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    return filepath


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' %
                (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


def extract_labels(filename, one_hot=False):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' %
                (magic, filename))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return dense_to_one_hot(labels)
        return labels
class DataSet(object):
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=tf.float32):
        """Construct a DataSet.
        one_hot arg is used only if fake_data is true. `dtype` can be either
        `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
        `[0, 1]`.
        """
        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == tf.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            if self.one_hot:
                fake_label = [1] + [0] * 9
            else:
                fake_label = 0
            return [fake_image for _ in xrange(batch_size)], [
                fake_label for _ in xrange(batch_size)]
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
    class DataSets(object):
        pass
    data_sets = DataSets()
    if fake_data:
        def fake():
            return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
        data_sets.train = fake()
        data_sets.validation = fake()
        data_sets.test = fake()
        return data_sets
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
    TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
    TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
    TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
    VALIDATION_SIZE = 5000
    local_file = maybe_download(TRAIN_IMAGES, train_dir)
    train_images = extract_images(local_file)
    local_file = maybe_download(TRAIN_LABELS, train_dir)
    train_labels = extract_labels(local_file, one_hot=one_hot)
    local_file = maybe_download(TEST_IMAGES, train_dir)
    test_images = extract_images(local_file)
    local_file = maybe_download(TEST_LABELS, train_dir)
    test_labels = extract_labels(local_file, one_hot=one_hot)
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
    data_sets.validation = DataSet(validation_images, validation_labels, dtype=dtype)
    data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
    return data_sets
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 10:39:34 2019
@author: Administrator
"""
import input_data
import tensorflow as tf
import numpy as np
mnist = input_data.read_data_sets('MNIST_data',one_hot = True)
def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)


def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def mnist_model(x, is_train=True):
    W_conv1 = weight_variable([5, 5, 1, 32], 'W_conv1')
    b_conv1 = bias_variable([32], 'b_conv1')
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    W_conv2 = weight_variable([5, 5, 32, 64], 'W_conv2')
    b_conv2 = bias_variable([64], 'b_conv2')
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    W_fc1 = weight_variable([7 * 7 * 64, 128], 'W_fc1')
    b_fc1 = bias_variable([128], 'b_fc1')
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1_pre = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
    h_fc1 = tf.nn.relu(h_fc1_pre)
    if is_train:
        keep_prob = tf.placeholder('float')
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    else:
        h_fc1_drop = h_fc1
    W_fc2 = weight_variable([128, 10], 'W_fc2')
    b_fc2 = bias_variable([10], 'b_fc2')
    h_fc2_pre = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    y_conv = tf.nn.softmax(h_fc2_pre)
    if is_train:
        return y_conv, keep_prob
    else:
        return y_conv, x_image, h_conv1, h_pool1, h_conv2, h_pool2, h_pool2_flat, h_fc1_pre, h_fc1, h_fc2_pre
def mnist_train():
    x_ = tf.placeholder('float', shape=[None, 784])
    y_ = tf.placeholder('float', shape=[None, 10])
    sess = tf.InteractiveSession()
    y, keep_prob = mnist_model(x_)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    sess.run(tf.initialize_all_variables())
    saver = tf.train.Saver()
    for i in range(5000):
        batch = mnist.train.next_batch(50)
        if i % 500 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x_: batch[0], y_: batch[1], keep_prob: 1.0})
            print('step %d, training accuracy %g' % (i, train_accuracy))
            saver.save(sess, './model/model_' + str(i) + '_' + str(train_accuracy) + '_.ckpt')
        train_step.run(feed_dict={x_: batch[0], y_: batch[1], keep_prob: 0.5})
    saver.save(sess, './model/model_5000.ckpt')
    print('test_accuracy %g' % accuracy.eval(feed_dict={
        x_: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
def load_model():
    tf.reset_default_graph()
    x_ = tf.placeholder('float', shape=[None, 784])
    y_ = tf.placeholder('float', shape=[None, 10])
    sess = tf.InteractiveSession()
    yy, inp, conv1, p1, conv2, p2, p2_flat, h_fc1_pre, h_fc1, h_fc2_pre = mnist_model(x_, False)
    saver = tf.train.Saver()
    ckpt = './model128/model_4500_0.98_.ckpt'
    saver.restore(sess, ckpt)
    # Run the network on a single test image and fetch every intermediate tensor.
    xx0 = mnist.test.images[0, :]
    xx0 = np.reshape(xx0, [-1, 784])
    yy0 = mnist.test.labels[0, :]
    yy0 = np.reshape(yy0, [-1, 10])
    print('x.shape = ', xx0.shape)
    print('y.shape = ', yy0.shape)
    yy1, inp1, rconv1, rp1, rconv2, rp2, rp2_flat, rh_fc1_pre, rh_fc1, rh_fc2_pre = sess.run(
        [yy, inp, conv1, p1, conv2, p2, p2_flat, h_fc1_pre, h_fc1, h_fc2_pre],
        feed_dict={x_: xx0, y_: yy0})
    print('yy1 = ', yy1)
    print('inp1 = ', inp1.shape)
    print('rp2_flat = ', rp2_flat.shape)
    # Dump the input and every intermediate activation as a C macro
    # "#define <name> { ... }" in <name>.h, so the C implementation can be
    # checked layer by layer against TensorFlow.
    def dump_c_header(arr, name):
        with open(name + '.h', 'w') as f:
            f.write('#define ' + name + ' {')
            arr.tofile(f, sep=',', format='%f')
            f.write('}\n')

    dump_c_header(xx0, 'input')
    dump_c_header(rconv1, 'rconv1')
    dump_c_header(rp1, 'rp1')
    dump_c_header(rconv2, 'rconv2')
    dump_c_header(rp2, 'rp2')
    dump_c_header(rp2_flat, 'rp2_flat')
    dump_c_header(rh_fc1_pre, 'rh_fc1_pre')
    dump_c_header(rh_fc1, 'rh_fc1')
    dump_c_header(rh_fc2_pre, 'rh_fc2_pre')
    '''
    reader = pywrap_tensorflow.NewCheckpointReader(ckpt)
    param_dict = reader.get_variable_to_shape_map()
    '''
    # Export every trainable variable into params.cpp as a C array.
    f = open('params.cpp', 'w')
    f.close()
    for v in tf.trainable_variables():
        print('var_name = ', v.name)
        var_name = v.name.replace(':', '_')
        var_values = sess.run(v)
        print('var.shape = ', var_values.shape)
        '''
        if (len(var_values.shape) > 2):
            save_txt.store_4d_array(var_values, var_name)
        else:
            save_txt.store_1d_2d_array(var_values, var_name)
        '''
        # TensorFlow stores convolution kernels as [H, W, in, out]; reorder
        # them so the output channel comes first before writing them out.
        if len(var_values.shape) > 2:
            transposed_wts = np.transpose(var_values, (3, 0, 1, 2))
        else:
            transposed_wts = np.transpose(var_values)
        with open('params.cpp', 'a') as f:
            f.write('f32 ' + var_name + '[] = {')
            transposed_wts.tofile(f, sep=',', format='%f')
            f.write('};\n')
if __name__ == '__main__':
    # mnist_train()
    load_model()
The above is the TensorFlow code for MNIST; corrections and suggestions are welcome. Thanks!
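On the C side, the headers produced by load_model() can be included directly for layer-by-layer verification. Below is a minimal, illustrative sketch (not taken from the post above or from the CMSIS-NN sources) that rechecks only the last fully-connected layer in plain C. It assumes the generated params.cpp exposes the arrays W_fc2_0 and b_fc2_0 (TensorFlow variable names with ':' replaced by '_'), that f32 is simply an alias for float, and that the real deployment would replace the naive loop with the corresponding CMSIS-NN kernel.

/* verify_fc2.c -- illustrative check of the last fully-connected layer.
 * Assumes params.cpp, rh_fc1.h and rh_fc2_pre.h were generated by load_model(). */
#include <stdio.h>
#include <math.h>

typedef float f32;            /* params.cpp declares its arrays with this alias        */
#include "params.cpp"         /* W_fc2_0: 10 x 128 (output-channel first), b_fc2_0: 10 */
#include "rh_fc1.h"           /* #define rh_fc1 { ... }     : 128 fc1 activations      */
#include "rh_fc2_pre.h"       /* #define rh_fc2_pre { ... } : 10 reference logits      */

static const float h_fc1[128]  = rh_fc1;
static const float fc2_ref[10] = rh_fc2_pre;

int main(void)
{
    float max_err = 0.0f;
    for (int j = 0; j < 10; ++j) {
        /* W_fc2 was transposed in load_model(), so the weights for output j are contiguous. */
        float acc = b_fc2_0[j];
        for (int i = 0; i < 128; ++i) {
            acc += h_fc1[i] * W_fc2_0[j * 128 + i];
        }
        float err = fabsf(acc - fc2_ref[j]);
        if (err > max_err) {
            max_err = err;
        }
        printf("logit[%d] = %f (tensorflow: %f)\n", j, acc, fc2_ref[j]);
    }
    printf("max abs error = %g\n", max_err);
    return 0;
}

If the printed error stays near zero, the exported weights and the C-side indexing agree with TensorFlow; the same comparison can then be repeated for the convolution and pooling layers against rconv1.h, rp1.h, and so on before moving to the CMSIS-NN kernels.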