[Advanced TF] Convolutional Neural Networks

Example 37: Using the convolution function

# -*- coding: utf-8 -*-
import tensorflow as tf  
  
# [batch, in_height, in_width, in_channels]: [number of images in a training batch, image height, image width, number of image channels]  
input = tf.Variable(tf.constant(1.0,shape = [1, 5, 5, 1])) 
input2 = tf.Variable(tf.constant(1.0,shape = [1, 5, 5, 2]))
input3 = tf.Variable(tf.constant(1.0,shape = [1, 4, 4, 1])) 

# [filter_height, filter_width, in_channels, out_channels]: [kernel height, kernel width, number of input channels, number of kernels]   
filter1 =  tf.Variable(tf.constant([-1.0,0,0,-1],shape = [2, 2, 1, 1]))
filter2 =  tf.Variable(tf.constant([-1.0,0,0,-1,-1.0,0,0,-1],shape = [2, 2, 1, 2])) 
filter3 =  tf.Variable(tf.constant([-1.0,0,0,-1,-1.0,0,0,-1,-1.0,0,0,-1],shape = [2, 2, 1, 3])) 
filter4 =  tf.Variable(tf.constant([-1.0,0,0,-1,
                                   -1.0,0,0,-1,
                                   -1.0,0,0,-1,
                                   -1.0,0,0,-1],shape = [2, 2, 2, 2])) 
filter5 =  tf.Variable(tf.constant([-1.0,0,0,-1,-1.0,0,0,-1],shape = [2, 2, 2, 1])) 



# padding='VALID' means the edges are not padded; padding='SAME' pads so that the kernel can reach the image edge  
op1 = tf.nn.conv2d(input, filter1, strides=[1, 2, 2, 1], padding='SAME') # 1 input channel, produces 1 feature map
op2 = tf.nn.conv2d(input, filter2, strides=[1, 2, 2, 1], padding='SAME') # 1 input channel, produces 2 feature maps
op3 = tf.nn.conv2d(input, filter3, strides=[1, 2, 2, 1], padding='SAME') # 1 input channel, produces 3 feature maps

op4 = tf.nn.conv2d(input2, filter4, strides=[1, 2, 2, 1], padding='SAME') # 2 input channels, produce 2 feature maps
op5 = tf.nn.conv2d(input2, filter5, strides=[1, 2, 2, 1], padding='SAME') # 2 input channels, produce 1 feature map

vop1 = tf.nn.conv2d(input, filter1, strides=[1, 2, 2, 1], padding='VALID') # 5*5 input: the result depends on the padding mode
op6 = tf.nn.conv2d(input3, filter1, strides=[1, 2, 2, 1], padding='SAME') 
vop6 = tf.nn.conv2d(input3, filter1, strides=[1, 2, 2, 1], padding='VALID')  # 4*4 input: the result is independent of the padding mode
  


init = tf.global_variables_initializer()  
with tf.Session() as sess:  
    sess.run(init)  
    
    print("op1:\n",sess.run([op1,filter1]))#1-1  後面補0
    print("------------------")
    
    print("op2:\n",sess.run([op2,filter2])) #1-2多卷積核 按列取
    print("op3:\n",sess.run([op3,filter3])) #1-3
    print("------------------")   
    
    print("op4:\n",sess.run([op4,filter4]))#2-2    通道疊加
    print("op5:\n",sess.run([op5,filter5]))#2-1        
    print("------------------")
  
    print("op1:\n",sess.run([op1,filter1]))#1-1
    print("vop1:\n",sess.run([vop1,filter1]))
    print("op6:\n",sess.run([op6,filter1]))
    print("vop6:\n",sess.run([vop6,filter1]))    

Example 38: Using convolution to extract an image's contours

Sobel operator: turns a color image into an image that carries edge information.
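
For reference, the filter constant below encodes, for each of the three input channels, the classic horizontal Sobel kernel

    -1  0  1
    -2  0  2
    -1  0  1

and sums the three channel responses into a single output feature map.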

# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt # plt is used to display images
import matplotlib.image as mpimg # mpimg is used to read images
import numpy as np
import tensorflow as tf  


myimg = mpimg.imread('img.jpg') # read an image located in the same directory as the code
plt.imshow(myimg) # show the image
plt.axis('off') # hide the axes
plt.show()
print(myimg.shape)


full=np.reshape(myimg,[1,140, 121, 3])  
inputfull = tf.Variable(tf.constant(1.0,shape = [1, 140, 121, 3]))

filter =  tf.Variable(tf.constant([[-1.0,-1.0,-1.0],  [0,0,0],  [1.0,1.0,1.0],
                                    [-2.0,-2.0,-2.0], [0,0,0],  [2.0,2.0,2.0],
                                    [-1.0,-1.0,-1.0], [0,0,0],  [1.0,1.0,1.0]],shape = [3, 3, 3, 1]))                                    

op = tf.nn.conv2d(inputfull, filter, strides=[1, 1, 1, 1], padding='SAME') # 3 input channels, produces 1 feature map
o=tf.cast(  ((op-tf.reduce_min(op))/(tf.reduce_max(op)-tf.reduce_min(op)) ) *255 ,tf.uint8)



with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer()  )  

    t,f=sess.run([o,filter],feed_dict={ inputfull:full})
    #print(f)
    t=np.reshape(t,[140, 121]) 
 
    plt.imshow(t,cmap='Greys_r') # show the image
    plt.axis('off') # hide the axes
    plt.show()

Example 39: Using the pooling functions

  • tf.nn.max_pool(value, ksize, strides, padding, name=None)
  • tf.nn.avg_pool(value, ksize, strides, padding, name=None)
# -*- coding: utf-8 -*-
import tensorflow as tf  
  
img=tf.constant([  
        [[0.0,4.0],[0.0,4.0],[0.0,4.0],[0.0,4.0]],  
        [[1.0,5.0],[1.0,5.0],[1.0,5.0],[1.0,5.0]],  
        [[2.0,6.0],[2.0,6.0],[2.0,6.0],[2.0,6.0]],  
        [[3.0,7.0],[3.0,7.0], [3.0,7.0],[3.0,7.0]]
    ])  
  
img=tf.reshape(img,[1,4,4,2])  
  
pooling=tf.nn.max_pool(img,[1,2,2,1],[1,2,2,1],padding='VALID')  
pooling1=tf.nn.max_pool(img,[1,2,2,1],[1,1,1,1],padding='VALID')
pooling2=tf.nn.avg_pool(img,[1,4,4,1],[1,1,1,1],padding='SAME')  
pooling3=tf.nn.avg_pool(img,[1,4,4,1],[1,4,4,1],padding='SAME') 
nt_hpool2_flat = tf.reshape(tf.transpose(img), [-1, 16]) 
pooling4=tf.reduce_mean(nt_hpool2_flat,1) # axis=1: mean over each row; axis=0: mean over each column


with tf.Session() as sess:  
    print("image:")  
    image=sess.run(img)  
    print (image)  
    result=sess.run(pooling)  
    print ("result:\n",result)  
    result=sess.run(pooling1)  
    print ("result1:\n",result)     
    result=sess.run(pooling2)  
    print ("result2:\n",result)
    result=sess.run(pooling3)  
    print ("result3:\n",result) 
    flat,result=sess.run([nt_hpool2_flat,pooling4])  
    print ("result4:\n",result) 
    print("flat:\n",flat)     

Example 40: Importing and displaying the CIFAR dataset

# -*- coding: utf-8 -*-

#place this file in the cifar directory
import cifar10

cifar10.maybe_download_and_extract(data_dir="H:/tensorflow_projects/chap8/")
# -*- coding: utf-8 -*-
#place this file in the cifar directory
import cifar10_input
import tensorflow as tf
import pylab 
import numpy as np

#fetch the data
batch_size = 12
data_dir = 'H:/tensorflow_projects/chap8/cifar-10-batches-bin'
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)


#sess = tf.InteractiveSession()
#tf.global_variables_initializer().run()
#tf.train.start_queue_runners()
#image_batch, label_batch = sess.run([images_test, labels_test])
#print("__\n",image_batch[0])
#
#print("__\n",label_batch[0])
#pylab.imshow(image_batch[0])
#pylab.show()
#

sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
tf.train.start_queue_runners(sess=sess)
image_batch, label_batch = sess.run([images_test, labels_test])
print("__\n",image_batch[0])

print("__\n",label_batch[0])
pylab.imshow(  (image_batch[0]-np.min(image_batch[0]))  / (np.max(image_batch[0])-np.min(image_batch[0]) )   )
pylab.show()

#with tf.Session() as sess:
#    tf.global_variables_initializer().run()
#    tf.train.start_queue_runners()
#    image_batch, label_batch = sess.run([images_test, labels_test])
#    print("__\n",image_batch[0])
#    
#    print("__\n",label_batch[0])
#    pylab.imshow(image_batch[0])
#    pylab.show()

Example 41: Displaying raw images from the CIFAR dataset

# -*- coding: utf-8 -*-
import numpy as np  
  
filename = '../cifar-10-batches-bin/test_batch.bin'  
  
bytestream = open(filename, "rb")  
buf = bytestream.read(10000 * (1 + 32 * 32 * 3))  
bytestream.close()  
  
data = np.frombuffer(buf, dtype=np.uint8)  
data = data.reshape(10000, 1 + 32*32*3)  
labels_images = np.hsplit(data, [1])  
labels = labels_images[0].reshape(10000)  
images = labels_images[1].reshape(10000, 32, 32, 3)  
  
img = np.reshape(images[0], (3, 32, 32)) # pull out the first image  
img = img.transpose(1, 2, 0)  
  
import pylab 
print(labels[0]) 
pylab.imshow(img)
pylab.show()
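
Each record in the CIFAR-10 binary files is one label byte followed by 3072 image bytes stored as three 32x32 channel planes (all red bytes, then all green, then all blue). That is why the code splits each row after the first byte, and why the image has to be viewed as (3, 32, 32) and transposed to (32, 32, 3) before pylab can display it.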

# -*- coding: utf-8 -*-
import tensorflow as tf  

#create a queue of capacity 100  
queue = tf.FIFOQueue(100,"float")  

c = tf.Variable(0.0)  #counter  
#increment-by-1 op 
op = tf.assign_add(c,tf.constant(1.0))  
#op: enqueue the counter's current value  
enqueue_op = queue.enqueue(c)  
  
#create a QueueRunner that uses these two ops to add elements to the queue; for now we use only one thread:  
qr = tf.train.QueueRunner(queue,enqueue_ops=[op,enqueue_op]) 

with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer())  
       
    coord = tf.train.Coordinator()  
      
    # start the enqueue threads; the Coordinator is passed in as their parameter  
    enqueue_threads = qr.create_threads(sess, coord = coord,start=True)  
      
    # main thread  
    for i in range(0, 10):  
        print ("-------------------------")  
        print(sess.run(queue.dequeue()))  
      
     
    coord.request_stop()  # notify the other threads to stop  


    # join is commonly used with threads; it waits until the given threads have finished  
    #coord.join(enqueue_threads) 
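
With coord.join(enqueue_threads) left commented out, the with block may close the session while the enqueue threads are still pushing values, which typically surfaces as harmless cancellation errors; calling join after request_stop() waits for the threads to finish cleanly.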

Example 42: Demonstrating the Coordinator

# -*- coding: utf-8 -*-
#place this file in the cifar directory
import  cifar10_input
import tensorflow as tf
import pylab 

#fetch the data
batch_size = 12
data_dir = '../cifar-10-batches-bin'
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)


with tf.Session() as sess:
    tf.global_variables_initializer().run()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    
    image_batch, label_batch = sess.run([images_test, labels_test])
    print("__\n",image_batch[0])
    
    print("__\n",label_batch[0])
    pylab.imshow(image_batch[0])
    pylab.show()
    coord.request_stop()

Example 43: Adding a coordinator to the queues in the session

import cifar10_input
import tensorflow as tf
import pylab 
import numpy as np

#fetch the data
batch_size = 12
data_dir = 'H:/tensorflow_projects/chap8/cifar-10-batches-bin'
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)


with tf.Session() as sess:
    tf.global_variables_initializer().run()

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    
    image_batch, label_batch = sess.run([images_test, labels_test])
    print("__\n",image_batch[0])
    
    print("__\n",label_batch[0])
    pylab.imshow(image_batch[0])
    pylab.show()
    coord.request_stop()

Example 44: Building a convolutional network with a global average pooling layer
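
The network replaces the usual fully connected classifier with global average pooling: the 24x24x3 input becomes 12x12x64 after the first conv/pool pair, 6x6x64 after the second, the third convolution maps it to 6x6x10, and avg_pool_6x6 collapses each of the 10 feature maps to a single value, which after the reshape serves directly as the 10 class scores.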

# -*- coding: utf-8 -*-
import cifar10_input
import tensorflow as tf
import numpy as np

# 1. Load the dataset
batch_size = 128
data_dir = '../cifar-10-batches-bin'
print("begin")
images_train, labels_train = cifar10_input.inputs(
    eval_data=False, data_dir=data_dir, batch_size=batch_size)
images_test, labels_test = cifar10_input.inputs(
    eval_data=True, data_dir=data_dir, batch_size=batch_size)
print("begin data")

# 2. Define the network structure


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def avg_pool_6x6(x):
    return tf.nn.avg_pool(x, ksize=[1, 6, 6, 1],
                          strides=[1, 6, 6, 1], padding='SAME')

# tf Graph Input
# cifar data image of shape 24*24*3
x = tf.placeholder(tf.float32, [None, 24, 24, 3])
y = tf.placeholder(tf.float32, [None, 10])  # labels 0-9 => 10 classes


W_conv1 = weight_variable([5, 5, 3, 64])
b_conv1 = bias_variable([64])

x_image = tf.reshape(x, [-1, 24, 24, 3])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([5, 5, 64, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3 = avg_pool_6x6(h_conv3)  # 10
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv = tf.nn.softmax(nt_hpool3_flat)

cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))

train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# 3. Run a session to train
sess = tf.Session()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
for i in range(15000):  # originally 20000
    image_batch, label_batch = sess.run([images_train, labels_train])
    label_b = np.eye(10, dtype=float)[label_batch]  # one hot

    train_step.run(feed_dict={x: image_batch, y: label_b}, session=sess)

    if i % 200 == 0:
        train_accuracy = accuracy.eval(
            feed_dict={x: image_batch, y: label_b}, session=sess)
        print("step %d, training accuracy %g" % (i, train_accuracy))

# 4. Evaluate the result
image_batch, label_batch = sess.run([images_test, labels_test])
label_b = np.eye(10, dtype=float)[label_batch]  # one hot
print("finished! test accuracy %g" % accuracy.eval(feed_dict={
    x: image_batch, y: label_b}, session=sess))
begin
begin data
step 0, training accuracy 0.164062
step 200, training accuracy 0.320312
step 400, training accuracy 0.414062
step 600, training accuracy 0.4375
step 800, training accuracy 0.429688
step 1000, training accuracy 0.507812
step 1200, training accuracy 0.367188
step 1400, training accuracy 0.523438
step 1600, training accuracy 0.507812
step 1800, training accuracy 0.445312
step 2000, training accuracy 0.5625
step 2200, training accuracy 0.5
step 2400, training accuracy 0.507812
step 2600, training accuracy 0.460938
step 2800, training accuracy 0.585938
step 3000, training accuracy 0.617188
step 3200, training accuracy 0.554688
step 3400, training accuracy 0.546875
step 3600, training accuracy 0.554688
step 3800, training accuracy 0.523438
step 4000, training accuracy 0.554688
step 4200, training accuracy 0.59375
step 4400, training accuracy 0.546875
step 4600, training accuracy 0.601562
step 4800, training accuracy 0.625
step 5000, training accuracy 0.554688
step 5200, training accuracy 0.632812
step 5400, training accuracy 0.617188
step 5600, training accuracy 0.5625
step 5800, training accuracy 0.59375
step 6000, training accuracy 0.515625
step 6200, training accuracy 0.6875
step 6400, training accuracy 0.578125
step 6600, training accuracy 0.578125
step 6800, training accuracy 0.601562
step 7000, training accuracy 0.640625
step 7200, training accuracy 0.640625
step 7400, training accuracy 0.609375
step 7600, training accuracy 0.617188
step 7800, training accuracy 0.609375
step 8000, training accuracy 0.640625
step 8200, training accuracy 0.578125
step 8400, training accuracy 0.609375
step 8600, training accuracy 0.59375
step 8800, training accuracy 0.625
step 9000, training accuracy 0.617188
step 9200, training accuracy 0.648438
step 9400, training accuracy 0.617188
step 9600, training accuracy 0.648438
step 9800, training accuracy 0.617188
step 10000, training accuracy 0.632812
step 10200, training accuracy 0.65625
step 10400, training accuracy 0.679688
step 10600, training accuracy 0.609375
step 10800, training accuracy 0.664062
step 11000, training accuracy 0.734375
step 11200, training accuracy 0.632812
step 11400, training accuracy 0.6875
step 11600, training accuracy 0.71875
step 11800, training accuracy 0.585938
step 12000, training accuracy 0.679688
step 12200, training accuracy 0.671875
step 12400, training accuracy 0.640625
step 12600, training accuracy 0.609375
step 12800, training accuracy 0.679688
step 13000, training accuracy 0.664062
step 13200, training accuracy 0.5625
step 13400, training accuracy 0.625
step 13600, training accuracy 0.65625
step 13800, training accuracy 0.640625
step 14000, training accuracy 0.6875
step 14200, training accuracy 0.664062
step 14400, training accuracy 0.695312
step 14600, training accuracy 0.703125
step 14800, training accuracy 0.671875
finished! test accuracy 0.664062

MNIST and CIFAR convolutional classification

# -*- coding: utf-8 -*-
import tensorflow as tf
# import the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/data/", one_hot=True)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
  
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  
def avg_pool_7x7(x):
    return tf.nn.avg_pool(x, ksize=[1, 7, 7, 1],
                        strides=[1, 7, 7, 1], padding='SAME')

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # MNIST data dimension: 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # digits 0-9 => 10 classes



W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

x_image = tf.reshape(x, [-1,28,28,1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
#########################################################new
W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3=avg_pool_7x7(h_conv3)  # 7x7x10 -> 1x1x10
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv=tf.nn.softmax(nt_hpool3_flat)


cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# start the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(200):  # originally 20000
        batch = mnist.train.next_batch(50)
        if i%20 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x:batch[0], y: batch[1]})
            print( "step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y: batch[1]})
    
    print ("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y: mnist.test.labels}))
step 0, training accuracy 0.26
step 20, training accuracy 0.14
step 40, training accuracy 0.14
step 60, training accuracy 0.26
step 80, training accuracy 0.18
step 100, training accuracy 0.22
step 120, training accuracy 0.22
step 140, training accuracy 0.3
step 160, training accuracy 0.12
step 180, training accuracy 0.34
test accuracy 0.2344

Example 45: Demonstrating the transposed-convolution (deconvolution) operation

# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf 

img = tf.Variable(tf.constant(1.0,shape = [1, 4, 4, 1])) 

filter =  tf.Variable(tf.constant([1.0,0,-1,-2],shape = [2, 2, 1, 1]))

conv = tf.nn.conv2d(img, filter, strides=[1, 2, 2, 1], padding='VALID')  
cons = tf.nn.conv2d(img, filter, strides=[1, 2, 2, 1], padding='SAME')
print(conv.shape)
print(cons.shape)
 
contv= tf.nn.conv2d_transpose(conv, filter, [1,4,4,1],strides=[1, 2, 2, 1], padding='VALID')
conts = tf.nn.conv2d_transpose(cons, filter, [1,4,4,1],strides=[1, 2, 2, 1], padding='SAME')
 
with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer() )  

    print("conv:\n",sess.run([conv,filter])) 
    print("cons:\n",sess.run([cons]))    
    print("contv:\n",sess.run([contv])) 
    print("conts:\n",sess.run([conts]))

Example 46: Demonstrating the unpooling operation

# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np


def max_pool_with_argmax(net, stride):
    _, mask = tf.nn.max_pool_with_argmax(net, ksize=[1, stride, stride, 1], strides=[1, stride, stride, 1], padding='SAME')
    mask = tf.stop_gradient(mask)
    net = tf.nn.max_pool(net, ksize=[1, stride, stride, 1],strides=[1, stride, stride, 1], padding='SAME') 
    return net, mask
 

def unpool(net, mask, stride):
    ksize = [1, stride, stride, 1]
    input_shape = net.get_shape().as_list()
    # calculate the new output shape
    output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
    # calculate indices for batch, height, width and feature maps
    one_like_mask = tf.ones_like(mask)
    batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64), shape=[input_shape[0], 1, 1, 1])
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
    feature_range = tf.range(output_shape[3], dtype=tf.int64)
    f = one_like_mask * feature_range
    # transpose indices & reshape update values to one dimension
    updates_size = tf.size(net)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
    values = tf.reshape(net, [updates_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret
    
    
img=tf.constant([  
        [[0.0,4.0],[0.0,4.0],[0.0,4.0],[0.0,4.0]],  
        [[1.0,5.0],[1.0,5.0],[1.0,5.0],[1.0,5.0]],  
        [[2.0,6.0],[2.0,6.0],[2.0,6.0],[2.0,6.0]],  
        [[3.0,7.0],[3.0,7.0], [3.0,7.0],[3.0,7.0]]
    ])  
  
img=tf.reshape(img,[1,4,4,2])  
pooling2=tf.nn.max_pool(img,[1,2,2,1],[1,2,2,1],padding='SAME')  
encode, mask = max_pool_with_argmax(img, 2)
img2 = unpool(encode,mask,2)
print(img.shape)
print(encode.shape)
print(mask.shape)
print(img2.shape)
with tf.Session() as sess:  
    print("image:")  
    print (sess.run(img))     
    result=sess.run(pooling2)  
    print ("pooling2:\n",result)
    result,mask2=sess.run([encode, mask])  
    print ("encode:\n",result,mask2)
    result=sess.run(img2)  
    print ("reslut:\n",result)

Example 47: Demonstrating the basic usage of gradients

# -*- coding: utf-8 -*-
import tensorflow as tf
w1 = tf.Variable([[1.,2]])
w2 = tf.Variable([[3.,4]])

y = tf.matmul(w1, [[9.],[10]])
#grads = tf.gradients(y,[w1,w2])  # w2 is unrelated to y, so this would raise an error
grads = tf.gradients(y,[w1])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    gradval = sess.run(grads)
    print(gradval)
[array([[ 9., 10.]], dtype=float32)]
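
This matches the analytic result: y = 9*w1[0][0] + 10*w1[0][1], so dy/dw1 = [9, 10]. w2 does not appear in y at all, which is why asking tf.gradients for it (the commented-out line) raises an error.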

Example 48: Using gradients to take derivatives of multiple expressions with respect to multiple variables

# -*- coding: utf-8 -*-

import tensorflow as tf


tf.reset_default_graph()
w1 = tf.get_variable('w1', shape=[2])
w2 = tf.get_variable('w2', shape=[2])

w3 = tf.get_variable('w3', shape=[2])
w4 = tf.get_variable('w4', shape=[2])

y1 = w1 + w2+ w3
y2 = w3 + w4

a = w1+w2
a_stoped = tf.stop_gradient(a)
y3= a_stoped+w3

gradients = tf.gradients([y1, y2], [w1, w2, w3, w4], grad_ys=[tf.convert_to_tensor([1.,2.]),
                                                          tf.convert_to_tensor([3.,4.])])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(gradients))
[array([1., 2.], dtype=float32), array([1., 2.], dtype=float32), array([4., 6.], dtype=float32), array([3., 4.], dtype=float32)]
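
The grad_ys values act as incoming gradients for y1 and y2. w1 and w2 feed only y1, so each receives [1, 2]; w4 feeds only y2, so it receives [3, 4]; w3 feeds both, so its gradient is the sum [1, 2] + [3, 4] = [4, 6].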

Example 49: Demonstrating gradient stopping

# -*- coding: utf-8 -*-
import tensorflow as tf
tf.reset_default_graph()
w1 = tf.get_variable('w1', shape=[2])
w2 = tf.get_variable('w2', shape=[2])

w3 = tf.get_variable('w3', shape=[2])
w4 = tf.get_variable('w4', shape=[2])

y1 = w1 + w2+ w3
y2 = w3 + w4

a = w1+w2
a_stoped = tf.stop_gradient(a)
y3= a_stoped+w3

gradients = tf.gradients([y1, y2], [w1, w2, w3, w4], grad_ys=[tf.convert_to_tensor([1.,2.]),
                                                          tf.convert_to_tensor([3.,4.])])
                                                          
gradients2 = tf.gradients(y3, [w1, w2, w3], grad_ys=tf.convert_to_tensor([1.,2.]))                                                          
print(gradients2) 
 
gradients3 = tf.gradients(y3, [ w3], grad_ys=tf.convert_to_tensor([1.,2.])) 
                                                       
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(gradients))
    #print(sess.run(gradients2))  # raises an error
    print(sess.run(gradients3))
[None, None, <tf.Tensor 'gradients_1/grad_ys_0:0' shape=(2,) dtype=float32>]
[array([1., 2.], dtype=float32), array([1., 2.], dtype=float32), array([4., 6.], dtype=float32), array([3., 4.], dtype=float32)]
[array([1., 2.], dtype=float32)]
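
The first printout shows why gradients2 cannot be evaluated: a_stoped blocks backpropagation to w1 and w2, so their entries are None, and sess.run cannot fetch None. Restricting the request to w3 (gradients3) works, because the path from y3 to w3 does not pass through the stopped tensor.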

Example 50: Restoring the images of each CNN layer with transposed convolution

# -*- coding: utf-8 -*-
import cifar10_input
import tensorflow as tf
import numpy as np


#max pooling (keeping the argmax mask)
def max_pool_with_argmax(net, stride):
    _, mask = tf.nn.max_pool_with_argmax( net,ksize=[1, stride, stride, 1], strides=[1, stride, stride, 1],padding='SAME')
    mask = tf.stop_gradient(mask)
    net = tf.nn.max_pool(net, ksize=[1, stride, stride, 1],strides=[1, stride, stride, 1], padding='SAME') 
    return net, mask
#4*4 -> 2*2; the mask holds argmax indices such as [6,8,12,16]    
#unpooling
def unpool(net, mask, stride):
    ksize = [1, stride, stride, 1]
    input_shape = net.get_shape().as_list()

    output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])

    one_like_mask = tf.ones_like(mask)
    batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64), shape=[input_shape[0], 1, 1, 1])
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
    feature_range = tf.range(output_shape[3], dtype=tf.int64)
    f = one_like_mask * feature_range

    updates_size = tf.size(net)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
    values = tf.reshape(net, [updates_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret


batch_size = 128
data_dir = '../cifar-10-batches-bin'
print("begin")
images_train, labels_train = cifar10_input.inputs(eval_data = False,data_dir = data_dir, batch_size = batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)
print("begin data")



def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.01)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.01, shape=shape)
    return tf.Variable(initial)
  
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  
                        
def avg_pool_6x6(x):
    return tf.nn.avg_pool(x, ksize=[1, 6, 6, 1], strides=[1, 6, 6, 1], padding='SAME')

# tf Graph Input
x = tf.placeholder(tf.float32, [batch_size, 24,24,3]) # cifar data image of shape 24*24*3
y = tf.placeholder(tf.float32, [batch_size, 10]) # labels 0-9 => 10 classes


W_conv1 = weight_variable([5, 5, 3, 64])
b_conv1 = bias_variable([64])

x_image = tf.reshape(x, [-1,24,24,3])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
#h_pool1 = max_pool_2x2(h_conv1)
h_pool1, mask1 = max_pool_with_argmax(h_conv1, 2)

W_conv2 = weight_variable([5, 5, 64, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
#h_pool2 = max_pool_2x2(h_conv2)

#############################################################
h_pool2, mask = max_pool_with_argmax(h_conv2, 2)#(128, 6, 6, 64)
print(h_pool2.shape)
t_conv2 = unpool(h_pool2, mask, 2)#(128, 12, 12, 64)
t_pool1 = tf.nn.conv2d_transpose(t_conv2-b_conv2, W_conv2, h_pool1.shape,[1,1,1,1])#(128, 12, 12, 64)
print(t_conv2.shape,h_pool1.shape,t_pool1.shape)
t_conv1 = unpool(t_pool1, mask1, 2)
t_x_image = tf.nn.conv2d_transpose(t_conv1-b_conv1, W_conv1, x_image.shape,[1,1,1,1])

#reconstruction from the first convolution layer
t1_conv1 = unpool(h_pool1, mask1, 2)
t1_x_image = tf.nn.conv2d_transpose(t1_conv1-b_conv1, W_conv1, x_image.shape,[1,1,1,1])

# build the final stitched image
stitched_decodings = tf.concat((x_image, t1_x_image,t_x_image), axis=2)
decoding_summary_op = tf.summary.image('source/cifar', stitched_decodings)

#############################################################

W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3=avg_pool_6x6(h_conv3)#10
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv=tf.nn.softmax(nt_hpool3_flat)



cross_entropy = -tf.reduce_sum(y*tf.log(y_conv)) +(tf.nn.l2_loss(W_conv1)+tf.nn.l2_loss(W_conv2)+tf.nn.l2_loss(W_conv3))

train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))


sess = tf.Session()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter('./log/', sess.graph)

tf.train.start_queue_runners(sess=sess)

for i in range(15000):  # originally 20000
    image_batch, label_batch = sess.run([images_train, labels_train])
    label_b = np.eye(10,dtype=float)[label_batch] #one hot
  
    train_step.run(feed_dict={x:image_batch, y: label_b},session=sess)
  #_, decoding_summary = sess.run([train_step, decoding_summary_op],feed_dict={x:image_batch, y: label_b})
  
    if i%200 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:image_batch, y: label_b},session=sess)
        print("step %d, training accuracy %g"%(i, train_accuracy))
        print("cross_entropy",cross_entropy.eval(feed_dict={x:image_batch, y: label_b},session=sess))
    #summary_writer.add_summary(decoding_summary)


image_batch, label_batch = sess.run([images_test, labels_test])
label_b = np.eye(10,dtype=float)[label_batch]#one hot
print ("finished! test accuracy %g"%accuracy.eval(feed_dict={
     x:image_batch, y: label_b},session=sess))
decoding_summary = sess.run(decoding_summary_op,feed_dict={x:image_batch, y: label_b})
summary_writer.add_summary(decoding_summary)
step 0, training accuracy 0.109375
cross_entropy 299.33258
step 200, training accuracy 0.265625
cross_entropy 255.26352
step 400, training accuracy 0.328125
cross_entropy 220.17691
step 600, training accuracy 0.304688
cross_entropy 239.9032
step 800, training accuracy 0.4375
cross_entropy 215.55
step 1000, training accuracy 0.34375
cross_entropy 240.33012
step 1200, training accuracy 0.507812
cross_entropy 208.6071
step 1400, training accuracy 0.414062
cross_entropy 215.67905
step 1600, training accuracy 0.484375
cross_entropy 213.92612
step 1800, training accuracy 0.335938
cross_entropy 233.1239
step 2000, training accuracy 0.5
cross_entropy 208.82675
step 2200, training accuracy 0.492188
cross_entropy 191.40732
step 2400, training accuracy 0.460938
cross_entropy 193.65083
step 2600, training accuracy 0.492188
cross_entropy 186.18613
step 2800, training accuracy 0.484375
cross_entropy 189.64601
step 3000, training accuracy 0.539062
cross_entropy 183.80658
step 3200, training accuracy 0.421875
cross_entropy 204.8297
step 3400, training accuracy 0.476562
cross_entropy 193.53462
step 3600, training accuracy 0.515625
cross_entropy 204.49474
step 3800, training accuracy 0.523438
cross_entropy 194.06113
step 4000, training accuracy 0.53125
cross_entropy 194.76062
step 4200, training accuracy 0.476562
cross_entropy 191.30899
step 4400, training accuracy 0.484375
cross_entropy 198.17056
step 4600, training accuracy 0.523438
cross_entropy 176.11752
step 4800, training accuracy 0.5
cross_entropy 185.19417
step 5000, training accuracy 0.570312
cross_entropy 188.2881
step 5200, training accuracy 0.515625
cross_entropy 184.50743
step 5400, training accuracy 0.632812
cross_entropy 175.47253
step 5600, training accuracy 0.515625
cross_entropy 193.34174
step 5800, training accuracy 0.554688
cross_entropy 181.11494
step 6000, training accuracy 0.53125
cross_entropy 172.79439
step 6200, training accuracy 0.65625
cross_entropy 153.55139
step 6400, training accuracy 0.523438
cross_entropy 183.32246
step 6600, training accuracy 0.570312
cross_entropy 166.75311
step 6800, training accuracy 0.539062
cross_entropy 192.8255
step 7000, training accuracy 0.585938
cross_entropy 172.40381
step 7200, training accuracy 0.625
cross_entropy 154.44427
step 7400, training accuracy 0.523438
cross_entropy 173.51205
step 7600, training accuracy 0.617188
cross_entropy 158.45212
step 7800, training accuracy 0.515625
cross_entropy 194.20056
step 8000, training accuracy 0.59375
cross_entropy 177.66724
step 8200, training accuracy 0.476562
cross_entropy 190.3621
step 8400, training accuracy 0.59375
cross_entropy 173.1125
step 8600, training accuracy 0.53125
cross_entropy 181.5401
step 8800, training accuracy 0.570312
cross_entropy 160.98604
step 9000, training accuracy 0.59375
cross_entropy 169.50435
step 9200, training accuracy 0.65625
cross_entropy 159.84276
step 9400, training accuracy 0.53125
cross_entropy 185.61572
step 9600, training accuracy 0.609375
cross_entropy 163.57227
step 9800, training accuracy 0.539062
cross_entropy 172.54062
step 10000, training accuracy 0.570312
cross_entropy 182.86372
step 10200, training accuracy 0.5625
cross_entropy 175.23633
step 10400, training accuracy 0.625
cross_entropy 158.63727
step 10600, training accuracy 0.570312
cross_entropy 183.32611
step 10800, training accuracy 0.539062
cross_entropy 186.68808
step 11000, training accuracy 0.554688
cross_entropy 180.54099
step 11200, training accuracy 0.523438
cross_entropy 187.641
step 11400, training accuracy 0.601562
cross_entropy 157.28458
step 11600, training accuracy 0.5625
cross_entropy 177.28255
step 11800, training accuracy 0.601562
cross_entropy 180.65999
step 12000, training accuracy 0.601562
cross_entropy 155.04745
step 12200, training accuracy 0.625
cross_entropy 164.97495
step 12400, training accuracy 0.554688
cross_entropy 160.26328
step 12600, training accuracy 0.609375
cross_entropy 169.59125
step 12800, training accuracy 0.578125
cross_entropy 181.78036
step 13000, training accuracy 0.65625
cross_entropy 155.21802
step 13200, training accuracy 0.507812
cross_entropy 183.50308
step 13400, training accuracy 0.617188
cross_entropy 162.36247
step 13600, training accuracy 0.585938
cross_entropy 172.93718
step 13800, training accuracy 0.617188
cross_entropy 167.09555
step 14000, training accuracy 0.664062
cross_entropy 153.10864
step 14200, training accuracy 0.648438
cross_entropy 154.17062
step 14400, training accuracy 0.546875
cross_entropy 185.39635
step 14600, training accuracy 0.640625
cross_entropy 152.71246
step 14800, training accuracy 0.617188
cross_entropy 162.1098
finished! test accuracy 0.601562

Example 51: CIFAR with wrapper code

Refactored using tf.contrib.layers.

# -*- coding: utf-8 -*-
import cifar10_input
import tensorflow as tf
import numpy as np

batch_size = 128
data_dir = '../cifar-10-batches-bin'
print("begin")
images_train, labels_train = cifar10_input.inputs(eval_data = False,data_dir = data_dir, batch_size = batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)
print("begin data")

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 24,24,3]) # cifar data image of shape 24*24*3
y = tf.placeholder(tf.float32, [None, 10]) # labels 0-9 => 10 classes

x_image = tf.reshape(x, [-1,24,24,3])

h_conv1 =tf.contrib.layers.conv2d(x_image,64,[5,5],1,'SAME',activation_fn=tf.nn.relu)
h_pool1 = tf.contrib.layers.max_pool2d(h_conv1,[2,2],stride=2,padding='SAME')

h_conv2 =tf.contrib.layers.conv2d(h_pool1,64,[5,5],1,'SAME',activation_fn=tf.nn.relu)
h_pool2 = tf.contrib.layers.max_pool2d(h_conv2,[2,2],stride=2,padding='SAME')

nt_hpool2 = tf.contrib.layers.avg_pool2d(h_pool2,[6,6],stride=6,padding='SAME')

nt_hpool2_flat = tf.reshape(nt_hpool2, [-1, 64])

y_conv = tf.contrib.layers.fully_connected(nt_hpool2_flat,10,activation_fn=tf.nn.softmax)

cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))


sess = tf.Session()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
for i in range(15000):  # originally 20000
    image_batch, label_batch = sess.run([images_train, labels_train])
    label_b = np.eye(10,dtype=float)[label_batch] #one hot
  
    train_step.run(feed_dict={x:image_batch, y: label_b},session=sess)
  
    if i%200 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:image_batch, y: label_b},session=sess)
        print( "step %d, training accuracy %g"%(i, train_accuracy))


image_batch, label_batch = sess.run([images_test, labels_test])
label_b = np.eye(10,dtype=float)[label_batch]#one hot
print ("finished! test accuracy %g"%accuracy.eval(feed_dict={
     x:image_batch, y: label_b},session=sess))
step 0, training accuracy 0.148438
step 200, training accuracy 0.328125
step 400, training accuracy 0.421875
step 600, training accuracy 0.34375
step 800, training accuracy 0.382812
step 1000, training accuracy 0.414062
step 1200, training accuracy 0.5
step 1400, training accuracy 0.414062
step 1600, training accuracy 0.476562
step 1800, training accuracy 0.398438
step 2000, training accuracy 0.554688
step 2200, training accuracy 0.40625
step 2400, training accuracy 0.5625
step 2600, training accuracy 0.523438
step 2800, training accuracy 0.46875
step 3000, training accuracy 0.53125
step 3200, training accuracy 0.40625
step 3400, training accuracy 0.523438
step 3600, training accuracy 0.53125
step 3800, training accuracy 0.515625
step 4000, training accuracy 0.492188
step 4200, training accuracy 0.515625
step 4400, training accuracy 0.5
step 4600, training accuracy 0.617188
step 4800, training accuracy 0.546875
step 5000, training accuracy 0.539062
step 5200, training accuracy 0.578125
step 5400, training accuracy 0.5625
step 5600, training accuracy 0.492188
step 5800, training accuracy 0.554688
step 6000, training accuracy 0.632812
step 6200, training accuracy 0.523438
step 6400, training accuracy 0.523438
step 6600, training accuracy 0.523438
step 6800, training accuracy 0.492188
step 7000, training accuracy 0.570312
step 7200, training accuracy 0.523438
step 7400, training accuracy 0.601562
step 7600, training accuracy 0.601562
step 7800, training accuracy 0.570312
step 8000, training accuracy 0.625
step 8200, training accuracy 0.5
step 8400, training accuracy 0.609375
step 8600, training accuracy 0.601562
step 8800, training accuracy 0.539062
step 9000, training accuracy 0.578125
step 9200, training accuracy 0.609375
step 9400, training accuracy 0.546875
step 9600, training accuracy 0.609375
step 9800, training accuracy 0.601562
step 10000, training accuracy 0.601562
step 10200, training accuracy 0.5625
step 10400, training accuracy 0.6875
step 10600, training accuracy 0.609375
step 10800, training accuracy 0.570312
step 11000, training accuracy 0.578125
step 11200, training accuracy 0.53125
step 11400, training accuracy 0.65625
step 11600, training accuracy 0.609375
step 11800, training accuracy 0.5625
step 12000, training accuracy 0.679688
step 12200, training accuracy 0.632812
step 12400, training accuracy 0.625
step 12600, training accuracy 0.65625
step 12800, training accuracy 0.632812
step 13000, training accuracy 0.585938
step 13200, training accuracy 0.539062
step 13400, training accuracy 0.640625
step 13600, training accuracy 0.617188
step 13800, training accuracy 0.679688
step 14000, training accuracy 0.664062
step 14200, training accuracy 0.609375
step 14400, training accuracy 0.585938
step 14600, training accuracy 0.65625
step 14800, training accuracy 0.609375
finished! test accuracy 0.617188

Example 52: CIFAR convolution-kernel optimization
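
The optimization factorizes the second 5x5 convolution into a 5x1 convolution followed by a 1x5 convolution. The receptive field stays 5x5, but the layer's weight count drops from 5*5*64*64 to (5*1 + 1*5)*64*64, i.e. from 25 to 10 weights per input-output channel pair.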

# -*- coding: utf-8 -*-
import cifar10_input
import tensorflow as tf
import numpy as np


batch_size = 128
data_dir = '../cifar-10-batches-bin'
print("begin")
images_train, labels_train = cifar10_input.inputs(eval_data = False,data_dir = data_dir, batch_size = batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)
print("begin data")

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
  
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  
                        
def avg_pool_6x6(x):
    return tf.nn.avg_pool(x, ksize=[1, 6, 6, 1],
                        strides=[1, 6, 6, 1], padding='SAME')

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 24,24,3]) # cifar data image of shape 24*24*3
y = tf.placeholder(tf.float32, [None, 10]) # labels 0-9 => 10 classes


W_conv1 = weight_variable([5, 5, 3, 64])
b_conv1 = bias_variable([64])

x_image = tf.reshape(x, [-1,24,24,3])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
#########################################################new########
W_conv21 = weight_variable([5, 1, 64, 64])
b_conv21 = bias_variable([64])
h_conv21 = tf.nn.relu(conv2d(h_pool1, W_conv21) + b_conv21)

W_conv2 = weight_variable([1, 5, 64, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_conv21, W_conv2) + b_conv2)
###########################################################old#########
#W_conv2 = weight_variable([5, 5, 64, 64])
#b_conv2 = bias_variable([64])
#h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
###################################################################

h_pool2 = max_pool_2x2(h_conv2)
W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3=avg_pool_6x6(h_conv3)#10
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv=tf.nn.softmax(nt_hpool3_flat)

cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))


sess = tf.Session()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
for i in range(15000):  # originally 20000
    image_batch, label_batch = sess.run([images_train, labels_train])
    label_b = np.eye(10,dtype=float)[label_batch] #one hot
  
    train_step.run(feed_dict={x:image_batch, y: label_b},session=sess)
  
    if i%200 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:image_batch, y: label_b},session=sess)
        print( "step %d, training accuracy %g"%(i, train_accuracy))


image_batch, label_batch = sess.run([images_test, labels_test])
label_b = np.eye(10,dtype=float)[label_batch]#one hot
print ("finished! test accuracy %g"%accuracy.eval(feed_dict={
     x:image_batch, y: label_b},session=sess))
step 0, training accuracy 0.125
step 200, training accuracy 0.34375
step 400, training accuracy 0.375
step 600, training accuracy 0.367188
step 800, training accuracy 0.507812
step 1000, training accuracy 0.382812
step 1200, training accuracy 0.429688
step 1400, training accuracy 0.5
step 1600, training accuracy 0.4375
step 1800, training accuracy 0.390625
step 2000, training accuracy 0.515625
step 2200, training accuracy 0.59375
step 2400, training accuracy 0.554688
step 2600, training accuracy 0.539062
step 2800, training accuracy 0.53125
step 3000, training accuracy 0.5
step 3200, training accuracy 0.414062
step 3400, training accuracy 0.515625
step 3600, training accuracy 0.515625
step 3800, training accuracy 0.476562
step 4000, training accuracy 0.585938
step 4200, training accuracy 0.5625
step 4400, training accuracy 0.570312
step 4600, training accuracy 0.523438
step 4800, training accuracy 0.523438
step 5000, training accuracy 0.5625
step 5200, training accuracy 0.53125
step 5400, training accuracy 0.59375
step 5600, training accuracy 0.53125
step 5800, training accuracy 0.601562
step 6000, training accuracy 0.585938
step 6200, training accuracy 0.617188
step 6400, training accuracy 0.59375
step 6600, training accuracy 0.617188
step 6800, training accuracy 0.570312
step 7000, training accuracy 0.585938
step 7200, training accuracy 0.664062
step 7400, training accuracy 0.648438
step 7600, training accuracy 0.65625
step 7800, training accuracy 0.632812
step 8000, training accuracy 0.65625
step 8200, training accuracy 0.585938
step 8400, training accuracy 0.578125
step 8600, training accuracy 0.585938
step 8800, training accuracy 0.578125
step 9000, training accuracy 0.695312
step 9200, training accuracy 0.570312
step 9400, training accuracy 0.546875
step 9600, training accuracy 0.742188
step 9800, training accuracy 0.6875
step 10000, training accuracy 0.59375
step 10200, training accuracy 0.5625
step 10400, training accuracy 0.679688
step 10600, training accuracy 0.617188
step 10800, training accuracy 0.65625
step 11000, training accuracy 0.578125
step 11200, training accuracy 0.679688
step 11400, training accuracy 0.617188
step 11600, training accuracy 0.609375
step 11800, training accuracy 0.632812
step 12000, training accuracy 0.671875
step 12200, training accuracy 0.664062
step 12400, training accuracy 0.609375
step 12600, training accuracy 0.710938
step 12800, training accuracy 0.617188
step 13000, training accuracy 0.679688
step 13200, training accuracy 0.625
step 13400, training accuracy 0.632812
step 13600, training accuracy 0.554688
step 13800, training accuracy 0.65625
step 14000, training accuracy 0.679688
step 14200, training accuracy 0.710938
step 14400, training accuracy 0.554688
step 14600, training accuracy 0.75
step 14800, training accuracy 0.632812
finished! test accuracy 0.609375

Example 53: CIFAR multi-kernel convolution
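
This version replaces the single 5x5 second convolution with four parallel branches (1x1, 3x3, 5x5 and 7x7 kernels, in the spirit of the Inception module) and concatenates their outputs along the channel axis, so h_conv2 carries 4 * 64 = 256 channels and W_conv3 must accept 256 input channels.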

# -*- coding: utf-8 -*-
import cifar10_input
import tensorflow as tf
import numpy as np


batch_size = 128
data_dir = '../cifar-10-batches-bin'
print("begin")
images_train, labels_train = cifar10_input.inputs(eval_data = False,data_dir = data_dir, batch_size = batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)
print("begin data")


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
  
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  
                        
def avg_pool_6x6(x):
    return tf.nn.avg_pool(x, ksize=[1, 6, 6, 1],
                        strides=[1, 6, 6, 1], padding='SAME')

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 24,24,3]) # cifar data image of shape 24*24*3
y = tf.placeholder(tf.float32, [None, 10]) # labels 0-9 => 10 classes


W_conv1 = weight_variable([5, 5, 3, 64])
b_conv1 = bias_variable([64])

x_image = tf.reshape(x, [-1,24,24,3])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
####################################################### multiple kernels
W_conv2_5x5 = weight_variable([5, 5, 64, 64]) 
b_conv2_5x5 = bias_variable([64]) 
W_conv2_7x7 = weight_variable([7, 7, 64, 64]) 
b_conv2_7x7 = bias_variable([64]) 

W_conv2_3x3 = weight_variable([3, 3, 64, 64]) 
b_conv2_3x3 = bias_variable([64]) 

W_conv2_1x1 = weight_variable([1, 1, 64, 64]) 
b_conv2_1x1 = bias_variable([64]) 

h_conv2_1x1 = tf.nn.relu(conv2d(h_pool1, W_conv2_1x1) + b_conv2_1x1)
h_conv2_3x3 = tf.nn.relu(conv2d(h_pool1, W_conv2_3x3) + b_conv2_3x3)
h_conv2_5x5 = tf.nn.relu(conv2d(h_pool1, W_conv2_5x5) + b_conv2_5x5)
h_conv2_7x7 = tf.nn.relu(conv2d(h_pool1, W_conv2_7x7) + b_conv2_7x7)
h_conv2 = tf.concat([h_conv2_5x5,h_conv2_7x7,h_conv2_3x3,h_conv2_1x1],3)

#######################################################
#W_conv2 = weight_variable([5, 5, 64, 64])
#b_conv2 = bias_variable([64])
#
#h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
#######################################################

W_conv3 = weight_variable([5, 5, 256, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3=avg_pool_6x6(h_conv3)#10
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv=tf.nn.softmax(nt_hpool3_flat)


cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))

#try different optimizers to compare their effect
#train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
#train_step = tf.train.AdagradOptimizer(1e-5).minimize(cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))


sess = tf.Session()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
for i in range(15000):  # originally 20000
    image_batch, label_batch = sess.run([images_train, labels_train])
    label_b = np.eye(10,dtype=float)[label_batch] #one hot
  
    train_step.run(feed_dict={x:image_batch, y: label_b},session=sess)
  
    if i%200 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:image_batch, y: label_b},session=sess)
        print( "step %d, training accuracy %g"%(i, train_accuracy))


image_batch, label_batch = sess.run([images_test, labels_test])
label_b = np.eye(10,dtype=float)[label_batch]#one hot
print ("finished! test accuracy %g"%accuracy.eval(feed_dict={
     x:image_batch, y: label_b},session=sess))
step 0, training accuracy 0.09375
step 200, training accuracy 0.304688
step 400, training accuracy 0.398438
step 600, training accuracy 0.4375
step 800, training accuracy 0.507812
step 1000, training accuracy 0.46875
step 1200, training accuracy 0.421875
step 1400, training accuracy 0.585938
step 1600, training accuracy 0.585938
step 1800, training accuracy 0.484375
step 2000, training accuracy 0.546875
step 2200, training accuracy 0.539062
step 2400, training accuracy 0.625
step 2600, training accuracy 0.570312
step 2800, training accuracy 0.570312
step 3000, training accuracy 0.546875
step 3200, training accuracy 0.601562
step 3400, training accuracy 0.609375
step 3600, training accuracy 0.609375
step 3800, training accuracy 0.625
step 4000, training accuracy 0.671875
step 4200, training accuracy 0.625
step 4400, training accuracy 0.601562
step 4600, training accuracy 0.609375
step 4800, training accuracy 0.570312
step 5000, training accuracy 0.65625
step 5200, training accuracy 0.640625
step 5400, training accuracy 0.671875
step 5600, training accuracy 0.5625
step 5800, training accuracy 0.65625
step 6000, training accuracy 0.695312
step 6200, training accuracy 0.757812
step 6400, training accuracy 0.648438
step 6600, training accuracy 0.734375
step 6800, training accuracy 0.632812
step 7000, training accuracy 0.6875
step 7200, training accuracy 0.679688
step 7400, training accuracy 0.695312
step 7600, training accuracy 0.75
step 7800, training accuracy 0.65625
step 8000, training accuracy 0.71875
step 8200, training accuracy 0.617188
step 8400, training accuracy 0.632812
step 8600, training accuracy 0.679688
step 8800, training accuracy 0.679688
step 9000, training accuracy 0.625
step 9200, training accuracy 0.710938
step 9400, training accuracy 0.625
step 9600, training accuracy 0.765625
step 9800, training accuracy 0.679688
step 10000, training accuracy 0.710938
step 10200, training accuracy 0.710938
step 10400, training accuracy 0.71875
step 10600, training accuracy 0.679688
step 10800, training accuracy 0.671875
step 11000, training accuracy 0.75
step 11200, training accuracy 0.609375
step 11400, training accuracy 0.6875
step 11600, training accuracy 0.695312
step 11800, training accuracy 0.6875
step 12000, training accuracy 0.75
step 12200, training accuracy 0.773438
step 12400, training accuracy 0.71875
step 12600, training accuracy 0.671875
step 12800, training accuracy 0.679688
step 13000, training accuracy 0.664062
step 13200, training accuracy 0.710938
step 13400, training accuracy 0.773438
step 13600, training accuracy 0.734375
step 13800, training accuracy 0.6875
step 14000, training accuracy 0.75
step 14200, training accuracy 0.765625
step 14400, training accuracy 0.757812
step 14600, training accuracy 0.796875
step 14800, training accuracy 0.773438
finished! test accuracy 0.625

Example 54: Batch normalization (BN)
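
Two things are worth noticing in this run. First, in batch_norm_layer the train argument is a placeholder, so the Python test `train is not None` is decided at graph-construction time and is always true; the is_training=False branch is never built. Second, as the log below shows, with the initial learning rate of 0.04 the Adam optimizer fails to converge here and the training accuracy stays near the 10% chance level.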

# -*- coding: utf-8 -*-
import cifar10_input
import tensorflow as tf
import numpy as np
from tensorflow.contrib.layers.python.layers import batch_norm

batch_size = 128
data_dir = '../cifar-10-batches-bin'
print("begin")
images_train, labels_train = cifar10_input.inputs(eval_data = False,data_dir = data_dir, batch_size = batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)
print("begin data")



def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
  
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  
                        
def avg_pool_6x6(x):
    return tf.nn.avg_pool(x, ksize=[1, 6, 6, 1],
                        strides=[1, 6, 6, 1], padding='SAME')
                        
def batch_norm_layer(value,train = None, name = 'batch_norm'): 
    if train is not None:       
        return batch_norm(value, decay = 0.9,updates_collections=None, is_training = True)
    else:
        return batch_norm(value, decay = 0.9,updates_collections=None, is_training = False)

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 24,24,3]) # cifar data image of shape 24*24*3
y = tf.placeholder(tf.float32, [None, 10]) # labels 0-9 => 10 classes
train = tf.placeholder(tf.float32)

W_conv1 = weight_variable([5, 5, 3, 64])
b_conv1 = bias_variable([64])

x_image = tf.reshape(x, [-1,24,24,3])

h_conv1 = tf.nn.relu(batch_norm_layer((conv2d(x_image, W_conv1) + b_conv1),train))
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([5, 5, 64, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(batch_norm_layer((conv2d(h_pool1, W_conv2) + b_conv2),train))
h_pool2 = max_pool_2x2(h_conv2)


W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3=avg_pool_6x6(h_conv3)#10
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv=tf.nn.softmax(nt_hpool3_flat)

cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))

global_step = tf.Variable(0, trainable=False)
decaylearning_rate = tf.train.exponential_decay(0.04, global_step,1000, 0.9)

train_step = tf.train.AdamOptimizer(decaylearning_rate).minimize(cross_entropy,global_step=global_step)


correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))


sess = tf.Session()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
for i in range(20000):
    image_batch, label_batch = sess.run([images_train, labels_train])
    label_b = np.eye(10,dtype=float)[label_batch] #one hot
  
    train_step.run(feed_dict={x:image_batch, y: label_b,train:1},session=sess)
  
    if i%200 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:image_batch, y: label_b},session=sess)
        print( "step %d, training accuracy %g"%(i, train_accuracy))


image_batch, label_batch = sess.run([images_test, labels_test])
label_b = np.eye(10,dtype=float)[label_batch]#one hot
print ("finished! test accuracy %g"%accuracy.eval(feed_dict={
     x:image_batch, y: label_b},session=sess))
step 0, training accuracy 0.15625
step 200, training accuracy 0.078125
step 400, training accuracy 0.117188
step 600, training accuracy 0.0859375
step 800, training accuracy 0.0625
step 1000, training accuracy 0.125
step 1200, training accuracy 0.109375
step 1400, training accuracy 0.09375
step 1600, training accuracy 0.078125
step 1800, training accuracy 0.0859375
step 2000, training accuracy 0.109375
step 2200, training accuracy 0.0859375
step 2400, training accuracy 0.117188
step 2600, training accuracy 0.09375
step 2800, training accuracy 0.0625
step 3000, training accuracy 0.09375
step 3200, training accuracy 0.109375
step 3400, training accuracy 0.0625
step 3600, training accuracy 0.109375
step 3800, training accuracy 0.078125
step 4000, training accuracy 0.125
step 4200, training accuracy 0.09375
step 4400, training accuracy 0.117188
step 4600, training accuracy 0.125
step 4800, training accuracy 0.101562
step 5000, training accuracy 0.09375
step 5200, training accuracy 0.078125
step 5400, training accuracy 0.132812
step 5600, training accuracy 0.078125
step 5800, training accuracy 0.117188
step 6000, training accuracy 0.132812
step 6200, training accuracy 0.109375
step 6400, training accuracy 0.101562
step 6600, training accuracy 0.078125
step 6800, training accuracy 0.09375
step 7000, training accuracy 0.09375
step 7200, training accuracy 0.117188
step 7400, training accuracy 0.125
step 7600, training accuracy 0.109375
step 7800, training accuracy 0.101562
step 8000, training accuracy 0.109375
step 8200, training accuracy 0.109375
step 8400, training accuracy 0.078125
step 8600, training accuracy 0.078125
step 8800, training accuracy 0.09375
step 9000, training accuracy 0.0859375
step 9200, training accuracy 0.109375
step 9400, training accuracy 0.117188
step 9600, training accuracy 0.0859375
step 9800, training accuracy 0.109375
step 10000, training accuracy 0.09375
step 10200, training accuracy 0.109375
step 10400, training accuracy 0.117188
step 10600, training accuracy 0.09375
step 10800, training accuracy 0.0625
step 11000, training accuracy 0.132812
step 11200, training accuracy 0.078125
step 11400, training accuracy 0.101562
step 11600, training accuracy 0.109375
step 11800, training accuracy 0.117188
step 12000, training accuracy 0.0859375
step 12200, training accuracy 0.0703125
step 12400, training accuracy 0.117188
step 12600, training accuracy 0.078125
step 12800, training accuracy 0.078125
step 13000, training accuracy 0.148438
step 13200, training accuracy 0.101562
step 13400, training accuracy 0.140625
step 13600, training accuracy 0.0859375
step 13800, training accuracy 0.101562
step 14000, training accuracy 0.09375
step 14200, training accuracy 0.09375
step 14400, training accuracy 0.09375
step 14600, training accuracy 0.125
step 14800, training accuracy 0.109375
step 15000, training accuracy 0.09375
step 15200, training accuracy 0.0859375
step 15400, training accuracy 0.109375
step 15600, training accuracy 0.0859375
step 15800, training accuracy 0.078125
step 16000, training accuracy 0.125
step 16200, training accuracy 0.125
step 16400, training accuracy 0.101562
step 16600, training accuracy 0.101562
step 16800, training accuracy 0.132812
step 17000, training accuracy 0.117188
step 17200, training accuracy 0.0859375
step 17400, training accuracy 0.0859375
step 17600, training accuracy 0.0859375
step 17800, training accuracy 0.09375
step 18000, training accuracy 0.109375
step 18200, training accuracy 0.101562
step 18400, training accuracy 0.0703125
step 18600, training accuracy 0.109375
step 18800, training accuracy 0.09375
step 19000, training accuracy 0.09375
step 19200, training accuracy 0.03125
step 19400, training accuracy 0.0546875
step 19600, training accuracy 0.109375
step 19800, training accuracy 0.0625
finished! test accuracy 0.101562

This run never learns: training accuracy hovers around the 10% expected from random guessing over 10 classes, and the test accuracy matches. One likely culprit is the combination of the large initial learning rate (0.04) with the unstable -reduce_sum(y*log(y_conv)) loss, which can hit log(0) = -inf and poison the gradients; a stable alternative is sketched below.
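One possible fix, as a minimal sketch assuming the same graph names as above (nt_hpool3_flat for the raw logits, y for the labels): let TensorFlow fuse the softmax and the log so the loss never evaluates log(0).

# Sketch: numerically stable replacement for -tf.reduce_sum(y*tf.log(y_conv)).
# softmax_cross_entropy_with_logits takes the raw logits, not the softmax
# output, and computes log-softmax internally without ever producing -inf.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=nt_hpool3_flat))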

Multi-channel MNIST with BN

The same multi-kernel network as the plain version further down, but with batch normalization inserted after the first two convolution layers.

# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from tensorflow.contrib.layers.python.layers import batch_norm
# Import the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/data/", one_hot=True)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
  
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  
def avg_pool_7x7(x):
    return tf.nn.avg_pool(x, ksize=[1, 7, 7, 1],
                        strides=[1, 7, 7, 1], padding='SAME')
def batch_norm_layer(value, train=None, name='batch_norm'):
    # Caveat: `train` is fed the tf.float32 placeholder below, and a tensor is
    # never None, so this branch always builds BN in training mode
    # (a run-time-gated alternative is sketched just below).
    if train is not None:
        return batch_norm(value, decay=0.9, updates_collections=None, is_training=True)
    else:
        return batch_norm(value, decay=0.9, updates_collections=None, is_training=False)
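# Alternative sketch (not used below): contrib's batch_norm also accepts a
# bool tensor for is_training, so the Python branch above can be replaced by
# a single call that is gated at run time:
#
#     is_training = tf.placeholder(tf.bool, name='is_training')
#     def batch_norm_layer(value):
#         return batch_norm(value, decay=0.9, updates_collections=None,
#                           is_training=is_training)
#
# Feed is_training=False when evaluating so BN uses its moving averages.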
# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])  # MNIST data: 28*28 = 784 dimensions
y = tf.placeholder(tf.float32, [None, 10])   # digits 0-9 => 10 classes
train = tf.placeholder(tf.float32)           # training-mode flag passed to the BN layers


W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(batch_norm_layer((conv2d(x_image, W_conv1) + b_conv1),train))
h_pool1 = max_pool_2x2(h_conv1)
###################################################### multi-kernel convolution
W_conv2_5x5 = weight_variable([5, 5, 32, 32]) 
b_conv2_5x5 = bias_variable([32]) 
W_conv2_7x7 = weight_variable([7, 7, 32, 32]) 
b_conv2_7x7 = bias_variable([32]) 
h_conv2_5x5 = tf.nn.relu(batch_norm_layer((conv2d(h_pool1, W_conv2_5x5) + b_conv2_5x5),train))
h_conv2_7x7 = tf.nn.relu(batch_norm_layer((conv2d(h_pool1, W_conv2_7x7) + b_conv2_7x7),train))
h_conv2 = tf.concat([h_conv2_5x5, h_conv2_7x7], 3)  # concatenate along the channel axis: 32 + 32 = 64 maps

h_pool2 = max_pool_2x2(h_conv2)
######################################################### global-average-pooling head

W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3 = avg_pool_7x7(h_conv3)  # 7x7 global average pooling: [batch,7,7,10] -> [batch,1,1,10]
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv = tf.nn.softmax(nt_hpool3_flat)

keep_prob = tf.placeholder("float")  # fed below but unused: no dropout layer is built


cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))

# Note: the second argument of exponential_decay should be a global_step
# variable (as in the previous example); passing the constant 20000 fixes the
# rate at 0.04 * 0.9**20 ≈ 0.00486 for the whole run instead of decaying it.
decaylearning_rate = tf.train.exponential_decay(0.04, 20000, 1000, 0.9)
train_step = tf.train.AdamOptimizer(decaylearning_rate).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(20000):  # 20000 training steps
        batch = mnist.train.next_batch(50)
        if i%100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x:batch[0], y: batch[1], keep_prob: 1.0})
            print( "step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})
    
    print ("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))
step 0, training accuracy 0.04
step 100, training accuracy 0.78
step 200, training accuracy 0.98
step 300, training accuracy 0.94
step 400, training accuracy 0.98
step 500, training accuracy 0.98
step 600, training accuracy 0.98
step 700, training accuracy 1
step 800, training accuracy 0.98
step 900, training accuracy 0.94
step 1000, training accuracy 0.96
step 1100, training accuracy 0.98
step 1200, training accuracy 0.94
step 1300, training accuracy 0.94
step 1400, training accuracy 1
step 1500, training accuracy 0.98
step 1600, training accuracy 0.98
step 1700, training accuracy 0.98
step 1800, training accuracy 0.98
step 1900, training accuracy 0.98
step 2000, training accuracy 0.98
step 2100, training accuracy 1
step 2200, training accuracy 0.96
step 2300, training accuracy 0.96
step 2400, training accuracy 1
step 2500, training accuracy 0.96
step 2600, training accuracy 0.98
step 2700, training accuracy 0.98
step 2800, training accuracy 1
step 2900, training accuracy 1
step 3000, training accuracy 0.98
step 3100, training accuracy 1
step 3200, training accuracy 0.98
step 3300, training accuracy 0.98
step 3400, training accuracy 0.94
step 3500, training accuracy 1
step 3600, training accuracy 0.98
step 3700, training accuracy 0.96
step 3800, training accuracy 1
step 3900, training accuracy 1
step 4000, training accuracy 0.98
step 4100, training accuracy 0.98
step 4200, training accuracy 1
step 4300, training accuracy 1
step 4400, training accuracy 0.98
step 4500, training accuracy 0.98
step 4600, training accuracy 1
step 4700, training accuracy 1
step 4800, training accuracy 1
step 4900, training accuracy 1
step 5000, training accuracy 0.98
step 5100, training accuracy 0.98
step 5200, training accuracy 0.98
step 5300, training accuracy 1
step 5400, training accuracy 1
step 5500, training accuracy 0.96
step 5600, training accuracy 1
step 5700, training accuracy 0.98
step 5800, training accuracy 0.96
step 5900, training accuracy 1
step 6000, training accuracy 1
step 6100, training accuracy 0.98
step 6200, training accuracy 0.98
step 6300, training accuracy 1
step 6400, training accuracy 0.96
step 6500, training accuracy 1
step 6600, training accuracy 1
step 6700, training accuracy 0.98
step 6800, training accuracy 1
step 6900, training accuracy 1
step 7000, training accuracy 1
step 7100, training accuracy 1
step 7200, training accuracy 0.94
step 7300, training accuracy 1
step 7400, training accuracy 0.96
step 7500, training accuracy 0.98
step 7600, training accuracy 1
step 7700, training accuracy 1
step 7800, training accuracy 0.98
step 7900, training accuracy 0.98
step 8000, training accuracy 1
step 8100, training accuracy 0.98
step 8200, training accuracy 1
step 8300, training accuracy 1
step 8400, training accuracy 1
step 8500, training accuracy 0.98
step 8600, training accuracy 1
step 8700, training accuracy 1
step 8800, training accuracy 1
step 8900, training accuracy 1
step 9000, training accuracy 1
step 9100, training accuracy 1
step 9200, training accuracy 1
step 9300, training accuracy 0.98
step 9400, training accuracy 1
step 9500, training accuracy 1
step 9600, training accuracy 1
step 9700, training accuracy 1
step 9800, training accuracy 1
step 9900, training accuracy 0.96
step 10000, training accuracy 1
step 10100, training accuracy 1
step 10200, training accuracy 1
step 10300, training accuracy 1
step 10400, training accuracy 0.98
step 10500, training accuracy 0.96
step 10600, training accuracy 0.98
step 10700, training accuracy 0.98
step 10800, training accuracy 0.98
step 10900, training accuracy 1
step 11000, training accuracy 0.98
step 11100, training accuracy 1
step 11200, training accuracy 1
step 11300, training accuracy 1
step 11400, training accuracy 1
step 11500, training accuracy 1
step 11600, training accuracy 0.98
step 11700, training accuracy 1
step 11800, training accuracy 0.98
step 11900, training accuracy 1
step 12000, training accuracy 1
step 12100, training accuracy 1
step 12200, training accuracy 1
step 12300, training accuracy 0.98
step 12400, training accuracy 1
step 12500, training accuracy 1
step 12600, training accuracy 0.98
step 12700, training accuracy 1
step 12800, training accuracy 1
step 12900, training accuracy 1
step 13000, training accuracy 0.98
step 13100, training accuracy 1
step 13200, training accuracy 1
step 13300, training accuracy 1
step 13400, training accuracy 1
step 13500, training accuracy 1
step 13600, training accuracy 1
step 13700, training accuracy 1
step 13800, training accuracy 1
step 13900, training accuracy 1
step 14000, training accuracy 1
step 14100, training accuracy 1
step 14200, training accuracy 1
step 14300, training accuracy 1
step 14400, training accuracy 1
step 14500, training accuracy 1
step 14600, training accuracy 1
step 14700, training accuracy 1
step 14800, training accuracy 1
step 14900, training accuracy 0.98
step 15000, training accuracy 1
step 15100, training accuracy 0.98
step 15200, training accuracy 1
step 15300, training accuracy 1
step 15400, training accuracy 0.98
step 15500, training accuracy 1
step 15600, training accuracy 1
step 15700, training accuracy 1
step 15800, training accuracy 1
step 15900, training accuracy 1
step 16000, training accuracy 1
step 16100, training accuracy 1
step 16200, training accuracy 1
step 16300, training accuracy 0.98
step 16400, training accuracy 1
step 16500, training accuracy 1
step 16600, training accuracy 1
step 16700, training accuracy 1
step 16800, training accuracy 1
step 16900, training accuracy 1
step 17000, training accuracy 1
step 17100, training accuracy 1
step 17200, training accuracy 1
step 17300, training accuracy 1
step 17400, training accuracy 1
step 17500, training accuracy 1
step 17600, training accuracy 0.96
step 17700, training accuracy 1
step 17800, training accuracy 1
step 17900, training accuracy 1
step 18000, training accuracy 0.98
step 18100, training accuracy 1
step 18200, training accuracy 1
step 18300, training accuracy 1
step 18400, training accuracy 1
step 18500, training accuracy 1
step 18600, training accuracy 1
step 18700, training accuracy 1
step 18800, training accuracy 1
step 18900, training accuracy 1
step 19000, training accuracy 1
step 19100, training accuracy 1
step 19200, training accuracy 1
step 19300, training accuracy 1
step 19400, training accuracy 1
step 19500, training accuracy 1
step 19600, training accuracy 1
step 19700, training accuracy 1
step 19800, training accuracy 1
step 19900, training accuracy 1
test accuracy 0.9933
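The learning-rate note in the code above is easy to verify: with the step argument pinned at 20000, exponential_decay returns a constant.

# Quick standalone check of the "decayed" rate used throughout the run above
print(0.04 * 0.9 ** (20000 / 1000))  # ≈ 0.00486 -- constant, never changes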

Multi-channel MNIST

The same network without batch normalization, for comparison.

# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
# Import the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/data/", one_hot=True)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
  
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')  
def avg_pool_7x7(x):
    return tf.nn.avg_pool(x, ksize=[1, 7, 7, 1],
                        strides=[1, 7, 7, 1], padding='SAME')

# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])  # MNIST data: 28*28 = 784 dimensions
y = tf.placeholder(tf.float32, [None, 10])   # digits 0-9 => 10 classes



W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
###################################################### multi-kernel convolution
W_conv2_5x5 = weight_variable([5, 5, 32, 32]) 
b_conv2_5x5 = bias_variable([32]) 
W_conv2_7x7 = weight_variable([7, 7, 32, 32]) 
b_conv2_7x7 = bias_variable([32]) 
h_conv2_5x5 = tf.nn.relu(conv2d(h_pool1, W_conv2_5x5) + b_conv2_5x5)
h_conv2_7x7 = tf.nn.relu(conv2d(h_pool1, W_conv2_7x7) + b_conv2_7x7)
h_conv2 = tf.concat([h_conv2_5x5, h_conv2_7x7], 3)  # concatenate along the channel axis: 32 + 32 = 64 maps

h_pool2 = max_pool_2x2(h_conv2)
######################################################### global-average-pooling head

W_conv3 = weight_variable([5, 5, 64, 10])
b_conv3 = bias_variable([10])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

nt_hpool3 = avg_pool_7x7(h_conv3)  # 7x7 global average pooling: [batch,7,7,10] -> [batch,1,1,10]
nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 10])
y_conv = tf.nn.softmax(nt_hpool3_flat)


keep_prob = tf.placeholder("float")  # fed below but unused: no dropout layer is built


cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))

# Try other optimizers to compare their effect:
#train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
#train_step = tf.train.AdagradOptimizer(1e-5).minimize(cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(20000):  # 20000 training steps
        batch = mnist.train.next_batch(50)
        if i%100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x:batch[0], y: batch[1], keep_prob: 1.0})
            print( "step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})
    
    print ("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))
step 0, training accuracy 0.14
step 100, training accuracy 0.08
step 200, training accuracy 0.44
step 300, training accuracy 0.52
step 400, training accuracy 0.44
step 500, training accuracy 0.36
step 600, training accuracy 0.68
step 700, training accuracy 0.5
step 800, training accuracy 0.74
step 900, training accuracy 0.7
step 1000, training accuracy 0.76
step 1100, training accuracy 0.8
step 1200, training accuracy 0.84
step 1300, training accuracy 0.8
step 1400, training accuracy 0.84
step 1500, training accuracy 0.82
step 1600, training accuracy 0.86
step 1700, training accuracy 0.88
step 1800, training accuracy 0.88
step 1900, training accuracy 0.9
step 2000, training accuracy 0.98
step 2100, training accuracy 0.9
step 2200, training accuracy 0.92
step 2300, training accuracy 0.94
step 2400, training accuracy 0.9
step 2500, training accuracy 0.98
step 2600, training accuracy 0.92
step 2700, training accuracy 0.92
step 2800, training accuracy 0.88
step 2900, training accuracy 0.92
step 3000, training accuracy 0.9
step 3100, training accuracy 0.92
step 3200, training accuracy 0.94
step 3300, training accuracy 0.92
step 3400, training accuracy 0.92
step 3500, training accuracy 0.96
step 3600, training accuracy 0.92
step 3700, training accuracy 0.88
step 3800, training accuracy 0.96
step 3900, training accuracy 0.94
step 4000, training accuracy 0.98
step 4100, training accuracy 0.86
step 4200, training accuracy 0.94
step 4300, training accuracy 0.88
step 4400, training accuracy 0.92
step 4500, training accuracy 0.94
step 4600, training accuracy 0.94
step 4700, training accuracy 0.88
step 4800, training accuracy 0.94
step 4900, training accuracy 0.94
step 5000, training accuracy 0.88
step 5100, training accuracy 0.92
step 5200, training accuracy 0.98
step 5300, training accuracy 0.88
step 5400, training accuracy 0.94
step 5500, training accuracy 0.92
step 5600, training accuracy 0.94
step 5700, training accuracy 0.96
step 5800, training accuracy 0.96
step 5900, training accuracy 0.98
step 6000, training accuracy 1
step 6100, training accuracy 0.94
step 6200, training accuracy 0.98
step 6300, training accuracy 0.96
step 6400, training accuracy 0.92
step 6500, training accuracy 0.94
step 6600, training accuracy 0.98
step 6700, training accuracy 0.92
step 6800, training accuracy 0.94
step 6900, training accuracy 0.96
step 7000, training accuracy 1
step 7100, training accuracy 0.94
step 7200, training accuracy 0.92
step 7300, training accuracy 0.96
step 7400, training accuracy 0.98
step 7500, training accuracy 0.96
step 7600, training accuracy 0.94
step 7700, training accuracy 0.96
step 7800, training accuracy 0.96
step 7900, training accuracy 0.98
step 8000, training accuracy 0.96
step 8100, training accuracy 0.92
step 8200, training accuracy 0.98
step 8300, training accuracy 0.98
step 8400, training accuracy 0.96
step 8500, training accuracy 0.96
step 8600, training accuracy 0.96
step 8700, training accuracy 0.94
step 8800, training accuracy 0.98
step 8900, training accuracy 0.96
step 9000, training accuracy 0.98
step 9100, training accuracy 0.9
step 9200, training accuracy 0.96
step 9300, training accuracy 0.98
step 9400, training accuracy 0.88
step 9500, training accuracy 0.98
step 9600, training accuracy 0.98
step 9700, training accuracy 1
step 9800, training accuracy 0.92
step 9900, training accuracy 0.96
step 10000, training accuracy 0.98
step 10100, training accuracy 0.96
step 10200, training accuracy 0.98
step 10300, training accuracy 0.9
step 10400, training accuracy 0.92
step 10500, training accuracy 0.96
step 10600, training accuracy 0.96
step 10700, training accuracy 0.94
step 10800, training accuracy 0.98
step 10900, training accuracy 0.98
step 11000, training accuracy 0.98
step 11100, training accuracy 0.98
step 11200, training accuracy 1
step 11300, training accuracy 0.92
step 11400, training accuracy 1
step 11500, training accuracy 1
step 11600, training accuracy 1
step 11700, training accuracy 0.94
step 11800, training accuracy 0.98
step 11900, training accuracy 0.98
step 12000, training accuracy 0.96
step 12100, training accuracy 0.94
step 12200, training accuracy 0.96
step 12300, training accuracy 0.98
step 12400, training accuracy 0.94
step 12500, training accuracy 0.92
step 12600, training accuracy 0.92
step 12700, training accuracy 0.98
step 12800, training accuracy 0.98
step 12900, training accuracy 0.98
step 13000, training accuracy 0.96
step 13100, training accuracy 0.96
step 13200, training accuracy 0.98
step 13300, training accuracy 0.96
step 13400, training accuracy 0.92
step 13500, training accuracy 0.96
step 13600, training accuracy 0.98
step 13700, training accuracy 0.94
step 13800, training accuracy 0.98
step 13900, training accuracy 0.98
step 14000, training accuracy 0.98
step 14100, training accuracy 0.9
step 14200, training accuracy 0.98
step 14300, training accuracy 1
step 14400, training accuracy 0.9
step 14500, training accuracy 0.98
step 14600, training accuracy 0.92
step 14700, training accuracy 0.94
step 14800, training accuracy 0.94
step 14900, training accuracy 0.98
step 15000, training accuracy 1
step 15100, training accuracy 1
step 15200, training accuracy 0.98
step 15300, training accuracy 0.94
step 15400, training accuracy 0.96
step 15500, training accuracy 0.94
step 15600, training accuracy 0.98
step 15700, training accuracy 0.98
step 15800, training accuracy 0.92
step 15900, training accuracy 0.94
step 16000, training accuracy 0.98
step 16100, training accuracy 0.98
step 16200, training accuracy 0.98
step 16300, training accuracy 1
step 16400, training accuracy 0.96
step 16500, training accuracy 0.94
step 16600, training accuracy 0.96
step 16700, training accuracy 0.96
step 16800, training accuracy 0.98
step 16900, training accuracy 0.96
step 17000, training accuracy 1
step 17100, training accuracy 0.98
step 17200, training accuracy 0.98
step 17300, training accuracy 0.94
step 17400, training accuracy 1
step 17500, training accuracy 0.94
step 17600, training accuracy 0.96
step 17700, training accuracy 1
step 17800, training accuracy 0.96
step 17900, training accuracy 0.92
step 18000, training accuracy 0.98
step 18100, training accuracy 0.96
step 18200, training accuracy 0.96
step 18300, training accuracy 0.98
step 18400, training accuracy 0.98
step 18500, training accuracy 0.98
step 18600, training accuracy 0.98
step 18700, training accuracy 0.98
step 18800, training accuracy 0.98
step 18900, training accuracy 0.98
step 19000, training accuracy 1
step 19100, training accuracy 0.98
step 19200, training accuracy 0.98
step 19300, training accuracy 1
step 19400, training accuracy 1
step 19500, training accuracy 0.98
step 19600, training accuracy 0.96
step 19700, training accuracy 0.98
step 19800, training accuracy 0.98
step 19900, training accuracy 1
test accuracy 0.9768

Compared with the BN version above (0.9933), the plain network both converges more slowly (about 0.44 training accuracy at step 200 versus 0.98 with BN) and ends roughly 1.7 points lower on the test set.
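Both versions rely on tf.concat along axis 3 to fuse the 5x5 and 7x7 feature maps; a standalone sketch of what that does to the shapes:

# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf

# Two branches with the same spatial size but independent channels,
# mirroring h_conv2_5x5 and h_conv2_7x7 above.
a = tf.constant(np.ones([1, 14, 14, 32], dtype=np.float32))
b = tf.constant(np.ones([1, 14, 14, 32], dtype=np.float32))
c = tf.concat([a, b], 3)  # stack along the channel (last) axis

print(c.get_shape())  # (1, 14, 14, 64)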
