Getting Started with Deep Learning: MNIST Classification Three Ways, with Full Code! CNN (Convolutional Neural Network), Multilayer Perceptron, and Logistic Regression



Preface

MNIST digit recognition is the classic entry-level dataset for deep learning. This post classifies the MNIST digits in three different ways: logistic regression, a multilayer perceptron, and the familiar CNN (convolutional neural network). All three implementations are written in TensorFlow and are well suited to beginners.
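Note that the code in this post targets the TensorFlow 1.x API: tf.placeholder, tf.Session, and the tensorflow.examples.tutorials.mnist module were all removed in TensorFlow 2.x. If you only have TF 2.x installed, here is a minimal sketch, assuming a TensorFlow 2.x environment, of how the same graph-and-session style can still be run; the Keras MNIST loader stands in for the removed input_data module:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders and sessions

# tf.keras.datasets.mnist replaces tensorflow.examples.tutorials.mnist
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0  # flatten to 784 columns
y_train = np.eye(10)[y_train]                                 # one-hot, like one_hot=True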

1. CNN code for MNIST

import time
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('Minist_data', one_hot=True)

# Hyperparameters
input_num = 784     # number of input columns (28x28 pixels flattened)
labels = 10         # number of output classes
batchsize = 128     # images per training batch
max_epochs = 1000   # number of training iterations
dropout = 0.85      # keep probability for the dropout layer

# Placeholders hold the batch of input images and their labels
x = tf.placeholder(tf.float32, [None, input_num])
y = tf.placeholder(tf.float32, [None, labels])

# Data preprocessing: standardization (defined here but never called below)
def normallize(x):
    mean_x = np.mean(x)
    std_x = np.std(x)
    x = (x - mean_x) / std_x
    return x

# Convolution layer: x is the input, w the kernel weights, b the bias.
# VALID padding is used here, i.e. no zero padding.
def con2d(x, w, b, strides=1):
    x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding='VALID')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

# Max-pooling layer
def maxpool2d(x, k=2):
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')


# Weights: two 5x5 conv kernels, then two fully connected layers
weight = {
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    'wd1': tf.Variable(tf.random_normal([4 * 4 * 64, 1024])),
    'wd2': tf.Variable(tf.random_normal([1024, 10]))}

biases = {
    'bd1': tf.Variable(tf.random_normal([32])),
    'bd2': tf.Variable(tf.random_normal([64])),
    'bd3': tf.Variable(tf.random_normal([1024])),
    'bd4': tf.Variable(tf.random_normal([10]))}

# The model
def con2dnet(x, weights, biases, dropout):
    # The input arrives as rows of 1x784 values; reshape each row back into a
    # 28x28 single-channel image (-1 keeps the batch size flexible)
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    # First conv layer: 28x28x1 -> 24x24x32
    con_1 = con2d(x, weights['wc1'], biases['bd1'])
    # First pooling layer: 24x24x32 -> 12x12x32
    con_1_maxpol = maxpool2d(con_1, k=2)
    # Second conv layer: 12x12x32 -> 8x8x64
    con_2 = con2d(con_1_maxpol, weights['wc2'], biases['bd2'])
    # Second pooling layer: 8x8x64 -> 4x4x64
    con_1_maxpo2 = maxpool2d(con_2, k=2)
    # Flatten: 64 feature maps of 4x4 each become one row of 4*4*64 values,
    # ready for the fully connected layer (a batch of 128 images gives 128 rows)
    fc1 = tf.reshape(con_1_maxpo2, [-1, weights['wd1'].get_shape().as_list()[0]])
    # Fully connected layer: [1, 4x4x64] -> [1, 1024]
    fc2 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd3'])
    fc2 = tf.nn.relu(fc2)
    # Dropout layer
    fc3 = tf.nn.dropout(fc2, dropout)
    # Output layer: [1, 1024] -> [1, 10]
    fc3 = tf.add(tf.matmul(fc3, weights['wd2']), biases['bd4'])
    return fc3



pred = con2dnet(x, weight, biases, dropout)
coss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(coss)

correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
corrct_num = tf.reduce_sum(tf.cast(correct_prediction, "float"))  # count of correct predictions
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    accucy_list = []  # test accuracy after each iteration
    accucy_coss = []  # test loss after each iteration
    sess.run(init_op)
    for epoch in range(max_epochs):
        train_x, train_y = mnist.train.next_batch(batchsize)
        sess.run(optimizer, feed_dict={x: train_x, y: train_y})
        coss_1, num_1 = sess.run([coss, corrct_num],
                                 feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print('epoch:{0}, accucy:{1}:'.format(epoch, num_1 / 10000))
        accucy_list.append(num_1 / 10000)  # 10000 images in the test set
        accucy_coss.append(coss_1)

    plt.title('test_accucy')
    plt.xlabel('epochs')
    plt.ylabel('accucy')
    plt.plot(accucy_list)
    plt.show()

    plt.title('test_coss')
    plt.xlabel('epochs')
    plt.ylabel('coss')
    plt.plot(accucy_coss)
    plt.show()
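As a quick check on the shape comments in con2dnet: a 5x5 kernel with VALID padding shrinks each side by 4, and SAME max-pooling with stride 2 halves it (rounding up). The small sketch below (a hypothetical helper, not part of the script above) reproduces the 28 -> 24 -> 12 -> 8 -> 4 progression and the 4*4*64 flatten width used by 'wd1':

def valid_conv_size(n, k):
    # VALID padding: output side = n - k + 1
    return n - k + 1

def same_pool_size(n, stride):
    # SAME pooling: output side = ceil(n / stride)
    return -(-n // stride)

n = 28
n = valid_conv_size(n, 5)   # conv1: 28 -> 24
n = same_pool_size(n, 2)    # pool1: 24 -> 12
n = valid_conv_size(n, 5)   # conv2: 12 -> 8
n = same_pool_size(n, 2)    # pool2:  8 -> 4
print(n * n * 64)           # 1024 values per image after flattening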
    
Results:

[Figures: test_accucy and test_coss curves plotted over the 1000 training epochs]

epoch:986, accucy:0.9639:
epoch:987, accucy:0.9645:
epoch:988, accucy:0.9653:
epoch:989, accucy:0.9649:
epoch:990, accucy:0.9643:
epoch:991, accucy:0.9634:
epoch:992, accucy:0.9623:
epoch:993, accucy:0.9625:
epoch:994, accucy:0.963:
epoch:995, accucy:0.9635:
epoch:996, accucy:0.9638:
epoch:997, accucy:0.9643:
epoch:998, accucy:0.9645:
epoch:999, accucy:0.964:

2. Multilayer perceptron code for MNIST

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time

mnist = input_data.read_data_sets('Minist_data', one_hot=True)


n_input = 784        # input size
n_labels = 10        # output size
n_hidden_layer = 30  # hidden layer size
max_epochs = 10000
batch_size = 100
alphy = 0.2          # learning rate
seed = 0

# Derivative of the sigmoid function: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
def sigmoid_derivation(x):
    return tf.multiply(tf.sigmoid(x), tf.subtract(tf.constant(1.0), tf.sigmoid(x)))

# Weights and biases
weigths = {
    'w_1': tf.Variable(tf.random_normal([n_input, n_hidden_layer], seed=seed)),
    'w_2': tf.Variable(tf.random_normal([n_hidden_layer, n_labels], seed=seed))}
basis = {
    'basis_1': tf.Variable(tf.random_normal([1, n_hidden_layer], seed=seed)),
    'basis_2': tf.Variable(tf.random_normal([1, n_labels], seed=seed))}

# Placeholders
x_in = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_labels])


def creat_model(x_in, weight, basis):
    h_1 = tf.matmul(x_in, weight['w_1']) + basis['basis_1']  # hidden pre-activation
    o_1 = tf.sigmoid(h_1)                                    # hidden activation
    h_2 = tf.matmul(o_1, weight['w_2']) + basis['basis_2']   # output pre-activation
    o_2 = tf.sigmoid(h_2)                                    # output activation
    return h_1, o_1, h_2, o_2

# Forward pass
h_1, o_1, h_2, y_hat = creat_model(x_in, weigths, basis)

# Error
error = y_hat - y

# Backward pass, written out by hand
delta_2 = tf.multiply(error, sigmoid_derivation(h_2))
delta_w_2 = tf.matmul(tf.transpose(o_1), delta_2)

wtd_error = tf.matmul(delta_2, tf.transpose(weigths['w_2']))
delta_1 = tf.multiply(wtd_error, sigmoid_derivation(h_1))
delta_w_1 = tf.matmul(tf.transpose(x_in), delta_1)

alphy = tf.constant(alphy)
# Update the weights (the biases are left fixed)
step = [tf.assign(weigths['w_1'], tf.subtract(weigths['w_1'], tf.multiply(alphy, delta_w_1))),
        tf.assign(weigths['w_2'], tf.subtract(weigths['w_2'], tf.multiply(alphy, delta_w_2)))]

acc_mat = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y, 1))
acc_num = tf.reduce_sum(tf.cast(acc_mat, tf.float32))
# Initializer
initi_op = tf.global_variables_initializer()

# Start the session
with tf.Session() as sess:
    sess.run(initi_op)

    for epoch in range(max_epochs):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(step, feed_dict={x_in: batch_xs, y: batch_ys})
        if epoch % 1000 == 0:
            acc_test = sess.run(acc_num, feed_dict={x_in: mnist.test.images, y: mnist.test.labels})
            acc_train = sess.run(acc_num, feed_dict={x_in: mnist.train.images, y: mnist.train.labels})
            # 10000 test images, 55000 training images
            print('Epoch:{0} ,  accurcy_test:{1},   accurcy_train:{2}'.format(epoch, acc_test/10000, acc_train/55000))
    
Results:
Epoch:0 ,  accurcy_test:0.1063,   accurcy_train:0.10627272727272727
Epoch:1000 ,  accurcy_test:0.653,   accurcy_train:0.6551636363636364
Epoch:2000 ,  accurcy_test:0.6668,   accurcy_train:0.6692
Epoch:3000 ,  accurcy_test:0.6667,   accurcy_train:0.6737272727272727
Epoch:4000 ,  accurcy_test:0.7601,   accurcy_train:0.7695272727272727
Epoch:5000 ,  accurcy_test:0.7634,   accurcy_train:0.7738181818181818
Epoch:6000 ,  accurcy_test:0.8517,   accurcy_train:0.862890909090909
Epoch:7000 ,  accurcy_test:0.858,   accurcy_train:0.8671454545454546
Epoch:8000 ,  accurcy_test:0.8631,   accurcy_train:0.8733090909090909
Epoch:9000 ,  accurcy_test:0.9413,   accurcy_train:0.9572
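Since the backward pass above is spelled out by hand rather than left to an optimizer, it may help to see the same two update rules in plain NumPy. This is only an illustrative sketch of the technique (the function and variable names are mine, and, as in the TF graph, the biases are never updated):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def train_step(x, y, w1, b1, w2, b2, alpha=0.2):
    # Forward pass
    h1 = x @ w1 + b1
    o1 = sigmoid(h1)
    h2 = o1 @ w2 + b2
    y_hat = sigmoid(h2)
    # Backward pass, using sigmoid'(h) = sigmoid(h) * (1 - sigmoid(h))
    error = y_hat - y
    d2 = error * sigmoid(h2) * (1.0 - sigmoid(h2))        # mirrors delta_2
    d1 = (d2 @ w2.T) * sigmoid(h1) * (1.0 - sigmoid(h1))  # mirrors delta_1
    w2 -= alpha * (o1.T @ d2)                             # mirrors delta_w_2
    w1 -= alpha * (x.T @ d1)                              # mirrors delta_w_1
    return w1, w2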

3. Logistic regression code for MNIST

# Logistic regression for MNIST classification
import tensorflow as tf
import matplotlib.pyplot as plt, matplotlib.image as mpimg
from tensorflow.examples.tutorials.mnist import input_data

# Read the data
mnist = input_data.read_data_sets('Minist_data', one_hot=True)

# Initialize the weights to zero
w = tf.Variable(tf.zeros([784, 10]), name='w')
b = tf.Variable(tf.zeros([10]), name='b')

# Placeholders: each image flattens to 784 values; None leaves the
# per-batch number of images flexible
x = tf.placeholder(tf.float32, [None, 784], name='x')
y = tf.placeholder(tf.float32, [None, 10], name='y')

# Predicted logits
y_hat = tf.matmul(x, w) + b

# Loss: cross entropy; tf.reduce_mean averages it over the batch
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_hat))
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
# Prediction accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_hat, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Initialization
initi_op = tf.global_variables_initializer()



with tf.Session() as sess:
    total = []  # average loss per epoch, for plotting

    sess.run(initi_op)

    for epoch in range(50):
        loss_avg = 0
        batch_size = 100
        num_of_batch = int(mnist.train.num_examples / batch_size)
        for i in range(num_of_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, l = sess.run([optimizer, loss], feed_dict={x: batch_xs, y: batch_ys})
            loss_avg = loss_avg + l
            # prints the running partial average within the epoch
            print('Epoch :{0} ,loss: {1}'.format(epoch, loss_avg / num_of_batch))
        total.append(loss_avg / num_of_batch)

    print('Done')
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    plt.plot(total)
    plt.show()
Results:

[Figure: average training loss per epoch over the 50 epochs]

Epoch :49 ,loss: 0.2860406456481327
Epoch :49 ,loss: 0.2863932258974422
Epoch :49 ,loss: 0.2869208517399701
Epoch :49 ,loss: 0.28743210982192646
Epoch :49 ,loss: 0.28812870155681264
Epoch :49 ,loss: 0.2885602289167317
Epoch :49 ,loss: 0.28918006840077315
Epoch :49 ,loss: 0.28969469279050825
Epoch :49 ,loss: 0.29052486633712593
Epoch :49 ,loss: 0.29099014274098656
Epoch :49 ,loss: 0.2915572559020736
Epoch :49 ,loss: 0.29206312930042094
Epoch :49 ,loss: 0.29243259806524624
Epoch :49 ,loss: 0.2931043164567514
Epoch :49 ,loss: 0.29356380319053477
Epoch :49 ,loss: 0.29405322256413374
Epoch :49 ,loss: 0.2947297677397728
Epoch :49 ,loss: 0.29508687217127194
Epoch :49 ,loss: 0.29597209028222343
Epoch :49 ,loss: 0.2963061141154983
Epoch :49 ,loss: 0.2969307041439143
Epoch :49 ,loss: 0.2975302829796618
Epoch :49 ,loss: 0.29785606977614487
Epoch :49 ,loss: 0.29842295774004673
Epoch :49 ,loss: 0.2990792263366959
Epoch :49 ,loss: 0.299645564745773
Epoch :49 ,loss: 0.3001909241080284
Epoch :49 ,loss: 0.30061151071028275
Epoch :49 ,loss: 0.30132105106657203
Epoch :49 ,loss: 0.30179316797039724
Epoch :49 ,loss: 0.3024873532490297
Epoch :49 ,loss: 0.3029060740362514
Epoch :49 ,loss: 0.3035559590296312
Epoch :49 ,loss: 0.30405486394058573
Epoch :49 ,loss: 0.30449492687528784
Done
0.9191
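For reference, tf.nn.softmax_cross_entropy_with_logits used above computes, for each example, the cross entropy between the one-hot label and the softmax of the logits, which tf.reduce_mean then averages over the batch. Here is a small NumPy sketch of the same quantity (illustrative only, with a numerically stabilized log-softmax):

import numpy as np

def softmax_cross_entropy(logits, labels_one_hot):
    z = logits - logits.max(axis=1, keepdims=True)        # stabilize exp
    log_softmax = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return -(labels_one_hot * log_softmax).sum(axis=1)    # per-example loss

logits = np.array([[2.0, 0.5, 0.1]])
labels = np.array([[1.0, 0.0, 0.0]])
print(softmax_cross_entropy(logits, labels))              # ~0.317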



Summary

The code is short and easy to follow. Comparing the three runs, the CNN is by far the slowest to train, while the final test accuracies end up in the same ballpark: roughly 0.96 for the CNN, 0.94 for the multilayer perceptron, and 0.92 for logistic regression.
