TensorFlow 2.0: Handwritten Digit Recognition with a DNN

import tensorflow as tf
from tensorflow.keras.datasets import mnist
import numpy as np
from sklearn.preprocessing import StandardScaler

print(tf.__version__)
2.0.0
##Load the data: 60,000 training samples, 10,000 test samples
(x_train_all, y_train_all), (x_test, y_test) = mnist.load_data()
print(type(x_train_all))
<class 'numpy.ndarray'>
#print(x_train_all.shape, x_test.shape)   #(60000, 28, 28) (10000, 28, 28)
#Standardize the data (zero mean, unit variance)
scaler = StandardScaler()
scaled_x_train_all = scaler.fit_transform(x_train_all.astype(np.float32).reshape(-1,1)).reshape(-1,28,28)
scaled_x_test = scaler.transform(x_test.astype(np.float32).reshape(-1,1)).reshape(-1,28,28)
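Because of the reshape(-1, 1), StandardScaler fits a single global mean and standard deviation over every pixel of the training set and applies (x - mean) / std, reusing the same statistics for the test set. A minimal manual equivalent (added here only as an illustration):

# Manual equivalent of the StandardScaler step above (illustrative sketch)
mean = x_train_all.astype(np.float32).mean()
std = x_train_all.astype(np.float32).std()
manual_x_train_all = (x_train_all.astype(np.float32) - mean) / std   # same shape (60000, 28, 28)
manual_x_test = (x_test.astype(np.float32) - mean) / std             # reuse the training statistics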

#Split the data into a training set and a validation set
scaled_x_train,scaled_x_valid = scaled_x_train_all[5000:],scaled_x_train_all[:5000]
y_train,y_valid = y_train_all[5000:],y_train_all[:5000]
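A quick shape check (added for clarity) confirms the 55,000 / 5,000 split:

# Shape check for the train/validation split (illustrative)
print(scaled_x_train.shape, scaled_x_valid.shape)   # (55000, 28, 28) (5000, 28, 28)
print(y_train.shape, y_valid.shape)                 # (55000,) (5000,)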
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense

a = Input(shape=(784,))   #shape of a single sample, excluding the batch dimension
#Build a multi-layer fully connected network
b = Dense(300,activation='relu')(a)
c = Dense(200,activation='relu')(b)
d = Dense(100,activation='relu')(c)
e = Dense(10)(d)
f = Dense(10,activation='softmax')(e)  #final softmax layer over the 10 classes
model = Model(a,f)
model.summary()
Model: "model_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         [(None, 784)]             0         
_________________________________________________________________
dense_5 (Dense)              (None, 300)               235500    
_________________________________________________________________
dense_6 (Dense)              (None, 200)               60200     
_________________________________________________________________
dense_7 (Dense)              (None, 100)               20100     
_________________________________________________________________
dense_8 (Dense)              (None, 10)                1010      
_________________________________________________________________
dense_9 (Dense)              (None, 10)                110       
=================================================================
Total params: 316,920
Trainable params: 316,920
Non-trainable params: 0
_________________________________________________________________
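As a sanity check on the summary, each Dense layer holds inputs × units weights plus units biases, which reproduces the counts above:

# Parameter counts: inputs * units + units (weights + biases)
print(784*300 + 300)   # 235500  dense_5
print(300*200 + 200)   # 60200   dense_6
print(200*100 + 100)   # 20100   dense_7
print(100*10 + 10)     # 1010    dense_8
print(10*10 + 10)      # 110     dense_9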
model.compile(loss="sparse_categorical_crossentropy",optimizer = "sgd",metrics=["accuracy"])  #sparse_categorical_crossentropy takes integer-encoded targets and converts them to one-hot internally; categorical_crossentropy is for targets that are already one-hot encoded
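To make the difference between the two losses concrete, the snippet below (an added illustration, not part of the training flow) shows how integer labels map to the one-hot form that categorical_crossentropy expects:

# Illustration: integer labels vs. their one-hot form
from tensorflow.keras.utils import to_categorical
print(y_train[:3])                                  # integer-encoded labels
print(to_categorical(y_train[:3], num_classes=10))  # same labels, one-hot encoded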
model.fit(scaled_x_train.reshape(-1,784),y_train,epochs=100,validation_data=(scaled_x_valid.reshape(-1,784),y_valid),verbose=2)
Training log omitted for brevity...
#Evaluate on the test set
model.evaluate(scaled_x_test.reshape(-1,784),y_test,verbose=0)  #verbose=0 suppresses the progress output; returns [test loss, test accuracy]
[0.1206463799060793, 0.9783]  
#Pick a random image from the test set
img_random = scaled_x_test[np.random.randint(0,len(scaled_x_test))]
import matplotlib.pyplot as plt
%matplotlib inline

plt.imshow(img_random)
plt.show()

[Figure: the randomly selected test image]

#Model prediction
prob = model.predict(img_random.reshape(-1,784))
print(np.argmax(prob))
9
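To check the prediction against the true label, the random index can be kept around; a small sketch (idx is an added helper name, not in the original code):

# Sketch: keep the random index so the prediction can be compared with y_test
idx = np.random.randint(0, len(scaled_x_test))
prob = model.predict(scaled_x_test[idx].reshape(-1,784))
print(np.argmax(prob), y_test[idx])   # predicted label vs. ground truth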

The final result is clearly better than using a softmax classifier alone; the test accuracy here reaches about 97.8%.
