TensorFlow v2.0 Beginner Tutorial 04: Logistic Regression

This tutorial assumes you already understand the principles of logistic regression.
We use the MNIST handwritten-digit dataset: 60,000 training images and 10,000 test images, each 28*28 pixels with pixel values from 0 to 255.
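
Before diving into the full script, it can help to sanity-check that the raw data matches the description above. This is a minimal, optional sketch (mnist.load_data() downloads the files on first use):

import numpy as np
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, x_test.shape)  # (60000, 28, 28) (10000, 28, 28)
print(x_train.dtype, x_train.min(), x_train.max())  # uint8 0 255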

Logistic Regression

import tensorflow as tf
import numpy as np
from tensorflow.keras.datasets import mnist

# MNIST dataset parameters
num_classes = 10  # digits 0 to 9, 10 classes
num_features = 784  # 28*28 pixels per image

# Training parameters
learning_rate = 0.01
training_steps = 1000
batch_size = 256
display_step = 50

# Prepare the dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Convert the images to float32
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Flatten each 28x28 image into a 784-dimensional vector
x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features])
# Rescale pixel values from [0, 255] to [0, 1]
x_train, x_test = x_train / 255, x_test / 255

# Build a tf.data.Dataset from the x_train and y_train arrays
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle the dataset and set the batch size
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
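# repeat() makes the dataset loop indefinitely so that take(training_steps)
# below can draw 1000 batches; shuffle(5000) maintains a 5000-element buffer,
# and prefetch(1) prepares the next batch while the current one is training.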

# Weights, shape [784, 10]: 28*28 input features, 10 classes
W = tf.Variable(tf.ones([num_features, num_classes]), name="weight")
# Bias, shape [10]: one per class
b = tf.Variable(tf.zeros([num_classes]), name="bias")

# Logistic regression: softmax of the affine map x*W + b
def logistic_regression(x):
    return tf.nn.softmax(tf.matmul(x, W) + b)
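# x has shape [batch, 784]; the result has shape [batch, 10], where each row
# is a probability distribution over the 10 digit classes.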

# Loss function
def cross_entropy(y_pred, y_true):
    # tf.one_hot converts an integer label into a one-hot vector
    y_true = tf.one_hot(y_true, depth=num_classes)
    # tf.clip_by_value clips y_pred to [1e-9, 1.0] to avoid log(0)
    y_pred = tf.clip_by_value(y_pred, 1e-9, 1.0)
    return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))
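# Note: reduce_sum is called without an axis argument, so it sums the
# cross-entropy over the entire batch (reduce_mean of the resulting scalar
# changes nothing), which is why the losses printed below are in the
# hundreds. The usual per-example mean would be
# tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred), axis=1)).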

# Compute accuracy
def accuracy(y_pred, y_true):
    # tf.cast converts the integer labels to int64 for the comparison
    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
    return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
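# tf.argmax(y_pred, 1) returns the index of the largest probability in each
# row, i.e. the predicted digit; casting the boolean matches to float32 and
# averaging gives the fraction of the batch classified correctly.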

# Use stochastic gradient descent (SGD) as the optimizer
optimizer = tf.optimizers.SGD(learning_rate)

# Run one gradient-descent step
def run_optimization(x, y):
    with tf.GradientTape() as g:
        pred = logistic_regression(x)
        loss = cross_entropy(pred, y)
    # Compute gradients of the loss with respect to W and b
    gradients = g.gradient(loss, [W, b])
    # Apply the gradients to update the variables
    optimizer.apply_gradients(zip(gradients, [W, b]))
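# GradientTape records the forward pass so g.gradient can backpropagate the
# loss to W and b; plain SGD then updates each variable as
# var <- var - learning_rate * gradient.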

# Training loop
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    run_optimization(batch_x, batch_y)
    if step % display_step == 0:
        pred = logistic_regression(batch_x)
        loss = cross_entropy(pred, batch_y)
        acc = accuracy(pred, batch_y)
        print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
    
# Evaluate the trained model on the test set
pred = logistic_regression(x_test)
print("Test Accuracy: %f" % accuracy(pred, y_test))

Output

step: 50, loss: 1244.371582, accuracy: 0.656250
step: 100, loss: 926.965942, accuracy: 0.792969
step: 150, loss: 667.272644, accuracy: 0.832031
step: 200, loss: 489.627258, accuracy: 0.871094
step: 250, loss: 416.455811, accuracy: 0.878906
step: 300, loss: 633.148315, accuracy: 0.796875
step: 350, loss: 708.499329, accuracy: 0.816406
step: 400, loss: 567.255005, accuracy: 0.765625
step: 450, loss: 418.291779, accuracy: 0.890625
step: 500, loss: 596.595642, accuracy: 0.824219
step: 550, loss: 718.982849, accuracy: 0.746094
step: 600, loss: 785.499329, accuracy: 0.781250
step: 650, loss: 495.821411, accuracy: 0.847656
step: 700, loss: 544.291626, accuracy: 0.871094
step: 750, loss: 557.276123, accuracy: 0.867188
step: 800, loss: 588.374207, accuracy: 0.843750
step: 850, loss: 826.526855, accuracy: 0.804688
step: 900, loss: 515.780884, accuracy: 0.851562
step: 950, loss: 514.978210, accuracy: 0.855469
step: 1000, loss: 580.234985, accuracy: 0.843750
Test Accuracy: 0.826200
