TensorFlow Learning Notes

Creating and launching a session

import tensorflow as tf

# Create a 1x2 constant matrix
m1 = tf.constant([[3,3]])
# Create a 2x1 constant matrix
m2 = tf.constant([[2],[3]])
# Matrix multiplication op
product = tf.matmul(m1, m2)
print(product)
Tensor("MatMul_3:0", shape=(1, 1), dtype=int32)

Printing the op only shows the Tensor's metadata (name, shape, dtype); to get the actual value, the op has to be executed in a session.

Method 1

# Define a session
sess = tf.Session()
# Call sess.run to execute the matrix multiplication op
result = sess.run(product)
print(result)
sess.close()
[[15]]

Method 2 (the session is closed automatically when the with block exits)

with tf.Session() as sess:
    # Call sess.run to execute the matrix multiplication op
    result = sess.run(product)
    print(result)

[[15]]

Variables

# Define a variable
x = tf.Variable([1,2])
# Define a constant
a = tf.constant([3,3])
# Subtraction op
sub = tf.subtract(x, a)
# Addition op
add = tf.add(x,sub)

# All variables must be initialized before use!
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # Run the variable initializer
    sess.run(init)
    print(sess.run(sub))
    print(sess.run(add))
    
[-2 -1]
[-1  1]
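
A variable's value can also be updated inside the graph with tf.assign. A minimal sketch (an illustrative example, not from the original post; the names state and update are assumptions):

state = tf.Variable(0, name='counter')
# Op that adds 1 to state and writes the result back into state
update = tf.assign(state, tf.add(state, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))
# prints 1, then 2, then 3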

Fetch_Feed

fetch

# Fetch: run several tensors/ops in a single session.run call
# Define three constants
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
# Addition op
add = tf.add(input2,input3)
# Multiplication op
mul = tf.multiply(input1, add)

with tf.Session() as sess:
    result1,result2 = sess.run([mul, add])
    print(result1,result2)

21.0 7.0

feed

# Feed: define placeholders first, then pass in the data when the graph is run
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
# Multiplication op
output = tf.multiply(input1, input2)

with tf.Session() as sess:
    print(sess.run(output, feed_dict={input1:8.0,input2:2.0}))

16.0
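
The same placeholders also accept arrays, in which case tf.multiply is applied element-wise. A minimal sketch (an illustrative extension, not from the original post):

with tf.Session() as sess:
    # Feed vectors instead of scalars into the same graph
    print(sess.run(output, feed_dict={input1: [1.0, 2.0], input2: [3.0, 4.0]}))
# prints [3. 8.]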

Linear regression

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Generate 100 random x values and noisy targets around the line y = 0.1*x + 0.2
x_data = np.random.rand(100)
noise = np.random.normal(0,0.01,x_data.shape)
y_data = x_data*0.1 + 0.2 + noise

plt.scatter(x_data,y_data)
plt.show()

# Build a linear model (k is the slope, d is the intercept)
d = tf.Variable(np.random.rand(1))
k = tf.Variable(np.random.rand(1))
y = k*x_data + d

# Quadratic cost (mean squared error)
loss = tf.losses.mean_squared_error(y_data, y)
# Gradient descent optimizer with learning rate 0.3
optimizer = tf.train.GradientDescentOptimizer(0.3)
# Minimize the cost
train = optimizer.minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(201):
        sess.run(train)
        if i%20==0:
            print(i,sess.run([k,d]))
    y_pred = sess.run(y)
    plt.scatter(x_data,y_data)
    plt.plot(x_data,y_pred,'r-',lw=3)
    plt.show()

0 [array([0.42558686]), array([0.07772181])]
20 [array([0.24686251]), array([0.1212207])]
40 [array([0.17103131]), array([0.16282419])]
60 [array([0.13410329]), array([0.18308412])]
80 [array([0.1161202]), array([0.19295024])]
100 [array([0.10736286]), array([0.1977548])]
120 [array([0.10309823]), array([0.20009452])]
140 [array([0.10102146]), array([0.2012339])]
160 [array([0.10001012]), array([0.20178875])]
180 [array([0.09951763]), array([0.20205895])]
200 [array([0.09927779]), array([0.20219054])]

Nonlinear regression

# Generate 200 evenly spaced points in [-0.5, 0.5] and noisy targets y = x^2 + noise
x_data = np.linspace(-0.5,0.5,200)[:,np.newaxis]
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise

plt.scatter(x_data, y_data)
plt.show()

# Define two placeholders
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])

# Network structure: 1-30-1 (one hidden layer with 30 tanh units)
w1 = tf.Variable(tf.random_normal([1,30]))
b1 = tf.Variable(tf.zeros([30]))
wx_plus_b_1 = tf.matmul(x,w1) + b1
l1 = tf.nn.tanh(wx_plus_b_1)

w2 = tf.Variable(tf.random_normal([30,1]))
b2 = tf.Variable(tf.zeros([1]))
wx_plus_b_2 = tf.matmul(l1,w2) + b2
prediction = tf.nn.tanh(wx_plus_b_2)

# Quadratic cost (mean squared error)
loss = tf.losses.mean_squared_error(y,prediction)
# Minimize the loss with gradient descent
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    for _ in range(3000):
        sess.run(train,feed_dict={x:x_data,y:y_data})
        
    # Get the predictions
    prediction_value = sess.run(prediction,feed_dict={x:x_data})
    # Plot the fit
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value, 'r-', lw=5)
    plt.show()

MNIST classification: a simple version

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

# Batch size
batch_size = 64
# Number of batches per epoch
n_batch = mnist.train.num_examples // batch_size


# Define two placeholders
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])

# Create a simple network: 784-10 (no hidden layer)
W = tf.Variable(tf.truncated_normal([784,10],stddev=0.1))
b = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(x,W)+b)

# Quadratic cost (mean squared error)
loss = tf.losses.mean_squared_error(y, prediction)
# Gradient descent
train = tf.train.GradientDescentOptimizer(0.3).minimize(loss)

# correct_prediction is a list of booleans: does the predicted class match the label?
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    # Epoch: one full pass over all of the training data
    for epoch in range(21):
        for batch in range(n_batch):
            # Fetch one batch of images and labels
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train,feed_dict={x:batch_xs,y:batch_ys})
        # Evaluate on the test set after each epoch
        acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))

Cross entropy

# Replace the quadratic cost above with a cross-entropy cost:
loss = tf.losses.softmax_cross_entropy(y, prediction)
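
Strictly speaking, tf.losses.softmax_cross_entropy expects the raw logits as its second argument and applies softmax internally, so passing the already-softmaxed prediction applies softmax twice. It still trains, but the more usual form keeps the pre-softmax values. A minimal sketch (the name logits is an assumption; the other names come from the model above):

# Keep the pre-softmax logits and let the loss apply softmax itself
logits = tf.matmul(x,W) + b
prediction = tf.nn.softmax(logits)   # still used for argmax/accuracy
loss = tf.losses.softmax_cross_entropy(onehot_labels=y, logits=logits)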

Dropout

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

# Batch size
batch_size = 64
# Number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

# Define three placeholders
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
keep_prob = tf.placeholder(tf.float32)   # probability of keeping a unit when dropout is applied

# Network structure: 784-1000-500-10
W1 = tf.Variable(tf.truncated_normal([784,1000],stddev=0.1))
b1 = tf.Variable(tf.zeros([1000])+0.1)
L1 = tf.nn.tanh(tf.matmul(x,W1)+b1)
L1_drop = tf.nn.dropout(L1,keep_prob)   # randomly zero units; kept units are scaled by 1/keep_prob

W2 = tf.Variable(tf.truncated_normal([1000,500],stddev=0.1))
b2 = tf.Variable(tf.zeros([500])+0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop,W2)+b2)
L2_drop = tf.nn.dropout(L2,keep_prob) 

W3 = tf.Variable(tf.truncated_normal([500,10],stddev=0.1))
b3 = tf.Variable(tf.zeros([10])+0.1)
prediction = tf.nn.softmax(tf.matmul(L2_drop,W3)+b3)

# Cross-entropy cost
loss = tf.losses.softmax_cross_entropy(y,prediction)
# Gradient descent
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()

# correct_prediction is a list of booleans: does the predicted class match the label?
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value along the given axis
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(31):
        for batch in range(n_batch):
            batch_xs,batch_ys =  mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.5})
        
        test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
        train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(test_acc) +",Training Accuracy " + str(train_acc))

L2 regularization

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

# Batch size
batch_size = 64
# Number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

# Define three placeholders
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
keep_prob = tf.placeholder(tf.float32)   # fed as 1.0 below, so dropout is effectively disabled in this example

# Network structure: 784-1000-500-10
W1 = tf.Variable(tf.truncated_normal([784,1000],stddev=0.1))
b1 = tf.Variable(tf.zeros([1000])+0.1)
L1 = tf.nn.tanh(tf.matmul(x,W1)+b1)
L1_drop = tf.nn.dropout(L1,keep_prob) 

W2 = tf.Variable(tf.truncated_normal([1000,500],stddev=0.1))
b2 = tf.Variable(tf.zeros([500])+0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop,W2)+b2)
L2_drop = tf.nn.dropout(L2,keep_prob) 

W3 = tf.Variable(tf.truncated_normal([500,10],stddev=0.1))
b3 = tf.Variable(tf.zeros([10])+0.1)
prediction = tf.nn.softmax(tf.matmul(L2_drop,W3)+b3)

# L2 regularization term (tf.nn.l2_loss(t) computes sum(t**2)/2)
l2_loss = tf.nn.l2_loss(W1) + tf.nn.l2_loss(b1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(b2) + tf.nn.l2_loss(W3) + tf.nn.l2_loss(b3)

# Cross-entropy cost plus a weighted L2 penalty
loss = tf.losses.softmax_cross_entropy(y,prediction) + 0.0005*l2_loss
# Gradient descent
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()

# correct_prediction is a list of booleans: does the predicted class match the label?
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) # argmax returns the index of the largest value along the given axis
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(31):
        for batch in range(n_batch):
            batch_xs,batch_ys =  mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})
        
        test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
        train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(test_acc) +",Training Accuracy " + str(train_acc))