# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Compute a = (b + c) * (c + 2) with TensorFlow
def test01():
    # First, create a TensorFlow constant => 2
    const = tf.constant(2.0, name='const')
    # Create TensorFlow variables b and c
    b = tf.Variable(2.0, name='b')
    c = tf.Variable(1.0, dtype=tf.float32, name='c')
    # Create the operations
    d = tf.add(b, c, name='d')
    e = tf.add(c, const, name='e')
    a = tf.multiply(d, e, name='a')
    # 1. Define the init operation
    init_op = tf.global_variables_initializer()
    # session
    with tf.Session() as sess:
        # 2. Run the init operation
        sess.run(init_op)
        # Evaluate a
        a_out = sess.run(a)
        print("Variable a is {}".format(a_out))
def test02():
    # First, create a TensorFlow constant => 2
    const = tf.constant(2.0, name='const')
    # b is now a placeholder: a column of any number of float32 values
    b = tf.placeholder(tf.float32, [None, 1], name='b')
    c = tf.Variable(1.0, dtype=tf.float32, name='c')
    # Create the operations
    d = tf.add(b, c, name='d')
    e = tf.add(c, const, name='e')
    a = tf.multiply(d, e, name='a')
    # 1. Define the init operation
    init_op = tf.global_variables_initializer()
    # session
    with tf.Session() as sess:
        # 2. Run the init operation
        sess.run(init_op)
        # Evaluate a; the placeholder b must be fed a (batch, 1) array
        a_out = sess.run(a, feed_dict={b: np.arange(0, 10)[:, np.newaxis]})
        print("Variable a is {}".format(a_out))
def test03():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    # hyperparameters
    learning_rate = 0.5
    epochs = 10
    batch_size = 100
    # placeholders
    # each input image is 28 x 28 pixels = 784 values
    x = tf.placeholder(tf.float32, [None, 784])
    # the output is a one-hot encoding of the digits 0-9
    y = tf.placeholder(tf.float32, [None, 10])
    # hidden layer => w, b
    W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')
    b1 = tf.Variable(tf.random_normal([300]), name='b1')
    # output layer => w, b
    W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')
    b2 = tf.Variable(tf.random_normal([10]), name='b2')
    # hidden layer
    hidden_out = tf.add(tf.matmul(x, W1), b1)
    hidden_out = tf.nn.relu(hidden_out)
    # compute the output
    out = tf.add(tf.matmul(hidden_out, W2), b2)
    y_ = tf.nn.softmax(out)
    # tf.clip_by_value(A, min, max) squeezes every element of tensor A into
    # [min, max]: values below min become min, values above max become max.
    y_clipped = tf.clip_by_value(y_, 1e-10, 0.9999999)
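    # For example (illustrative values, not from the original): clipping
    # [1e-12, 0.5, 1.2] to [1e-10, 0.9999999] gives [1e-10, 0.5, 0.9999999],
    # which keeps the log() below away from log(0).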
cross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped) + (1 - y) * tf.log(1 - y_clipped), axis=1))
    # create the optimizer and set the objective to minimize
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
    # init operator
    init_op = tf.global_variables_initializer()
    # create the accuracy node
    # tf.argmax(..., 1) reduces along axis 1, i.e. per row / per example;
    # correct_prediction is a tensor of shape (m,) whose True/False entries mark
    # whether each example was predicted correctly.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # create the session
    with tf.Session() as sess:
        # initialize the variables
        sess.run(init_op)
        total_batch = int(len(mnist.train.labels) / batch_size)
        for epoch in range(epochs):
            avg_cost = 0
            for i in range(total_batch):
                batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
                _, c = sess.run([optimizer, cross_entropy], feed_dict={x: batch_x, y: batch_y})
                avg_cost += c / total_batch
            print("Epoch:", (epoch + 1), "cost = ", "{:.3f}".format(avg_cost))
        # test
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
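# A minimal alternative sketch (an addition, not part of the original code): TF 1.5+
# ships a fused, numerically stable op, so the manual softmax + clip + log in test03
# can be replaced by tf.nn.softmax_cross_entropy_with_logits_v2 applied to the raw
# logits (`out` above) and the one-hot labels.
def stable_cross_entropy_sketch(logits, labels):
    # per-example cross entropy, computed from logits without explicit clipping
    per_example = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)
    return tf.reduce_mean(per_example)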
# Linear regression
def test04():
    # create data
    x_data = np.random.rand(100).astype(np.float32)
    y_data = x_data * 0.1 + 0.3
    # create the TensorFlow structure
    Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # one dimension, range [-1, 1]
    biases = tf.Variable(tf.zeros([1]))
    y = Weights * x_data + biases
    # mean squared error
    loss = tf.reduce_mean(tf.square(y - y_data))
    # build an optimizer that reduces the error a little on every iteration
    optimizer = tf.train.GradientDescentOptimizer(0.5)  # learning rate < 1
    train = optimizer.minimize(loss)
    # initialize the variables
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # train
        for step in range(201):
            sess.run(train)
            if step % 20 == 0:
                print(step, sess.run(Weights), sess.run(biases))
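# A quick cross-check sketch (an addition, not in the original): for 1-D linear
# regression the least-squares answer is available in closed form, so the trained
# Weights/biases can be compared against NumPy's fit, which should land near the
# true slope 0.1 and intercept 0.3.
def test04_check():
    x = np.random.rand(100).astype(np.float32)
    y = x * 0.1 + 0.3
    slope, intercept = np.polyfit(x, y, 1)  # coefficients, highest degree first
    print("closed-form fit:", slope, intercept)  # ~0.1 and ~0.3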
def test05():
    matrix1 = tf.constant([[3, 3]])
    matrix2 = tf.constant([[2], [2]])
    # matrix multiply, like np.dot(m1, m2)
    product = tf.matmul(matrix1, matrix2)
    # # method 1
    # sess = tf.Session()  # Session is an object; note the capital S
    # # nothing executes until sess.run() is called
    # result = sess.run(product)
    # print(result)
    # # closing is optional here, but keeps things tidy
    # sess.close()
    # method 2: `with` closes the session automatically
    with tf.Session() as sess:
        result2 = sess.run(product)
        print(result2)
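# Here (1, 2) x (2, 1) gives a (1, 1) result: [[3 * 2 + 3 * 2]] = [[12]].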
def test06():
    state = tf.Variable(0, name='counter')
    # print(state.name)
    # variable + constant = variable
    one = tf.constant(1)
    new_value = tf.add(state, one)
    # assign new_value back into state
    update = tf.assign(state, new_value)
    # variables must be initialized before use
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for _ in range(3):
            sess.run(update)
            print(sess.run(state))
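# An equivalent sketch (an alternative, not the original code): tf.assign_add folds
# the add and the assign above into a single op.
def test06_assign_add_sketch():
    state = tf.Variable(0, name='counter_sketch')
    update = tf.assign_add(state, 1)  # state += 1 as one op
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(3):
            print(sess.run(update))  # prints 1, 2, 3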
# placeholder
def test07():
    # a dtype must be given; most of TF works with float32
    input1 = tf.placeholder(tf.float32)
    input2 = tf.placeholder(tf.float32)
    # renamed in TensorFlow 1.0:
    # tf.mul -> tf.multiply
    # tf.sub -> tf.subtract
    # tf.neg -> tf.negative
    output = tf.multiply(input1, input2)
    with tf.Session() as sess:
        # placeholders receive their values at sess.run() time via feed_dict
        print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))
        print(sess.run(output, feed_dict={input1: [7., 2], input2: [[2.], [2]]}))
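# Broadcasting note (an observation, not in the original): the second feed
# multiplies a shape-(2,) vector by a shape-(2, 1) column, which broadcasts to
# (2, 2), exactly as NumPy would:
#   np.array([7., 2.]) * np.array([[2.], [2.]])  ->  [[14., 4.], [14., 4.]]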
def add_layer(inputs, in_size, out_size, activation_function=None):
    # Weights is a matrix of shape [in_size, out_size]
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # normal distribution
    # a nonzero initial value is recommended, hence the +0.1; one row, out_size columns
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Weights * x + b: the pre-activation value
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # activation
    if activation_function is None:
        # no activation function means a linear layer
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
def test08():
    """Define the data."""
    # 300 points in (-1, 1); the added axis makes x_data a column of 300 examples
    x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
    # add noise with mean 0 and standard deviation 0.05, same shape as x_data
    noise = np.random.normal(0, 0.05, x_data.shape)
    y_data = np.square(x_data) - 0.5 + noise
    xs = tf.placeholder(tf.float32, [None, 1])
    ys = tf.placeholder(tf.float32, [None, 1])
    """Build the network."""
    # hidden layer: 1 input node, 10 output nodes
    l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
    # output layer
    prediction = add_layer(l1, 10, 1, activation_function=None)
    """Loss."""
    # squared error per example, summed per row (reduction_indices=[1]), then averaged
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    """Train."""
    # gradient descent with a learning rate of 0.08 to minimize the loss
    train_step = tf.train.GradientDescentOptimizer(0.08).minimize(loss)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for i in range(5000):
            sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
            if i % 100 == 0:
                print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
def test09():
    """Define the data."""
    # 300 points in (-1, 1); the added axis makes x_data a column of 300 examples
    x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
    # add noise with mean 0 and standard deviation 0.05, same shape as x_data
    noise = np.random.normal(0, 0.05, x_data.shape)
    y_data = np.square(x_data) - 0.5 + noise
    xs = tf.placeholder(tf.float32, [None, 1])
    ys = tf.placeholder(tf.float32, [None, 1])
    """Build the network."""
    # hidden layer: 1 input node, 10 output nodes
    l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
    # output layer
    prediction = add_layer(l1, 10, 1, activation_function=None)
    """Loss."""
    # squared error per example, summed per row (reduction_indices=[1]), then averaged
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    """Train."""
    # gradient descent with a learning rate of 0.08 to minimize the loss
    train_step = tf.train.GradientDescentOptimizer(0.08).minimize(loss)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        fig = plt.figure()
        # plot continuously into one set of axes
        ax = fig.add_subplot(1, 1, 1)
        ax.scatter(x_data, y_data)
        # interactive mode: do not block on drawing
        plt.ion()
        # plt.show() would block after one draw
        # plt.show()  # plt.show(block=False) also works, but plt.ion() is simpler
        for i in range(5000):
            sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
            if i % 100 == 0:
                print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
                try:
                    # erase the previously drawn line; on the first pass `lines`
                    # does not exist yet, so the NameError is swallowed
                    ax.lines.remove(lines[0])
                except Exception:
                    pass
                prediction_value = sess.run(prediction, feed_dict={xs: x_data})
                lines = ax.plot(x_data, prediction_value, 'r-', lw=5)  # lw = line width
                # pause for 0.1 s
                plt.pause(0.1)
# Handwritten digits
def test10():
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    # define placeholders for the inputs to the network:
    # any number of samples, each of size 784 (28 * 28)
    xs = tf.placeholder(tf.float32, [None, 784])
    ys = tf.placeholder(tf.float32, [None, 10])
    # add the output layer
    prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
    # the error between prediction and real data
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.3).minimize(cross_entropy)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for i in range(3000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
            if i % 100 == 0:
                y_pre = sess.run(prediction, feed_dict={xs: mnist.test.images})
                correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(mnist.test.labels, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                result = sess.run(accuracy, feed_dict={xs: mnist.test.images, ys: mnist.test.labels})
                print(i, " accuracy:", result)
def add_layer_EX(inputs, in_size, out_size, layer_name, activation_function=None):
    # Weights is a matrix of shape [in_size, out_size]
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # normal distribution
    # a nonzero initial value is recommended, hence the +0.1; one row, out_size columns
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Weights * x + b: the pre-activation value
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # activation
    if activation_function is None:
        # no activation function means a linear layer
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    # record a histogram of the layer outputs for TensorBoard
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs
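# A dropout sketch (an addition: test11 below defines keep_prob but never applies
# dropout to its layers): tf.nn.dropout keeps each unit with probability keep_prob
# and scales the survivors by 1/keep_prob.
def add_layer_dropout(inputs, in_size, out_size, layer_name, keep_prob, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)  # randomly drop pre-activations
    outputs = Wx_plus_b if activation_function is None else activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs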
# No dropout actually applied (keep_prob is fed below but never used by the layers):
def test11():
    # load data
    digits = load_digits()
    # images of the digits 0-9
    X = digits.data
    print(type(X))
    print("X.shape:", X.shape)
    y = digits.target
    print(type(y))
    print("y.shape:", y.shape)
    # binarize y: for the digit 1, put a 1 in the second position and 0 elsewhere
    y = LabelBinarizer().fit_transform(y)
    print(type(y))
    print("y.shape:", y.shape)
    # split into train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=round(X.shape[0] * 0.2))
    print("X_train.shape:", X_train.shape)
    print("y_train.shape:", y_train.shape)
    print("X_test.shape:", X_test.shape)
    print("y_test.shape:", y_test.shape)
    """dropout"""
    # how much of each layer's output to keep (unused by the layers here)
    keep_prob = tf.placeholder(tf.float32)
    # define placeholders for the inputs to the network
    xs = tf.placeholder(tf.float32, [None, 64])
    ys = tf.placeholder(tf.float32, [None, 10])
    # add the layers
    # l1 is the hidden layer; its 100 outputs make overfitting easier to see
    l1 = add_layer_EX(xs, 64, 100, 'l1', activation_function=tf.nn.tanh)
    prediction = add_layer_EX(l1, 100, 10, 'l2', activation_function=tf.nn.softmax)
    # the error between prediction and real data
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
    # record the loss as a scalar summary
    tf.summary.scalar('loss', cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # one op that runs every summary op, so none has to be run by hand
        merged = tf.summary.merge_all()
        # summary writers go in here
        train_writer = tf.summary.FileWriter("logs/train", sess.graph)  # train is a subfolder of logs
        test_writer = tf.summary.FileWriter("logs/test", sess.graph)
        for i in range(2500):
            # keep_prob=0.4 would keep 40% and drop 60% if dropout were applied
            sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 0.4})
            if i % 50 == 0:
                # record the loss
                y_pre = sess.run(prediction, feed_dict={xs: X_test})
                correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(y_test, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                result = sess.run(accuracy, feed_dict={xs: X_test, ys: y_test})
                print("epoch ", i, ",accuracy:", result)
                train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train})
                test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test})
                train_writer.add_summary(train_result, i)
                test_writer.add_summary(test_result, i)
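# To inspect the summaries written above, point TensorBoard at the log directory
# (standard TensorBoard CLI): tensorboard --logdir=logs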
def main():
#test01()
#test02()
#test03()
#test04()
#test05()
#test06()
#test07()
#test08()
#test09()
#test10()
test11()
if __name__ == '__main__':
main()