TensorFlow 2

import tensorflow as tf
import numpy as np
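
# Note: the examples below use the TF1-style graph API (tf.Session,
# tf.placeholder, tf.train.*). Under TensorFlow 2.x they only run through the
# compat layer, e.g. (an assumption about your setup, not from the original post):
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()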

def test1():
    #create data
    x_data=np.random.rand(100).astype(np.float32)
    y_data=x_data*0.1+0.3
    
    #create the TensorFlow graph structure
    Weights=tf.Variable(tf.random_uniform([1],-1.0,1.0)) # 1-D, sampled uniformly from [-1, 1)
    biases=tf.Variable(tf.zeros([1]))
    
    y=Weights*x_data+biases
    
    loss=tf.reduce_mean(tf.square(y-y_data))
    
    #build the optimizer to shrink the loss; every training step refines the parameters
    optimizer=tf.train.GradientDescentOptimizer(0.5) #learning rate < 1
    train=optimizer.minimize(loss)
    
    #initialize the variables
    init=tf.global_variables_initializer()
    
    with tf.Session() as sess:
        sess.run(init)
        #train
        for step in range(201):
            sess.run(train)
            if step%20==0:
                print(step,sess.run(Weights),sess.run(biases))
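
# A minimal TF2 eager-mode sketch of the same line fit (an aside, not from the
# original post): tf.GradientTape replaces the Session/graph workflow.
def test1_tf2():
    x_data = np.random.rand(100).astype(np.float32)
    y_data = x_data * 0.1 + 0.3

    weights = tf.Variable(tf.random.uniform([1], -1.0, 1.0))
    biases = tf.Variable(tf.zeros([1]))
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.5)

    for step in range(201):
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean(tf.square(weights * x_data + biases - y_data))
        grads = tape.gradient(loss, [weights, biases])
        optimizer.apply_gradients(zip(grads, [weights, biases]))
        if step % 20 == 0:
            print(step, weights.numpy(), biases.numpy())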

def test2():
    node1 = tf.constant(3.0, dtype=tf.float32)
    node2 = tf.constant(4.0)  # also tf.float32 implicitly
    print(node1, node2)
    sess = tf.Session()
    print(sess.run([node1, node2]))
    node3 = tf.add(node1, node2)
    print("node3:", node3)
    print("sess.run(node3):", sess.run(node3))
    
    a = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    adder_node = a + b  # + provides a shortcut for tf.add(a, b)
    print(sess.run(adder_node, {a:3, b:4.5}))
    print(sess.run(adder_node, {a: [1,3], b: [2,4]}))
    add_and_triple = adder_node * 3.
    print(sess.run(add_and_triple, {a:3, b:4.5}))
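
# TF2 aside (not from the original post): placeholders disappear in eager mode;
# plain function arguments, optionally wrapped in tf.function, play their role.
@tf.function
def add_and_triple_v2(a, b):
    return (a + b) * 3.0  # same computation as add_and_triple above

# add_and_triple_v2(tf.constant(3.0), tf.constant(4.5)) evaluates to 22.5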


def test3():
    W = tf.Variable([.3], dtype=tf.float32)
    b = tf.Variable([-.3], dtype=tf.float32)
    x = tf.placeholder(tf.float32)
    linear_model = W*x + b    
    init = tf.global_variables_initializer()
    
    sess = tf.Session()
    sess.run(init)
    
    print(sess.run(linear_model, {x: [1,2,3,4]}))
    print(sess.run(linear_model, {x: [[1,2],[3,4]]}))
    
    y = tf.placeholder(tf.float32)
    squared_deltas = tf.square(linear_model - y)
    loss = tf.reduce_sum(squared_deltas)
    loss1 = tf.reduce_mean(squared_deltas)
    
    print(sess.run(linear_model, {x:[1,2,3,4]}))
    print(sess.run(squared_deltas, {x: [1,2,3,4], y: [0, -1, -2, -3]}))
    print(sess.run(loss, {x: [1,2,3,4], y: [0, -1, -2, -3]}))
    print(sess.run(loss/4, {x: [1,2,3,4], y: [0, -1, -2, -3]})) # sum/4 equals the mean, matching loss1 below
    print(sess.run(loss1, {x: [1,2,3,4], y: [0, -1, -2, -3]}))

def test4():
    b = tf.Variable([-.3], dtype=tf.float32)
    fixb = tf.assign(b, [1.]) # writes [1.] into b and returns the updated value
    
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    print(sess.run(fixb))
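
    # TF2 aside (not from the original post): in eager mode the same update is
    # a method on the variable itself, e.g. b.assign([1.]) then b.numpy().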
    

def test5():
    mnist = tf.keras.datasets.mnist
    (x_train, y_train),(x_test, y_test) = mnist.load_data()
    print(type(x_train))
    print(type(y_train))
    print(type(x_test))
    print(type(y_test))
    x_train, x_test = x_train / 255.0, x_test / 255.0
    
    model = tf.keras.models.Sequential([
      tf.keras.layers.Flatten(input_shape=(28, 28)),
      tf.keras.layers.Dense(512, activation=tf.nn.relu),
      tf.keras.layers.Dropout(0.2),
      tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    
    model.fit(x_train, y_train, epochs=5)
    model.evaluate(x_test, y_test)
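    # A short follow-up sketch (not in the original post): inspect one test
    # prediction by taking the argmax of the softmax output.
    probs = model.predict(x_test[:1])
    print(np.argmax(probs, axis=1), y_test[:1])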

def test6():
    # specify the dtype; TensorFlow mostly handles float32 data
    input1 = tf.placeholder(tf.float32)
    input2 = tf.placeholder(tf.float32)
    
    # renamed in TensorFlow 1.0:
    # tf.mul -> tf.multiply
    # tf.sub -> tf.subtract
    # tf.neg -> tf.negative
    output = tf.multiply(input1, input2)
    
    with tf.Session() as sess:
        # placeholders receive their values at sess.run() time via feed_dict
        print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))


def add_layer(inputs,in_size,out_size,activation_function=None):
    #Weights is a matrix of shape [rows, cols] = [in_size, out_size]
    Weights=tf.Variable(tf.random_normal([in_size,out_size]))#normal distribution
    #a non-zero initial value is recommended, so add 0.1; one row, out_size columns
    biases=tf.Variable(tf.zeros([1,out_size])+0.1)
    #the pre-activation value, Weights*x+b
    Wx_plus_b=tf.matmul(inputs,Weights)+biases

    #apply the activation

    if activation_function is None:
        #an activation function of None means a linear (identity) output
        outputs=Wx_plus_b
    else:
        outputs=activation_function(Wx_plus_b)
    return outputs
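
# For intuition, a NumPy sketch of what one add_layer call computes
# (illustrative only; add_layer_numpy is a hypothetical helper, not used below):
def add_layer_numpy(inputs, weights, biases, activation_function=None):
    # affine transform: shape [n_examples, out_size]
    wx_plus_b = inputs @ weights + biases
    return wx_plus_b if activation_function is None else activation_function(wx_plus_b)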

def test7():
    """定義數據形式"""
    # (-1,1)之間,有300個單位,後面的是維度,x_data是有300行(300個例子)
    x_data=np.linspace(-1,1,300)[:,np.newaxis]
    # 加噪聲,均值爲0,方差爲0.05,大小和x_data一樣
    print(x_data.shape)
    noise=np.random.normal(0,0.05,x_data.shape)
    y_data=np.square(x_data)-0.5+noise
    
    xs=tf.placeholder(tf.float32,[None,1])
    ys=tf.placeholder(tf.float32,[None,1])
    
    """建立網絡"""
    #定義隱藏層,輸入1個節點,輸出10個節點
    l1=add_layer(xs,1,10,activation_function=tf.nn.relu)
    #定義輸出層
    prediction=add_layer(l1,10,1,activation_function=None)
    
    """預測"""
    #損失函數,算出的是每個例子的平方,要求和(reduction_indices=[1],按行求和),再求均值
    loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))
    
    """訓練"""
    #優化算法,minimize(loss)以0.1的學習率對loss進行減小
    train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)
    
    init=tf.global_variables_initializer()
    
    with tf.Session() as sess:
        sess.run(init)
        for i in range(1000):
            sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
            if i%50==0:
                print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))

def test8():
    import matplotlib.pylab as plt
    """定義數據形式"""
    # (-1,1)之間,有300個單位,後面的是維度,x_data是有300行(300個例子)
    x_data=np.linspace(-1,1,300)[:,np.newaxis]
    # 加噪聲,均值爲0,方差爲0.05,大小和x_data一樣
    noise=np.random.normal(0,0.05,x_data.shape)
    y_data=np.square(x_data)-0.5+noise
    
    xs=tf.placeholder(tf.float32,[None,1])
    ys=tf.placeholder(tf.float32,[None,1])

    """建立網絡"""
    #定義隱藏層,輸入1個節點,輸出10個節點
    l1=add_layer(xs,1,10,activation_function=tf.nn.relu)
    #定義輸出層
    prediction=add_layer(l1,10,1,activation_function=None)
    
    """預測"""
    #損失函數,算出的是每個例子的平方,要求和(reduction_indices=[1],按行求和),再求均值
    loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))
    
    """訓練"""
    #優化算法,minimize(loss)以0.1的學習率對loss進行減小
    train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)
    
    init=tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
    
        fig=plt.figure()
        #continuously redraw the figure
        ax=fig.add_subplot(1,1,1)
        ax.scatter(x_data,y_data)
        # interactive mode: drawing does not block
        plt.ion()
        # plt.show() would block after a single draw
        # plt.show(block=False) also avoids blocking, but plt.ion() is more convenient
        for i in range(1000):
            sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
            if i%50==0:
                # print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
                #erase the previous line before drawing the new one
                #on the first iteration there is no line yet and the removal raises, so ignore the error and continue
                try:
                    # remove the previously drawn line (there is only ever one)
                    lines[0].remove()
                except Exception:
                    pass
                prediction_value=sess.run(prediction,feed_dict={xs:x_data})
                lines=ax.plot(x_data,prediction_value,'r-',lw=5) #lw = line width
    
                # pause for 0.1 s so the frame is visible
                plt.pause(0.1)
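
        # Keep the final figure on screen once training ends (an optional
        # addition, not in the original post):
        plt.ioff()
        plt.show()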

    
#test1()
#test2() 
#test3()
#test4()
#test5()    
#test6()
#test7()
test8()

 
