TensorFlow Techniques Roundup (Part 2)

TensorFlow API

Defining tensors

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant([[1, 2], [3, 4]])
    print(a)
    b = a
    a = tf.ones((2, 2))
    print(a)
    a = tf.zeros((2, 2))
    print(a)
    a = tf.eye(2, 2)
    print(a)
    a = tf.zeros_like(b)
    print(a)
    a = tf.ones_like(b)
    print(a)
    # Generate the sequence 0 to 10 with step 1
    a = tf.constant(np.arange(0, 11, 1))
    print(a)
    # Generate 4 evenly spaced values from 2 to 10
    a = tf.linspace(2, 10, 4)
    print(a)
    # Generate the sequence 0 to 6
    a = tf.range(7)
    print(a)

Run result

tf.Tensor(
[[1 2]
 [3 4]], shape=(2, 2), dtype=int32)
tf.Tensor(
[[1. 1.]
 [1. 1.]], shape=(2, 2), dtype=float32)
tf.Tensor(
[[0. 0.]
 [0. 0.]], shape=(2, 2), dtype=float32)
tf.Tensor(
[[1. 0.]
 [0. 1.]], shape=(2, 2), dtype=float32)
tf.Tensor(
[[0 0]
 [0 0]], shape=(2, 2), dtype=int32)
tf.Tensor(
[[1 1]
 [1 1]], shape=(2, 2), dtype=int32)
tf.Tensor([ 0  1  2  3  4  5  6  7  8  9 10], shape=(11,), dtype=int64)
tf.Tensor([ 2.          4.66666667  7.33333333 10.        ], shape=(4,), dtype=float64)
tf.Tensor([0 1 2 3 4 5 6], shape=(7,), dtype=int32)

 

Arithmetic operations

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[1, 2, 3], [4, 5, 6]])
    b = tf.constant([1, 4, 7])
    print(a + b)
    print(tf.add(a, b))
    print(a - b)
    print(tf.subtract(a, b))
    print(a * b)
    print(tf.multiply(a, b))
    print(a / b)
    print(tf.divide(a, b))

Run result

tf.Tensor(
[[ 2  6 10]
 [ 5  9 13]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[ 2  6 10]
 [ 5  9 13]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[ 0 -2 -4]
 [ 3  1 -1]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[ 0 -2 -4]
 [ 3  1 -1]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[ 1  8 21]
 [ 4 20 42]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[ 1  8 21]
 [ 4 20 42]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[1.         0.5        0.42857143]
 [4.         1.25       0.85714286]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[1.         0.5        0.42857143]
 [4.         1.25       0.85714286]], shape=(2, 3), dtype=float64)

Matrix multiplication

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[1, 2, 3], [4, 5, 6]])
    b = tf.constant([[2, 4], [11, 13], [7, 9]])
    print(tf.matmul(a, b))

Run result

tf.Tensor(
[[ 45  57]
 [105 135]], shape=(2, 2), dtype=int32)

Power and exponential operations

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[1, 2, 3], [4, 5, 6]])
    print(tf.pow(a, 2))
    print(a**2)
    a = tf.constant([2], dtype=tf.float32)
    print(tf.exp(a))

Run result

tf.Tensor(
[[ 1  4  9]
 [16 25 36]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[ 1  4  9]
 [16 25 36]], shape=(2, 3), dtype=int32)
tf.Tensor([7.389056], shape=(1,), dtype=float32)

Sparse tensors

import tensorflow as tf

if __name__ == '__main__':

    a = tf.SparseTensor(indices=[[0, 2], [1, 0], [1, 2]],
                        values=[3., 4., 5.],
                        dense_shape=[2, 4])
    print(a)
    print(tf.sparse.to_dense(a))

Run result

SparseTensor(indices=tf.Tensor(
[[0 2]
 [1 0]
 [1 2]], shape=(3, 2), dtype=int64), values=tf.Tensor([3. 4. 5.], shape=(3,), dtype=float32), dense_shape=tf.Tensor([2 4], shape=(2,), dtype=int64))
tf.Tensor(
[[0. 0. 3. 0.]
 [4. 0. 5. 0.]], shape=(2, 4), dtype=float32)

Unlike PyTorch, the indices are laid out differently: PyTorch defines coordinates column-wise (one row per dimension), while TensorFlow defines them row-wise (one coordinate per row), as the sketch below shows.
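
For comparison, a minimal PyTorch sketch (assuming the torch package is installed) of the same sparse tensor, where indices has one row per dimension and each column is one coordinate:

import torch

# PyTorch: indices has shape (ndim, nnz); each COLUMN is one coordinate
indices = torch.tensor([[0, 1, 1],    # row coordinates
                        [2, 0, 2]])   # column coordinates
values = torch.tensor([3., 4., 5.])
a = torch.sparse_coo_tensor(indices, values, (2, 4))
print(a.to_dense())
# tensor([[0., 0., 3., 0.],
#         [4., 0., 5., 0.]])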

Square root

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32)
    print(tf.sqrt(a))

Run result

tf.Tensor(
[[1.        1.4142135 1.7320508]
 [2.        2.236068  2.4494898]], shape=(2, 3), dtype=float32)

Logarithms

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([1, 2, 3], dtype=tf.float32)
    print(tf.math.log(a))
    # Logarithm base 2, via the change-of-base formula
    print(tf.math.log(a) / tf.math.log(tf.constant([2], dtype=tf.float32)))

Run result

tf.Tensor([0.        0.6931472 1.0986123], shape=(3,), dtype=float32)
tf.Tensor([0.        1.        1.5849625], shape=(3,), dtype=float32)

Broadcasting

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 1, 1))
    print(a)
    b = tf.constant(np.random.rand(3))
    print(b)
    print(a + b)

Run result

tf.Tensor(
[[[0.01537639]]

 [[0.82803137]]], shape=(2, 1, 1), dtype=float64)
tf.Tensor([0.85110735 0.80508366 0.94771198], shape=(3,), dtype=float64)
tf.Tensor(
[[[0.86648374 0.82046006 0.96308837]]

 [[1.67913872 1.63311503 1.77574335]]], shape=(2, 1, 3), dtype=float64)
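
Broadcasting aligns shapes from the trailing dimensions: (2, 1, 1) and (3,) align as (2, 1, 1) against (1, 1, 3), every size-1 dimension is stretched, and the result is (2, 1, 3). A small sketch to inspect the broadcast shape directly:

import tensorflow as tf

a = tf.ones((2, 1, 1))
b = tf.ones((3,))
# Compute the broadcast shape without materializing the result
print(tf.broadcast_static_shape(a.shape, b.shape))  # (2, 1, 3)
# Explicitly materialize a tensor broadcast to a target shape
print(tf.broadcast_to(b, (2, 1, 3)))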

Rounding and modulo

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 2))
    a *= 10
    print(a)
    print(tf.floor(a))
    print(tf.math.ceil(a))
    print(tf.round(a))
    print(a % 2)
    print(tf.math.mod(a, 2))

Run result

tf.Tensor(
[[7.44209334 6.03067307]
 [0.33836814 2.66141984]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[7. 6.]
 [0. 2.]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[8. 7.]
 [1. 3.]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[7. 6.]
 [0. 3.]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[1.44209334 0.03067307]
 [0.33836814 0.66141984]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[1.44209334 0.03067307]
 [0.33836814 0.66141984]], shape=(2, 2), dtype=float64)

Comparison operations

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[1, 2, 3], [4, 5, 6]])
    b = tf.constant([[1, 4, 7], [6, 5, 7]])
    c = tf.random.uniform((2, 4))
    d = a
    print(tf.equal(a, b))
    # >=
    print(tf.greater_equal(a, b))
    # >
    print(tf.greater(a, b))
    # <=
    print(tf.less_equal(a, b))
    # <
    print(tf.less(a, b))
    # !=
    print(tf.not_equal(a, b))

Run result

tf.Tensor(
[[ True False False]
 [False  True False]], shape=(2, 3), dtype=bool)
tf.Tensor(
[[ True False False]
 [False  True False]], shape=(2, 3), dtype=bool)
tf.Tensor(
[[False False False]
 [False False False]], shape=(2, 3), dtype=bool)
tf.Tensor(
[[ True  True  True]
 [ True  True  True]], shape=(2, 3), dtype=bool)
tf.Tensor(
[[False  True  True]
 [ True False  True]], shape=(2, 3), dtype=bool)
tf.Tensor(
[[False  True  True]
 [ True False  True]], shape=(2, 3), dtype=bool)

Sorting

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([1, 4, 4, 3, 5])
    print(tf.sort(a))
    # Indices that would sort in ascending order
    print(tf.argsort(a))
    print(tf.sort(a, direction='DESCENDING'))
    # Indices that would sort in descending order
    print(tf.argsort(a, direction='DESCENDING'))
    b = tf.constant([[1, 4, 4, 3, 5], [2, 3, 1, 3, 5]])
    print(tf.sort(b))
    # Sort in ascending order along the first dimension
    print(tf.sort(b, axis=0))

Run result

tf.Tensor([1 3 4 4 5], shape=(5,), dtype=int32)
tf.Tensor([0 3 1 2 4], shape=(5,), dtype=int32)
tf.Tensor([5 4 4 3 1], shape=(5,), dtype=int32)
tf.Tensor([4 1 2 3 0], shape=(5,), dtype=int32)
tf.Tensor(
[[1 3 4 4 5]
 [1 2 3 3 5]], shape=(2, 5), dtype=int32)
tf.Tensor(
[[1 3 1 3 5]
 [2 4 4 3 5]], shape=(2, 5), dtype=int32)

Top-K

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[1, 4, 6, 3, 5], [2, 3, 4, 4, 5]], dtype=tf.float32)
    print(tf.math.top_k(a, k=2))
    # Whether the target classes (4 for row 0, 1 for row 1) are among each row's top-2 predictions
    print(tf.math.in_top_k([4, 1], a, k=2))

Run result

TopKV2(values=<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[6., 5.],
       [5., 4.]], dtype=float32)>, indices=<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[2, 4],
       [4, 2]], dtype=int32)>)
tf.Tensor([ True False], shape=(2,), dtype=bool)

Unlike PyTorch, tf.math.top_k does not support Top-K along arbitrary dimensions; it only works on the last one. A transpose-based workaround is sketched below.
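
A minimal sketch of Top-K along the first dimension, obtained by moving that axis to the end with a transpose:

import tensorflow as tf

a = tf.constant([[1, 4, 6, 3, 5], [2, 3, 4, 4, 5]], dtype=tf.float32)
# Top-1 along axis 0: transpose so axis 0 becomes the last axis,
# run top_k there, then transpose the results back
values, indices = tf.math.top_k(tf.transpose(a), k=1)
print(tf.transpose(values))   # per-column maxima, shape (1, 5)
print(tf.transpose(indices))  # row index of each column maximum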

Trigonometric functions

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([0, 0, 0], dtype=tf.float32)
    print(tf.cos(a))

Run result

tf.Tensor([1. 1. 1.], shape=(3,), dtype=float32)

Statistical functions

import tensorflow as tf
import numpy as np
from scipy import stats

if __name__ == '__main__':
    a = tf.constant(np.random.rand(2, 2))
    print(a)
    # Mean of all elements
    print(tf.reduce_mean(a))
    # Maximum of each row
    print(tf.reduce_max(a, [1]))
    # Maximum of each column
    print(tf.reduce_max(a, [0]))
    # Mean along the first dimension
    print(tf.reduce_mean(a, axis=0))
    # Sum of all elements
    print(tf.math.reduce_sum(a))
    # Sum along the first dimension
    print(tf.math.reduce_sum(a, axis=0))
    # Product of all elements
    print(tf.math.reduce_prod(a))
    # Product along the first dimension
    print(tf.math.reduce_prod(a, axis=0))
    # Index of the maximum along the first dimension
    print(tf.argmax(a, axis=0))
    # Index of the minimum along the first dimension
    print(tf.argmin(a, axis=0))
    # Standard deviation
    print(tf.math.reduce_std(a))
    # Variance
    print(tf.math.reduce_variance(a))
    # Median (the middle value)
    print(tf.constant(np.median(a.numpy())))
    # Mode (the most frequent value)
    print(tf.constant(stats.mode(a.numpy())[0][0]))
    a *= 10
    # Histogram with 6 bins over the range [0, max]
    print(tf.histogram_fixed_width(a, [0, tf.math.reduce_max(a)], nbins=6))
    a = tf.constant(np.random.randint(0, 10, [10]), dtype=tf.int32)
    print(a)
    print(tf.math.bincount(a))

Run result

tf.Tensor(
[[0.18079731 0.83275504]
 [0.93849049 0.69468691]], shape=(2, 2), dtype=float64)
tf.Tensor(0.6616824381588154, shape=(), dtype=float64)
tf.Tensor([0.83275504 0.93849049], shape=(2,), dtype=float64)
tf.Tensor([0.93849049 0.83275504], shape=(2,), dtype=float64)
tf.Tensor([0.5596439  0.76372097], shape=(2,), dtype=float64)
tf.Tensor(2.646729752635262, shape=(), dtype=float64)
tf.Tensor([1.1192878  1.52744195], shape=(2,), dtype=float64)
tf.Tensor(0.09815857168640853, shape=(), dtype=float64)
tf.Tensor([0.16967656 0.57850403], shape=(2,), dtype=float64)
tf.Tensor([1 0], shape=(2,), dtype=int64)
tf.Tensor([0 1], shape=(2,), dtype=int64)
tf.Tensor(0.29078700667359864, shape=(), dtype=float64)
tf.Tensor(0.08455708325019148, shape=(), dtype=float64)
tf.Tensor(0.7637209747951317, shape=(), dtype=float64)
tf.Tensor([0.18079731 0.69468691], shape=(2,), dtype=float64)
tf.Tensor([0 1 0 0 1 2], shape=(6,), dtype=int32)
tf.Tensor([9 8 5 5 1 9 7 3 9 9], shape=(10,), dtype=int32)
tf.Tensor([0 1 0 1 0 2 0 1 1 4], shape=(10,), dtype=int32)

Random sampling

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    tf.random.set_seed(1)
    np.random.seed(1)
    mean = tf.constant(np.random.rand(1, 2), dtype=tf.float32)
    std = tf.constant(np.random.rand(1, 2), dtype=tf.float32)
    # Normal distribution
    print(tf.random.normal((1, 2), mean=mean, stddev=std))

Run result

tf.Tensor([[0.41689605 1.1876557 ]], shape=(1, 2), dtype=float32)

Norms

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 1))
    b = tf.constant(np.random.rand(2, 1))
    print(a)
    print(b)
    # L1 norm of the difference
    print(tf.reduce_sum(tf.abs(a - b)))
    # L2 norm of the difference along the first dimension
    print(tf.sqrt(tf.reduce_sum(tf.square(a - b), axis=0)))
    print(tf.norm(a))

Run result

tf.Tensor(
[[0.90844106]
 [0.57310612]], shape=(2, 1), dtype=float64)
tf.Tensor(
[[0.49385858]
 [0.8327303 ]], shape=(2, 1), dtype=float64)
tf.Tensor(0.6742066607716513, shape=(), dtype=float64)
tf.Tensor([0.48916597], shape=(1,), dtype=float64)
tf.Tensor(1.0741116263657322, shape=(), dtype=float64)
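
The same quantities can be computed directly with tf.norm by choosing ord and axis; a short sketch:

import tensorflow as tf
import numpy as np

a = tf.constant(np.random.rand(2, 1))
b = tf.constant(np.random.rand(2, 1))
# L1 distance: 1-norm over all elements of the difference
print(tf.norm(a - b, ord=1))
# L2 distance along the first dimension, matching the manual version above
print(tf.norm(a - b, ord='euclidean', axis=0))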

Tensor clipping

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 2)) * 10
    print(a)
    # Values below 2 become 2, values above 5 become 5, values in between stay unchanged
    a = tf.clip_by_value(a, 2, 5)
    print(a)

Run result

tf.Tensor(
[[0.70826017 7.9352412 ]
 [2.81880047 0.15188473]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[2.         5.        ]
 [2.81880047 2.        ]], shape=(2, 2), dtype=float64)

Tensor indexing and filtering

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(4, 4))
    b = tf.constant(np.random.rand(4, 4))
    print(a)
    print(b)
    out = tf.where(a > 0.5, a, b)
    print(out)
    # Returns the coordinates of every position where a > b, in a different layout than PyTorch
    out = tf.where(a > b)
    print(out)
    # Pick rows 0, 3 and 2 of a (first dimension) to build a new tensor
    out = tf.gather(a, indices=tf.constant([0, 3, 2]), axis=0)
    print(out)
    # Pick columns 0, 3 and 2 of a (second dimension) to build a new tensor
    out = tf.gather(a, indices=tf.constant([0, 3, 2]), axis=1)
    print(out)
    a = tf.linspace(1, 16, 16)
    a = tf.reshape(a, (4, 4))
    print(a)
    # Unlike PyTorch's gather: each index selects a whole row, so the result
    # gains a dimension (a PyTorch-style gather is sketched after this section)
    out = tf.gather(a, indices=tf.constant([[0, 1, 1, 1], [0, 1, 2, 2], [0, 1, 3, 3]]), axis=0)
    print(out)
    mask = tf.greater(a, 8)
    print(mask)
    out = tf.boolean_mask(a, mask)
    print(out)
    a = tf.reshape(a, (-1,))
    out = tf.gather(a, indices=tf.constant([0, 15, 13, 10]))
    print(out)
    a = tf.constant([[0, 1, 2, 0], [2, 3, 0, 1]])
    out = tf.where(a > 0)
    print(out)

Run result

tf.Tensor(
[[0.36259518 0.19380163 0.44899191 0.28385357]
 [0.515987   0.80924924 0.2813977  0.61434081]
 [0.77888583 0.11589285 0.47789409 0.02359696]
 [0.16616797 0.19040272 0.80915301 0.49108375]], shape=(4, 4), dtype=float64)
tf.Tensor(
[[0.38603178 0.6567274  0.93351692 0.97103008]
 [0.11047731 0.44910036 0.66476197 0.223172  ]
 [0.01461463 0.76318377 0.44559227 0.93314565]
 [0.92961906 0.58895644 0.0390715  0.78634479]], shape=(4, 4), dtype=float64)
tf.Tensor(
[[0.38603178 0.6567274  0.93351692 0.97103008]
 [0.515987   0.80924924 0.66476197 0.61434081]
 [0.77888583 0.76318377 0.44559227 0.93314565]
 [0.92961906 0.58895644 0.80915301 0.78634479]], shape=(4, 4), dtype=float64)
tf.Tensor(
[[1 0]
 [1 1]
 [1 3]
 [2 0]
 [2 2]
 [3 2]], shape=(6, 2), dtype=int64)
tf.Tensor(
[[0.36259518 0.19380163 0.44899191 0.28385357]
 [0.16616797 0.19040272 0.80915301 0.49108375]
 [0.77888583 0.11589285 0.47789409 0.02359696]], shape=(3, 4), dtype=float64)
tf.Tensor(
[[0.36259518 0.28385357 0.44899191]
 [0.515987   0.61434081 0.2813977 ]
 [0.77888583 0.02359696 0.47789409]
 [0.16616797 0.49108375 0.80915301]], shape=(4, 3), dtype=float64)
tf.Tensor(
[[ 1.  2.  3.  4.]
 [ 5.  6.  7.  8.]
 [ 9. 10. 11. 12.]
 [13. 14. 15. 16.]], shape=(4, 4), dtype=float64)
tf.Tensor(
[[[ 1.  2.  3.  4.]
  [ 5.  6.  7.  8.]
  [ 5.  6.  7.  8.]
  [ 5.  6.  7.  8.]]

 [[ 1.  2.  3.  4.]
  [ 5.  6.  7.  8.]
  [ 9. 10. 11. 12.]
  [ 9. 10. 11. 12.]]

 [[ 1.  2.  3.  4.]
  [ 5.  6.  7.  8.]
  [13. 14. 15. 16.]
  [13. 14. 15. 16.]]], shape=(3, 4, 4), dtype=float64)
tf.Tensor(
[[False False False False]
 [False False False False]
 [ True  True  True  True]
 [ True  True  True  True]], shape=(4, 4), dtype=bool)
tf.Tensor([ 9. 10. 11. 12. 13. 14. 15. 16.], shape=(8,), dtype=float64)
tf.Tensor([ 1. 16. 14. 11.], shape=(4,), dtype=float64)
tf.Tensor(
[[0 1]
 [0 2]
 [1 0]
 [1 1]
 [1 3]], shape=(5, 2), dtype=int64)
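
PyTorch-style gather (out[i, j] = a[i, idx[i, j]]) can be reproduced with tf.gather and batch_dims; a sketch with hypothetical indices:

import tensorflow as tf

a = tf.reshape(tf.linspace(1., 16., 16), (4, 4))
idx = tf.constant([[0, 1], [1, 2], [2, 3], [3, 0]])
# For each row i, pick the columns listed in idx[i]
# (the behaviour of torch.gather(a, dim=1, index=idx))
out = tf.gather(a, idx, axis=1, batch_dims=1)
print(out)
# [[ 1.  2.]
#  [ 6.  7.]
#  [11. 12.]
#  [16. 13.]]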

Combining and concatenating tensors

import tensorflow as tf

if __name__ == '__main__':

    a = tf.zeros((2, 4))
    b = tf.ones((2, 4))
    # Concatenate along the first dimension; no new dimension is added,
    # only more entries in that dimension
    out = tf.concat((a, b), axis=0)
    print(out)
    a = tf.linspace(1, 6, 6)
    a = tf.reshape(a, (2, 3))
    b = tf.linspace(7, 12, 6)
    b = tf.reshape(b, (2, 3))
    print(a)
    print(b)
    # Stack a and b as two separate elements; a new leading dimension of size 2 is added
    out = tf.stack((a, b), axis=0)
    print(out)
    # Pair row i of a with row i of b; the pairing forms a new dimension at axis=1
    out = tf.stack((a, b), axis=1)
    print(out)
    print(out[:, 0, :])
    print(out[:, 1, :])
    # Pair each element a[i, j] with b[i, j]; the pairing forms a new last dimension
    out = tf.stack((a, b), axis=2)
    print(out)
    print(out[:, :, 0])
    print(out[:, :, 1])

Run result

tf.Tensor(
[[0. 0. 0. 0.]
 [0. 0. 0. 0.]
 [1. 1. 1. 1.]
 [1. 1. 1. 1.]], shape=(4, 4), dtype=float32)
tf.Tensor(
[[1. 2. 3.]
 [4. 5. 6.]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[ 7.  8.  9.]
 [10. 11. 12.]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[[ 1.  2.  3.]
  [ 4.  5.  6.]]

 [[ 7.  8.  9.]
  [10. 11. 12.]]], shape=(2, 2, 3), dtype=float64)
tf.Tensor(
[[[ 1.  2.  3.]
  [ 7.  8.  9.]]

 [[ 4.  5.  6.]
  [10. 11. 12.]]], shape=(2, 2, 3), dtype=float64)
tf.Tensor(
[[1. 2. 3.]
 [4. 5. 6.]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[ 7.  8.  9.]
 [10. 11. 12.]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[[ 1.  7.]
  [ 2.  8.]
  [ 3.  9.]]

 [[ 4. 10.]
  [ 5. 11.]
  [ 6. 12.]]], shape=(2, 3, 2), dtype=float64)
tf.Tensor(
[[1. 2. 3.]
 [4. 5. 6.]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[ 7.  8.  9.]
 [10. 11. 12.]], shape=(2, 3), dtype=float64)

Splitting tensors

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(4, 4))
    print(a)
    # With an integer, the split must be into equal parts (the dimension must
    # be divisible); pass a list of sizes for unequal splits, as below
    out = tf.split(a, 2, axis=0)
    print(out)
    out = tf.split(a, 2, axis=1)
    print(out)
    out = tf.split(a, [2, 1, 1], axis=0)
    print(out)

Run result

tf.Tensor(
[[0.76125837 0.55359673 0.5226298  0.162057  ]
 [0.70181616 0.21919613 0.29682429 0.87271386]
 [0.65784256 0.11186252 0.46019658 0.68551491]
 [0.93408276 0.7907662  0.62974255 0.16056717]], shape=(4, 4), dtype=float64)
[<tf.Tensor: shape=(2, 4), dtype=float64, numpy=
array([[0.76125837, 0.55359673, 0.5226298 , 0.162057  ],
       [0.70181616, 0.21919613, 0.29682429, 0.87271386]])>, <tf.Tensor: shape=(2, 4), dtype=float64, numpy=
array([[0.65784256, 0.11186252, 0.46019658, 0.68551491],
       [0.93408276, 0.7907662 , 0.62974255, 0.16056717]])>]
[<tf.Tensor: shape=(4, 2), dtype=float64, numpy=
array([[0.76125837, 0.55359673],
       [0.70181616, 0.21919613],
       [0.65784256, 0.11186252],
       [0.93408276, 0.7907662 ]])>, <tf.Tensor: shape=(4, 2), dtype=float64, numpy=
array([[0.5226298 , 0.162057  ],
       [0.29682429, 0.87271386],
       [0.46019658, 0.68551491],
       [0.62974255, 0.16056717]])>]
[<tf.Tensor: shape=(2, 4), dtype=float64, numpy=
array([[0.76125837, 0.55359673, 0.5226298 , 0.162057  ],
       [0.70181616, 0.21919613, 0.29682429, 0.87271386]])>, <tf.Tensor: shape=(1, 4), dtype=float64, numpy=array([[0.65784256, 0.11186252, 0.46019658, 0.68551491]])>, <tf.Tensor: shape=(1, 4), dtype=float64, numpy=array([[0.93408276, 0.7907662 , 0.62974255, 0.16056717]])>]

Reshaping tensors

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 3))
    print(a)
    out = tf.reshape(a, (3, 2))
    print(out)
    print(tf.transpose(a))
    a = tf.constant(np.random.rand(1, 2, 3))
    print(a)
    # Unlike PyTorch, the full permutation of all dimensions must be listed to swap axes
    out = tf.transpose(a, (1, 0, 2))
    print(out)
    out = tf.squeeze(a)
    print(out)
    b = tf.constant(np.random.rand(1, 2, 1))
    print(b)
    # Remove only the size-1 first dimension, keeping the size-1 third dimension
    out = tf.squeeze(b, [0])
    print(out)
    out = tf.expand_dims(a, -1)
    print(out)
    out = tf.split(a, 2, axis=1)
    print(out)
    out = tf.reverse(a, axis=[1])
    print(out)
    out = tf.reverse(a, axis=[2])
    print(out)
    out = tf.reverse(a, axis=[1, 2])
    print(out)

Run result

tf.Tensor(
[[0.58693252 0.40994406 0.4536791 ]
 [0.98581972 0.02112171 0.13718469]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[0.58693252 0.40994406]
 [0.4536791  0.98581972]
 [0.02112171 0.13718469]], shape=(3, 2), dtype=float64)
tf.Tensor(
[[0.58693252 0.98581972]
 [0.40994406 0.02112171]
 [0.4536791  0.13718469]], shape=(3, 2), dtype=float64)
tf.Tensor(
[[[0.0294984  0.39577176 0.41719977]
  [0.16434196 0.06959612 0.89783393]]], shape=(1, 2, 3), dtype=float64)
tf.Tensor(
[[[0.0294984  0.39577176 0.41719977]]

 [[0.16434196 0.06959612 0.89783393]]], shape=(2, 1, 3), dtype=float64)
tf.Tensor(
[[0.0294984  0.39577176 0.41719977]
 [0.16434196 0.06959612 0.89783393]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[[0.48809593]
  [0.65678944]]], shape=(1, 2, 1), dtype=float64)
tf.Tensor(
[[0.48809593]
 [0.65678944]], shape=(2, 1), dtype=float64)
tf.Tensor(
[[[[0.0294984 ]
   [0.39577176]
   [0.41719977]]

  [[0.16434196]
   [0.06959612]
   [0.89783393]]]], shape=(1, 2, 3, 1), dtype=float64)
[<tf.Tensor: shape=(1, 1, 3), dtype=float64, numpy=array([[[0.0294984 , 0.39577176, 0.41719977]]])>, <tf.Tensor: shape=(1, 1, 3), dtype=float64, numpy=array([[[0.16434196, 0.06959612, 0.89783393]]])>]
tf.Tensor(
[[[0.16434196 0.06959612 0.89783393]
  [0.0294984  0.39577176 0.41719977]]], shape=(1, 2, 3), dtype=float64)
tf.Tensor(
[[[0.41719977 0.39577176 0.0294984 ]
  [0.89783393 0.06959612 0.16434196]]], shape=(1, 2, 3), dtype=float64)
tf.Tensor(
[[[0.89783393 0.06959612 0.16434196]
  [0.41719977 0.39577176 0.0294984 ]]], shape=(1, 2, 3), dtype=float64)

TensorFlow has no general tensor-rotation API comparable to torch.rot90, although tf.image.rot90 covers image tensors.
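
A 90° counter-clockwise rotation of a 2-D tensor can be emulated with transpose and reverse; a minimal sketch:

import tensorflow as tf

def rot90_ccw(x):
    # Rotate a 2-D tensor 90 degrees counter-clockwise:
    # transpose, then reverse along the first axis
    return tf.reverse(tf.transpose(x), axis=[0])

a = tf.constant([[1, 2], [3, 4]])
print(rot90_ccw(a))
# [[2 4]
#  [1 3]]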

Tensor padding

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.fill((2, 3), 10)
    print(a)
    a = tf.constant(np.random.rand(2, 2))
    print(a)
    # Pad one row of zeros above and below, one column of zeros on the left, none on the right
    out = tf.pad(a, [[1, 1], [1, 0]])
    print(out)

Run result

tf.Tensor(
[[10 10 10]
 [10 10 10]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[0.65031861 0.45838591]
 [0.54348782 0.06645889]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[0.         0.         0.        ]
 [0.         0.65031861 0.45838591]
 [0.         0.54348782 0.06645889]
 [0.         0.         0.        ]], shape=(4, 3), dtype=float64)

Meshgrid

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 2))
    b = tf.constant(np.random.rand(3, 2))
    print(a)
    print(b)
    a_count = a.shape[0] * a.shape[1]
    b_count = b.shape[0] * b.shape[1]
    print(a_count, b_count)
    # Generates two b_count x a_count tensors (6 x 4 here): the first repeats
    # a's flattened values in every row; the second holds one flattened value
    # of b per row, constant within a row and different between rows
    out = tf.meshgrid(a, b)
    print(out)

Run result

tf.Tensor(
[[0.71037233 0.70864467]
 [0.72272255 0.97050206]], shape=(2, 2), dtype=float64)
tf.Tensor(
[[0.9679168  0.60009048]
 [0.43580961 0.31357009]
 [0.23997045 0.63421268]], shape=(3, 2), dtype=float64)
4 6
[<tf.Tensor: shape=(6, 4), dtype=float64, numpy=
array([[0.71037233, 0.70864467, 0.72272255, 0.97050206],
       [0.71037233, 0.70864467, 0.72272255, 0.97050206],
       [0.71037233, 0.70864467, 0.72272255, 0.97050206],
       [0.71037233, 0.70864467, 0.72272255, 0.97050206],
       [0.71037233, 0.70864467, 0.72272255, 0.97050206],
       [0.71037233, 0.70864467, 0.72272255, 0.97050206]])>, <tf.Tensor: shape=(6, 4), dtype=float64, numpy=
array([[0.9679168 , 0.9679168 , 0.9679168 , 0.9679168 ],
       [0.60009048, 0.60009048, 0.60009048, 0.60009048],
       [0.43580961, 0.43580961, 0.43580961, 0.43580961],
       [0.31357009, 0.31357009, 0.31357009, 0.31357009],
       [0.23997045, 0.23997045, 0.23997045, 0.23997045],
       [0.63421268, 0.63421268, 0.63421268, 0.63421268]])>]
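
tf.meshgrid defaults to Cartesian 'xy' indexing, where the first output varies along columns; for matrix-style grids, indexing='ij' can be passed. A quick sketch:

import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant([4, 5])
X, Y = tf.meshgrid(x, y)                   # 'xy' indexing: shapes (2, 3)
Xi, Yi = tf.meshgrid(x, y, indexing='ij')  # 'ij' indexing: shapes (3, 2)
print(X.shape, Xi.shape)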

Element-wise minimum and maximum

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 3))
    b = tf.constant(np.random.rand(2, 3))
    print(a)
    print(b)
    # Keep the element-wise minimum of a and b as a new tensor
    out = tf.minimum(a, b)
    print(out)
    out = tf.where(a < b, a, b)
    print(out)
    # Keep the element-wise maximum of a and b as a new tensor
    out = tf.maximum(a, b)
    print(out)
    out = tf.where(a > b, a, b)
    print(out)

Run result

tf.Tensor(
[[0.21136504 0.79508077 0.08270648]
 [0.63864327 0.45865036 0.92851595]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[0.60009355 0.06415244 0.79583999]
 [0.57438331 0.0202818  0.87532482]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[0.21136504 0.06415244 0.08270648]
 [0.57438331 0.0202818  0.87532482]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[0.21136504 0.06415244 0.08270648]
 [0.57438331 0.0202818  0.87532482]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[0.60009355 0.79508077 0.79583999]
 [0.63864327 0.45865036 0.92851595]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[0.60009355 0.79508077 0.79583999]
 [0.63864327 0.45865036 0.92851595]], shape=(2, 3), dtype=float64)

Shuffling

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(2, 3))
    print(a)
    a = tf.reshape(a, (-1,))
    # Shuffle (along the first dimension)
    out = tf.random.shuffle(a)
    out = tf.reshape(out, (2, 3))
    print(out)

Run result

tf.Tensor(
[[0.20761759 0.51233245 0.67144284]
 [0.91502356 0.47983641 0.00408644]], shape=(2, 3), dtype=float64)
tf.Tensor(
[[0.91502356 0.67144284 0.00408644]
 [0.51233245 0.47983641 0.20761759]], shape=(2, 3), dtype=float64)

Unique values

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([1, 2, 3, 1, 2, 3])
    # Keep only the unique values
    out = tf.unique(a)[0]
    print(out)

Run result

tf.Tensor([1 2 3], shape=(3,), dtype=int32)
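
tf.unique also returns, for each input element, its index in the unique list, which can be used to reconstruct the input; a quick sketch:

import tensorflow as tf

a = tf.constant([1, 2, 3, 1, 2, 3])
y, idx = tf.unique(a)
print(y)                  # [1 2 3]
print(idx)                # [0 1 2 0 1 2]
print(tf.gather(y, idx))  # [1 2 3 1 2 3], the original tensor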

Filling by coordinates (scatter update)

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(5, 4), dtype=tf.float32)
    print(a)
    dim = tf.where(a > 0.5)
    print(dim)
    value = tf.ones((dim.shape[0],))
    print(value)
    # Write the values back into a at the coordinates given by dim
    out = tf.tensor_scatter_nd_update(a, dim, value)
    print(out)

Run result

tf.Tensor(
[[0.20862171 0.26360217 0.96401286 0.70376277]
 [0.07841657 0.12179881 0.47482252 0.7394226 ]
 [0.85304624 0.54386026 0.7446324  0.8679093 ]
 [0.59722424 0.25879326 0.07276706 0.44899505]
 [0.2745695  0.9253298  0.908037   0.04632197]], shape=(5, 4), dtype=float32)
tf.Tensor(
[[0 2]
 [0 3]
 [1 3]
 [2 0]
 [2 1]
 [2 2]
 [2 3]
 [3 0]
 [4 1]
 [4 2]], shape=(10, 2), dtype=int64)
tf.Tensor([1. 1. 1. 1. 1. 1. 1. 1. 1. 1.], shape=(10,), dtype=float32)
tf.Tensor(
[[0.20862171 0.26360217 1.         1.        ]
 [0.07841657 0.12179881 0.47482252 1.        ]
 [1.         1.         1.         1.        ]
 [1.         0.25879326 0.07276706 0.44899505]
 [0.2745695  1.         1.         0.04632197]], shape=(5, 4), dtype=float32)

This also works for 1-D vectors:

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(9,), dtype=tf.float32)
    print(a)
    dim = tf.where(a > 0.5)
    print(dim)
    value = tf.ones((dim.shape[0],))
    print(value)
    # Write the values back into a at the coordinates given by dim
    out = tf.tensor_scatter_nd_update(a, dim, value)
    print(out)

Run result

tf.Tensor(
[0.9599079  0.9158476  0.59267145 0.69443715 0.4816758  0.71821004
 0.10235249 0.20957421 0.9197179 ], shape=(9,), dtype=float32)
tf.Tensor(
[[0]
 [1]
 [2]
 [3]
 [5]
 [8]], shape=(6, 1), dtype=int64)
tf.Tensor([1. 1. 1. 1. 1. 1.], shape=(6,), dtype=float32)
tf.Tensor(
[1.         1.         1.         1.         0.4816758  1.
 0.10235249 0.20957421 1.        ], shape=(9,), dtype=float32)

Expanding dimensions by copying values (tile)

import tensorflow as tf
import numpy as np

if __name__ == '__main__':

    a = tf.constant(np.random.rand(3, 2), dtype=tf.float32)
    print(a)
    # Tile the first dimension 2x (3 rows become 6) and the second dimension
    # 3x (2 columns become 6); the values are copied
    out = tf.tile(a, (2, 3))
    print(out)

Run result

tf.Tensor(
[[0.32347408 0.2620785 ]
 [0.27956298 0.67863226]
 [0.4664628  0.3838015 ]], shape=(3, 2), dtype=float32)
tf.Tensor(
[[0.32347408 0.2620785  0.32347408 0.2620785  0.32347408 0.2620785 ]
 [0.27956298 0.67863226 0.27956298 0.67863226 0.27956298 0.67863226]
 [0.4664628  0.3838015  0.4664628  0.3838015  0.4664628  0.3838015 ]
 [0.32347408 0.2620785  0.32347408 0.2620785  0.32347408 0.2620785 ]
 [0.27956298 0.67863226 0.27956298 0.67863226 0.27956298 0.67863226]
 [0.4664628  0.3838015  0.4664628  0.3838015  0.4664628  0.3838015 ]], shape=(6, 6), dtype=float32)

Sampling indices from a distribution

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[3, 2, 4, 5, 3], [2, 3, 4, 2, 1]], dtype=tf.float32)
    for i in range(5):
        # tf.random.categorical treats each row as unnormalized log-probabilities
        # (logits), so index i is sampled with probability softmax(row)[i],
        # not row[i] / sum(row) (see the note after this example)
        # The 5 means: draw 5 indices per row
        out = tf.random.categorical(a, 5)
        print(out)

Run result

tf.Tensor(
[[3 3 3 4 3]
 [2 1 2 0 2]], shape=(2, 5), dtype=int64)
tf.Tensor(
[[3 2 3 3 3]
 [3 2 1 3 1]], shape=(2, 5), dtype=int64)
tf.Tensor(
[[2 2 0 3 3]
 [2 2 2 2 1]], shape=(2, 5), dtype=int64)
tf.Tensor(
[[3 4 3 2 4]
 [0 1 2 0 2]], shape=(2, 5), dtype=int64)
tf.Tensor(
[[4 3 3 1 2]
 [2 2 1 1 2]], shape=(2, 5), dtype=int64)
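
As noted above, tf.random.categorical expects logits, not raw probabilities. To sample with the row values acting as true probabilities, take their logarithm first; a sketch:

import tensorflow as tf

weights = tf.constant([[3., 2., 4., 5., 3.]])
probs = weights / tf.reduce_sum(weights)  # normalize to probabilities
logits = tf.math.log(probs)               # categorical expects log-probabilities
# Now index i really is drawn with probability weights[0, i] / 17
print(tf.random.categorical(logits, 5))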

Extracting data (slice)

import tensorflow as tf

if __name__ == '__main__':

    a = tf.constant([[3, 2, 4, 5, 3], [2, 3, 4, 2, 1]], dtype=tf.float32)
    # Extract starting at row 0, column 1: a block of 2 rows and 3 columns
    # (equivalent to a[0:2, 1:4])
    out = tf.slice(a, [0, 1], [2, 3])
    print(out)

Run result

tf.Tensor(
[[2. 4. 5.]
 [3. 4. 2.]], shape=(2, 3), dtype=float32)

Image cropping

import tensorflow as tf
from tensorflow.keras import preprocessing
import numpy as np
import matplotlib.pyplot as plt

if __name__ == '__main__':

    img0 = preprocessing.image.load_img("/Users/admin/Documents/6565.jpeg", color_mode='rgb')
    img0 = tf.constant(np.asarray(img0))
    img1 = preprocessing.image.load_img("/Users/admin/Documents/7788.jpg", color_mode='rgb')
    img1 = tf.constant(np.asarray(img1))
    img1 = tf.image.resize(img1, (500, 600))
    img1 = tf.cast(img1, tf.uint8)
    img = tf.stack([img0, img1], axis=0)
    # Image cropping: image has shape (batch_size, height, width, channels);
    # boxes are crop regions in normalized coordinates [y1, x1, y2, x2]
    # There are 3 crop regions here; box_indices says which image each crop comes from
    # crop_size resizes every crop to that size; the original crop size is not kept
    out = tf.image.crop_and_resize(image=img, boxes=[[0.5, 0.6, 0.9, 0.8], [0.2, 0.6, 1.3, 0.9],
                                                     [0.3, 0.7, 1.5, 1.]],
                                   box_indices=[0, 1, 0], crop_size=(100, 100))
    img1 = tf.cast(out[0], tf.uint8)
    img2 = tf.cast(out[1], tf.uint8)
    img3 = tf.cast(out[2], tf.uint8)
    fig, ax = plt.subplots(1, 3, figsize=(20, 10))
    ax[0].imshow(img1)
    ax[1].imshow(img2)
    ax[2].imshow(img3)
    plt.show()

Run result: the three cropped and resized regions are displayed side by side in a matplotlib figure.

Boston housing price prediction

import tensorflow as tf
from sklearn import datasets

if __name__ == '__main__':

    boston = datasets.load_boston()
    X = tf.constant(boston.data, dtype=tf.float32)
    y = tf.constant(boston.target, dtype=tf.float32)
    y = tf.expand_dims(y, -1)
    data = tf.concat((X, y), axis=-1)
    print(data)

Run result

tf.Tensor(
[[6.3200e-03 1.8000e+01 2.3100e+00 ... 3.9690e+02 4.9800e+00 2.4000e+01]
 [2.7310e-02 0.0000e+00 7.0700e+00 ... 3.9690e+02 9.1400e+00 2.1600e+01]
 [2.7290e-02 0.0000e+00 7.0700e+00 ... 3.9283e+02 4.0300e+00 3.4700e+01]
 ...
 [6.0760e-02 0.0000e+00 1.1930e+01 ... 3.9690e+02 5.6400e+00 2.3900e+01]
 [1.0959e-01 0.0000e+00 1.1930e+01 ... 3.9345e+02 6.4800e+00 2.2000e+01]
 [4.7410e-02 0.0000e+00 1.1930e+01 ... 3.9690e+02 7.8800e+00 1.1900e+01]], shape=(506, 14), dtype=float32)
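
Training a simple fully connected network on the same data:
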
import tensorflow as tf
from sklearn import datasets
from tensorflow.keras import models, layers, losses, optimizers

if __name__ == '__main__':

    boston = datasets.load_boston()
    X = tf.constant(boston.data, dtype=tf.float32)
    y = tf.constant(boston.target, dtype=tf.float32)
    y = tf.expand_dims(y, -1)
    data = tf.concat((X, y), axis=-1)
    print(data)
    y = tf.squeeze(y)
    X_train = X[:496]
    y_train = y[:496]
    X_test = X[496:]
    y_test = y[496:]

    class Net(models.Model):

        def __init__(self, n_output):
            super(Net, self).__init__()
            self.hidden = layers.Dense(100, activation='relu')
            self.pred = layers.Dense(n_output)

        def call(self, x):
            out = self.hidden(x)
            out = self.pred(out)
            return out

    net = Net(1)
    optimizer = optimizers.Adam(learning_rate=0.01)
    loss_func = losses.MeanSquaredError()
    for epoch in range(10000):
        with tf.GradientTape() as tape:
            pred = net(X_train)
            pred = tf.squeeze(pred)
            # Scale the loss (and hence the gradients) down by 1000x
            loss = loss_func(y_train, pred) * 0.001
        grads = tape.gradient(loss, net.variables)
        optimizer.apply_gradients(zip(grads, net.variables))
        print("item:{},loss:{}".format(epoch, loss))
        print(pred[:10])
        print(y_train[:10])
        pred_test = net.predict(X_test)
        pred_test = tf.squeeze(pred_test)
        print(pred_test[:10])
        print(y_test[:10])

Run result (final training result)

item:9999,loss:0.004027204588055611
tf.Tensor(
[26.32511  23.44358  31.221886 33.211582 33.850327 28.128717 22.353136
 21.29065  16.69853  20.120735], shape=(10,), dtype=float32)
tf.Tensor([24.  21.6 34.7 33.4 36.2 28.7 22.9 27.1 16.5 18.9], shape=(10,), dtype=float32)
tf.Tensor(
[15.420125 19.307917 21.476751 18.167856 20.689434 23.90838  19.904911
 22.959293 21.854155 18.169727], shape=(10,), dtype=float32)
tf.Tensor([19.7 18.3 21.2 17.5 16.8 22.4 20.6 23.9 22.  11.9], shape=(10,), dtype=float32)

Handwritten digits (MNIST)

import tensorflow as tf
from tensorflow.keras import datasets, models, layers, losses, Sequential, optimizers

if __name__ == '__main__':

    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    X_train = tf.constant(X_train, dtype=tf.float32) / 255
    X_train = tf.reshape(X_train, (X_train.shape[0], 28, 28, 1))
    y_train = tf.constant(y_train, dtype=tf.int32)
    X_test = tf.constant(X_test, dtype=tf.float32) / 255
    X_test = tf.reshape(X_test, (X_test.shape[0], 28, 28, 1))
    y_test = tf.constant(y_test, dtype=tf.int32)
    data_train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    data_test = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    data_train = data_train.shuffle(10000).batch(64)
    data_test = data_test.shuffle(10000).batch(64)

    class CNN(models.Model):

        def __init__(self):
            super(CNN, self).__init__()
            self.conv = Sequential([
                layers.Conv2D(32, (5, 5), padding='same'),
                layers.BatchNormalization(),
                layers.ReLU(),
                layers.MaxPool2D((2, 2))
            ])
            self.fc = layers.Dense(10)

        def call(self, x):
            out = self.conv(x)
            out = tf.reshape(out, (out.shape[0], -1))
            out = self.fc(out)
            return out

    cnn = CNN()
    loss_func = losses.SparseCategoricalCrossentropy(from_logits=True)
    optimizer = optimizers.Adam(learning_rate=0.01)
    for epoch in range(10):
        for i, (images, labels) in enumerate(data_train):
            with tf.GradientTape() as tape:
                out = cnn(images)
                loss = loss_func(labels, out)
            grads = tape.gradient(loss, cnn.trainable_variables)
            optimizer.apply_gradients(zip(grads, cnn.trainable_variables))
        print("epoch is {}, ite is {}/{}, loss is {}".format(epoch + 1, i,
                                                             len(X_train) // 64, loss.numpy()))
        loss_test = 0
        accuracy = 0
        for i, (images, labels) in enumerate(data_test):
            out = cnn(images)
            loss_test += loss_func(labels, out)
            pred = tf.argmax(out, axis=-1)
            pred = tf.cast(pred, tf.int32)
            accuracy += tf.where(pred == labels).shape[0]
        accuracy = accuracy / len(X_test)
        loss_test = loss_test / (len(X_test) // 64)
        print("epoch is {}, accuracy is {}, loss test is {}".format(epoch + 1, accuracy, loss_test.numpy()))

Run result

epoch is 1, ite is 937/937, loss is 0.03964223340153694
epoch is 1, accuracy is 0.9831, loss test is 0.053593263030052185
epoch is 2, ite is 937/937, loss is 0.0005187737406231463
epoch is 2, accuracy is 0.9808, loss test is 0.06631167978048325
epoch is 3, ite is 937/937, loss is 0.25279638171195984
epoch is 3, accuracy is 0.9853, loss test is 0.04976803809404373
epoch is 4, ite is 937/937, loss is 0.005918027367442846
epoch is 4, accuracy is 0.9814, loss test is 0.0674174427986145
epoch is 5, ite is 937/937, loss is 0.001395822037011385
epoch is 5, accuracy is 0.9845, loss test is 0.054165858775377274
epoch is 6, ite is 937/937, loss is 0.009619451127946377
epoch is 6, accuracy is 0.9809, loss test is 0.07322004437446594
epoch is 7, ite is 937/937, loss is 0.0014850541483610868
epoch is 7, accuracy is 0.9829, loss test is 0.07367455959320068
epoch is 8, ite is 937/937, loss is 0.000294497178401798
epoch is 8, accuracy is 0.9829, loss test is 0.07780919224023819
epoch is 9, ite is 937/937, loss is 1.4779652701690793e-05
epoch is 9, accuracy is 0.9843, loss test is 0.06709001213312149
epoch is 10, ite is 937/937, loss is 0.00040841943700797856
epoch is 10, accuracy is 0.9829, loss test is 0.07521752268075943

Cifar10 image classification

  • VggNet architecture
import tensorflow as tf
from tensorflow.keras import datasets, models, Sequential, layers, losses, optimizers

if __name__ == '__main__':

    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    X_train = tf.constant(X_train, dtype=tf.float32) / 255
    y_train = tf.constant(y_train, dtype=tf.int32)
    X_test = tf.constant(X_test, dtype=tf.float32) / 255
    y_test = tf.constant(y_test, dtype=tf.int32)
    data_train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    data_test = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    data_train = data_train.shuffle(10000).batch(64)
    data_test = data_test.shuffle(10000).batch(64)

    class Vggbase(models.Model):

        def __init__(self):
            super(Vggbase, self).__init__()
            self.conv1 = Sequential([
                layers.Conv2D(64, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.max_pooling1 = layers.MaxPool2D((2, 2), strides=2)
            self.conv2_1 = Sequential([
                layers.Conv2D(128, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.conv2_2 = Sequential([
                layers.Conv2D(128, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.max_pooling2 = layers.MaxPool2D((2, 2), strides=2)
            self.conv3_1 = Sequential([
                layers.Conv2D(256, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.conv3_2 = Sequential([
                layers.Conv2D(256, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.max_pooling3 = layers.MaxPool2D((2, 2), strides=2, padding='same')
            self.conv4_1 = Sequential([
                layers.Conv2D(512, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.conv4_2 = Sequential([
                layers.Conv2D(512, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.max_pooling4 = layers.MaxPool2D((2, 2), strides=2)
            self.fc = layers.Dense(10)

        def call(self, x):
            batchsize = x.shape[0]
            out = self.conv1(x)
            out = self.max_pooling1(out)
            out = self.conv2_1(out)
            out = self.conv2_2(out)
            out = self.max_pooling2(out)
            out = self.conv3_1(out)
            out = self.conv3_2(out)
            out = self.max_pooling3(out)
            out = self.conv4_1(out)
            out = self.conv4_2(out)
            out = self.max_pooling4(out)
            out = tf.reshape(out, (batchsize, -1))
            out = self.fc(out)
            return out

    epoch_num = 200
    lr = 0.001
    net = Vggbase()
    loss_func = losses.SparseCategoricalCrossentropy(from_logits=True)
    scheduler = optimizers.schedules.ExponentialDecay(initial_learning_rate=lr,
                                                      decay_steps=3905,
                                                      decay_rate=0.9)
    optimizer = optimizers.Adam(learning_rate=scheduler)
    for epoch in range(epoch_num):
        for i, (images, labels) in enumerate(data_train):
            with tf.GradientTape() as tape:
                outputs = net(images)
                labels = tf.squeeze(labels, axis=-1)
                loss = loss_func(labels, outputs)
            grads = tape.gradient(loss, net.trainable_variables)
            optimizer.apply_gradients(zip(grads, net.trainable_variables))
            pred = tf.argmax(outputs, axis=-1)
            pred = tf.cast(pred, tf.int32)
            correct = tf.where(pred == labels).shape[0]
            print("epoch is ", epoch, "step ", i, "loss is: ", loss.numpy(),
                  "mini-batch correct is: ", 100.0 * correct / 64)

Run result (excerpt)

epoch is  14 step  82 loss is:  0.08098086 mini-batch correct is:  95.3125
epoch is  14 step  83 loss is:  0.031877242 mini-batch correct is:  100.0
epoch is  14 step  84 loss is:  0.028943349 mini-batch correct is:  98.4375
epoch is  14 step  85 loss is:  0.064095914 mini-batch correct is:  96.875
epoch is  14 step  86 loss is:  0.16567983 mini-batch correct is:  96.875
epoch is  14 step  87 loss is:  0.04256191 mini-batch correct is:  98.4375
epoch is  14 step  88 loss is:  0.028884312 mini-batch correct is:  100.0
epoch is  14 step  89 loss is:  0.032220896 mini-batch correct is:  100.0
epoch is  14 step  90 loss is:  0.025648516 mini-batch correct is:  100.0
  • ResNet architecture
import tensorflow as tf
from tensorflow.keras import datasets, models, Sequential, layers, losses, optimizers

if __name__ == '__main__':

    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    X_train = tf.constant(X_train, dtype=tf.float32) / 255
    y_train = tf.constant(y_train, dtype=tf.int32)
    X_test = tf.constant(X_test, dtype=tf.float32) / 255
    y_test = tf.constant(y_test, dtype=tf.int32)
    data_train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    data_test = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    data_train = data_train.shuffle(10000).batch(64)
    data_test = data_test.shuffle(10000).batch(64)

    class ResBlock(models.Model):

        def __init__(self, out_channel, stride=1):
            super(ResBlock, self).__init__()
            self.layer = Sequential([
                layers.Conv2D(out_channel, (3, 3), strides=stride, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU(),
                layers.Conv2D(out_channel, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization()
            ])
            self.shortcut = Sequential([])
            if stride > 1:
                self.shortcut = Sequential([
                    layers.Conv2D(out_channel, (3, 3), strides=stride, padding='same'),
                    layers.BatchNormalization()
                ])

        def call(self, x):
            out1 = self.layer(x)
            out2 = self.shortcut(x)
            out = out1 + out2
            out = layers.ReLU()(out)
            return out

    class ResNet(models.Model):

        def make_layer(self, block, out_channel, stride, num_block):
            layers_list = []
            for i in range(num_block):
                if i == 0:
                    in_stride = stride
                else:
                    in_stride = 1
                layers_list.append(block(out_channel, in_stride))
            return Sequential(layers_list)

        def __init__(self):
            super(ResNet, self).__init__()
            self.conv1 = Sequential([
                layers.Conv2D(32, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.layer1 = self.make_layer(ResBlock, 64, 2, 2)
            self.layer2 = self.make_layer(ResBlock, 128, 2, 2)
            self.layer3 = self.make_layer(ResBlock, 256, 2, 2)
            self.layer4 = self.make_layer(ResBlock, 512, 2, 2)
            self.fc = layers.Dense(10)

        def call(self, x):
            out = self.conv1(x)
            out = self.layer1(out)
            out = self.layer2(out)
            out = self.layer3(out)
            out = self.layer4(out)
            out = layers.AvgPool2D((2, 2))(out)
            out = tf.reshape(out, (out.shape[0], -1))
            out = self.fc(out)
            return out

    epoch_num = 200
    lr = 0.001
    net = ResNet()
    loss_func = losses.SparseCategoricalCrossentropy(from_logits=True)
    scheduler = optimizers.schedules.ExponentialDecay(initial_learning_rate=lr,
                                                      decay_steps=3905,
                                                      decay_rate=0.9)
    optimizer = optimizers.Adam(learning_rate=scheduler)
    for epoch in range(epoch_num):
        for i, (images, labels) in enumerate(data_train):
            with tf.GradientTape() as tape:
                outputs = net(images)
                labels = tf.squeeze(labels, axis=-1)
                loss = loss_func(labels, outputs)
            grads = tape.gradient(loss, net.trainable_variables)
            optimizer.apply_gradients(zip(grads, net.trainable_variables))
            pred = tf.argmax(outputs, axis=-1)
            pred = tf.cast(pred, tf.int32)
            correct = tf.where(pred == labels).shape[0]
            print("epoch is ", epoch, "step ", i, "loss is: ", loss.numpy(),
                  "mini-batch correct is: ", 100.0 * correct / 64)
  • MobileNet architecture
import tensorflow as tf
from tensorflow.keras import datasets, models, Sequential, layers, losses, optimizers

if __name__ == '__main__':

    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    X_train = tf.constant(X_train, dtype=tf.float32) / 255
    y_train = tf.constant(y_train, dtype=tf.int32)
    X_test = tf.constant(X_test, dtype=tf.float32) / 255
    y_test = tf.constant(y_test, dtype=tf.int32)
    data_train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    data_test = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    data_train = data_train.shuffle(10000).batch(64)
    data_test = data_test.shuffle(10000).batch(64)

    class MobileNet(models.Model):
        def conv_dw(self, out_channel, stride):
            return Sequential([
                layers.DepthwiseConv2D((3, 3), strides=stride, padding='same',
                                       use_bias=False),
                layers.BatchNormalization(),
                layers.ReLU(),
                layers.Conv2D(out_channel, (1, 1), strides=1, padding='same',
                              use_bias=False),
                layers.BatchNormalization(),
                layers.ReLU()
            ])

        def __init__(self):
            super(MobileNet, self).__init__()
            self.conv1 = Sequential([
                layers.Conv2D(32, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.conv_dw2 = self.conv_dw(32, 1)
            self.conv_dw3 = self.conv_dw(64, 2)
            self.conv_dw4 = self.conv_dw(64, 1)
            self.conv_dw5 = self.conv_dw(128, 2)
            self.conv_dw6 = self.conv_dw(128, 1)
            self.conv_dw7 = self.conv_dw(256, 2)
            self.conv_dw8 = self.conv_dw(256, 1)
            self.conv_dw9 = self.conv_dw(512, 2)
            self.fc = layers.Dense(10)

        def call(self, x):
            out = self.conv1(x)
            out = self.conv_dw2(out)
            out = self.conv_dw3(out)
            out = self.conv_dw4(out)
            out = self.conv_dw5(out)
            out = self.conv_dw6(out)
            out = self.conv_dw7(out)
            out = self.conv_dw8(out)
            out = self.conv_dw9(out)
            out = layers.GlobalAveragePooling2D()(out)
            out = self.fc(out)
            return out

    epoch_num = 200
    lr = 0.001
    loss_func = losses.SparseCategoricalCrossentropy(from_logits=True)
    scheduler = optimizers.schedules.ExponentialDecay(initial_learning_rate=lr,
                                                      decay_steps=3905,
                                                      decay_rate=0.9)
    optimizer = optimizers.Adam(learning_rate=scheduler)
    net = MobileNet()
    net.build(input_shape=(None, 32, 32, 3))
    print(net.summary())
    net.compile(loss=loss_func, optimizer=optimizer, metrics=['accuracy'])
    net.fit(data_train, batch_size=64, epochs=epoch_num)

Run result (excerpt)

Model: "mobile_net"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
sequential (Sequential)      (None, 32, 32, 32)        1024      
_________________________________________________________________
sequential_1 (Sequential)    (None, 32, 32, 32)        1568      
_________________________________________________________________
sequential_2 (Sequential)    (None, 16, 16, 64)        2720      
_________________________________________________________________
sequential_3 (Sequential)    (None, 16, 16, 64)        5184      
_________________________________________________________________
sequential_4 (Sequential)    (None, 8, 8, 128)         9536      
_________________________________________________________________
sequential_5 (Sequential)    (None, 8, 8, 128)         18560     
_________________________________________________________________
sequential_6 (Sequential)    (None, 4, 4, 256)         35456     
_________________________________________________________________
sequential_7 (Sequential)    (None, 4, 4, 256)         69888     
_________________________________________________________________
sequential_8 (Sequential)    (None, 2, 2, 512)         136448    
_________________________________________________________________
dense (Dense)                multiple                  5130      
=================================================================
Total params: 285,514
Trainable params: 280,650
Non-trainable params: 4,864
_________________________________________________________________
None
Epoch 1/200
782/782 [==============================] - 114s 144ms/step - loss: 1.6013 - accuracy: 0.4112
Epoch 2/200
782/782 [==============================] - 114s 146ms/step - loss: 1.2068 - accuracy: 0.5633
Epoch 3/200
782/782 [==============================] - 114s 146ms/step - loss: 1.0066 - accuracy: 0.6420
  • InceptionNet architecture
import tensorflow as tf
from tensorflow.keras import datasets, models, Sequential, layers, losses, optimizers

if __name__ == '__main__':

    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    X_train = tf.constant(X_train, dtype=tf.float32) / 255
    y_train = tf.constant(y_train, dtype=tf.int32)
    X_test = tf.constant(X_test, dtype=tf.float32) / 255
    y_test = tf.constant(y_test, dtype=tf.int32)
    data_train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    data_test = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    data_train = data_train.shuffle(10000).batch(64)
    data_test = data_test.shuffle(10000).batch(64)

    class BaseInception(models.Model):
        def ConvBNRelu(self, out_channel, kernel_size):
            return Sequential([
                layers.Conv2D(out_channel, (kernel_size, kernel_size), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])

        def __init__(self, out_channel_list, reduce_channel_list):
            super(BaseInception, self).__init__()
            self.branch1_conv = self.ConvBNRelu(out_channel_list[0], 1)
            self.branch2_conv1 = self.ConvBNRelu(reduce_channel_list[0], 1)
            self.branch2_conv2 = self.ConvBNRelu(out_channel_list[1], 3)
            self.branch3_conv1 = self.ConvBNRelu(reduce_channel_list[1], 1)
            self.branch3_conv2 = self.ConvBNRelu(out_channel_list[2], 5)
            self.branch4_pool = layers.MaxPool2D((3, 3), strides=1, padding='same')
            self.branch4_conv = self.ConvBNRelu(out_channel_list[3], 3)

        def call(self, x):
            out1 = self.branch1_conv(x)
            out2 = self.branch2_conv1(x)
            out2 = self.branch2_conv2(out2)
            out3 = self.branch3_conv1(x)
            out3 = self.branch3_conv2(out3)
            out4 = self.branch4_pool(x)
            out4 = self.branch4_conv(out4)
            # Concatenate the four branches along the channel axis (the data
            # format is channels_last, so the channel axis is -1, not 1)
            out = tf.concat([out1, out2, out3, out4], axis=-1)
            return out

    class InceptionNet(models.Model):

        def __init__(self):
            super(InceptionNet, self).__init__()
            self.block1 = Sequential([
                layers.Conv2D(64, (3, 3), strides=1, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.block2 = Sequential([
                layers.Conv2D(128, (3, 3), strides=2, padding='same'),
                layers.BatchNormalization(),
                layers.ReLU()
            ])
            self.block3 = Sequential([
                BaseInception(out_channel_list=[64, 64, 64, 64],
                              reduce_channel_list=[16, 16]),
                layers.MaxPool2D((3, 3), strides=2, padding='same')
            ])
            self.block4 = Sequential([
                BaseInception(out_channel_list=[96, 96, 96, 96],
                              reduce_channel_list=[32, 32]),
                layers.MaxPool2D((3, 3), strides=2, padding='same')
            ])
            self.fc = layers.Dense(10)

        def call(self, x):
            out = self.block1(x)
            out = self.block2(out)
            out = self.block3(out)
            out = self.block4(out)
            out = layers.AvgPool2D((2, 2))(out)
            out = tf.reshape(out, (out.shape[0], -1))
            out = self.fc(out)
            return out


    epoch_num = 200
    lr = 0.001
    net = InceptionNet()
    loss_func = losses.SparseCategoricalCrossentropy(from_logits=True)
    scheduler = optimizers.schedules.ExponentialDecay(initial_learning_rate=lr,
                                                      decay_steps=3905,
                                                      decay_rate=0.9)
    optimizer = optimizers.Adam(learning_rate=scheduler)
    for epoch in range(epoch_num):
        for i, (images, labels) in enumerate(data_train):
            with tf.GradientTape() as tape:
                outputs = net(images)
                labels = tf.squeeze(labels, axis=-1)
                loss = loss_func(labels, outputs)
            grads = tape.gradient(loss, net.trainable_variables)
            optimizer.apply_gradients(zip(grads, net.trainable_variables))
            pred = tf.argmax(outputs, axis=-1)
            pred = tf.cast(pred, tf.int32)
            correct = tf.where(pred == labels).shape[0]
            print("epoch is ", epoch, "step ", i, "loss is: ", loss.numpy(),
                  "mini-batch correct is: ", 100.0 * correct / 64)
  • Using a Keras application model (ResNet50)
import tensorflow as tf
from tensorflow.keras import datasets, models, losses, optimizers, applications

if __name__ == '__main__':

    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    X_train = tf.constant(X_train, dtype=tf.float32) / 255
    y_train = tf.constant(y_train, dtype=tf.int32)
    X_test = tf.constant(X_test, dtype=tf.float32) / 255
    y_test = tf.constant(y_test, dtype=tf.int32)
    data_train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    data_test = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    data_train = data_train.shuffle(10000).batch(64)
    data_test = data_test.shuffle(10000).batch(64)
    
    class ResNet50(models.Model):

        def __init__(self):
            super(ResNet50, self).__init__()
            # weights=None: the network is randomly initialized and trained
            # from scratch here, not loaded with pretrained weights
            self.model = applications.ResNet50(input_shape=(32, 32, 3), classes=10, weights=None)

        def call(self, x):
            out = self.model(x)
            return out

    epoch_num = 200
    lr = 0.001
    net = ResNet50()
    loss_func = losses.SparseCategoricalCrossentropy(from_logits=True)
    scheduler = optimizers.schedules.ExponentialDecay(initial_learning_rate=lr,
                                                      decay_steps=3905,
                                                      decay_rate=0.9)
    optimizer = optimizers.Adam(learning_rate=scheduler)
    for epoch in range(epoch_num):
        for i, (images, labels) in enumerate(data_train):
            with tf.GradientTape() as tape:
                outputs = net(images)
                labels = tf.squeeze(labels, axis=-1)
                loss = loss_func(labels, outputs)
            grads = tape.gradient(loss, net.trainable_variables)
            optimizer.apply_gradients(zip(grads, net.trainable_variables))
            pred = tf.argmax(outputs, axis=-1)
            pred = tf.cast(pred, tf.int32)
            correct = tf.where(pred == labels).shape[0]
            print("epoch is ", epoch, "step ", i, "loss is: ", loss.numpy(),
                  "mini-batch correct is: ", 100.0 * correct / 64)

Image augmentation

import tensorflow as tf
from tensorflow.keras import preprocessing
import numpy as np
import matplotlib.pyplot as plt

if __name__ == '__main__':

    # Random rotation, horizontal flip and vertical shift; note that
    # featurewise_center=True only takes effect after calling trans.fit()
    # on sample data to compute the dataset mean
    trans = preprocessing.image.ImageDataGenerator(
        featurewise_center=True,
        rotation_range=75,
        horizontal_flip=True,
        height_shift_range=0.2
    )
    image = plt.imread("/Users/admin/Documents/444.jpeg")
    image = tf.constant(image)
    image = tf.reshape(image, (1, 1279, 1896, 3))
    result = trans.flow(image)
    image_out = result.next()
    image_out = np.reshape(image_out, (1279, 1896, 3))
    image_out = image_out.astype(np.uint8)
    print(image_out)
    plt.imshow(image_out)
    plt.show()

Run result

[[[28 27 25]
  [23 22 20]
  [20 19 17]
  ...
  [ 8  2  2]
  [ 8  2  2]
  [ 8  2  2]]

 [[30 29 27]
  [29 28 26]
  [24 23 21]
  ...
  [ 8  2  2]
  [ 8  2  2]
  [ 8  2  2]]

 [[32 31 29]
  [30 29 27]
  [29 28 26]
  ...
  [ 8  2  2]
  [ 8  2  2]
  [ 8  2  2]]

 ...

 [[29 22 16]
  [28 19 13]
  [28 19 12]
  ...
  [19 19 11]
  [19 19 11]
  [20 17  8]]

 [[28 20 13]
  [28 19 12]
  [29 20 13]
  ...
  [19 19 11]
  [19 19 11]
  [19 19 11]]

 [[28 19 12]
  [29 20 13]
  [31 22 15]
  ...
  [19 19 11]
  [19 19 11]
  [19 19 11]]]
