TensorLayer learning log 2: chapter 2, sections 2.6.1~2.6.2

import numpy as np
import tensorlayer as tl

x = np.asarray([['a','a'],['b','b'],['c','c'],['d','d'],['e','e'],['f','f']])
y = np.asarray([0,1,2,3,4,5])
# x and y must have the same number of elements, otherwise an error is raised
for xx, yy in tl.iterate.minibatches(inputs=x, targets=y,batch_size=2,shuffle=False):
	print(xx, yy)

print('~~~~~~~~~~~~~~~')

# the following two ways of defining the inputs also seem to work
m = np.array([['a','a'],['b','b'],['c','c'],['d','d'],['e','e'],['f','f']])
n = np.array([0,1,2,3,4,5])

for mm, nn in tl.iterate.minibatches(inputs=m, targets=n,batch_size=2,shuffle=True):
	print(mm, nn)

print('~~~~~~~~~~~~~~~')

f = [['a','a'],['b','b'],['c','c'],['d','d'],['e','e'],['f','f']]
t = [0,1,2,3,4,5]


for ff, tt in tl.iterate.minibatches(inputs=f, targets=t,batch_size=3,shuffle=False):
	print(ff, tt)

The 2.6.1 example above is a bit curious: I tried np.array and also just a plain Python list, and it seems to make no difference, so np.asarray is not strictly required. The output looks like this:

[['a' 'a']
 ['b' 'b']] [0 1]
[['c' 'c']
 ['d' 'd']] [2 3]
[['e' 'e']
 ['f' 'f']] [4 5]
~~~~~~~~~~~~~~~
[['e' 'e']
 ['a' 'a']] [4 0]
[['c' 'c']
 ['b' 'b']] [2 1]
[['f' 'f']
 ['d' 'd']] [5 3]
~~~~~~~~~~~~~~~
[['a', 'a'], ['b', 'b'], ['c', 'c']] [0, 1, 2]
[['d', 'd'], ['e', 'e'], ['f', 'f']] [3, 4, 5]
[Finished in 4.3s]
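
For reference, here is a minimal sketch of roughly what tl.iterate.minibatches seems to be doing (my own guess based on the behaviour above, not the actual TensorLayer source): it walks the inputs in steps of batch_size, optionally through a shuffled index, and drops the last incomplete batch.

import numpy as np

def minibatches_sketch(inputs, targets, batch_size, shuffle=False):
    # assumes len(inputs) == len(targets); the final incomplete batch is dropped
    if shuffle:
        indices = np.random.permutation(len(inputs))
    for start in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start:start + batch_size]
            yield np.asarray(inputs)[excerpt], np.asarray(targets)[excerpt]
        else:
            # plain slicing, which is why an ordinary Python list works here too
            sl = slice(start, start + batch_size)
            yield inputs[sl], targets[sl]

With the data above, looping over minibatches_sketch(f, t, 3) reproduces the list-style output shown for 2.6.1.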

Below is the 2.6.2 example, which went smoothly. Two things to note: tl.logging.set_verbosity(tl.logging.DEBUG) has to be commented out, otherwise it raises an error saying this version of tensorlayer does not have logging yet; and in the line network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output') the act argument needs to be given an actual function.

In the example the textbook points to, https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py, this argument is act=None; if you run it as-is it errors out complaining that __name__ cannot be matched.
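On the act=tf.identity point: as far as I understand (my own note, not from the textbook), tl.cost.cross_entropy applies the softmax internally via tf.nn.sparse_softmax_cross_entropy_with_logits, so the output layer should hand over raw logits. Roughly, and only as a sketch:

import tensorflow as tf

# hypothetical placeholders just to make this sketch self-contained;
# in the script below they correspond to network.outputs and y_
logits = tf.placeholder(tf.float32, shape=[None, 10])  # raw scores, no softmax applied yet
labels = tf.placeholder(tf.int64, shape=[None])
# roughly what tl.cost.cross_entropy(y, y_, name='xentropy') computes (my assumption):
cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

The full 2.6.2 script is below: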

import time
import tensorflow as tf
import tensorlayer as tl

tf.logging.set_verbosity(tf.logging.DEBUG)
# tl.logging.set_verbosity(tl.logging.DEBUG)

sess = tf.InteractiveSession()

# prepare data
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
# define placeholder
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')

# define the network
network = tl.layers.InputLayer(x, name='input')
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
network = tl.layers.DenseLayer(network, n_units=800, act=tf.nn.relu, name='relu1')
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
network = tl.layers.DenseLayer(network, n_units=800, act=tf.nn.relu, name='relu2')
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
# the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to
# speed up computation, so we use identity here.
# see tf.nn.sparse_softmax_cross_entropy_with_logits()
network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output')

# define cost function and metric.
y = network.outputs
cost = tl.cost.cross_entropy(y, y_, name='xentropy')
correct_prediction = tf.equal(tf.argmax(y, 1), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
y_op = tf.argmax(tf.nn.softmax(y), 1)

# define the optimizer
train_params = network.all_params
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)

# initialize all variables in the session
tl.layers.initialize_global_variables(sess)

# print network information
network.print_params()
network.print_layers()

n_epoch = 100
batch_size = 500
print_freq = 10

for epoch in range(n_epoch):
    start_time = time.time()
    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
        feed_dict = {x: X_train_a, y_: y_train_a}
        feed_dict.update(network.all_drop)  # enable noise layers
        sess.run(train_op, feed_dict=feed_dict)

    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
        print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
        train_loss, train_acc, n_batch = 0, 0, 0
        for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
            dp_dict = tl.utils.dict_to_one(network.all_drop)  # disable noise layers
            feed_dict = {x: X_train_a, y_: y_train_a}
            feed_dict.update(dp_dict)
            err, ac = sess.run([cost, acc], feed_dict=feed_dict)
            train_loss += err
            train_acc += ac
            n_batch += 1
        print("   train loss: %f" % (train_loss / n_batch))
        print("   train acc: %f" % (train_acc / n_batch))
        val_loss, val_acc, n_batch = 0, 0, 0
        for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
            dp_dict = tl.utils.dict_to_one(network.all_drop)  # disable noise layers
            feed_dict = {x: X_val_a, y_: y_val_a}
            feed_dict.update(dp_dict)
            err, ac = sess.run([cost, acc], feed_dict=feed_dict)
            val_loss += err
            val_acc += ac
            n_batch += 1
        print("   val loss: %f" % (val_loss / n_batch))
        print("   val acc: %f" % (val_acc / n_batch))

print('~~~~~~~~~~~~~Evaluation~~~~~~~~~~~~~')
test_loss, test_acc, n_batch = 0, 0, 0
for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):
    dp_dict = tl.utils.dict_to_one(network.all_drop)  # disable noise layers
    feed_dict = {x: X_test_a, y_: y_test_a}
    feed_dict.update(dp_dict)
    err, ac = sess.run([cost, acc], feed_dict=feed_dict)
    test_loss += err
    test_acc += ac
    n_batch += 1
print("   test loss: %f" % (test_loss / n_batch))
print("   test acc: %f" % (test_acc / n_batch))

Since I am running this at home, and the computer here is another notch worse than the one at work, I lowered n_epoch from 500 to 100. The run is below; it took a bit over two thousand seconds in total:

[TL] Load or Download MNIST > data\mnist
[TL] data\mnist\train-images-idx3-ubyte.gz
[TL] data\mnist\t10k-images-idx3-ubyte.gz
[TL] InputLayer  input: (?, 784)
[TL] DropoutLayer drop1: keep:0.800000 is_fix:False
[TL] DenseLayer  relu1: 800 relu
[TL] DropoutLayer drop2: keep:0.500000 is_fix:False
[TL] DenseLayer  relu2: 800 relu
[TL] DropoutLayer drop3: keep:0.500000 is_fix:False
[TL] DenseLayer  output: 10 identity
[TL]   param   0: relu1/W:0            (784, 800)         float32_ref (mean: -3.3809192245826125e-05, median: 5.016334034735337e-05, std: 0.08794602751731873)   
[TL]   param   1: relu1/b:0            (800,)             float32_ref (mean: 0.0               , median: 0.0               , std: 0.0               )   
[TL]   param   2: relu2/W:0            (800, 800)         float32_ref (mean: 6.852106162114069e-05, median: 3.730915341293439e-05, std: 0.08797246217727661)   
[TL]   param   3: relu2/b:0            (800,)             float32_ref (mean: 0.0               , median: 0.0               , std: 0.0               )   
[TL]   param   4: output/W:0           (800, 10)          float32_ref (mean: 0.000250009645242244, median: -0.0002332517469767481, std: 0.0876460000872612)   
[TL]   param   5: output/b:0           (10,)              float32_ref (mean: 0.0               , median: 0.0               , std: 0.0               )   
[TL]   num of params: 1276810
[TL]   layer   0: drop1/mul:0          (?, 784)           float32
[TL]   layer   1: relu1/Relu:0         (?, 800)           float32
[TL]   layer   2: drop2/mul:0          (?, 800)           float32
[TL]   layer   3: relu2/Relu:0         (?, 800)           float32
[TL]   layer   4: drop3/mul:0          (?, 800)           float32
[TL]   layer   5: output/Identity:0    (?, 10)            float32
Epoch 1 of 100 took 26.056490s
   train loss: 0.592533
   train acc: 0.809320
   val loss: 0.531577
   val acc: 0.830900
Epoch 10 of 100 took 25.144438s
   train loss: 0.244603
   train acc: 0.928800
   val loss: 0.223174
   val acc: 0.938600
Epoch 20 of 100 took 25.214442s
   train loss: 0.171495
   train acc: 0.952060
   val loss: 0.164636
   val acc: 0.955400
Epoch 30 of 100 took 25.943484s
   train loss: 0.131061
   train acc: 0.963680
   val loss: 0.133256
   val acc: 0.963200
Epoch 40 of 100 took 24.997430s
   train loss: 0.102254
   train acc: 0.972000
   val loss: 0.111744
   val acc: 0.967300
Epoch 50 of 100 took 24.961428s
   train loss: 0.083485
   train acc: 0.977180
   val loss: 0.098263
   val acc: 0.972600
Epoch 60 of 100 took 24.688412s
   train loss: 0.068905
   train acc: 0.981760
   val loss: 0.088438
   val acc: 0.974000
Epoch 70 of 100 took 25.372451s
   train loss: 0.057064
   train acc: 0.984420
   val loss: 0.080247
   val acc: 0.975900
Epoch 80 of 100 took 25.180440s
   train loss: 0.047653
   train acc: 0.987080
   val loss: 0.074070
   val acc: 0.977000
Epoch 90 of 100 took 24.923426s
   train loss: 0.040897
   train acc: 0.989320
   val loss: 0.070247
   val acc: 0.979100
Epoch 100 of 100 took 25.322448s
   train loss: 0.034475
   train acc: 0.991020
   val loss: 0.066125
   val acc: 0.980400
~~~~~~~~~~~~~Evaluation~~~~~~~~~~~~~
   test loss: 0.064027
   test acc: 0.979400
[Finished in 2658.8s]

According to the textbook, this section is designed for tuning hyperparameters, but I am still completely lost. I will come back and add notes once I understand it; for now consider this a placeholder.
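
For the record, these are the knobs I understand the script exposes for tuning (my own summary; the values are simply the ones used in the run above):

# hyperparameters of the run above; tuning means varying these and comparing val loss / val acc
hyperparams = {
    'n_epoch': 100,           # textbook uses 500
    'batch_size': 500,
    'learning_rate': 0.0001,  # AdamOptimizer learning rate
    'keep_drop1': 0.8,        # dropout keep probabilities
    'keep_drop2': 0.5,
    'keep_drop3': 0.5,
    'n_units': 800,           # width of each hidden DenseLayer
}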

—————————Off-topic————————————

github.com has been getting slower and slower. Today I found a workaround via Baidu to speed it up: go to http://tool.chinaz.com/dns?type=1&host=github.com&ip= , search for github.com, pick the entry with the smallest TTL, and copy its IP address.

Then find the hosts file, open it with a plain text editor, and append a line with that IP and github.com. Restart the browser and it is noticeably faster. The IP changes over time, so when it slows down again, just look it up again and update the entry. On Windows 7 you can edit the hosts file directly; Windows 10 sometimes won't let you, in which case you need administrator rights: open cmd as administrator and open the hosts file from there. That is the Windows 7 procedure; for Windows 10 you will have to look it up yourself.
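
For example, the appended hosts line looks roughly like this (the IP here is purely illustrative and will go stale; use whatever the lookup returns at the time):

140.82.113.3    github.com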
