# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.contrib.seq2seq import sequence_loss
'''
How should the final loss of a seq2seq model be computed?
This script works out exactly how the seq2seq loss should be calculated, using
tensorflow.contrib.seq2seq.sequence_loss as the reference.
Note: I no longer remember the source, but it said the training loss should be the batch
loss, i.e. batch_loss / batch_size rather than batch_loss / num_predicted_words.
'''
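# With the default flags (average_across_timesteps=True, average_across_batch=True),
# sequence_loss boils down to: sum(mask * per_token_cross_entropy) / sum(mask),
# i.e. the total masked cross-entropy divided by the number of real (non-padding) tokens.
# The costs below (cost1 vs cost2/cost3/cost4) check this against manual computations.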
# logits: shape (batch_size=2, max_time=3, vocab_size=4)
output_np = np.array(
[
[[0.6, 0.5, 0.3, 0.2], [0.9, 0.5, 0.3, 0.2], [1.0, 0.5, 0.3, 0.2]],
[[0.2, 0.5, 0.3, 0.2], [0.3, 0.5, 0.3, 0.2], [0.4, 0.5, 0.3, 0.2]]
]
)
print(output_np.shape) # (2, 3, 4)
target_np = np.array([[0, 1, 2], [3, 0, 1]], dtype=np.int32)
target_lengths = np.array([3, 2], dtype=np.int32)
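# target_lengths says the first sequence contains 3 real tokens and the second only 2;
# the second sequence's last time step is padding and must be excluded from the loss.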
output = tf.convert_to_tensor(output_np, np.float32)
target = tf.convert_to_tensor(target_np, np.int32)
lengths = tf.convert_to_tensor(target_lengths, np.float32)
max_target_sequence_length = tf.reduce_max(lengths, name='max_target_len')
masks = tf.sequence_mask(lengths, max_target_sequence_length, dtype=tf.float32, name='masks')
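# For lengths [3, 2] and a max length of 3, masks evaluates to [[1., 1., 1.], [1., 1., 0.]].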
cost1 = sequence_loss(output, target, weights=masks) # 1.429252
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
cost2 = tf.reduce_mean(crossent * masks)  # 1.1910433; a plain mean implicitly assumes the mask is all ones, which is wrong.
cost4 = tf.reduce_sum(crossent * masks) / 6  # 6 = batch_size * max_time = 2 * 3, so cost4 == cost2 == 1.1910433
# The batch predicts tf.reduce_sum(masks) real (non-padding) tokens in total, so normalise by that count:
cost3 = tf.reduce_sum(crossent * masks) / tf.reduce_sum(masks)  # 1.429252, identical to cost1
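# A minimal sketch of the alternative normalisation from the note above (batch_loss / batch_size),
# assuming the average_across_timesteps / average_across_batch flags of the TF 1.x contrib
# sequence_loss: with both set to False it returns the raw masked per-token cross-entropy of
# shape (batch_size, max_time), which can then be normalised however you like.
cost_per_token = sequence_loss(output, target, weights=masks,
                               average_across_timesteps=False,
                               average_across_batch=False)
cost5 = tf.reduce_sum(cost_per_token) / 2  # batch_loss / batch_size ≈ 1.429252 * 5 / 2 ≈ 3.573
# cost5 can be fetched with sess.run(cost5) inside the session below, just like the other costs.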
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    masks_r = sess.run(masks)
    print(masks_r)  # shape (2, 3)
    cost1_r = sess.run(cost1)
    print(cost1_r)
    crossent_r = sess.run(crossent)
    print(crossent_r)  # shape (2, 3)
    cost2_r = sess.run(cost2)
    print(cost2_r)
    cost4_r = sess.run(cost4)
    print(cost4_r)
    cost3_r = sess.run(cost3)
    print(cost3_r)
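# A NumPy-only cross-check of what sequence_loss computes with its default flags: per-token
# softmax cross-entropy, masked, summed, and divided by the number of real tokens. It should
# print roughly 1.429252, matching cost1 and cost3 above.
exp_logits = np.exp(output_np)                                  # (2, 3, 4)
probs = exp_logits / exp_logits.sum(axis=-1, keepdims=True)     # softmax over the vocab axis
rows, cols = np.indices(target_np.shape)
token_ce = -np.log(probs[rows, cols, target_np])                # per-token cross-entropy, (2, 3)
mask_np = (np.arange(3)[None, :] < target_lengths[:, None]).astype(np.float32)
print(np.sum(token_ce * mask_np) / np.sum(mask_np))             # ≈ 1.429252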