I. Definition
When there are many states and actions, a lookup table breaks down. In the Breakout game, for example, every different arrangement and count of the remaining bricks at each time step is a distinct state, which leads to the curse of dimensionality. DQN instead uses a neural network to estimate the value of each state, so no table is needed.
Update rule:
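The figure that originally showed the formula is not reproduced here; for reference, the standard tabular Q-learning update, and the DQN loss that replaces the table lookup with a network Q(s, a; \theta), are:

Q(s, a) \leftarrow Q(s, a) + \alpha \left[ r + \gamma \max_{a'} Q(s', a') - Q(s, a) \right]

L(\theta) = \mathbb{E}\left[ \left( r + \gamma \max_{a'} Q(s', a'; \theta^-) - Q(s, a; \theta) \right)^2 \right]

where \theta^- are the periodically frozen parameters of the target network described below.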
II. Algorithm Update
1. Initialize the replay memory D with capacity N.
2. Use a deep neural network as the Q-value network and initialize its weight parameters.
3. Set the total number of game episodes M.
4. Initialize the network input (size 84*84*4) and compute the network output.
5. With probability ϵ choose a random action at; otherwise choose the action at with the maximum Q value output by the network.
6. Obtain the reward rt from executing at, along with the next network input.
7. Compute the network output at the next time step from the current values.
8. Store the four values together as this step's transition in D (D holds the transitions of N steps).
9. Randomly sample a minibatch of transitions from D (random sampling).
10. Compute the target value for each sampled transition (the Q value updated with the reward obtained after executing at serves as the target; written out just below).
11. Update the weights via SGD.
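The target value in step 10 can be written out explicitly (this is the form used in the original DQN paper):

y_j =
\begin{cases}
r_j & \text{if the episode terminates at step } j+1 \\
r_j + \gamma \max_{a'} \hat{Q}(s_{j+1}, a'; \theta^-) & \text{otherwise}
\end{cases}

where \hat{Q} is the target network with frozen parameters \theta^-.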
The whole algorithm looks complicated at first glance, but it becomes simple once broken down: it is just the Q-learning framework with a few additions.
These additions are:
- a replay memory (for learning from experiences repeatedly)
- a neural network to compute the Q values
- temporarily frozen q_target (target) parameters (to break up correlations)
from maze_env import Maze
from RL_brain import DeepQNetwork


def run_maze():
    step = 0  # controls when learning starts
    for episode in range(300):
        # initialize the environment
        observation = env.reset()

        while True:
            # refresh the environment
            env.render()

            # choose an action
            action = RL.choose_action(observation)

            # get the next state, the reward, and whether the episode terminated
            observation_, reward, done = env.step(action)

            # DQN stores the transition
            RL.store_transition(observation, action, reward, observation_)

            # control when learning starts and how often it runs
            # (accumulate some memories before learning begins)
            if (step > 200) and (step % 5 == 0):
                RL.learn()

            # swap states: state_ -> state
            observation = observation_

            # break out of the loop when the episode terminates
            if done:
                break
            step += 1  # total step counter

    # end of game
    print('game over')
    env.destroy()


if __name__ == "__main__":
    env = Maze()
    RL = DeepQNetwork(env.n_actions, env.n_features,
                      learning_rate=0.01,
                      reward_decay=0.9,
                      e_greedy=0.9,
                      replace_target_iter=200,  # replace target_net's parameters every 200 steps
                      memory_size=2000,         # memory capacity
                      # output_graph=True       # whether to output a tensorboard file
                      )
    env.after(100, run_maze)
    env.mainloop()
    RL.plot_cost()  # view the network's cost curve
III. Neural Network
Temporarily freezing the q_target parameters is done by building two neural networks. (The two networks have exactly the same structure; only their parameter values differ.)
- target_net predicts the q_target value; its parameters are not updated promptly and it is not trained.
- eval_net predicts q_eval; it always holds the newest parameters and is the network that gets trained.
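Putting the two networks into one formula: q_target is computed with target_net's frozen parameters \theta^-, q_eval with eval_net's current parameters \theta, and the loss minimized below is the mean squared difference between them (a standard statement of the DQN objective over a minibatch of size m):

L(\theta) = \frac{1}{m} \sum_{i=1}^{m} \Big( r_i + \gamma \max_{a'} Q(s_i', a'; \theta^-) - Q(s_i, a_i; \theta) \Big)^2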
class DeepQNetwork:
    def _build_net(self):
        # -------- build eval_net, whose parameters are updated promptly --------
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # receives the observation
        self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # receives the q_target values, computed later

        with tf.variable_scope('eval_net'):
            # c_names (collections_names) are used later when updating target_net's parameters
            c_names, n_l1, w_initializer, b_initializer = \
                ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
                tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)  # config of layers

            # first layer of eval_net. collections are used when updating target_net's parameters
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)

            # second layer of eval_net. collections are used when updating target_net's parameters
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_eval = tf.matmul(l1, w2) + b2

        with tf.variable_scope('loss'):  # compute the error
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
        with tf.variable_scope('train'):  # gradient descent
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

        # -------- build target_net, which provides the target Q values --------
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # receives the next observation
        with tf.variable_scope('target_net'):
            # c_names (collections_names) are used later when updating target_net's parameters
            c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]

            # first layer of target_net. collections are used when updating target_net's parameters
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)

            # second layer of target_net. collections are used when updating target_net's parameters
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_next = tf.matmul(l1, w2) + b2
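As a quick sanity check that the two scopes really hold separate variables of matching shapes, the collections can be inspected after building the graph. This is a hypothetical snippet; it assumes the complete DeepQNetwork class (assembled in the next section) is importable from RL_brain:

import tensorflow as tf
from RL_brain import DeepQNetwork

net = DeepQNetwork(n_actions=4, n_features=2)  # made-up sizes for illustration
for e, t in zip(tf.get_collection('eval_net_params'),
                tf.get_collection('target_net_params')):
    # same shapes, but distinct variables under different scopes
    print(e.name, t.name, e.shape == t.shape)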
IV. Decision Making
1. Code structure
class DeepQNetwork:
    # build the neural networks
    def _build_net(self):
        ...

    # initial values
    def __init__(self):
        ...

    # store transitions
    def store_transition(self, s, a, r, s_):
        ...

    # choose an action
    def choose_action(self, observation):
        ...

    # learn
    def learn(self):
        ...

    # view the learning progress (optional)
    def plot_cost(self):
        ...
2. Implementing each part
(1) Building the neural networks (the _build_net code is shown in Section III above)
(2) Initial values
    def __init__(
            self,
            n_actions,
            n_features,              # number of observation features, e.g. length, width, height
            learning_rate=0.01,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=300, # number of steps between target_net updates
            memory_size=500,         # replay memory capacity (upper limit on stored transitions)
            batch_size=32,           # number of memories sampled per update
            e_greedy_increment=None, # gradually raise the greedy rate, shrinking the random range and reducing exploration
            output_graph=False,      # whether to output the graph
    ):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max

        # count learning steps (used to decide when to replace target_net's parameters)
        self.learn_step_counter = 0

        # initialize the memory to all zeros [s, a, r, s_]
        self.memory = np.zeros((self.memory_size, n_features * 2 + 2))  # each row: two observations + action + reward

        # build [target_net, evaluate_net]
        self._build_net()

        # ops that replace target_net's parameters with eval_net's
        t_params = tf.get_collection('target_net_params')
        e_params = tf.get_collection('eval_net_params')
        self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]

        self.sess = tf.Session()

        if output_graph:
            # $ tensorboard --logdir=logs
            # tf.train.SummaryWriter will soon be deprecated; use the following
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.sess.run(tf.global_variables_initializer())  # initialize all parameters
        self.cost_his = []  # record the cost history
(3) Storing transitions
One of the essential ingredients of DQN: it records every step it has experienced, and those steps can be learned from repeatedly, which is what makes it an off-policy method.
    def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0

        # record one [s, a, r, s_] transition
        transition = np.hstack((s, [a, r], s_))

        # the total memory size is fixed; once exceeded, old memories are overwritten by new ones
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition  # the replacement step

        self.memory_counter += 1
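To make the row layout concrete, here is what one stored transition looks like for n_features = 2 (a standalone illustration with made-up numbers):

import numpy as np

s, a, r, s_ = [0.1, 0.2], 1, -1.0, [0.3, 0.4]
transition = np.hstack((s, [a, r], s_))
print(transition)        # [ 0.1  0.2  1.  -1.   0.3  0.4]
print(transition.shape)  # (6,) == n_features * 2 + 2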
(4) Choosing an action
    def choose_action(self, observation):
        # add a batch dimension before feeding into the tf placeholder:
        # unify the observation's shape to (1, size_of_observation)
        observation = observation[np.newaxis, :]

        if np.random.uniform() < self.epsilon:
            # forward-feed the observation and get the q value for every action
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)
        else:
            action = np.random.randint(0, self.n_actions)
        return action
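A quick sanity check of the ε-greedy branch above (a standalone sketch with made-up numbers): with epsilon = 0.9, roughly 90% of the draws fall below epsilon and take the greedy action, while the remaining 10% explore randomly.

import numpy as np

np.random.seed(1)
epsilon, n_trials = 0.9, 10000
greedy = sum(np.random.uniform() < epsilon for _ in range(n_trials))
print(greedy / n_trials)  # close to 0.9: fraction of greedy (exploiting) steps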
(5) Learning. This is the most important step: how DeepQNetwork learns and updates its parameters. It involves the interplay between target_net and eval_net.
    def learn(self):
        # replace target_net's parameters only every replace_target_iter steps
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.sess.run(self.replace_target_op)
            print('\ntarget_params_replaced\n')

        # sample a batch of memories
        if self.memory_counter > self.memory_size:
            sample_index = np.random.choice(self.memory_size, size=self.batch_size)
        else:
            sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
        batch_memory = self.memory[sample_index, :]

        # network outputs (fed with the data stored in memory)
        q_next, q_eval = self.sess.run(
            [self.q_next, self.q_eval],
            feed_dict={
                self.s_: batch_memory[:, -self.n_features:],  # last n_features columns (next state s_)
                self.s: batch_memory[:, :self.n_features],    # first n_features columns (state s)
            })

        # align the target (real) values with the eval (estimated) values:
        # modify q_target only at the action positions actually taken in memory
        q_target = q_eval.copy()

        batch_index = np.arange(self.batch_size, dtype=np.int32)
        eval_act_index = batch_memory[:, self.n_features].astype(int)
        reward = batch_memory[:, self.n_features + 1]

        q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)

        """
        For example, in this batch I have 2 samples and 3 actions:
        q_eval =
        [[1, 2, 3],
         [4, 5, 6]]

        q_target = q_eval =
        [[1, 2, 3],
         [4, 5, 6]]

        Then change q_target to the real q_target value w.r.t. the action taken in q_eval.
        For example:
            in sample 0, I took action 0, and the computed q_target value is -1;
            in sample 1, I took action 2, and the computed q_target value is -2.
        q_target =
        [[-1, 2, 3],
         [4, 5, -2]]

        So (q_target - q_eval) becomes:
        [[(-1)-(1), 0, 0],
         [0, 0, (-2)-(6)]]

        We then backpropagate this error w.r.t. the corresponding action to the network,
        leaving the other actions at error = 0 because we didn't choose them.
        """

        # train eval_net
        _, self.cost = self.sess.run([self._train_op, self.loss],
                                     feed_dict={self.s: batch_memory[:, :self.n_features],
                                                self.q_target: q_target})
        self.cost_his.append(self.cost)

        # increase epsilon
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1
(6) Plotting the learning curve
    def plot_cost(self):
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()
Interpreting the curve: data is collected through ongoing exploration, so new data keeps arriving and the cost may rise at times instead of decreasing monotonically.