Deep Q Learning Notes

DQN, one of the foundations of AlphaGo
Q learning:
1. It is mainly used when the solution (the action space) is discrete.
2. It works mainly through the value function, i.e. the policy is derived directly from the value function.
3. Its core lies in the Bellman equation and the cost function.
The core of the Bellman equation is that when using the reward you have to take the future into account, not just the present. If you only look at the current reward, it is like a person who lives only for the moment and never thinks ahead: you will not get far, and in a game it means you will die very quickly. Both Q-learning and policy gradient methods make use of the Bellman equation.
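As a side note (a minimal sketch of my own, not from the original post): in tabular form this is exactly what the Q-learning update does. The Bellman-style target r + GAMMA * max Q(s', a') blends the immediate reward with an estimate of the best value obtainable afterwards.

import numpy as np

n_states, n_actions = 5, 2
Q = np.zeros((n_states, n_actions))   # tabular Q values
alpha, gamma = 0.1, 0.9               # learning rate and discount factor

def q_update(s, a, r, s_next, done):
    # Bellman target: immediate reward plus the discounted best future value
    target = r if done else r + gamma * np.max(Q[s_next])
    Q[s, a] += alpha * (target - Q[s, a])

# one hypothetical transition
q_update(s=0, a=1, r=0.1, s_next=3, done=False)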
The cost function represents your objective, i.e. what you want the agent to do.
In the code below, the Bellman equation shows up in this line from train_Q_network:

y_batch.append(reward_batch[i] + GAMMA * np.max(Q_value_batch[i]))

Here Q_value_batch[i] holds the Q values of the next state; np.max picks the largest of them, i.e. the best value obtainable from the next state (it is a Q value, not a probability), and this value, discounted by GAMMA, is added to the current reward to form the target.
I wonder whether we could keep going: also compute the step after next, and so on until the episode ends, then accumulate all of the rewards with discounted weights. Or is it that the state after next cannot be estimated at update time, so its Q value cannot be computed?
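As an aside (a minimal sketch of my own, not from the original post): if a whole episode's rewards were stored, the full discounted return could indeed be computed by accumulating backwards, which is what Monte Carlo style methods do. DQN instead bootstraps a single step with max Q, because it learns from randomly sampled replay transitions before the episode has finished, so the states beyond next_state are not available at update time.

import numpy as np

def discounted_returns(rewards, gamma=0.9):
    # G_t = r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ..., accumulated backwards
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns

# example with made-up rewards from a short episode
print(discounted_returns([0.1, 0.1, -1.0]))  # prints roughly [-0.62 -0.8  -1.  ]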
Cost function:
The cost function is defined in the create_training_method function of the code:

  def create_training_method(self):
    self.action_input = tf.placeholder("float",[None,self.action_dim]) # one hot representation
    self.y_input = tf.placeholder("float",[None])
    Q_action = tf.reduce_sum(tf.multiply(self.Q_value,self.action_input),reduction_indices = 1)
    self.cost = tf.reduce_mean(tf.square(self.y_input - Q_action))
    self.optimizer = tf.train.AdamOptimizer(0.0001).minimize(self.cost)

In the cost function, self.y_input is the reward target fed in, and it already contains the future reward information.
Q_action is the product of the Q values and the currently input action (summed over the one-hot mask).
The core of the cost function is to minimize the difference between self.y_input and Q_action.
When the input self.y_input is larger, Q_action has to become larger as well, and since

Q_action = tf.reduce_sum(tf.multiply(self.Q_value,self.action_input),reduction_indices = 1)

Q_action depends on the Q values and on the input self.action_input. self.action_input is a fixed input that cannot be optimized, so only the Q values get adjusted, which in theory means:
when self.y_input is large, i.e. the reward is large, the action was a good one, so the Q values receive a larger adjustment that moves the network's output toward that action; when self.y_input is small, i.e. the reward is small, the adjustment toward that action is correspondingly smaller.
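A small numerical illustration (my own, with made-up numbers): the one-hot action mask selects only the Q value of the action that was actually taken, so the squared error pulls that single entry toward y_input and leaves the other actions' Q values untouched by this sample.

import numpy as np

Q_value = np.array([[1.2, 0.4]])       # network output for one state with two actions
action_input = np.array([[0.0, 1.0]])  # one hot: action 1 was the one taken
y_input = np.array([0.9])              # target = reward + GAMMA * max Q of next state

Q_action = np.sum(Q_value * action_input, axis=1)  # -> [0.4], the taken action's Q value
cost = np.mean(np.square(y_input - Q_action))      # -> (0.9 - 0.4)**2 = 0.25
print(Q_action, cost)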
Since the job of the value function is to estimate the maximum reward obtainable from the current state (in other words, the value of the current state), Q-learning in effect derives the policy directly from the value function, or you could say it folds the value function and the policy into one. If the value function and the policy are kept separate, the typical example is the policy gradient family: there the value function estimates the value of the current state, and the difference between the actual reward and the estimated reward gives the direction and magnitude of the gradient update of the policy function (for the current action). See this article for more (http://blog.csdn.net/liyuan123zhouhui/article/details/78656231).
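For contrast, here is a rough policy-gradient style sketch in the same TensorFlow 1.x style (my own illustration, not from the original post or the linked article; all names are made up). A critic estimates the state value, and the gap between the actual discounted return and that estimate (the advantage) sets the direction and size of the update for the probability of the action that was taken.

import tensorflow as tf

state_dim, action_dim = 4, 2
state_input = tf.placeholder("float",[None,state_dim])
action_input = tf.placeholder("float",[None,action_dim])  # one hot actions
return_input = tf.placeholder("float",[None])             # actual discounted returns

# actor outputs action probabilities, critic outputs a state-value estimate
hidden = tf.layers.dense(state_input, 20, activation=tf.nn.relu)
action_prob = tf.layers.dense(hidden, action_dim, activation=tf.nn.softmax)
state_value = tf.squeeze(tf.layers.dense(hidden, 1), axis=1)

advantage = return_input - state_value                     # actual minus estimated value
log_prob = tf.log(tf.reduce_sum(action_prob * action_input, axis=1) + 1e-8)
actor_loss = -tf.reduce_mean(log_prob * tf.stop_gradient(advantage))
critic_loss = tf.reduce_mean(tf.square(advantage))
optimizer = tf.train.AdamOptimizer(0.0001).minimize(actor_loss + critic_loss)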
The code with its key comments is pasted below:
150 lines of code implementing the DQN algorithm to play CartPole (https://zhuanlan.zhihu.com/p/21477488):

import gym
import tensorflow as tf
import numpy as np
import random
from collections import deque

# Hyper Parameters for DQN
GAMMA = 0.9 # discount factor for target Q
INITIAL_EPSILON = 0.5 # starting value of epsilon
FINAL_EPSILON = 0.01 # final value of epsilon
REPLAY_SIZE = 10000 # experience replay buffer size
BATCH_SIZE = 32 # size of minibatch

#agent needs to train this DQN network
class DQN():
  # DQN Agent
  def __init__(self, env):
    # init experience replay
    self.replay_buffer = deque()
    # init some parameters
    self.time_step = 0
    self.epsilon = INITIAL_EPSILON
    self.state_dim = env.observation_space.shape[0]
    self.action_dim = env.action_space.n

    #create q network and training method when init
    self.create_Q_network()
    self.create_training_method()

    # Init session
    self.session = tf.InteractiveSession()
    #self.session.run(tf.initialize_all_variables())
    self.session.run(tf.global_variables_initializer())

  def create_Q_network(self):
    # network weights
    W1 = self.weight_variable([self.state_dim,20])
    b1 = self.bias_variable([20])
    W2 = self.weight_variable([20,self.action_dim])
    b2 = self.bias_variable([self.action_dim])

    # input layer
    self.state_input = tf.placeholder("float",[None,self.state_dim])
    # hidden layers
    h_layer = tf.nn.relu(tf.matmul(self.state_input,W1) + b1)
    # Q Value layer
    self.Q_value = tf.matmul(h_layer,W2) + b2

  def create_training_method(self):
    self.action_input = tf.placeholder("float",[None,self.action_dim]) # one hot representation
    self.y_input = tf.placeholder("float",[None])
    Q_action = tf.reduce_sum(tf.multiply(self.Q_value,self.action_input),reduction_indices = 1)
    #when self.y_input is large, i.e. the reward is large, this action is good, so the Q values get a larger adjustment that moves the output toward this action; when self.y_input is small, the reward is small, and the adjustment toward this action is smaller
    self.cost = tf.reduce_mean(tf.square(self.y_input - Q_action))
    self.optimizer = tf.train.AdamOptimizer(0.0001).minimize(self.cost)

  #perceive stores one transition in the replay buffer and trains the Q network once enough samples are collected
  def perceive(self,state,action,reward,next_state,done):
    one_hot_action = np.zeros(self.action_dim)
    one_hot_action[action] = 1
    self.replay_buffer.append((state,one_hot_action,reward,next_state,done))
    if len(self.replay_buffer) > REPLAY_SIZE:
      self.replay_buffer.popleft()

    if len(self.replay_buffer) > BATCH_SIZE:
      self.train_Q_network()

  def train_Q_network(self):
    self.time_step += 1
    # Step 1: obtain random minibatch from replay memory
    minibatch = random.sample(self.replay_buffer,BATCH_SIZE)
    state_batch = [data[0] for data in minibatch]
    action_batch = [data[1] for data in minibatch]
    reward_batch = [data[2] for data in minibatch]
    next_state_batch = [data[3] for data in minibatch]

    # Step 2: calculate y
    y_batch = []
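    # Q values of the next states, used below to build the Bellman targets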
    Q_value_batch = self.Q_value.eval(feed_dict={self.state_input:next_state_batch})

    for i in range(0,BATCH_SIZE):
      done = minibatch[i][4]

      if done:
        y_batch.append(reward_batch[i])
      else :
        #the future reward has to be added here as well: this is the Bellman equation
        #take the future into account, not just the present; if you only look at the current reward, like a person who lives only for today and never thinks ahead, you will not get far, and in the game it means you will die quickly
        y_batch.append(reward_batch[i] + GAMMA * np.max(Q_value_batch[i]))

    self.optimizer.run(feed_dict={
      self.y_input:y_batch,
      self.action_input:action_batch,
      self.state_input:state_batch
      })

  #agent action during training: epsilon-greedy, i.e. action with exploration noise
  def egreedy_action(self,state):
    Q_value = self.Q_value.eval(feed_dict = {
      self.state_input:[state]
      })[0]

    # decay the exploration rate epsilon a little on every action selection
    self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON)/10000
    if random.random() <= self.epsilon:
      return random.randint(0,self.action_dim - 1)
    else:
      return np.argmax(Q_value)


  #greedy action for testing, without exploration noise
  def action(self,state):
    return np.argmax(self.Q_value.eval(feed_dict = {
      self.state_input:[state]
      })[0])

  def weight_variable(self,shape):
    initial = tf.truncated_normal(shape)
    return tf.Variable(initial)

  def bias_variable(self,shape):
    initial = tf.constant(0.01, shape = shape)
    return tf.Variable(initial)
# ---------------------------------------------------------
# Hyper Parameters
ENV_NAME = 'CartPole-v0'
EPISODE = 10000 # Episode limitation
STEP = 300 # Step limitation in an episode
TEST = 10 # number of test episodes run every 100 training episodes

def main():
  # initialize OpenAI Gym env and dqn agent
  #1 init environment
  env = gym.make(ENV_NAME)
  #2 init agent
  agent = DQN(env)

  for episode in range(EPISODE):
    # initialize task
    state = env.reset()
    # Train

    for step in range(STEP):
      action = agent.egreedy_action(state) # epsilon-greedy action for training (with exploration noise)
      #print("action:",action)
      next_state,reward,done,_ = env.step(action) # environment feedback for the action: next_state, reward, done
      if reward is None:
        print("action:",action)
        print("reward:",reward)
        print("state:",state)
        print("next_state:",next_state)

      # Define reward for agent
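      # reshaped reward: a small positive reward for staying alive, a penalty when the episode ends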
      reward_agent = -1 if done else 0.1
      #store the transition (state,action,reward,next_state,done) and let the agent train on a random minibatch
      agent.perceive(state,action,reward_agent,next_state,done)
      state = next_state
      if done:
        break
    # Test every 100 episodes
    if episode % 100 == 0:
      total_reward = 0
      for i in range(TEST):
        state = env.reset()
        for j in range(STEP):
          env.render()
          action = agent.action(state) # direct action for test
          state,reward,done,_ = env.step(action)
          #print("TEST reward:",reward)
          total_reward += reward
          if done:
            break
      ave_reward = total_reward/TEST
      print ('episode: ',episode,'Evaluation Average Reward:',ave_reward)
      #if ave_reward >= 200:
      #  break

if __name__ == '__main__':
  main()