Code and Explanation
Install dependencies
!pip uninstall -y parl  # Note: the parl version pre-installed on AIStudio is quite old and tends to conflict with other libraries, so it is recommended to uninstall it first
!pip uninstall -y pandas scikit-learn  # Tip: uninstalling these two libraries on AIStudio before importing parl avoids warning messages; keeping them installed does not affect parl either
!pip install gym
!pip install paddlepaddle==1.6.3
!pip install parl==1.3.1
# Note: the two red ERROR messages about paddlehub and visualdl in the install log are unrelated to parl and can be safely ignored
Import dependencies
import parl
from parl import layers
import paddle.fluid as fluid
import copy
import numpy as np
import os
import gym
from parl.utils import logger
Set hyperparameters
LEARN_FREQ = 5  # training frequency; there is no need to learn on every step — accumulate some new experience first, then learn, which is more efficient
MEMORY_SIZE = 20000  # size of the replay memory; larger values use more RAM
MEMORY_WARMUP_SIZE = 200  # pre-fill the replay memory with some experience before training starts
BATCH_SIZE = 32  # number of samples per learn() call, drawn at random from the replay memory
LEARNING_RATE = 0.001  # learning rate
GAMMA = 0.99  # reward discount factor, typically between 0.9 and 0.999
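As a quick worked example of the discount factor (illustrative only, not part of the original notebook): a reward received k steps in the future is weighted by GAMMA**k, so with GAMMA = 0.99 a reward 100 steps away still counts for about 0.37 of its face value.
# Illustrative only: discounted return of five consecutive rewards of 1
GAMMA = 0.99
rewards = [1.0] * 5
discounted_return = sum(GAMMA**k * r for k, r in enumerate(rewards))
print(discounted_return)  # ~4.91, slightly less than the undiscounted sum of 5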
Build the Model, Algorithm, and Agent architecture
Model
class Model(parl.Model):
    def __init__(self, act_dim):
        hid1_size = 128
        hid2_size = 128
        # 3-layer fully connected network
        self.fc1 = layers.fc(size=hid1_size, act='relu')
        self.fc2 = layers.fc(size=hid2_size, act='relu')
        self.fc3 = layers.fc(size=act_dim, act=None)

    def value(self, obs):
        # Define the forward network:
        # input the state, output the Q-value of every action, i.e. [Q(s,a1), Q(s,a2), Q(s,a3), ...]
        h1 = self.fc1(obs)
        h2 = self.fc2(h1)
        Q = self.fc3(h2)
        return Q
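For orientation, here is how the tensor shapes flow through this model on CartPole-v0 (a sketch assuming obs_dim = 4 and act_dim = 2; the hidden sizes are the 128 fixed in __init__ above):
# obs : [batch, 4]
# fc1 : [batch, 128]  relu
# fc2 : [batch, 128]  relu
# fc3 : [batch, 2]    no activation -> [Q(s, a0), Q(s, a1)]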
Algorithm
# from parl.algorithms import DQN  # alternatively, the DQN algorithm can be imported directly from the parl library
class DQN(parl.Algorithm):
    def __init__(self, model, act_dim=None, gamma=None, lr=None):
        """ DQN algorithm

        Args:
            model (parl.Model): forward network that defines the Q function
            act_dim (int): dimension of the action space, i.e. the number of actions
            gamma (float): reward discount factor
            lr (float): learning rate
        """
        self.model = model
        self.target_model = copy.deepcopy(model)

        assert isinstance(act_dim, int)
        assert isinstance(gamma, float)
        assert isinstance(lr, float)
        self.act_dim = act_dim
        self.gamma = gamma
        self.lr = lr

    def predict(self, obs):
        """ Use self.model's value network to obtain [Q(s,a1), Q(s,a2), ...]
        """
        return self.model.value(obs)

    def learn(self, obs, action, reward, next_obs, terminal):
        """ Update self.model's value network with the DQN algorithm
        """
        # Get max Q' from target_model, used to compute target_Q
        next_pred_value = self.target_model.value(next_obs)
        best_v = layers.reduce_max(next_pred_value, dim=1)
        best_v.stop_gradient = True  # stop gradients from flowing through the target
        terminal = layers.cast(terminal, dtype='float32')
        target = reward + (1.0 - terminal) * self.gamma * best_v

        pred_value = self.model.value(obs)  # predicted Q-values
        # Convert action to a one-hot vector, e.g. 3 => [0, 0, 0, 1, 0]
        action_onehot = layers.one_hot(action, self.act_dim)
        action_onehot = layers.cast(action_onehot, dtype='float32')
        # The element-wise multiplication below picks out Q(s,a) for the taken action,
        # e.g. pred_value = [[2.3, 5.7, 1.2, 3.9, 1.4]], action_onehot = [[0, 0, 0, 1, 0]]
        # ==> pred_action_value = [[3.9]]
        pred_action_value = layers.reduce_sum(
            layers.elementwise_mul(action_onehot, pred_value), dim=1)

        # The mean squared error between Q(s,a) and target_Q gives the loss
        cost = layers.square_error_cost(pred_action_value, target)
        cost = layers.reduce_mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=self.lr)  # Adam optimizer
        optimizer.minimize(cost)
        return cost

    def sync_target(self):
        """ Copy the parameters of self.model into self.target_model
        """
        self.model.sync_weights_to(self.target_model)
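To sanity-check what learn() computes, here is a minimal NumPy restatement of the target and loss for a single toy transition (illustrative only; in the notebook the real computation runs inside the fluid program):
# Illustrative only: DQN target and loss for one toy transition with 2 actions
import numpy as np
gamma = 0.99
reward = np.array([1.0])
terminal = np.array([0.0])                      # 0.0 means the episode did not end
next_q = np.array([[2.3, 5.7]])                 # Q_target(s', a') for both actions
best_v = next_q.max(axis=1)                     # max_a' Q_target(s', a') = 5.7
target = reward + (1.0 - terminal) * gamma * best_v   # 1 + 0.99 * 5.7 = 6.643
pred_q = np.array([[2.0, 4.0]])                 # Q(s, a) from the online network
action_onehot = np.array([[0.0, 1.0]])          # action 1 was taken
pred_action_value = (action_onehot * pred_q).sum(axis=1)   # 4.0
loss = np.mean((pred_action_value - target) ** 2)          # (4.0 - 6.643)^2 ≈ 6.99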
Agent
class Agent(parl.Agent):
    def __init__(self,
                 algorithm,
                 obs_dim,
                 act_dim,
                 e_greed=0.1,
                 e_greed_decrement=0):
        assert isinstance(obs_dim, int)
        assert isinstance(act_dim, int)
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        super(Agent, self).__init__(algorithm)

        self.global_step = 0
        self.update_target_steps = 200  # copy the model's parameters into target_model every 200 training steps

        self.e_greed = e_greed  # probability of picking a random action, i.e. exploration
        self.e_greed_decrement = e_greed_decrement  # gradually reduce exploration as training converges

    def build_program(self):
        self.pred_program = fluid.Program()
        self.learn_program = fluid.Program()

        with fluid.program_guard(self.pred_program):  # build the computation graph for predicting actions; define its input/output variables
            obs = layers.data(
                name='obs', shape=[self.obs_dim], dtype='float32')
            self.value = self.alg.predict(obs)

        with fluid.program_guard(self.learn_program):  # build the computation graph for updating the Q network; define its input/output variables
            obs = layers.data(
                name='obs', shape=[self.obs_dim], dtype='float32')
            action = layers.data(name='act', shape=[1], dtype='int32')
            reward = layers.data(name='reward', shape=[], dtype='float32')
            next_obs = layers.data(
                name='next_obs', shape=[self.obs_dim], dtype='float32')
            terminal = layers.data(name='terminal', shape=[], dtype='bool')
            self.cost = self.alg.learn(obs, action, reward, next_obs, terminal)

    def sample(self, obs):
        sample = np.random.rand()  # random float in [0, 1)
        if sample < self.e_greed:
            act = np.random.randint(self.act_dim)  # explore: every action has a chance of being picked
        else:
            act = self.predict(obs)  # exploit: pick the best action
        self.e_greed = max(
            0.01, self.e_greed - self.e_greed_decrement)  # gradually reduce exploration as training converges
        return act

    def predict(self, obs):  # pick the best action
        obs = np.expand_dims(obs, axis=0)
        pred_Q = self.fluid_executor.run(
            self.pred_program,
            feed={'obs': obs.astype('float32')},
            fetch_list=[self.value])[0]
        pred_Q = np.squeeze(pred_Q, axis=0)
        act = np.argmax(pred_Q)  # index of the largest Q-value, i.e. the corresponding action
        return act

    def learn(self, obs, act, reward, next_obs, terminal):
        # sync the parameters of model and target_model every 200 training steps
        if self.global_step % self.update_target_steps == 0:
            self.alg.sync_target()
        self.global_step += 1

        act = np.expand_dims(act, -1)
        feed = {
            'obs': obs.astype('float32'),
            'act': act.astype('int32'),
            'reward': reward,
            'next_obs': next_obs.astype('float32'),
            'terminal': terminal
        }
        cost = self.fluid_executor.run(
            self.learn_program, feed=feed, fetch_list=[self.cost])[0]  # one training step of the network
        return cost
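To make the exploration schedule concrete (an illustrative calculation, not part of the agent): sample() lowers epsilon by e_greed_decrement on every call and floors it at 0.01, so with the values used later (e_greed=0.1, e_greed_decrement=1e-6) it takes about 90,000 sampled steps to reach the floor.
# Illustrative only: number of sample() calls until epsilon hits its 0.01 floor
e_greed = 0.1
e_greed_decrement = 1e-6
steps_to_floor = (e_greed - 0.01) / e_greed_decrement
print(steps_to_floor)  # 90000.0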
ReplayMemory
import random
import collections
import numpy as np
class ReplayMemory(object):
    def __init__(self, max_size):
        self.buffer = collections.deque(maxlen=max_size)

    # add one piece of experience to the replay memory
    def append(self, exp):
        self.buffer.append(exp)

    # sample N pieces of experience from the replay memory
    def sample(self, batch_size):
        mini_batch = random.sample(self.buffer, batch_size)
        obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []

        for experience in mini_batch:
            s, a, r, s_p, done = experience
            obs_batch.append(s)
            action_batch.append(a)
            reward_batch.append(r)
            next_obs_batch.append(s_p)
            done_batch.append(done)

        return np.array(obs_batch).astype('float32'), \
            np.array(action_batch).astype('float32'), np.array(reward_batch).astype('float32'),\
            np.array(next_obs_batch).astype('float32'), np.array(done_batch).astype('float32')

    def __len__(self):
        return len(self.buffer)
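A quick sanity check of the replay memory with dummy transitions (illustrative only, not used by the training loop below):
# Illustrative only: fill the buffer with dummy (s, a, r, s', done) tuples and sample a batch
rpm_demo = ReplayMemory(max_size=100)
for i in range(10):
    s = np.zeros(4, dtype='float32')   # dummy 4-dim observation, like CartPole-v0
    rpm_demo.append((s, 0, 1.0, s, False))
obs_b, act_b, rew_b, next_obs_b, done_b = rpm_demo.sample(batch_size=5)
print(obs_b.shape, act_b.shape)        # (5, 4) (5,)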
Training & Testing
# run one training episode
def run_episode(env, agent, rpm):
    total_reward = 0
    obs = env.reset()
    step = 0
    while True:
        step += 1
        action = agent.sample(obs)  # sample an action; every action has a chance of being tried
        next_obs, reward, done, _ = env.step(action)
        rpm.append((obs, action, reward, next_obs, done))

        # train model
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample(BATCH_SIZE)
            train_loss = agent.learn(batch_obs, batch_action, batch_reward,
                                     batch_next_obs,
                                     batch_done)  # s, a, r, s', done

        total_reward += reward
        obs = next_obs
        if done:
            break
    return total_reward
# evaluate the agent: run 5 episodes and average the total reward
def evaluate(env, agent, render=False):
    eval_reward = []
    for i in range(5):
        obs = env.reset()
        episode_reward = 0
        while True:
            action = agent.predict(obs)  # greedy: always pick the best action
            obs, reward, done, _ = env.step(action)
            episode_reward += reward
            if render:
                env.render()
            if done:
                break
        eval_reward.append(episode_reward)
    return np.mean(eval_reward)
Create the environment and Agent, create the replay memory, start training, and save the model
env = gym.make('CartPole-v0')  # CartPole-v0: the final evaluation score is expected to exceed 180 (the maximum is 200)
action_dim = env.action_space.n  # CartPole-v0: 2
obs_shape = env.observation_space.shape  # CartPole-v0: (4,)

rpm = ReplayMemory(MEMORY_SIZE)  # DQN replay memory

# build the agent with the parl framework
model = Model(act_dim=action_dim)
algorithm = DQN(model, act_dim=action_dim, gamma=GAMMA, lr=LEARNING_RATE)
agent = Agent(
    algorithm,
    obs_dim=obs_shape[0],
    act_dim=action_dim,
    e_greed=0.1,  # probability of picking a random action, i.e. exploration
    e_greed_decrement=1e-6)  # gradually reduce exploration as training converges

# load a previously saved model
# save_path = './dqn_model.ckpt'
# agent.restore(save_path)

# pre-fill the replay memory so the earliest training batches are not too homogeneous
while len(rpm) < MEMORY_WARMUP_SIZE:
    run_episode(env, agent, rpm)

max_episode = 2000

# start training
episode = 0
while episode < max_episode:  # train for max_episode episodes; test episodes are not counted
    # train part
    for i in range(0, 50):
        total_reward = run_episode(env, agent, rpm)
        episode += 1

    # test part
    eval_reward = evaluate(env, agent, render=False)  # set render=True to watch the agent
    logger.info('episode:{} e_greed:{} test_reward:{}'.format(
        episode, agent.e_greed, eval_reward))

# training finished, save the model
save_path = './dqn_model.ckpt'
agent.save(save_path)
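If you rerun the notebook later, you can reload the checkpoint saved above instead of retraining; a minimal sketch (assuming the setup cells above have been re-run so env, agent and evaluate exist; save and restore are standard parl.Agent methods, as in the commented-out lines earlier):
# Illustrative only: restore the saved parameters and evaluate the greedy policy
agent.restore('./dqn_model.ckpt')              # load the parameters written by agent.save()
print(evaluate(env, agent, render=False))      # average reward over 5 episodes; render=True to watch locally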
Results