This article is organized from notes on Baidu's 7-day introductory reinforcement learning course.
Thanks to Li Kejiao of the Baidu PARL team for the lectures.
We use DDPG to solve a continuous-control version of the CartPole problem: apply a force (a continuous quantity) to the cart so that the pole on top of it stays upright.
1. Install Dependencies
!pip install gym
!pip install paddlepaddle==1.6.3
!pip install parl==1.3.1
# Check that the package versions are correct
!pip list | grep paddlepaddle
!pip list | grep parl
2. Import Dependencies
import gym
import numpy as np
from copy import deepcopy
import paddle.fluid as fluid
import parl
from parl import layers
from parl.utils import logger
3. Set Hyperparameters
ACTOR_LR = 1e-3  # learning rate of the Actor network
CRITIC_LR = 1e-3  # learning rate of the Critic network
GAMMA = 0.99  # reward discount factor
TAU = 0.001  # soft-update coefficient
MEMORY_SIZE = int(1e6)  # replay memory size
MEMORY_WARMUP_SIZE = MEMORY_SIZE // 20  # pre-fill some experience before training starts
BATCH_SIZE = 128
REWARD_SCALE = 0.1  # reward scaling factor
NOISE = 0.05  # standard deviation of the action noise
TRAIN_EPISODE = 6000  # total number of training episodes
4. Build the Model, Algorithm, Agent Architecture
The Agent passes the data it generates to the Algorithm; the Algorithm computes the loss according to the Model's network structure and keeps optimizing it with SGD or another optimizer. This PARL architecture can be conveniently applied to all kinds of deep reinforcement learning problems.
4.1 Model
Model defines the forward network; users are free to customize their own network structure.
class Model(parl.Model):
    def __init__(self, act_dim):
        self.actor_model = ActorModel(act_dim)
        self.critic_model = CriticModel()

    def policy(self, obs):  # delegates to the method of the same name on ActorModel
        return self.actor_model.policy(obs)

    def value(self, obs, act):  # delegates to the method of the same name on CriticModel
        return self.critic_model.value(obs, act)

    def get_actor_params(self):
        return self.actor_model.parameters()  # base-class method that fetches the parameters
class ActorModel(parl.Model):  # actor model
    def __init__(self, act_dim):
        hid_size = 100
        self.fc1 = layers.fc(size=hid_size, act='relu')  # first layer uses relu activation
        self.fc2 = layers.fc(size=act_dim, act='tanh')  # second layer uses tanh activation, range -1~1

    def policy(self, obs):  # takes obs as input
        hid = self.fc1(obs)
        means = self.fc2(hid)
        return means  # outputs a float in -1~1
class CriticModel(parl.Model):  # critic model
    def __init__(self):
        hid_size = 100
        self.fc1 = layers.fc(size=hid_size, act='relu')  # first layer uses relu
        self.fc2 = layers.fc(size=1, act=None)  # second layer is linear (no activation), since it outputs a Q value

    def value(self, obs, act):
        # Concatenate along axis 1: the row count stays the same while the
        # column count grows, so each sample contains both obs and act
        concat = layers.concat([obs, act], axis=1)
        hid = self.fc1(concat)
        Q = self.fc2(hid)
        Q = layers.squeeze(Q, axes=[1])  # squeeze out the size-1 axis
        return Q
4.2 Algorithm
Algorithm defines the concrete algorithm that updates the forward network (Model); that is, it updates the Model by defining a loss function. All algorithm-related computation lives in the Algorithm.
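Concretely, DDPG trains the two networks with the following losses, which is exactly what the code below implements (μ is the actor, Q the critic, primes denote the target networks, and d the terminal flag):

$$L_{\text{actor}} = -\,\mathbb{E}\big[Q(s, \mu(s))\big], \qquad L_{\text{critic}} = \mathbb{E}\Big[\big(Q(s, a) - (r + \gamma\,(1 - d)\,Q'(s', \mu'(s')))\big)^2\Big]$$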
# from parl.algorithms import DDPG  # DDPG can also be imported directly from parl, with no need to rewrite the algorithm yourself

class DDPG(parl.Algorithm):
    def __init__(self,
                 model,
                 gamma=None,
                 tau=None,
                 actor_lr=None,
                 critic_lr=None):
        """ DDPG algorithm

        Args:
            model (parl.Model): forward network of the actor and critic.
                model must implement the get_actor_params() method.
            gamma (float): reward discount factor.
            tau (float): soft-update coefficient for syncing self.target_model with self.model
            actor_lr (float): learning rate of the actor
            critic_lr (float): learning rate of the critic
        """
        assert isinstance(gamma, float)  # check argument types
        assert isinstance(tau, float)
        assert isinstance(actor_lr, float)
        assert isinstance(critic_lr, float)
        self.gamma = gamma  # assign
        self.tau = tau
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr

        self.model = model  # keep the model
        self.target_model = deepcopy(model)  # hard-copy the model
    def predict(self, obs):
        """ Use the actor model inside self.model to predict an action
        """
        return self.model.policy(obs)

    def learn(self, obs, action, reward, next_obs, terminal):
        """ Update the actor and critic with the DDPG algorithm
        """
        actor_cost = self._actor_learn(obs)
        critic_cost = self._critic_learn(obs, action, reward, next_obs,
                                         terminal)
        return actor_cost, critic_cost
    def _actor_learn(self, obs):
        action = self.model.policy(obs)  # the action obtained is a continuous value in -1~1
        Q = self.model.value(obs, action)  # the network maps the state and action to the corresponding Q value
        cost = layers.reduce_mean(-1.0 * Q)  # minimizing the cost maximizes the Q value
        optimizer = fluid.optimizer.AdamOptimizer(self.actor_lr)
        optimizer.minimize(cost, parameter_list=self.model.get_actor_params())
        return cost
    def _critic_learn(self, obs, action, reward, next_obs, terminal):
        next_action = self.target_model.policy(next_obs)  # predict the next action
        next_Q = self.target_model.value(next_obs, next_action)  # get the next-step Q
        terminal = layers.cast(terminal, dtype='float32')  # cast the bool to a float
        target_Q = reward + (1.0 - terminal) * self.gamma * next_Q  # compute the target Q
        target_Q.stop_gradient = True  # block gradients from flowing through the target
        Q = self.model.value(obs, action)  # the network maps the state and action to the corresponding Q value
        cost = layers.square_error_cost(Q, target_Q)  # minimize the gap between the predicted Q and the target Q
        cost = layers.reduce_mean(cost)
        optimizer = fluid.optimizer.AdamOptimizer(self.critic_lr)
        optimizer.minimize(cost)
        return cost
    def sync_target(self, decay=None, share_vars_parallel_executor=None):
        """ Copy parameters from self.model into self.target_model; the soft-update coefficient is configurable
        """
        if decay is None:
            decay = 1.0 - self.tau
        # New parameters get 0.1% weight while the old ones keep 99.9%,
        # which makes the parameter updates smoother
        self.model.sync_weights_to(
            self.target_model,
            decay=decay,
            share_vars_parallel_executor=share_vars_parallel_executor)
        # Parameter syncing uses PARL's built-in function
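To make the decay semantics concrete, here is a minimal NumPy sketch of the blend that sync_weights_to performs for each parameter tensor (the blend helper is hypothetical; PARL applies this internally across all variables):

import numpy as np

def blend(target_w, source_w, decay):
    # the target keeps `decay` of its old value and takes (1 - decay) of the source
    return decay * target_w + (1.0 - decay) * source_w

target_w, source_w = np.zeros(3), np.ones(3)
# With TAU = 0.001, decay = 0.999: the target moves 0.1% toward the source per sync
print(blend(target_w, source_w, decay=0.999))  # [0.001 0.001 0.001]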
4.3 Agent
Agent is responsible for the interaction between the algorithm and the environment. During the interaction it feeds the generated data to the Algorithm, which updates the model (Model). Data preprocessing is usually defined here as well.
class Agent(parl.Agent):
    def __init__(self, algorithm, obs_dim, act_dim):
        assert isinstance(obs_dim, int)
        assert isinstance(act_dim, int)
        self.obs_dim = obs_dim  # observation dimension
        self.act_dim = act_dim  # action dimension (1 here)
        super(Agent, self).__init__(algorithm)

        # Note: sync the parameters of self.model and self.target_model at the very start.
        self.alg.sync_target(decay=0)
    def build_program(self):
        self.pred_program = fluid.Program()
        self.learn_program = fluid.Program()

        with fluid.program_guard(self.pred_program):  # build the prediction program
            # input definitions
            obs = layers.data(
                name='obs', shape=[self.obs_dim], dtype='float32')
            # output definitions
            self.pred_act = self.alg.predict(obs)

        with fluid.program_guard(self.learn_program):  # build the learning program
            # input definitions
            obs = layers.data(
                name='obs', shape=[self.obs_dim], dtype='float32')
            act = layers.data(
                name='act', shape=[self.act_dim], dtype='float32')
            reward = layers.data(name='reward', shape=[], dtype='float32')
            next_obs = layers.data(
                name='next_obs', shape=[self.obs_dim], dtype='float32')
            terminal = layers.data(name='terminal', shape=[], dtype='bool')
            # output definitions
            _, self.critic_cost = self.alg.learn(obs, act, reward, next_obs,
                                                 terminal)
    def predict(self, obs):
        obs = np.expand_dims(obs, axis=0)  # the program's expected input layout requires an extra dimension
        act = self.fluid_executor.run(
            self.pred_program, feed={'obs': obs},
            fetch_list=[self.pred_act])[0]
        act = np.squeeze(act)
        return act
    def learn(self, obs, act, reward, next_obs, terminal):
        # input data
        feed = {
            'obs': obs,
            'act': act,
            'reward': reward,
            'next_obs': next_obs,
            'terminal': terminal
        }
        # run the program and fetch its output
        critic_cost = self.fluid_executor.run(
            self.learn_program, feed=feed, fetch_list=[self.critic_cost])[0]
        self.alg.sync_target()
        return critic_cost  # cost of the critic network
5. The Continuous-Control CartPole Environment
# env.py
# Continuous version of Cartpole
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np


class ContinuousCartPoleEnv(gym.Env):
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 50
    }

    def __init__(self):
        self.gravity = 9.8
        self.masscart = 1.0
        self.masspole = 0.1
        self.total_mass = (self.masspole + self.masscart)
        self.length = 0.5  # actually half the pole's length
        self.polemass_length = (self.masspole * self.length)
        self.force_mag = 30.0
        self.tau = 0.02  # seconds between state updates
        self.min_action = -1.0
        self.max_action = 1.0

        # Angle at which to fail the episode
        self.theta_threshold_radians = 12 * 2 * math.pi / 360
        self.x_threshold = 2.4

        # Angle limit set to 2 * theta_threshold_radians so failing observation
        # is still within bounds
        high = np.array([
            self.x_threshold * 2,
            np.finfo(np.float32).max,
            self.theta_threshold_radians * 2,
            np.finfo(np.float32).max])

        self.action_space = spaces.Box(
            low=self.min_action,
            high=self.max_action,
            shape=(1,)
        )
        self.observation_space = spaces.Box(-high, high)

        self.seed()
        self.viewer = None
        self.state = None
        self.steps_beyond_done = None
    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def stepPhysics(self, force):
        x, x_dot, theta, theta_dot = self.state
        costheta = math.cos(theta)
        sintheta = math.sin(theta)
        temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
        thetaacc = (self.gravity * sintheta - costheta * temp) / \
            (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
        xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
        x = x + self.tau * x_dot
        x_dot = x_dot + self.tau * xacc
        theta = theta + self.tau * theta_dot
        theta_dot = theta_dot + self.tau * thetaacc
        return (x, x_dot, theta, theta_dot)
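    # For reference, stepPhysics implements the classic cart-pole dynamics
    # (as in Gym's original CartPole), followed by explicit Euler integration
    # with step size tau = 0.02 s:
    #   u         = (F + m_p * l * theta_dot^2 * sin(theta)) / (m_c + m_p)
    #   theta_acc = (g * sin(theta) - cos(theta) * u)
    #               / (l * (4/3 - m_p * cos(theta)^2 / (m_c + m_p)))
    #   x_acc     = u - m_p * l * theta_acc * cos(theta) / (m_c + m_p)
    # where l is the half pole length and F the applied force.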
    def step(self, action):
        action = np.expand_dims(action, 0)
        assert self.action_space.contains(action), \
            "%r (%s) invalid" % (action, type(action))
        # Cast action to float to strip np trappings
        force = self.force_mag * float(action)
        self.state = self.stepPhysics(force)
        x, x_dot, theta, theta_dot = self.state
        done = x < -self.x_threshold \
            or x > self.x_threshold \
            or theta < -self.theta_threshold_radians \
            or theta > self.theta_threshold_radians
        done = bool(done)

        if not done:
            reward = 1.0
        elif self.steps_beyond_done is None:
            # Pole just fell!
            self.steps_beyond_done = 0
            reward = 1.0
        else:
            if self.steps_beyond_done == 0:
                gym.logger.warn("""
You are calling 'step()' even though this environment has already returned
done = True. You should always call 'reset()' once you receive 'done = True'
Any further steps are undefined behavior.
                """)
            self.steps_beyond_done += 1
            reward = 0.0
        return np.array(self.state), reward, done, {}
    def reset(self):
        self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
        self.steps_beyond_done = None
        return np.array(self.state)

    def render(self, mode='human'):
        screen_width = 600
        screen_height = 400

        world_width = self.x_threshold * 2
        scale = screen_width / world_width
        carty = 100  # TOP OF CART
        polewidth = 10.0
        polelen = scale * 1.0
        cartwidth = 50.0
        cartheight = 30.0

        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
            axleoffset = cartheight / 4.0
            cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            self.carttrans = rendering.Transform()
            cart.add_attr(self.carttrans)
            self.viewer.add_geom(cart)
            l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
            pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            pole.set_color(.8, .6, .4)
            self.poletrans = rendering.Transform(translation=(0, axleoffset))
            pole.add_attr(self.poletrans)
            pole.add_attr(self.carttrans)
            self.viewer.add_geom(pole)
            self.axle = rendering.make_circle(polewidth / 2)
            self.axle.add_attr(self.poletrans)
            self.axle.add_attr(self.carttrans)
            self.axle.set_color(.5, .5, .8)
            self.viewer.add_geom(self.axle)
            self.track = rendering.Line((0, carty), (screen_width, carty))
            self.track.set_color(0, 0, 0)
            self.viewer.add_geom(self.track)

        if self.state is None:
            return None

        x = self.state
        cartx = x[0] * scale + screen_width / 2.0  # MIDDLE OF CART
        self.carttrans.set_translation(cartx, carty)
        self.poletrans.set_rotation(-x[2])
        return self.viewer.render(return_rgb_array=(mode == 'rgb_array'))

    def close(self):
        if self.viewer:
            self.viewer.close()
6. Set Up the Replay Memory
The code is identical to the replay_memory.py used for DQN.
# replay_memory.py
import random
import collections
import numpy as np


class ReplayMemory(object):
    def __init__(self, max_size):
        self.buffer = collections.deque(maxlen=max_size)

    def append(self, exp):
        self.buffer.append(exp)

    def sample(self, batch_size):
        mini_batch = random.sample(self.buffer, batch_size)
        obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []

        for experience in mini_batch:
            s, a, r, s_p, done = experience
            obs_batch.append(s)
            action_batch.append(a)
            reward_batch.append(r)
            next_obs_batch.append(s_p)
            done_batch.append(done)

        return np.array(obs_batch).astype('float32'), \
            np.array(action_batch).astype('float32'), np.array(reward_batch).astype('float32'), \
            np.array(next_obs_batch).astype('float32'), np.array(done_batch).astype('float32')

    def __len__(self):
        return len(self.buffer)
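A minimal usage sketch (the numbers are illustrative; obs_dim = 4 and act_dim = 1 match this environment):

rpm = ReplayMemory(max_size=10000)
# each stored transition is an (obs, action, reward, next_obs, done) tuple
rpm.append((np.zeros(4), [0.5], 0.1, np.zeros(4), False))
# once enough transitions are buffered, draw a random float32 training batch:
# obs, act, rew, next_obs, done = rpm.sample(BATCH_SIZE)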
7. Training && Testing
def run_episode(agent, env, rpm):
    obs = env.reset()
    total_reward = 0
    steps = 0
    while True:
        steps += 1
        batch_obs = np.expand_dims(obs, axis=0)
        action = agent.predict(batch_obs.astype('float32'))

        # Add exploration noise and keep the output within [-1.0, 1.0]:
        # sample from a normal distribution with mean `action` (-1~1) and
        # standard deviation NOISE; np.clip prevents the sample from leaving the range
        action = np.clip(np.random.normal(action, NOISE), -1.0, 1.0)

        next_obs, reward, done, info = env.step(action)  # take one step in the environment

        action = [action]  # wrap it for storage in the replay memory
        rpm.append((obs, action, REWARD_SCALE * reward, next_obs, done))

        if len(rpm) > MEMORY_WARMUP_SIZE and (steps % 5) == 0:
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample(BATCH_SIZE)
            agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs,
                        batch_done)

        obs = next_obs
        total_reward += reward

        if done or steps >= 200:
            break
    return total_reward
def evaluate(env, agent, render=False):
    eval_reward = []
    for i in range(5):
        obs = env.reset()
        total_reward = 0
        steps = 0
        while True:
            batch_obs = np.expand_dims(obs, axis=0)
            action = agent.predict(batch_obs.astype('float32'))
            action = np.clip(action, -1.0, 1.0)

            steps += 1
            next_obs, reward, done, info = env.step(action)

            obs = next_obs
            total_reward += reward

            if render:
                env.render()
            if done or steps >= 200:
                break
        eval_reward.append(total_reward)
    return np.mean(eval_reward)
8. Create the Environment and Agent, Create the Replay Memory, Start Training, Save the Model
# Create the environment
env = ContinuousCartPoleEnv()

obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]

# Create the agent with the PARL framework
model = Model(act_dim)  # instantiate the model
algorithm = DDPG(
    model, gamma=GAMMA, tau=TAU, actor_lr=ACTOR_LR, critic_lr=CRITIC_LR)  # instantiate the algorithm, passing in the model
agent = Agent(algorithm, obs_dim, act_dim)  # instantiate the agent, passing in the algorithm

# Create the replay memory
rpm = ReplayMemory(MEMORY_SIZE)
# Pre-fill the replay memory before training starts
while len(rpm) < MEMORY_WARMUP_SIZE:
    run_episode(agent, env, rpm)

episode = 0
while episode < TRAIN_EPISODE:
    for i in range(50):
        total_reward = run_episode(agent, env, rpm)
        episode += 1

    eval_reward = evaluate(env, agent, render=False)
    logger.info('episode:{} test_reward:{}'.format(
        episode, eval_reward))
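The heading mentions saving the model, but the loop above never does; a minimal sketch using the save/restore methods that parl.Agent provides (the checkpoint path below is arbitrary):

# After training, persist the learned parameters
agent.save('./model.ckpt')
# Later, reload them before running evaluate(env, agent, render=True)
# agent.restore('./model.ckpt')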