CliffWalking
- As shown in the figure below, S is the start, C marks the cliff cells, and G is the goal
- The agent starts from S; its goal is to find the shortest path to G
- The reward here can be modeled as -1 per step (in `CliffWalking-v0`, stepping into the cliff costs -100 and sends the agent back to the start), so maximizing the return is the same as minimizing the path length; the probe below confirms this
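A quick way to verify this reward structure is to step the environment once by hand. This is a minimal probe, assuming the classic gym API (gym <= 0.25, where `step()` returns a 4-tuple):

```python
import gym

# minimal probe of CliffWalking-v0's reward structure
env = gym.make("CliffWalking-v0")
obs = env.reset()                   # start state: bottom-left corner of the 4x12 grid
print(env.observation_space.n)      # 48 states
print(env.action_space.n)           # 4 actions: 0 up, 1 right, 2 down, 3 left

obs, reward, done, _ = env.step(0)  # an ordinary move costs -1
print(reward)                       # -1
env.reset()
obs, reward, done, _ = env.step(1)  # moving right from the start falls into the cliff
print(reward, done)                 # -100 False: the agent is sent back to the start
```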
Code and explanation
```python
import gym
import time
import numpy as np


class QLearningAgent(object):
    def __init__(self, obs_n, act_n, learning_rate=0.01, gamma=0.9, e_greed=0.1):
        self.act_n = act_n       # number of available actions
        self.lr = learning_rate  # learning rate
        self.gamma = gamma       # discount factor for future rewards
        self.epsilon = e_greed   # probability of picking a random action
        self.Q = np.zeros((obs_n, act_n))

    # Sample an action for the given observation, with exploration
    def sample(self, obs):
        if np.random.uniform(0, 1) < (1.0 - self.epsilon):  # choose the action by Q-value
            action = self.predict(obs)
        else:
            action = np.random.choice(self.act_n)  # with probability epsilon, explore randomly
        return action

    # Predict the greedy action for the given observation
    def predict(self, obs):
        Q_list = self.Q[obs, :]
        maxQ = np.max(Q_list)
        action_list = np.where(Q_list == maxQ)[0]  # maxQ may correspond to several actions
        action = np.random.choice(action_list)     # break ties randomly
        return action

    # Learning method, i.e. how the Q-table is updated
    def learn(self, obs, action, reward, next_obs, done):
        """ off-policy
        obs: observation before the interaction, s_t
        action: action chosen in this interaction, a_t
        reward: reward r received for this action
        next_obs: observation after the interaction, s_t+1
        done: whether the episode has ended
        """
        predict_Q = self.Q[obs, action]
        if done:
            target_Q = reward  # there is no next state
        else:
            target_Q = reward + self.gamma * np.max(self.Q[next_obs, :])  # Q-learning
        self.Q[obs, action] += self.lr * (target_Q - predict_Q)  # correct the Q-value

    # Save the Q-table to a file
    def save(self):
        npy_file = './q_table.npy'
        np.save(npy_file, self.Q)
        print(npy_file + ' saved.')

    # Load the Q-table from a file
    def restore(self, npy_file='./q_table.npy'):
        self.Q = np.load(npy_file)
        print(npy_file + ' loaded.')
```
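Two pieces of the class are worth spelling out. `sample` implements an ε-greedy policy: with probability 1 − ε it takes the greedy action from `predict`, and with probability ε it picks uniformly at random, which keeps the agent exploring. `learn` implements the standard tabular Q-learning update:

$$
Q(s_t, a_t) \leftarrow Q(s_t, a_t) + \alpha \Big[ r_t + \gamma \max_{a} Q(s_{t+1}, a) - Q(s_t, a_t) \Big]
$$

Because the target takes the max over all actions rather than the action the behavior policy actually executes next, Q-learning is off-policy; SARSA would use $Q(s_{t+1}, a_{t+1})$ instead and learn on-policy.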
```python
# train.py
def run_episode(env, agent, render=False):
    total_steps = 0    # number of steps taken in this episode
    total_reward = 0
    obs = env.reset()  # reset the environment, i.e. start a new episode
    while True:
        action = agent.sample(obs)  # choose an action according to the algorithm
        next_obs, reward, done, _ = env.step(action)  # one interaction with the environment
        # train the Q-learning algorithm
        agent.learn(obs, action, reward, next_obs, done)
        obs = next_obs         # move on to the next observation
        total_reward += reward
        total_steps += 1       # count the steps
        if render:
            env.render()       # render a new frame
        if done:
            break
    return total_reward, total_steps


def test_episode(env, agent):
    total_reward = 0
    obs = env.reset()
    while True:
        action = agent.predict(obs)  # greedy
        next_obs, reward, done, _ = env.step(action)
        total_reward += reward
        obs = next_obs
        # time.sleep(0.5)
        # env.render()
        if done:
            break
    return total_reward


# create the cliff-walking environment with gym
env = gym.make("CliffWalking-v0")  # 0 up, 1 right, 2 down, 3 left

# create an agent instance with the hyperparameters
agent = QLearningAgent(
    obs_n=env.observation_space.n,
    act_n=env.action_space.n,
    learning_rate=0.1,
    gamma=0.9,
    e_greed=0.1)

# train for 500 episodes and print each episode's score
for episode in range(500):
    ep_reward, ep_steps = run_episode(env, agent, False)
    print('Episode %s: steps = %s , reward = %.1f' % (episode, ep_steps, ep_reward))

# training finished; evaluate the algorithm
test_reward = test_episode(env, agent)
print('test reward = %.1f' % (test_reward))
```
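One caveat: the script above uses the classic gym API (gym <= 0.25). With gym >= 0.26 or with gymnasium, `reset()` returns `(obs, info)` and `step()` returns a 5-tuple. A minimal adaptation sketch, assuming gymnasium is installed:

```python
import gymnasium as gym  # assumption: gymnasium (or gym >= 0.26) is installed

env = gym.make("CliffWalking-v0")
obs, info = env.reset()                                      # reset() now returns (obs, info)
next_obs, reward, terminated, truncated, info = env.step(0)  # step() now returns a 5-tuple
done = terminated or truncated                               # recover the old single done flag
print(next_obs, reward, done)
```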
Results
- Part of the Q-table is omitted here
- See this link for how to read it
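Since the link is not reproduced here, this is a minimal sketch for loading the saved table and reading off the greedy policy, assuming `q_table.npy` was written by `agent.save()`:

```python
import numpy as np

Q = np.load('./q_table.npy')              # shape (48, 4): 48 states x 4 actions
greedy = Q.argmax(axis=1).reshape(4, 12)  # greedy action for each cell of the 4x12 grid
print(greedy)                             # 0 up, 1 right, 2 down, 3 left
```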