I. The Q-learning algorithm
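Q-learning learns a table of state-action values Q(s, a) and improves it with the one-step update rule

Q(S, A) \leftarrow Q(S, A) + \alpha \big[ R + \gamma \max_{a'} Q(S', a') - Q(S, A) \big]

where \alpha is the learning rate (ALPHA / alpha in the code below) and \gamma is the discount factor (GAMMA / gamma). Both programs in this section implement exactly this update.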
1. The first code example from 莫煩 (Morvan)'s Q-learning tutorial has been worked through; it is shown below:
import numpy as np
import pandas as pd
import time
np.random.seed(2)  # reproducible: a fixed seed is required (seed() with no argument gives different results each run)
N_STATES = 6  # the length of the 1-dimensional world
ACTIONS = ['left', 'right'] # available actions
EPSILON = 0.9  # greedy policy: probability of choosing the best-known action
ALPHA = 0.1 # learning rate
GAMMA = 0.9 # discount factor
MAX_EPISODES = 10 # maximum episodes
FRESH_TIME = 0.01  # refresh interval for one move (seconds)
def build_q_table(n_states, actions):
    # build the Q-table: one row per state, one column per action
    table = pd.DataFrame(
        np.zeros((n_states, len(actions))),  # q_table initial values
        columns=actions,  # actions' names
    )
    # print(table)  # show table
    return table
# choose an action A in the current state S (epsilon-greedy)
def choose_action(state, q_table):
    state_actions = q_table.iloc[state, :]
    if (np.random.uniform() > EPSILON) or ((state_actions == 0).all()):  # act non-greedily, or all state-action values are still zero
        action_name = np.random.choice(ACTIONS)
    else:  # act greedily
        action_name = state_actions.idxmax()  # use idxmax instead of argmax: argmax means something different in newer pandas versions
    return action_name
def get_env_feedback(S, A):
    # reward R and next state S_ obtained from taking action A in state S
    if A == 'right':  # move right
        if S == N_STATES - 2:  # next step reaches the treasure: terminate
            S_ = 'terminal'
            R = 1
        else:
            S_ = S + 1
            R = 0
    else:  # move left
        R = 0
        if S == 0:
            S_ = S  # hit the wall
        else:
            S_ = S - 1
    return S_, R
def update_env(S, episode, step_counter):
    # this is how the environment is updated and rendered
    env_list = ['-'] * (N_STATES - 1) + ['T']  # '-----T': our environment
    if S == 'terminal':
        interaction = 'Episode %s: total_steps = %s' % (episode + 1, step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(2)
        print('\r                                ', end='')  # overwrite the line with spaces to clear it
    else:
        env_list[S] = 'o'
        interaction = ''.join(env_list)
        print('\r{}'.format(interaction), end='')
        time.sleep(FRESH_TIME)
def rl():
    # main Q-learning loop
    q_table = build_q_table(N_STATES, ACTIONS)
    for episode in range(MAX_EPISODES):
        step_counter = 0
        S = 0
        is_terminated = False
        update_env(S, episode, step_counter)
        while not is_terminated:
            A = choose_action(S, q_table)
            S_, R = get_env_feedback(S, A)  # take action & get next state and reward
            q_predict = q_table.loc[S, A]
            if S_ != 'terminal':
                q_target = R + GAMMA * q_table.iloc[S_, :].max()  # next state is not terminal
            else:
                q_target = R  # next state is terminal
                is_terminated = True  # terminate this episode
            q_table.loc[S, A] += ALPHA * (q_target - q_predict)  # update
            S = S_  # move to next state
            update_env(S, episode, step_counter + 1)
            step_counter += 1
    return q_table
if __name__ == "__main__":
    q_table = rl()
    print('\r\nQ-table:\n')
    print(q_table)
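As a worked example of a single update: the first time the agent moves right from state 4 (the cell next to the treasure 'T'), S_ is 'terminal', so q_target = R = 1 while q_predict = 0, giving

Q(4, \text{right}) \leftarrow 0 + 0.1 \times (1 - 0) = 0.1

On every later visit the update again closes part of the gap between the current value and the target, so Q(4, 'right') gradually approaches 1.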
For an explanation of
if __name__ == "__main__":
see this article.
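In short: code under this guard runs only when the file is executed directly, not when it is imported as a module. A minimal sketch (the file name demo.py is hypothetical, for illustration only):

# demo.py (hypothetical file name)
def greet():
    return "hello"

if __name__ == "__main__":
    # executed by `python demo.py`, skipped by `import demo`
    print(greet())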
2. Solving the taxi problem with Q-learning
The code is as follows:
import random
import gym
env = gym.make('Taxi-v3')
alpha = 0.4
gamma = 0.999
epsilon = 0.017
# initialize the Q-table: a dictionary mapping each (s, a) pair to a value
q = {}
for s in range(env.observation_space.n):
    for a in range(env.action_space.n):
        q[(s, a)] = 0
# update the Q-table
def update_q_table(prev_state, action, reward, nextstate, alpha, gamma):
    qa = max(q[(nextstate, a)] for a in range(env.action_space.n))  # best Q-value in the next state
    q[(prev_state, action)] += alpha * (reward + gamma * qa - q[(prev_state, action)])
# epsilon-greedy policy (note: here epsilon is the probability of exploring,
# the opposite convention from EPSILON in the first script, which is the
# probability of acting greedily)
def epsilon_greedy_policy(state, epsilon):
    if random.uniform(0, 1) < epsilon:
        return env.action_space.sample()  # explore: random action
    else:
        return max(range(env.action_space.n), key=lambda x: q[(state, x)])  # exploit: best-known action
for i in range(8000):
    r = 0
    prev_state = env.reset()
    while True:
        env.render()  # render the environment (optional; slows training)
        # choose an action with the epsilon-greedy policy
        action = epsilon_greedy_policy(prev_state, epsilon)
        # take the action: transition to the next state and receive the reward
        nextstate, reward, done, _ = env.step(action)
        # update the Q-value
        update_q_table(prev_state, action, reward, nextstate, alpha, gamma)
        # the next state becomes the previous state
        prev_state = nextstate
        # accumulate the total reward for this episode
        r += reward
        # if the terminal state is reached, end the episode
        if done:
            break
    print("Total reward:", r)
env.close()
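One caveat: this script uses the pre-0.26 gym API, where env.reset() returns only the state and env.step() returns a 4-tuple. On gym >= 0.26 (or its successor gymnasium), the same loop needs small changes; a minimal sketch, assuming the newer API is installed:

import gymnasium as gym  # assumption: gymnasium (or gym >= 0.26) is available

env = gym.make('Taxi-v3', render_mode='ansi')  # rendering is now configured at creation
prev_state, info = env.reset()                 # reset() returns (observation, info)
while True:
    action = epsilon_greedy_policy(prev_state, epsilon)
    # step() returns five values: the old 'done' flag is split into terminated/truncated
    nextstate, reward, terminated, truncated, info = env.step(action)
    update_q_table(prev_state, action, reward, nextstate, alpha, gamma)
    prev_state = nextstate
    if terminated or truncated:
        break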