統計學習方法課後習題【第十章】

目錄

HMM code

 exercise 10_1: 後向算法

exercise 10_2:單個狀態的概率計算

exercise 10_3:viterbi算法求最優路徑

HMM 的代碼實現 和 部分課後練習運行結果

HMM code

import numpy as np

class HMM:
    """Hidden Markov Model: forward/backward evaluation and Viterbi decoding
    (Li Hang, "Statistical Learning Methods", chapter 10).

    Conventions shared by all methods:
        Q  -- list of hidden states (length N)
        V  -- list of possible observation symbols
        A  -- N x N state-transition matrix, A[i][j] = P(state j at t+1 | state i at t)
        B  -- N x len(V) emission matrix, B[i][k] = P(symbol V[k] | state i)
        PI -- initial state distribution (length N)
        O  -- observed symbol sequence (length T), each value drawn from V
    """

    def __init__(self):
        # Forward variables alpha_t(i), shape (N, T)
        self.alphas = None
        # Backward variables beta_t(i), shape (N, T)
        self.betas = None
        # P(O | lambda) from the forward algorithm
        self.forward_p = None
        # P(O | lambda) from the backward algorithm
        self.backend_p = None
        # Viterbi best-path probabilities delta_t(i), shape (N, T)
        self.deltas = None
        # Viterbi back-pointers psi_t(i), shape (N, T)
        self.psis = None
        # Decoded most probable state path (0-based indices), shape (1, T)
        self.I = None

    def forward_probability(self, Q, V, A, B, PI, O):
        """Forward algorithm: fill self.alphas and set self.forward_p = P(O|lambda)."""
        # Number of hidden states
        N = len(Q)
        # Length of the observation sequence
        M = len(O)
        # Forward-probability table alpha, one column per time step
        alphas = np.zeros((N, M))
        T = M
        # Sweep t = 1 .. T, computing the forward variables column by column
        for t in range(T):
            index_of_O = V.index(O[t])
            for i in range(N):
                if t == 0:
                    # Initialization: alpha_1(i) = pi_i * b_i(o_1)
                    alphas[i][t] = PI[i] * B[i][index_of_O]
                    print("alpha1({}) = PI({})B{}(O1) = {}".format(i+1, i+1, i+1, alphas[i][t]))
                else:
                    # Recursion: alpha_t(i) = [sum_j alpha_{t-1}(j) * a_{ji}] * b_i(o_t)
                    alphas[i][t] = np.dot(alphas[:, t-1], [a[i] for a in A]) * B[i][index_of_O]
                    print("alpha{}({}) = sigma(alpha{}(j) * aj{}) * b{}(O{})] = {}".format(t+1, i+1, t, i+1, i+1, t+1, alphas[i][t]))
        self.alphas = alphas
        # Termination: P(O|lambda) = sum_i alpha_T(i).
        # Fix: the original passed a generator to np.sum, which is deprecated
        # in NumPy and only works via a builtin-sum fallback; sum the column.
        self.forward_p = float(np.sum(alphas[:, M-1]))


    def backend_probability(self, Q, V, A, B, PI, O):
        """Backward algorithm: fill self.betas and set self.backend_p = P(O|lambda)."""
        # Number of hidden states
        N = len(Q)
        # Length of the observation sequence
        M = len(O)
        # Number of time steps equals the observation length
        T = M
        # Backward-probability table beta, one column per time step
        betas = np.zeros((N, M))

        # Sweep t = T .. 1, computing the backward variables column by column
        for t in reversed(range(T)):
            for i in range(N):
                if t == T-1:
                    # Initialization: beta_T(i) = 1
                    betas[i][t] = 1
                    # Fix: the original hard-coded the label "beta8" here, which
                    # is wrong whenever T != 8 (e.g. exercise 10.1, T = 4).
                    print("beta{}({}) = {}".format(T, i+1, betas[i][t]))
                else:
                    index_of_O = V.index(O[t + 1])
                    # Recursion: beta_t(i) = sum_j a_{ij} * b_j(o_{t+1}) * beta_{t+1}(j)
                    AB = np.multiply(A[i], [b[index_of_O] for b in B])
                    betas[i][t] = np.dot(AB, betas[:, t+1])
                    print("beta{}({}) = ".format(t+1, i+1), end='')
                    print("sigma a{}j * bj(o{}) * beta{}(j) = ".format(i+1, t+1+1, t+1+1,), end='')
                    print("{}".format(betas[i][t]))
        self.betas = betas
        # Termination: P(O|lambda) = sum_i pi_i * b_i(o_1) * beta_1(i)
        self.backend_p = float(np.dot(np.multiply(PI, [b[V.index(O[0])] for b in B]), betas[:, 0]))

    def viterbi(self, Q, V, A, B, PI, O):
        """Viterbi algorithm: decode the most probable state path for O into self.I."""
        # Number of hidden states
        N = len(Q)
        # Length of the observation sequence
        M = len(O)
        # Number of time steps equals the observation length
        T = M
        # delta: max path probability ending in state i at time t;
        # psi: the predecessor state that attains that maximum
        deltas = np.zeros((N, M))
        psis = np.zeros((N, M))
        # Best state path (0-based state indices)
        I = np.zeros((1, M))
        for t in range(T):
            index_of_O = V.index(O[t])
            for i in range(N):
                if t == 0:
                    # Initialization: delta_1(i) = pi_i * b_i(o_1), psi_1(i) = 0
                    deltas[i][t] = PI[i] * B[i][index_of_O]
                    psis[i][t] = 0
                    print("delta{}({})=PI{}b{}(O{}) = {}".format(t+1, i+1, i+1, i+1, t+1, deltas[i][t]))
                    print("psi{}({}) = 0".format(t+1, i+1))
                else:
                    # Recursion: delta_t(i) = max_j[delta_{t-1}(j) * a_{ji}] * b_i(o_t);
                    # hoist the shared product instead of computing it twice.
                    prev = np.multiply(deltas[:, t-1], [a[i] for a in A])
                    deltas[i][t] = np.max(prev) * B[i][index_of_O]
                    psis[i][t] = np.argmax(prev)
                    print("delta{}({}) = max delta{}(j)aj{} b{}(O{}) = {}".format(t+1, i+1, t, i+1, i+1, t+1, deltas[i][t]))
                    print("psis{}({}) = argmax delta{}(j)aj{} = {}".format(t+1, i+1, t, i+1, psis[i][t]))
        # Terminal state of the best path
        I[0][T-1] = np.argmax(deltas[:, T-1])
        # Backtrack through the psi pointers to recover the full path
        for t in reversed(range(T-1)):
            last_I = I[0][t+1]
            I[0][t] = psis[int(last_I)][t+1]
        print("the best states sequence is: I* = {}".format(" -> ".join([str(int(i+1)) for i in I[0]])))

        self.deltas = deltas
        self.psis = psis
        self.I = I

def exercise_10_1():
    """Exercise 10.1: evaluate P(O|lambda) with both the forward and the
    backward algorithm on the box-and-ball model of the textbook."""
    states = [1, 2, 3]
    symbols = ['紅', '白']
    transition = [
        [0.5, 0.2, 0.3],
        [0.3, 0.5, 0.2],
        [0.2, 0.3, 0.5],
    ]
    emission = [
        [0.5, 0.5],
        [0.4, 0.6],
        [0.7, 0.3],
    ]
    initial = [0.2, 0.4, 0.4]
    observations = ['紅', '白', '紅', '白']
    model = HMM()
    model.forward_probability(states, symbols, transition, emission, initial, observations)
    print("forward algorithm of P(O|lambda): ", model.forward_p)
    model.backend_probability(states, symbols, transition, emission, initial, observations)
    print("backend algorithm of P(O|lambda): ", model.backend_p)

def exercise_10_2():
    """Exercise 10.2: compute P(i4 = q3 | O, lambda) from the forward and
    backward variables: gamma_t(i) = alpha_t(i) * beta_t(i) / P(O|lambda)."""
    states = [1, 2, 3]
    symbols = ['紅', '白']
    transition = [
        [0.5, 0.1, 0.4],
        [0.3, 0.5, 0.2],
        [0.2, 0.2, 0.6],
    ]
    emission = [
        [0.5, 0.5],
        [0.4, 0.6],
        [0.7, 0.3],
    ]
    initial = [0.2, 0.3, 0.5]
    observations = ['紅', '白', '紅', '紅', '白', '紅', '白', '白']
    model = HMM()
    model.forward_probability(states, symbols, transition, emission, initial, observations)
    model.backend_probability(states, symbols, transition, emission, initial, observations)
    # Unnormalized probability of being in state q3 at time t=4
    # (0-based indices [2][3]): alpha_4(3) * beta_4(3).
    joint = model.alphas[2][3] * model.betas[2][3]
    print("at time 4, the probility that the state of q3: ", joint)
    total = model.forward_p

    print("P(O|lambda): ", total)
    print("P(i4 = q3 | O, lambda) = {}".format(joint / total))

def exercise_10_3():
    """Exercise 10.3: decode the most probable state path with Viterbi."""
    states = [1, 2, 3]
    symbols = ['紅', '白']
    transition = [
        [0.5, 0.2, 0.3],
        [0.3, 0.5, 0.2],
        [0.2, 0.3, 0.5],
    ]
    emission = [
        [0.5, 0.5],
        [0.4, 0.6],
        [0.7, 0.3],
    ]
    initial = [0.2, 0.4, 0.4]
    observations = ['紅', '白', '紅', '白']
    model = HMM()
    model.viterbi(states, symbols, transition, emission, initial, observations)

if __name__ == '__main__':
    # Run each exercise under a banner naming it.
    for label, runner in (("exercise_10_1", exercise_10_1),
                          ("exercise_10_2", exercise_10_2),
                          ("exercise_10_3", exercise_10_3)):
        print("====================={}=========================".format(label))
        runner()




 exercise 10_1: 後向算法

                                                           

=====================exercise_10_1=========================
alpha1(1) = PI(1)B1(O1) = 0.1
alpha1(2) = PI(2)B2(O1) = 0.16000000000000003
alpha1(3) = PI(3)B3(O1) = 0.27999999999999997
alpha2(1) = sigma(alpha1(j) * aj1) * b1(O2)] = 0.077
alpha2(2) = sigma(alpha1(j) * aj2) * b2(O2)] = 0.1104
alpha2(3) = sigma(alpha1(j) * aj3) * b3(O2)] = 0.060599999999999994
alpha3(1) = sigma(alpha2(j) * aj1) * b1(O3)] = 0.04186999999999999
alpha3(2) = sigma(alpha2(j) * aj2) * b2(O3)] = 0.035512
alpha3(3) = sigma(alpha2(j) * aj3) * b3(O3)] = 0.052835999999999994
alpha4(1) = sigma(alpha3(j) * aj1) * b1(O4)] = 0.021077899999999997
alpha4(2) = sigma(alpha3(j) * aj2) * b2(O4)] = 0.02518848
alpha4(3) = sigma(alpha3(j) * aj3) * b3(O4)] = 0.013824419999999999
forward algorithm of P(O|lambda):  0.06009079999999999
beta8(1) = 1.0
beta8(2) = 1.0
beta8(3) = 1.0
beta3(1) = sigma a1j * bj(o4) * beta4(j) = 0.45999999999999996
beta3(2) = sigma a2j * bj(o4) * beta4(j) = 0.51
beta3(3) = sigma a3j * bj(o4) * beta4(j) = 0.43000000000000005
beta2(1) = sigma a1j * bj(o3) * beta3(j) = 0.24609999999999999
beta2(2) = sigma a2j * bj(o3) * beta3(j) = 0.2312
beta2(3) = sigma a3j * bj(o3) * beta3(j) = 0.2577
beta1(1) = sigma a1j * bj(o2) * beta2(j) = 0.11246199999999998
beta1(2) = sigma a2j * bj(o2) * beta2(j) = 0.12173699999999998
beta1(3) = sigma a3j * bj(o2) * beta2(j) = 0.10488099999999999
backend algorithm of P(O|lambda):  0.06009079999999999

exercise 10_2:單個狀態的概率計算

                                                           

=====================exercise_10_2=========================
alpha1(1) = PI(1)B1(O1) = 0.1
alpha1(2) = PI(2)B2(O1) = 0.12
alpha1(3) = PI(3)B3(O1) = 0.35
alpha2(1) = sigma(alpha1(j) * aj1) * b1(O2)] = 0.07799999999999999
alpha2(2) = sigma(alpha1(j) * aj2) * b2(O2)] = 0.084
alpha2(3) = sigma(alpha1(j) * aj3) * b3(O2)] = 0.08220000000000001
alpha3(1) = sigma(alpha2(j) * aj1) * b1(O3)] = 0.040319999999999995
alpha3(2) = sigma(alpha2(j) * aj2) * b2(O3)] = 0.026496000000000006
alpha3(3) = sigma(alpha2(j) * aj3) * b3(O3)] = 0.068124
alpha4(1) = sigma(alpha3(j) * aj1) * b1(O4)] = 0.020866799999999998
alpha4(2) = sigma(alpha3(j) * aj2) * b2(O4)] = 0.012361920000000002
alpha4(3) = sigma(alpha3(j) * aj3) * b3(O4)] = 0.043611119999999996
alpha5(1) = sigma(alpha4(j) * aj1) * b1(O5)] = 0.011432099999999999
alpha5(2) = sigma(alpha4(j) * aj2) * b2(O5)] = 0.0101939184
alpha5(3) = sigma(alpha4(j) * aj3) * b3(O5)] = 0.011095732799999999
alpha6(1) = sigma(alpha5(j) * aj1) * b1(O6)] = 0.00549668604
alpha6(2) = sigma(alpha5(j) * aj2) * b2(O6)] = 0.0033837263039999997
alpha6(3) = sigma(alpha5(j) * aj3) * b3(O6)] = 0.009288344351999999
alpha7(1) = sigma(alpha6(j) * aj1) * b1(O7)] = 0.0028105648908
alpha7(2) = sigma(alpha6(j) * aj2) * b2(O7)] = 0.0024595203758399995
alpha7(3) = sigma(alpha6(j) * aj3) * b3(O7)] = 0.0025345278864
alpha8(1) = sigma(alpha7(j) * aj1) * b1(O8)] = 0.0013250220677159998
alpha8(2) = sigma(alpha7(j) * aj2) * b2(O8)] = 0.0012106333525679998
alpha8(3) = sigma(alpha7(j) * aj3) * b3(O8)] = 0.0009410540289983999
beta8(1) = 1.0
beta8(2) = 1.0
beta8(3) = 1.0
beta7(1) = sigma a1j * bj(o8) * beta8(j) = 0.43
beta7(2) = sigma a2j * bj(o8) * beta8(j) = 0.51
beta7(3) = sigma a3j * bj(o8) * beta8(j) = 0.4
beta6(1) = sigma a1j * bj(o7) * beta7(j) = 0.1861
beta6(2) = sigma a2j * bj(o7) * beta7(j) = 0.2415
beta6(3) = sigma a3j * bj(o7) * beta7(j) = 0.1762
beta5(1) = sigma a1j * bj(o6) * beta6(j) = 0.10552099999999999
beta5(2) = sigma a2j * bj(o6) * beta6(j) = 0.100883
beta5(3) = sigma a3j * bj(o6) * beta6(j) = 0.111934
beta4(1) = sigma a1j * bj(o5) * beta5(j) = 0.04586530999999999
beta4(2) = sigma a2j * bj(o5) * beta5(j) = 0.052809089999999996
beta4(3) = sigma a3j * bj(o5) * beta5(j) = 0.04280618
beta3(1) = sigma a1j * bj(o4) * beta4(j) = 0.025564421499999997
beta3(2) = sigma a2j * bj(o4) * beta4(j) = 0.0234344797
beta3(3) = sigma a3j * bj(o4) * beta4(j) = 0.0267898538
beta2(1) = sigma a1j * bj(o3) * beta3(j) = 0.014829643626999999
beta2(2) = sigma a2j * bj(o3) * beta3(j) = 0.012272138697
beta2(3) = sigma a3j * bj(o3) * beta3(j) = 0.015682939122
beta1(1) = sigma a1j * bj(o2) * beta2(j) = 0.00632569192321
beta1(2) = sigma a2j * bj(o2) * beta2(j) = 0.00684706450047
beta1(3) = sigma a3j * bj(o2) * beta2(j) = 0.0057785500483
at time 4, the probility that the state of q3:  0.0018668254527215997
P(O|lambda):  0.0034767094492823996
P(i4 = q3 | O, lambda) = 0.5369518160647323

exercise 10_3:viterbi算法求最優路徑

                                        

=====================exercise_10_3=========================
delta1(1)=PI1b1(O1) = 0.1
psi1(1) = 0
delta1(2)=PI2b2(O1) = 0.16000000000000003
psi1(2) = 0
delta1(3)=PI3b3(O1) = 0.27999999999999997
psi1(3) = 0
delta2(1) = max delta1(j)aj1 b1(O2) = 0.027999999999999997
psis2(1) = argmax delta1(j)aj1 = 2.0
delta2(2) = max delta1(j)aj2 b2(O2) = 0.05039999999999999
psis2(2) = argmax delta1(j)aj2 = 2.0
delta2(3) = max delta1(j)aj3 b3(O2) = 0.041999999999999996
psis2(3) = argmax delta1(j)aj3 = 2.0
delta3(1) = max delta2(j)aj1 b1(O3) = 0.007559999999999999
psis3(1) = argmax delta2(j)aj1 = 1.0
delta3(2) = max delta2(j)aj2 b2(O3) = 0.010079999999999999
psis3(2) = argmax delta2(j)aj2 = 1.0
delta3(3) = max delta2(j)aj3 b3(O3) = 0.014699999999999998
psis3(3) = argmax delta2(j)aj3 = 2.0
delta4(1) = max delta3(j)aj1 b1(O4) = 0.0018899999999999998
psis4(1) = argmax delta3(j)aj1 = 0.0
delta4(2) = max delta3(j)aj2 b2(O4) = 0.0030239999999999993
psis4(2) = argmax delta3(j)aj2 = 1.0
delta4(3) = max delta3(j)aj3 b3(O4) = 0.0022049999999999995
psis4(3) = argmax delta3(j)aj3 = 2.0
the best states sequence is: I* = 3 -> 2 -> 2 -> 2

 

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章