A Detailed Walk-through of BiLSTM-CRF (Bidirectional LSTM + Conditional Random Field) for Named Entity Recognition

First, a tip of the hat to the original author — the write-up is excellent.

Reading the PyTorch implementation of BiLSTM+CRF side by side with that explanation should clear up most of the confusion for you as well.

 

Here comes the obligatory fancy divider.


The referenced post explains BiLSTM+CRF very thoroughly and cleared up many points that had confused me. I won't repeat the detailed derivation here; please see the references at the end.
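
Before the code, a quick recap of the objective it implements (my own summary of the standard BiLSTM-CRF formulation; see the references for the full derivation). The BiLSTM produces emission scores P, where P_{i, y_i} is the score of tag y_i at position i, and the CRF learns a transition matrix A. A tag sequence y for sentence X is scored as

    score(X, y) = \sum_i \left( A_{y_{i-1}, y_i} + P_{i, y_i} \right)

and training minimizes the negative log-likelihood

    loss = \log \sum_{y'} \exp\big(score(X, y')\big) - score(X, y)

where the sum over all possible tag sequences y' is computed with the forward algorithm (_forward_alg below), the gold-path score with _score_sentence, and prediction uses Viterbi decoding (_viterbi_decode).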

 

PyTorch example

# -*- coding: utf-8 -*-

import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim

torch.manual_seed(1)

def argmax(vec):
    # return the argmax as a python int
    _, idx = torch.max(vec, 1)
    # torch.max(vec, 1) returns (max values, indices) along dim=1;
    # we only need the position of the maximum within the row vector.
    return idx.item()


def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)


# Compute log-sum-exp in a numerically stable way for the forward algorithm.
def log_sum_exp(vec):
    max_score = vec[0, argmax(vec)]
    max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + \
        torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
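
# --- Added check (not part of the original tutorial) -------------------------
# log_sum_exp should agree with torch.logsumexp, even for inputs large enough
# that a naive exp() would overflow; the values below are arbitrary.
_check = torch.tensor([[1000., 1001., 999.]])
assert torch.allclose(log_sum_exp(_check), torch.logsumexp(_check, dim=1)[0])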

###############################################################################
class BiLSTM_CRF(nn.Module):

    def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
        super(BiLSTM_CRF, self).__init__()
        self.embedding_dim = embedding_dim  # 5 in this example
        self.hidden_dim = hidden_dim        # 4 in this example
        self.vocab_size = vocab_size
        self.tag_to_ix = tag_to_ix
        self.tagset_size = len(tag_to_ix)   # 5
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)  # (25, 5)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=1, bidirectional=True)  # (5, 2), one layer, bidirectional

        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)  # (4, 5)

        # Matrix of transition parameters.  Entry i,j is the score of
        # transitioning *to* i *from* j.
        # The transition matrix is learned during training; here it is
        # randomly initialized.
        self.transitions = nn.Parameter(
            torch.randn(self.tagset_size, self.tagset_size))

        # These two statements enforce the constraint that we never transition
        # to the start tag and never transition from the stop tag,
        # by pinning those entries to -10000:
        self.transitions.data[tag_to_ix[START_TAG], :] = -10000  # the entire START_TAG row (index 3)
        self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000   # the entire STOP_TAG column (index 4)

        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Two tensors of shape (2, 1, hidden_dim // 2) = (2, 1, 2), drawn from a
        # standard normal distribution: the initial (h_0, c_0) for the BiLSTM.
        return (torch.randn(2, 1, self.hidden_dim // 2),
                torch.randn(2, 1, self.hidden_dim // 2))

    def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        # (the log-sum-exp of the scores of all possible tag sequences).
        # torch.full returns a tensor of the given size filled with fill_value.
        init_alphas = torch.full((1, self.tagset_size), -10000.)
        # START_TAG has all of the score:
        # tensor([[-10000., -10000., -10000.,      0., -10000.]])
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.

        # Wrap in a variable so that we get automatic backprop
        forward_var = init_alphas

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward tensors at this timestep
            for next_tag in range(self.tagset_size):
                # Broadcast the emission score: it is the same regardless of
                # the previous tag. Emission scores come from the BiLSTM.
                emit_score = feat[next_tag].view(1, -1).expand(1, self.tagset_size)

                # The ith entry of trans_score is the score of transitioning
                # to next_tag from i, shaped (1, tagset_size).
                trans_score = self.transitions[next_tag].view(1, -1)

                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp.
                next_tag_var = forward_var + trans_score + emit_score

                # The forward variable for this tag is log-sum-exp of all the scores.
                alphas_t.append(log_sum_exp(next_tag_var).view(1))

            forward_var = torch.cat(alphas_t).view(1, -1)
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]

        alpha = log_sum_exp(terminal_var)
        
        return alpha
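
    # --- Added sketch (not part of the original tutorial) --------------------
    # The inner loop above can be vectorized with torch.logsumexp; this variant
    # computes the same partition function and is included only for reference.
    def _forward_alg_vectorized(self, feats):
        init_alphas = torch.full((1, self.tagset_size), -10000.)
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
        forward_var = init_alphas
        for feat in feats:
            # scores[i, j] = forward_var[j] + transitions[i, j] + feat[i]
            scores = forward_var + self.transitions + feat.view(-1, 1)
            forward_var = torch.logsumexp(scores, dim=1).view(1, -1)
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        return torch.logsumexp(terminal_var, dim=1)[0]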

    def _get_lstm_features(self, sentence):
        # Run the BiLSTM over the embedded sentence and project the hidden
        # states into tag space; the result is the emission score matrix
        # of shape (len(sentence), tagset_size).
        self.hidden = self.init_hidden()
        embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
        lstm_feats = self.hidden2tag(lstm_out)
        return lstm_feats

    def _score_sentence(self, feats, tags):
        # Gives the score of a provided tag sequence:
        # the sum of transition and emission scores along the gold path.
        score = torch.zeros(1)
        # Prepend START_TAG so the first transition is START -> tags[0].
        tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])
        for i, feat in enumerate(feats):
            # transitions[to, from] plus the emission score of the gold tag
            score = score + \
                self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
        score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
        
        return score

    def _viterbi_decode(self, feats):
        """
        feats: LSTM 隱層到標籤轉變的向量。
        該函數使用於解碼的,也就是說當訓練時,是不需要使用的。
        """
        backpointers = []

        # Initialize the viterbi variables in log space
        init_vvars = torch.full((1, self.tagset_size), -10000.)
        init_vvars[0][self.tag_to_ix[START_TAG]] = 0  # the start tag gets score 0
        # tensor([[-10000., -10000., -10000.,      0., -10000.]])

        # forward_var at step i holds the viterbi variables for step i-1
        forward_var = init_vvars
        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below).
                next_tag_var = forward_var + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
            # Now add in the emission scores, and assign forward_var to the set
            # of viterbi variables we just computed
            forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0][best_tag_id]

        # Follow the backpointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        
        # Pop off the start tag (we don't want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag_to_ix[START_TAG]  # sanity check
        best_path.reverse()
        return path_score, best_path

    def neg_log_likelihood(self, sentence, tags):
        # 1. BiLSTM emission scores
        feats = self._get_lstm_features(sentence)
        # 2. Forward algorithm: log partition function over all tag sequences
        forward_score = self._forward_alg(feats)
        # 3. Score of the gold tag sequence
        gold_score = self._score_sentence(feats, tags)
        # The loss is non-negative, since the gold path is one of the paths
        # summed in forward_score.
        return forward_score - gold_score

    def forward(self, sentence):  # don't confuse this with _forward_alg above
        # 1. Get the emission scores from the BiLSTM
        lstm_feats = self._get_lstm_features(sentence)

        # 2. Find the best path, given the features (Viterbi decoding).
        score, tag_seq = self._viterbi_decode(lstm_feats)
        return score, tag_seq
        
###############################################################################        
START_TAG = "<START>"
STOP_TAG = "<STOP>"
EMBEDDING_DIM = 5
HIDDEN_DIM = 4

# Make up some training data
training_data = [("which genre of album is harder ... ..faster ?".split(), "O O O O O I I I O".split()),
                 ("the wall street journal reported today that apple corporation made money".split(),"B I I I O O O B I O O".split()), 
                 ("georgia tech is a university in georgia".split(), "B I O O O O B".split())]

word_to_ix = {}
for sentence, tags in training_data:
    for word in sentence:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)  # assign each new word the next available index

tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}

model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)  # (25, tag_to_ix, 5, 4)
# Optimize with stochastic gradient descent
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)

# Check predictions before training
# torch.no_grad() builds a context in which gradients are not tracked
with torch.no_grad():
    for i in range(len(training_data)):
        # training_data[i][0] is the token list for sentence i
        precheck_sent = prepare_sequence(training_data[i][0], word_to_ix)
        # the corresponding tag indices, as a long tensor
        precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[i][1]], dtype=torch.long)
        print("before_train:", model(precheck_sent))
#------------------------------------------------------------------------------

# prepare_sequence (defined above) converts a token list into an index tensor
for epoch in range(300):  # again, normally you would NOT do 300 epochs, it is toy data
    for sentence, tags in training_data:
        # Step 1. Remember that PyTorch accumulates gradients.
        # We need to clear them out before each instance.
        model.zero_grad()

        # Step 2. Get our inputs ready for the network, that is,
        # turn them into tensors of word indices (and likewise for the tags).
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = torch.tensor([tag_to_ix[t] for t in tags], dtype=torch.long)

        # Step 3. Run our forward pass: the loss is the negative log-likelihood.
        loss = model.neg_log_likelihood(sentence_in, targets)

        # Step 4. Compute the gradients and update the parameters by
        # calling optimizer.step()
        loss.backward()
        optimizer.step()

#------------------------------------------------------------------------------
# Check predictions after training
with torch.no_grad():
    for i in range(len(training_data)):
        precheck_sent = prepare_sequence(training_data[i][0], word_to_ix)
        # This calls forward(), which returns (score, predicted tag sequence).
        print("after_train:", model(precheck_sent))
# We got it!
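
# --- Added example (not in the original tutorial) ----------------------------
# The decoder returns tag indices; a small reverse map turns them back into
# readable tag strings. `ix_to_tag` is a helper name introduced here.
ix_to_tag = {ix: tag for tag, ix in tag_to_ix.items()}
with torch.no_grad():
    score, tag_seq = model(prepare_sequence(training_data[0][0], word_to_ix))
    print([ix_to_tag[ix] for ix in tag_seq])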

 

 

References

The original author's blog post introducing BiLSTM+CRF (it is in English, but short and easy to follow)

The PyTorch tutorial on implementing BiLSTM_CRF


Another fancy divider...

Go read the original author's posts!!!
