Implementing batch training of an LSTM network in PyTorch

When you use batches in PyTorch you have to pay attention to the difference between training and prediction: you usually need to write a predict method in addition to forward. Below I show two pieces of code that illustrate the details of working with batches.
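
Before the two full listings, here is a minimal sketch of that pattern, written only as an illustration (the BatchTagger class and its argument names are mine, not from the original code, and it assumes an LSTM created with batch_first=True): forward consumes a whole batch of index tensors of shape (batch_size, seq_len), while predict wraps a single sentence into a batch of size 1 and then drops the batch dimension again.

import torch
import torch.nn as nn
import torch.nn.functional as F

class BatchTagger(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, tagset_size):
        super(BatchTagger, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, batch):
        # batch: (batch_size, seq_len) word indices
        lstm_out, _ = self.lstm(self.word_embeddings(batch))
        return F.log_softmax(self.hidden2tag(lstm_out), dim=-1)

    def predict(self, sentence):
        # sentence: (seq_len,) word indices; treat it as a batch of size 1,
        # then drop the batch dimension again
        return self.forward(sentence.unsqueeze(0)).squeeze(0)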

1. LSTM without batching

# Import the required packages
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(1)
# Data preparation stage
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)

# DET: determiner, NN: noun, V: verb
training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
word_to_ix = {}

for sent, tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
#{'The': 0, 'dog': 1, 'ate': 2, 'the': 3, 'apple': 4, 'Everybody': 5, 'read': 6, 'that': 7, 'book': 8}
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}

# Dimension of the word embeddings
EMBEDDING_DIM = 6

# Number of hidden units
HIDDEN_DIM = 6

class LSTMTagger(nn.Module):

    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim

        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)

        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)

        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
# This defines an LSTM tagger network.
# nn.Embedding(vocab_size, embedding_dim) is PyTorch's built-in word-embedding layer:
# the first argument is the number of words in the vocabulary, the second is the
# dimension of the word vectors.
# self.lstm is the LSTM layer; in nn.LSTM(arg1, arg2) the first argument is the input
# word-vector dimension and the second is the number of hidden units.
# self.hidden2tag is the linear output layer.

# The forward pass is simple: embed the words (represent them as vectors), run them
# through the LSTM layer and then the linear layer, and finally apply log_softmax.
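
# A quick shape check before building the model (my own illustrative lines, not part
# of the original post): without batching, this LSTM expects input of shape
# (seq_len, batch=1, embedding_dim) and returns (seq_len, 1, hidden_dim); the linear
# layer then maps each time step to tagset_size scores.
_demo_ids = prepare_sequence(training_data[0][0], word_to_ix)        # shape (5,)
_demo_emb = nn.Embedding(len(word_to_ix), EMBEDDING_DIM)(_demo_ids)  # shape (5, 6)
_demo_out, _ = nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM)(_demo_emb.view(len(_demo_ids), 1, -1))
print(_demo_out.shape)                                               # torch.Size([5, 1, 6])
print(nn.Linear(HIDDEN_DIM, len(tag_to_ix))(_demo_out.view(len(_demo_ids), -1)).shape)  # torch.Size([5, 3])
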
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# Training loop
for epoch in range(300):
    for sentence, tags in training_data:
        # Zero the accumulated gradients
        model.zero_grad()

        # Prepare the data
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)

        # Forward pass
        tag_scores = model(sentence_in)

        # Compute the loss
        loss = loss_function(tag_scores, targets)
        # Backward pass
        loss.backward()

        # Update the parameters
        optimizer.step()

# Inference on the first training sentence
with torch.no_grad():
    inputs = prepare_sequence(training_data[0][0], word_to_ix)
    print(inputs)
    tag_scores = model(inputs)
    print(torch.argmax(tag_scores, dim=1))

2. LSTM with batching

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(1)


# Data preparation stage
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)


# DET: determiner, NN: noun, V: verb
training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
word_to_ix = {}

for sent, tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
# {'The': 0, 'dog': 1, 'ate': 2, 'the': 3, 'apple': 4, 'Everybody': 5, 'read': 6, 'that': 7, 'book': 8}
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}

# Dimension of the word embeddings
EMBEDDING_DIM = 6

# Number of hidden units
HIDDEN_DIM = 6

# Batch size
batch_size = 2


class LSTMTagger(nn.Module):

    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, batch_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)

        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)

        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        # sentence holds batch_size concatenated index sequences of equal length,
        # i.e. shape (batch_size * seq_len,)
        embeds = self.word_embeddings(sentence)
        # reshape to (batch_size, seq_len, embedding_dim), since batch_first=True
        input_tensor = embeds.view(self.batch_size, len(sentence) // self.batch_size, -1)
        lstm_out, _ = self.lstm(input_tensor)
        tag_space = self.hidden2tag(lstm_out)
        # tag_space has shape (batch_size, seq_len, tagset_size), so normalize
        # over the last dimension (the tag dimension), not over the sequence
        tag_scores = F.log_softmax(tag_space, dim=-1)
        return tag_scores

    def predict(self, sentence):
        # At prediction time we tag a single sentence, i.e. a batch of size 1
        embeds = self.word_embeddings(sentence)
        # batch_first=True, so the input shape is (1, seq_len, embedding_dim)
        input_tensor = embeds.view(1, len(sentence), -1)
        lstm_out, _ = self.lstm(input_tensor)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
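
# A quick shape check for the batched path (again my own illustrative lines, not from
# the original post): two copies of the same 5-word sentence are concatenated into 10
# indices, embedded, reshaped to (batch_size, seq_len, embedding_dim) = (2, 5, 6) for
# the batch_first LSTM, mapped to tag scores of shape (2, 5, 3), and finally flattened
# to (10, 3) so that they line up with the 10 concatenated targets given to F.nll_loss.
_ids = prepare_sequence(training_data[0][0], word_to_ix)
_batch = torch.cat([_ids, _ids], dim=0)                               # shape (10,)
_emb = nn.Embedding(len(word_to_ix), EMBEDDING_DIM)(_batch)           # shape (10, 6)
_inp = _emb.view(batch_size, len(_batch) // batch_size, -1)           # shape (2, 5, 6)
_out, _ = nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True)(_inp)  # shape (2, 5, 6)
_scores = nn.Linear(HIDDEN_DIM, len(tag_to_ix))(_out)                 # shape (2, 5, 3)
print(_scores.view(-1, len(tag_to_ix)).shape)                         # torch.Size([10, 3])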



model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix), batch_size)
optimizer = optim.SGD(model.parameters(), lr=0.1)
# Training loop
for epoch in range(300):
    for sentence, tags in training_data:
        # Zero the accumulated gradients
        model.zero_grad()

        # Prepare the data: the same sentence is duplicated here simply to build
        # a batch of two equal-length sequences
        sentence_in = prepare_sequence(sentence, word_to_ix)
        sentence_in2 = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)
        targets2 = prepare_sequence(tags, tag_to_ix)

        # Forward pass on the concatenated batch
        tag_scores = model(torch.cat([sentence_in, sentence_in2], dim=0))

        # Compute the loss: flatten the scores to (batch_size * seq_len, tagset_size)
        # and concatenate the targets to match
        A = tag_scores.view(-1, 3)
        B = torch.cat([targets, targets2], dim=0)
        loss = F.nll_loss(A, B)
        # Backward pass
        loss.backward()

        # Update the parameters
        optimizer.step()

# Inference on the first training sentence, using predict instead of forward
with torch.no_grad():
    inputs = prepare_sequence(training_data[0][0], word_to_ix)
    print(inputs)
    tag_scores = model.predict(inputs)
    print(tag_scores.shape)
    print(torch.argmax(tag_scores, dim=1))  # the result should be [0, 1, 2, 0, 1]

 
