PyTorch BERT for Chinese Text Classification (Part 2)

This series is an entry-level guide to using BERT, covering both English and Chinese text classification.
This part uses BERT for sentiment analysis on Chinese text, again with BertForSequenceClassification.
The dataset has three sentiment levels: [-1, 0, 1].
The workflow is the same as in Part 1. The main changes are: set the number of classes to 3 in the BERT config, map the dataset labels [-1, 0, 1] into the form [0, 1, 2], and use the Chinese pretrained model
bert-base-chinese
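
The snippets below are shown piecemeal, so here is a minimal sketch of the imports, paths, and device they assume. The library split (tokenizer/model classes following the pytorch_transformers API, BertAdam from pytorch_pretrained_bert) and the placeholder paths are my assumptions, not fixed by the original code:

# Minimal setup sketch; library choices and placeholder paths are assumptions
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from tqdm import trange
from keras.preprocessing.sequence import pad_sequences          # assumed source of pad_sequences
from pytorch_transformers import BertTokenizer, BertConfig, BertForSequenceClassification
from pytorch_pretrained_bert.optimization import BertAdam       # the older package still provides BertAdam

data_path = "./data"                       # folder holding train.tsv / dev.tsv
bert_pre_tokenizer = "bert-base-chinese"   # vocab for the Chinese tokenizer
bert_config = "bert-base-chinese"          # config of the pretrained Chinese model
bert_pre_model = "bert-base-chinese"       # weights of the pretrained Chinese model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")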

The dataset has two columns: ['label', 'txt'].
First, read in the data:

df = pd.read_csv(os.path.join(data_path,"train.tsv"), delimiter='\t')
df_dev=pd.read_csv(os.path.join(data_path,"dev.tsv"), delimiter='\t')
print("train:",df.head())
print("dev:",df_dev.head())

Extract the sentences and preprocess them

#Extract the sentences and add the BERT special tokens
sentencses=['[CLS] ' + sent + ' [SEP]' for sent in df.txt.values]
labels=df.label.values
#Remap the labels: 0 stays neutral, 1 means positive, 2 means negative (originally -1)
labels=list(map(lambda x:0 if x == 0 else 1 if x == 1 else 2, labels))
print("train label:",labels[100:110])
print("First sentence:",sentencses[0])
tokenizer=BertTokenizer.from_pretrained(bert_pre_tokenizer,do_lower_case=True)
tokenized_sents=[tokenizer.tokenize(sent) for sent in sentencses]
print("First sentence after tokenization:",tokenized_sents[0])

Define BERT's three inputs:

  1. The processed sentences (token ids)
  2. token_type_ids: each example here is a single sentence, so the default [0,0,0,0,…] is fine and does not need to be passed
  3. The attention mask
MAX_LEN=80
#Training set
#Convert the tokenized words to vocabulary ids  word --> idx
input_ids=[tokenizer.convert_tokens_to_ids(sent) for sent in tokenized_sents]
print("First sentence as ids:",input_ids[0])
#Padding / truncation:
#sequences longer than MAX_LEN (80) are truncated, shorter ones are padded with 0
input_ids=pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
print("First sentence after padding:",input_ids[0])
#Build the attention masks: 1.0 for real tokens, 0.0 for padding (id 0)
attention_masks = []
for seq in input_ids:
  seq_mask = [float(i>0) for i in seq]
  attention_masks.append(seq_mask)
print("First attention mask:",attention_masks[0])

#Validation set
#Build the validation set in the same way
dev_sentencses=['[CLS] ' + sent + ' [SEP]' for sent in df_dev.txt.values]
dev_labels=df_dev.label.values
print("dev_label:",dev_labels[100:110])
dev_labels=list(map(lambda x:0 if x == 0 else 1 if x == 1 else 2, dev_labels))
dev_tokenized_sents=[tokenizer.tokenize(sent) for sent in dev_sentencses]
dev_input_ids=[tokenizer.convert_tokens_to_ids(sent) for sent in dev_tokenized_sents]
dev_input_ids=pad_sequences(dev_input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
dev_attention_masks = []
for seq in dev_input_ids:
  dev_seq_mask = [float(i>0) for i in seq]
  dev_attention_masks.append(dev_seq_mask)

Build the DataLoaders for the training and validation sets

train_inputs = torch.tensor(input_ids)
validation_inputs = torch.tensor(dev_input_ids)
train_labels = torch.tensor(labels)
validation_labels = torch.tensor(dev_labels)
train_masks = torch.tensor(attention_masks)
validation_masks = torch.tensor(dev_attention_masks)

batch_size = 32
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
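
As an optional sanity check, you can pull a single batch from the train DataLoader and confirm the tensor shapes match batch_size and MAX_LEN:

#Peek at one batch: input ids, attention masks, labels
b_input_ids, b_input_mask, b_labels = next(iter(train_dataloader))
print(b_input_ids.shape, b_input_mask.shape, b_labels.shape)
# expected: torch.Size([32, 80]) torch.Size([32, 80]) torch.Size([32])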

Load the pretrained model, in this case the Chinese BERT model

#Load the pretrained BERT model together with its config
modelConfig = BertConfig.from_pretrained(bert_config)
model = BertForSequenceClassification.from_pretrained(bert_pre_model, config=modelConfig)
print(model.cuda())
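
The intro mentions setting the number of classes to 3 in the BERT config. If you would rather not edit the config JSON by hand, the same thing can be done in code; a sketch, assuming the config object exposes a num_labels field (as it does in pytorch_transformers/transformers):

#Set the classification head to 3 classes without editing the JSON config
modelConfig = BertConfig.from_pretrained(bert_config)
modelConfig.num_labels = 3
model = BertForSequenceClassification.from_pretrained(bert_pre_model, config=modelConfig)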

Define the optimizer

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.0}
]

optimizer = BertAdam(optimizer_grouped_parameters,
                     lr=2e-5,
                     warmup=.1)
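
One caveat with BertAdam from pytorch_pretrained_bert, as far as I know: the warmup fraction only takes effect when t_total (the total number of optimization steps) is also passed; otherwise a constant learning rate is used. A sketch of how it could be supplied here:

#Optional: pass the total number of steps so the warmup fraction actually applies
num_train_steps = len(train_dataloader) * epochs   # epochs is set to 4 below
optimizer = BertAdam(optimizer_grouped_parameters,
                     lr=2e-5,
                     warmup=0.1,
                     t_total=num_train_steps)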

Training

def flat_accuracy(preds, labels):
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return np.sum(pred_flat == labels_flat) / len(labels_flat)
train_loss_set = []
epochs = 4
for _ in trange(epochs, desc="Epoch"):
    model.train()
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    for step, batch in enumerate(train_dataloader):
        batch = tuple(t.to(device) for t in batch)  # move the batch tensors to the GPU
        b_input_ids, b_input_mask, b_labels = batch
        optimizer.zero_grad()
        loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)[0]
        # print("loss:",loss)
        train_loss_set.append(loss.item())
        loss.backward()
        optimizer.step()
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
    print("Train loss: {}".format(tr_loss / nb_tr_steps))

    #Validation
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    for batch in validation_dataloader:
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        with torch.no_grad():
            logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)[0]
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)
        eval_accuracy += tmp_eval_accuracy
        nb_eval_steps += 1
    print("Validation Accuracy: {}".format(eval_accuracy / nb_eval_steps))

Try the model on a quick example

#A quick single-sentence test
st="真的好嗎?"
test_sent='[CLS] ' + st + ' [SEP]'
str_tokenized_sents = tokenizer.tokenize(test_sent)
print(str_tokenized_sents)
# Use the BERT tokenizer to convert the tokens to their index numbers in the BERT vocabulary
str_input_ids = [tokenizer.convert_tokens_to_ids(str_tokenized_sents)]
str_input_ids = pad_sequences(str_input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
print(str_input_ids)
str_mask = [[float(i > 0) for i in str_input_ids[0]]]
str_label=[0]

str_input_ids = torch.tensor(str_input_ids).cuda()
str_mask = torch.tensor(str_mask).cuda()
str_label = torch.tensor(str_label).cuda()
print("size:",str_input_ids.size(),str_mask.size(),str_label.size())
logits_str = model(str_input_ids, token_type_ids=None, attention_mask=str_mask)[0]
print(np.argmax(logits_str.detach().cpu().numpy(), axis=1))

The prediction for this sentence comes out neutral. Running the same steps on a more strongly worded example, the model predicts the negative class.
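
Because the labels were remapped earlier, it helps to translate the predicted index back into the original sentiment values; a small sketch using the inverse of that mapping:

#Inverse of the earlier remapping: 0 -> neutral (0), 1 -> positive (1), 2 -> negative (-1)
id2label = {0: "neutral (0)", 1: "positive (1)", 2: "negative (-1)"}
pred = int(np.argmax(logits_str.detach().cpu().numpy(), axis=1)[0])
print(id2label[pred])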
