- 之前由于在本机mac中训练验证码识别,导致mac后来的使用有点卡,另外囊中羞涩,所以考虑用google的colab进行训练
- 关于google的colab的使用,一般包含两个方面:
- 关于使用,我们只要把数据上传到云磁盘,然后可以在colab页面上新建一个笔记本(jupyter风格),就可以开始操作代码了。
- 值得注意的是,如果需要使用GPU,则在笔记本的菜单栏里选择 - 代码执行程序 - 更改运行时的类型- 选择GPU即可。
- 数据存储的路径怎么看?一般是存储在/content/gdrive/My Drive/ 路径下; 下面是我读取文件夹下的图片数据
from google.colab import drive
def get_train(path):
    """Load every image under *path* and return them as numpy arrays.

    Args:
        path: Directory containing the image files. A trailing slash is no
            longer required (``os.path.join`` handles either form, whereas
            the original ``path + ele`` concatenation broke without one).

    Returns:
        list: one ``np.ndarray`` per image, in ``os.listdir`` order.
    """
    images = []
    for cnt, name in enumerate(os.listdir(path), start=1):
        # Progress heartbeat: Colab gives no feedback on long Drive reads.
        if cnt % 100 == 0:
            print(cnt)
        # Context manager closes the underlying file; the original leaked one
        # descriptor per image, which can exhaust the fd limit on large sets.
        with Image.open(os.path.join(path, name)) as img:
            images.append(np.array(img))
    return images
# Mount Google Drive so files under "My Drive" become visible to this runtime.
drive.mount('/content/gdrive/')
# Debugging aid: list the mounted image directory to verify the path exists.
# for ele in os.listdir("/content/gdrive/My Drive/data/Images"):
# print(ele)
# Load all training images from the Drive folder into memory as arrays.
trainx=get_train("/content/gdrive/My Drive/data/Images/")
# Report how many images were loaded.
print(len(trainx))
- 数据集是对酒店的中文评论数据,只有0,1两类; 实际上有些评论是比较中性的,对于这种情况,模型可能会存在一定的误杀
- 模型采用双向的LSTM,当训练集上(train)的准确率达到0.9之后,我们就开始在验证集(val)上计算准确率,如果在验证集上的准确率达到0.82,那么我就终止训练了,并将模型进行保存。
- 先看下结果
epoch=7的时候,训练集上的准确率达到96.2%, 验证集上的准确率达到82.5%
看了几条,红框部分是误杀的;实际上像第一条,也不能定义为好评,数据集的质量有待提升;
- 下面我就丢代码了,因为写的时间比较短,所以代码的结构不是很好,将就看看吧;
import torchtext
import torch
from torchtext.vocab import Vectors
from torchtext import data
import jieba
import torch.nn as nn
from torchtext.data import BucketIterator
import torch.optim as optim
import torch.nn.functional as F
from google.colab import drive
import os

# Make Google Drive visible to this Colab runtime.
drive.mount('/content/gdrive/')


def cut_words(text):
    """Tokenizer for the TEXT field: jieba Chinese word segmentation."""
    return list(jieba.cut(text))


# Column processors. The review column is tokenized text; the label column is
# already a numeric class id, so it does not need a vocabulary.
TEXT = torchtext.data.Field(sequential=True, tokenize=cut_words, batch_first=True)
LABEL = torchtext.data.Field(sequential=False, use_vocab=False)
tv_datafields = [("label", LABEL), ("review", TEXT)]
test_datafields = [("label", None), ("review", TEXT)]

# Sanity check: list what is mounted under the Drive root.
for ele in os.listdir("/content/gdrive/"):
    print(ele)

# Train / validation CSVs live under My Drive/data/.
train, val = torchtext.data.TabularDataset.splits(
    path="/content/gdrive/My Drive/data/",
    train="train.csv",
    validation="val.csv",
    format="csv",
    skip_header=True,
    fields=tv_datafields,
)
train_len = len(train)

# vectors = Vectors(name='/content/gdrive/My Drive/data/glove.6B.300d.txt')

# Build the token vocabulary from the training split only.
TEXT.build_vocab(train)
LABEL.build_vocab()
vocab_len = len(TEXT.vocab)

# Batch iterators that bucket reviews of similar length together.
train_iter, val_iter = BucketIterator.splits(
    (train, val),
    batch_size=64,
    sort_key=lambda x: len(x.review),
    sort_within_batch=False,
    repeat=False,
)


class rnn(nn.Module):
    """Two-layer bidirectional LSTM text classifier with a sigmoid output."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
        super(rnn, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # Pretrained vectors were not used: the GloVe file at hand was
        # English while the reviews are Chinese.
        # self.embedding.weight.data.copy_(TEXT.vocab.vectors)
        # self.embedding.weight.requires_grad=True
        self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=2,
                           batch_first=True, bidirectional=True)
        # self.fc = nn.Linear(hidden_dim, output_dim)
        self.fc = nn.Sequential(
            nn.Linear(hidden_dim * 2, output_dim),
            nn.Sigmoid(),
        )

    def forward(self, text):
        emb = self.embedding(text)
        output, (hidden, cell) = self.rnn(emb)
        # Concatenate the final forward and backward hidden states of the
        # top LSTM layer before the classifier head.
        final = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        return self.fc(final)
# ---- Training loop ------------------------------------------------------
# Trains the bidirectional LSTM with BCE loss. Once the epoch-average train
# accuracy exceeds 0.95, the model is evaluated on the validation set; if
# validation accuracy exceeds 0.82, the predictions are printed for manual
# inspection, the model is saved to final.pkl, and training stops.
model = rnn(vocab_size=vocab_len, embedding_dim=300, hidden_dim=128, output_dim=1)
model.cuda()
lr = 0.001
optimizer = optim.Adam(model.parameters(), lr=lr)
criterion = nn.BCELoss()

# Renamed the epoch variable from `i`: the original reused `i` for the inner
# per-sample loop below, shadowing the epoch counter.
for epoch in range(20):
    model.train()
    total_loss = 0
    total_accurate = 0
    for batch_idx, batch in enumerate(train_iter):
        data, target = batch.review.cuda(), batch.label.cuda()
        optimizer.zero_grad()
        pred = model(data).squeeze()
        loss = criterion(pred, target.float())
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        accurate = ((pred > 0.5) == target).sum().item()
        total_accurate += accurate
        if batch_idx % 10 == 0:
            # Divide by the real batch size: the last batch is usually < 64,
            # so the original hard-coded /64 under-reported its accuracy.
            print("EPOCH:{epoch},Batch:{batch},Loss:{loss},Acc:{acc}".format(
                loss=loss, epoch=epoch, acc=accurate / len(target), batch=batch_idx))

    avg_acc = total_accurate / train_len
    # total_loss is a sum of per-batch MEAN losses, so average over batches;
    # the original divided by the sample count, mislabeling the metric.
    avg_loss = total_loss / len(train_iter)
    print("\n" * 3)
    print("avg_acc:{avg_acc},avg_loss:{avg_loss}".format(avg_acc=avg_acc, avg_loss=avg_loss))

    # Only bother with validation once training accuracy is high enough.
    if avg_acc > 0.95:
        model.eval()
        val_acc = 0.0
        with torch.no_grad():  # no autograd graphs needed at eval time
            for batch_idx, batch in enumerate(val_iter):
                data, target = batch.review.cuda(), batch.label.cuda()
                pred = model(data).squeeze()
                val_acc += ((pred > 0.5) == target).sum().item()
        print("val_avg_acc:{val_avg_acc}".format(val_avg_acc=val_acc / len(val)))

        # Good enough: dump validation predictions, save the model, stop.
        if val_acc / len(val) > 0.82:
            with torch.no_grad():
                for batch_idx, batch in enumerate(val_iter):
                    data, target = batch.review.cuda(), batch.label.cuda()
                    pred = model(data).squeeze()
                    for j in range(len(data)):  # `j`, not `i`: no shadowing
                        source = [TEXT.vocab.itos[ele] for ele in data[j]
                                  if TEXT.vocab.itos[ele] != '<pad>' and TEXT.vocab.itos[ele] != '<unk>']
                        print("真实标签:{real_tag},预测标签:{pred_tag},真实评论:{real_review}".format(
                            real_tag=target[j], real_review="".join(source),
                            pred_tag=int((pred > 0.5)[j])))
            torch.save(model, "final.pkl")
            break