Common PyTorch APIs for NLP

Compiled from the official documentation. The goal is simply to know which common functions exist and to look up the details in the official docs when you actually need them. For the first three parts it is best to run the code and compare the printed results to remember what each function does.

Tensor Creation

Create tensors from Python lists or NumPy arrays, or build them directly: random integers, normal distributions, and so on.

import torch
import numpy as np


# ***** tensor creation *****
device = torch.device('cuda:0')
a = np.array([1, 2, 3])
t1 = torch.from_numpy(a)  # shares memory with the numpy array (no copy)
t2 = torch.as_tensor(a)  # also shares memory when dtype and device already match
t3 = torch.as_tensor(a, device=device)  # copies the data onto the GPU; comment out for a CPU-only run
a[0] = -1  # t1 and t2 see this change (shared memory); t3 is a separate copy and does not
t4 = torch.full_like(t1, 1)  # same shape and dtype as t1, filled with 1
t5 = torch.arange(1, 2.5, 0.5)
t6 = torch.linspace(start=-10, end=10, steps=5)  # 5 evenly spaced values from -10 to 10
t7 = torch.normal(mean=2, std=3, size=(2, 2, 3))  # with mean=0, std=1 this is equivalent to torch.randn(2, 2, 3)
t8 = t7.to(device)  # same shape and values as t7, but on a different device
t9 = torch.rand(1, requires_grad=True, device=device)  # random float tensor of shape [1] that tracks gradients
t10 = t9.item()  # extracts the Python number from a one-element tensor
for i in range(1, 11):
    print('-----t' + str(i) + '-----')
    eval('print(t' + str(i) + ')')
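A detail worth verifying yourself: torch.from_numpy and torch.as_tensor (on the same device and dtype) share memory with the source array, while torch.tensor always copies. A minimal sketch:

a = np.array([1, 2, 3])
t_shared = torch.from_numpy(a)  # shares memory with a
t_copied = torch.tensor(a)      # torch.tensor always copies the data
a[0] = -1
print(t_shared)  # tensor([-1,  2,  3]) -- reflects the change to a
print(t_copied)  # tensor([1, 2, 3]) -- unaffected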

Basic Math Operations

Addition, subtraction, division, Hadamard product, matrix multiplication, power, square root, exponential, logarithm, absolute value, sigmoid, rounding, clamping

import torch
import numpy as np

# a.shape = [2,2,3]
a = torch.tensor([[[4, 2, 3], [3, 2, 1]], [[1, 2, 3], [3, 2, 3]]], dtype=torch.float32)
# d.shape = [2,3,2]
d = torch.tensor(np.array([[[2, 1], [3, 2], [4, 3]], [[1, 4], [2, 3], [2, 3]]]), dtype=torch.float32)
# b.shape = [3]; under broadcasting it behaves like [1, 1, 3]
b = torch.tensor([1, -2, 3], dtype=torch.float32)

# ***** basic math operations *****
c1 = a + b  # allocates new memory for the result; in-place variants such as a.add_(b) modify a directly
c2 = a - b
c3 = a / b
c4 = torch.mul(a, b)  # elementwise (Hadamard) product, with broadcasting
c5 = torch.matmul(a, d)  # batched matrix multiply; the last dim of a must match the second-to-last dim of d -> [2, 2, 2]
c6 = b.pow(2)
c7 = b.sqrt()
c8 = b.rsqrt()  # reciprocal of the square root
c9 = torch.exp(b)
c10 = torch.log(c9)  # natural log; there is also torch.log1p(), i.e. torch.log(1 + input)
c11 = torch.abs(b)  # torch.abs_(b): the underscore suffix means the operation runs in place
c12 = torch.sigmoid(b)
for i in range(1, 13):
    print('-----c' + str(i) + '-----')
    eval('print(c' + str(i) + ')')

e = torch.tensor([3.14, 4.98, 5.5])
print('-----floor, ceil, trunc, frac, round, clamp-----')
print(e.floor(), e.ceil(), e.trunc(), e.frac(), e.round(), e.clamp(4, 5))
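Note that a has shape [2, 2, 3] while b has shape [3]: the elementwise operations above rely on broadcasting, which aligns dimensions from the right and expands size-1 dimensions. A minimal sketch:

x = torch.ones(2, 2, 3)
y = torch.tensor([1., -2., 3.])  # shape [3], broadcast to [2, 2, 3]
print((x + y).shape)             # torch.Size([2, 2, 3])
z = torch.ones(2, 1, 3)          # size-1 dims are expanded as well
print((x * z).shape)             # torch.Size([2, 2, 3])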

Common Matrix Operations

Max/min values and their indices, p-norm, log-sum-exp, mean and variance, sum, product, unique (and consecutive-unique) values, sorting, top-k, equality tests, building diagonal matrices, trace, SVD

import torch
import numpy as np

# a.shape = [2,2,3]
a = torch.tensor([[[4, 2, 3], [3, 2, 1]], [[1, 2, 3], [3, 2, 3]]], dtype=torch.float32)
# d.shape = [2,3,2]
d = torch.tensor(np.array([[[2, 1], [3, 2], [4, 3]], [[1, 4], [2, 3], [2, 3]]]), dtype=torch.float32)
# b.shape = [3]; under broadcasting it behaves like [1, 1, 3]
b = torch.tensor([1, -2, 3], dtype=torch.float32)

# ***** common matrix operations *****
d1 = torch.argmax(a, dim=0)  # [2,3] compares a[0] with a[1]; torch.argmax returns the index of the larger value
d2 = torch.argmax(a, dim=1)  # [2,3] compares a[:, 0] with a[:, 1]
d3 = torch.argmax(a, dim=2)  # [2,2] compares a[:, :, 0], a[:, :, 1] and a[:, :, 2]
d4 = torch.dist(a, b, p=2)  # p-norm distance (p=2 is the Euclidean distance)
d5 = torch.logsumexp(a, dim=1)  # log of the sum of exp along dim 1
d6 = torch.mean(a, 1)  # mean along dim 1; torch.std_mean(a, 1) returns (std, mean) in one call
d7 = torch.std(a, 1)  # standard deviation along dim 1; torch.var_mean(a, 1) returns (var, mean)
d8 = torch.sum(a, dim=1)  # sum along dim 1
d9 = torch.prod(a, dim=1)  # product along dim 1
d10 = torch.unique(a, return_inverse=True, return_counts=True)  # unique elements of the flattened a, plus inverse indices and counts
d11 = torch.unique_consecutive(a, return_inverse=True, return_counts=True)  # like unique, but only collapses consecutive duplicates
d12 = torch.argsort(a, dim=2)  # indices that sort a in ascending order along dim 2
d13 = torch.topk(a, k=2, dim=2)  # the k largest values (and their indices) along dim 2
d14 = torch.eq(a, b)  # [2,2,3] elementwise comparison, with broadcasting
d15 = torch.equal(a, b)  # True or False; both tensors must have exactly the same shape and values
d16 = torch.diag(b, 0)  # builds a square matrix with the 1-D tensor b on the main diagonal (offset 0)
d17 = torch.trace(d16)  # requires a 2-D tensor
d18 = torch.svd(d16)  # requires a matrix input
for i in range(1, 19):
    print('-----d' + str(i) + '-----')
    eval('print(d' + str(i) + ')')
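In NLP these reductions usually run over a logits tensor, for instance torch.argmax and torch.topk along the vocabulary dimension. A small sketch (the sizes are arbitrary):

logits = torch.randn(2, 4, 6)                        # (batch, seq_len, vocab_size)
best = torch.argmax(logits, dim=-1)                  # greedy pick per position, shape [2, 4]
top_vals, top_ids = torch.topk(logits, k=3, dim=-1)  # top-3 candidates per position, shape [2, 4, 3]
print(best.shape, top_ids.shape)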

Tensor Dimension Operations

Concatenation, stacking, transposition, adding and removing dimensions, masked selection, conditional selection, slicing, flattening, and viewing a tensor with a different shape

import torch
import numpy as np

# a.shape = [2,2,3]
a = torch.tensor([[[4, 2, 3], [3, 2, 1]], [[1, 2, 3], [3, 2, 3]]], dtype=torch.float32)
# d.shape = [2,3,2]
d = torch.tensor(np.array([[[2, 1], [3, 2], [4, 3]], [[1, 4], [2, 3], [2, 3]]]), dtype=torch.float32)
# b.shape = [3]; under broadcasting it behaves like [1, 1, 3]
b = torch.tensor([1, -2, 3], dtype=torch.float32)

# ***** tensor dimension operations *****
f1 = torch.cat((a, a), 0)  # [4,2,3]
f2 = torch.cat((a, a, a), 2)  # [2,2,9]
f3 = torch.stack((a, a), 0)  # [2,2,2,3]
f4 = torch.stack((a, a), 2)  # [2,2,2,3]
f5 = torch.stack((a, a), 3)  # [2,2,3,2]
f6 = a.transpose(1, 2)  # [2,3,2]
f7 = a.permute(1, 0, 2)  # [2,2,3]
f8 = a.unsqueeze(2)  # [2,2,1,3]
f9 = f8.unsqueeze(0)  # [1,2,2,1,3]
f10 = f8.squeeze()  # [2,2,3] with no argument, every size-1 dimension is removed
f11 = torch.masked_select(a, mask=a.gt(3))  # 1-D result: the elements of a greater than 3; a.le(3) would select those <= 3
f12 = torch.where(a > 3, a, torch.zeros_like(a))  # [2,2,3] keep elements greater than 3, replace the rest with 0
f13 = torch.narrow(a, 2, 1, 2)  # [2,2,2] take [1:1+2] along dim 2; same as torch.index_select or the slice a[:, :, 1:3]
f14 = torch.index_select(a, 2, index=torch.tensor([1, 2]))
f15 = torch.flatten(a, start_dim=1)  # [2,6] flatten everything from dim 1 onward
f16 = torch.reshape(a, (-1, 2 * 3))  # [2,6] may return a copy when the data is not contiguous; view never copies
f17 = a.view(-1, 2 * 3)  # same result, but view requires a contiguous tensor
f18 = a.flip(1, 2)  # [2,2,3] reverse the element order along dims 1 and 2
for i in range(1, 19):
    print('-----f' + str(i) + '-----')
    eval('print(f' + str(i) + '.shape)')
    eval('print(f' + str(i) + ')')
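To see why f16 and f17 differ: after a transpose the tensor is no longer contiguous, so view raises an error while reshape silently copies. A small sketch:

x = torch.arange(6).reshape(2, 3)
y = x.t()                        # transpose: a non-contiguous view of x
# y.view(6)                      # would raise a RuntimeError because y is not contiguous
print(y.reshape(6))              # works, but may copy the data
print(y.contiguous().view(6))    # the explicit alternative: copy first, then view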

Neural Networks

First, import the packages under their usual aliases

import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import torch.nn.utils.rnn as rn
from torch.utils.tensorboard import SummaryWriter

Build a model by subclassing nn.Module

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.linear = nn.Linear(10, 2)  # define the sub-modules here (a single Linear layer as a placeholder)
    def forward(self, input):
        output = self.linear(input)     # define the forward computation here
        return output
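With the placeholder layer above, the model is called like any function; forward() is invoked through __call__:

model = MyModel()
out = model(torch.randn(4, 10))
print(out.shape)  # torch.Size([4, 2])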

Build a model with nn.Sequential()

# the layer sizes below assume an input of shape (batch, 16, 100); adjust them to your data
model = nn.Sequential(nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3),
                      nn.MaxPool1d(kernel_size=2),  # nn.AvgPool1d()
                      nn.ReLU(),  # nn.Tanh() or nn.LeakyReLU()
                      nn.Dropout(p=0.5),
                      nn.Linear(in_features=49, out_features=10))

# the same model with named layers
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
    ('conv1', nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3)),
    ('pool1', nn.MaxPool1d(kernel_size=2)),
    ('relu', nn.ReLU()),
    ('dropout', nn.Dropout(p=0.5)),
    ('linear', nn.Linear(in_features=49, out_features=10))
]))

Normalization

nn.BatchNorm1d(num_features=32)     # expects input of shape (batch, 32, seq_len)
nn.LayerNorm(normalized_shape=32)   # normalizes the last dimension of size 32
nn.InstanceNorm1d(num_features=32)  # expects input of shape (batch, 32, seq_len)
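For NLP data of shape (batch, seq_len, features), LayerNorm can be applied directly to the last dimension, while BatchNorm1d and InstanceNorm1d expect the channel dimension second. A minimal sketch (feature size 32 as above):

x = torch.randn(8, 20, 32)                          # (batch, seq_len, features)
print(nn.LayerNorm(32)(x).shape)                    # [8, 20, 32]
print(nn.BatchNorm1d(32)(x.transpose(1, 2)).shape)  # BatchNorm1d wants (batch, C, L): [8, 32, 20]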

Optimizers & Loss Functions

# here a single tensor is optimized directly as a demo; for a model use optim.Adam(net.parameters(), lr=0.0001)
input = torch.randn(2, 3, requires_grad=True)
target = torch.empty(2, dtype=torch.long).random_(3)
optimizer = optim.SGD([input], lr=0.01, momentum=0.9)
# dynamically adjust the learning rate
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
loss = nn.MSELoss()
loss = nn.KLDivLoss()
loss = nn.NLLLoss()
loss = nn.CrossEntropyLoss()  # the last assignment wins; pick the one that fits your task
# zero the gradients at the start of every batch
optimizer.zero_grad()
output = loss(input, target)
output.backward()
optimizer.step()
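ReduceLROnPlateau lowers the learning rate once the monitored metric stops improving, so unlike most schedulers it is stepped with that metric, typically once per epoch. A sketch of the pattern (val_loss stands in for a real validation metric):

for epoch in range(10):
    # ... run the training batches as above, then evaluate ...
    val_loss = output.item()   # placeholder: use your actual validation loss here
    scheduler.step(val_loss)   # the scheduler reacts to the monitored value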

Word Embeddings

# one-hot encoding
F.one_hot(torch.arange(0, 5) % 3, num_classes=5)
# load pretrained weights (each row is one word vector)
weight = torch.FloatTensor([[1.0, 2.3, 3.0], [4.0, 5.1, 6.3]])
embedding = nn.Embedding.from_pretrained(weight)
# randomly initialized from a standard normal distribution
embedding = nn.Embedding(10, 3)
# lookup by index
input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
output = embedding(input)  # shape = [2, 4, 3]
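For padded batches it is common to reserve an index (usually 0) for padding so that its vector stays at zero and receives no gradient updates; this is the padding_idx argument (sizes here are illustrative):

embedding = nn.Embedding(10, 3, padding_idx=0)
padded = torch.LongTensor([[1, 2, 4, 0], [4, 3, 0, 0]])  # 0 marks the padding positions
print(embedding(padded)[0, -1])                          # the padding row is all zeros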

Building a Recurrent Neural Network

# pad variable-length sequences to a common length -> shape (max_seq_len, batch, input_size)
input = rn.pad_sequence([torch.randn(5, 10), torch.randn(3, 10)])
rnn = nn.RNN(input_size=10, hidden_size=20, num_layers=2)  # nn.LSTM() or nn.GRU()
h0 = torch.zeros(2, 2, 20)  # initial hidden state: (num_layers, batch, hidden_size)
output, hn = rnn(input, h0)
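A common companion (already available through the rn alias) is pack_padded_sequence, which tells the RNN the true length of each sequence so that padding is not processed; a sketch using the same sizes as above:

lengths = torch.tensor([5, 3])                            # true lengths, sorted in descending order
packed = rn.pack_padded_sequence(input, lengths)          # input is the padded tensor from above
packed_out, hn = rnn(packed)
output, out_lengths = rn.pad_packed_sequence(packed_out)  # back to a padded tensor of shape (5, 2, 20)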

Multi-Head Attention

multi_attn = nn.MultiheadAttention(embed_dim=512, num_heads=8)
query = key = value = torch.randn(10, 32, 512)  # (seq_len, batch, embed_dim)
attn_output, attn_output_weights = multi_attn(query, key, value)
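Padded key positions can be excluded from attention with key_padding_mask, a boolean tensor of shape (batch, source_len) where True means "ignore this position":

key_padding_mask = torch.zeros(32, 10, dtype=torch.bool)  # nothing masked in this toy example
attn_output, attn_output_weights = multi_attn(query, key, value, key_padding_mask=key_padding_mask)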

Transformer

tm = nn.Transformer(nhead=16, num_encoder_layers=12)
src, tgt = torch.rand((10, 32, 512)), torch.rand((20, 32, 512))
out = tm(src, tgt)  # out.shape == tgt.shape
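For autoregressive decoding the target usually needs a causal mask so that position i cannot attend to later positions; the module provides a helper for building it:

tgt_mask = tm.generate_square_subsequent_mask(tgt.size(0))  # upper-triangular mask of -inf values
out = tm(src, tgt, tgt_mask=tgt_mask)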

Transformer-encoder

encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
tm_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
src = torch.randn(10, 32, 512)
out = tm_encoder(src) #out.shape = src.shape

Metric Functions

# cosine similarity
dis = nn.CosineSimilarity()(torch.randn(2, 3), torch.randn(2, 3))
# Minkowski distance (the default p=2 gives the Euclidean distance)
dis = nn.PairwiseDistance()(torch.randn(2, 3), torch.randn(2, 3))

Using TensorBoard

writer = SummaryWriter('logs')
i = 0  # global step, e.g. the batch or epoch index
writer.add_scalar('random_val', torch.tensor([1]).item(), global_step=i)
writer.add_embedding(torch.randn(100, 3), global_step=i)
writer.add_text('lstm', 'This is an lstm', i)
# PR curve: ground_truth holds binary labels, predictions holds probabilities
ground_truth = torch.randint(0, 2, (100,))
predictions = torch.rand(100)
writer.add_pr_curve('pr_curve', ground_truth, predictions, i)
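Finally, close the writer and launch the dashboard from the command line with tensorboard --logdir=logs to browse the logged values:

writer.close()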