PyTorch Learning: Parameter Initialization

 Parameter initialization for Sequential models

import numpy as np
import torch
from torch import nn
# Define a Sequential model
net1 = nn.Sequential(
    nn.Linear(2, 4), # the weight of nn.Linear(2, 4) has shape (4, 2)
    nn.ReLU(),
    nn.Linear(4, 5),
    nn.ReLU(),
    nn.Linear(5, 2)
)
# Access the parameters of the first layer
w1 = net1[0].weight
b1 = net1[0].bias
print(w1)
# Build a Tensor and assign it to the weight directly
net1[0].weight.data = torch.from_numpy(np.random.uniform(3, 5, size=(4, 2))).float() # .float() keeps the layer's default float32 dtype
print(net1[0].weight)
for layer in net1:
    if isinstance(layer, nn.Linear): # check whether this is a linear layer
        param_shape = layer.weight.shape
        layer.weight.data = torch.from_numpy(np.random.normal(0, 0.5, size=param_shape)).float()
        # draw from a normal distribution with mean 0 and standard deviation 0.5
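
The bias of each Linear layer can be replaced in exactly the same way. A minimal sketch, continuing the loop above (the zero bias here is only an illustrative choice, not something the example prescribes):

for layer in net1:
    if isinstance(layer, nn.Linear):
        layer.bias.data.zero_()  # reset the bias of every linear layer in place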

 Parameter initialization for Module models

class sim_net(nn.Module):
    def __init__(self):
        super(sim_net, self).__init__()
        self.l1 = nn.Sequential(
            nn.Linear(2, 4),
            nn.ReLU()
        )     
        self.l1[0].weight.data = torch.randn(4, 2) # initialize this layer directly
        self.l2 = nn.Sequential(
            nn.Linear(4, 5),
            nn.ReLU()
        )      
        self.l3 = nn.Sequential(
            nn.Linear(5, 2),
            nn.ReLU()
        )   
    def forward(self, x):
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        return x
net2 = sim_net()
a=0
# Iterate over children
for i in net2.children():
    print(a)
    a+=1
    print(i)

0
Sequential(
  (0): Linear(in_features=2, out_features=4, bias=True)
  (1): ReLU()
)
1
Sequential(
  (0): Linear(in_features=4, out_features=5, bias=True)
  (1): ReLU()
)
2
Sequential(
  (0): Linear(in_features=5, out_features=2, bias=True)
  (1): ReLU()
)

# Iterate over modules
a=0
for i in net2.modules():
    print(a)
    a+=1
    print(i)
# children only visits the first level of the model definition: since the model above
# defines three Sequential blocks, children returns just those three Sequentials, while
# modules recurses all the way down. In the example above, modules reaches not only each
# Sequential but also the layers inside it, which is very convenient for initialization
# (a named_modules() sketch follows the output below).

0
sim_net(
  (l1): Sequential(
    (0): Linear(in_features=2, out_features=4, bias=True)
    (1): ReLU()
  )
  (l2): Sequential(
    (0): Linear(in_features=4, out_features=5, bias=True)
    (1): ReLU()
  )
  (l3): Sequential(
    (0): Linear(in_features=5, out_features=2, bias=True)
    (1): ReLU()
  )
)
1
Sequential(
  (0): Linear(in_features=2, out_features=4, bias=True)
  (1): ReLU()
)
2
Linear(in_features=2, out_features=4, bias=True)
3
ReLU()
4
Sequential(
  (0): Linear(in_features=4, out_features=5, bias=True)
  (1): ReLU()
)
5
Linear(in_features=4, out_features=5, bias=True)
6
ReLU()
7
Sequential(
  (0): Linear(in_features=5, out_features=2, bias=True)
  (1): ReLU()
)
8
Linear(in_features=5, out_features=2, bias=True)
9
ReLU()
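
children and modules return the submodules themselves; if you also want each submodule's attribute path (for example, to touch only the layers inside l1), named_modules() yields (name, module) pairs. A minimal sketch:

for name, module in net2.named_modules():
    if isinstance(module, nn.Linear):
        print(name, module)  # the linear layers show up as 'l1.0', 'l2.0', 'l3.0'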

for layer in net2.modules():
    if isinstance(layer, nn.Linear):
        print(layer)
        param_shape = layer.weight.shape
        layer.weight.data = torch.from_numpy(np.random.normal(0, 0.5, size=param_shape)).float() # mean 0, standard deviation 0.5

Linear(in_features=2, out_features=4, bias=True)
Linear(in_features=4, out_features=5, bias=True)
Linear(in_features=5, out_features=2, bias=True)

torch.nn.init

Because PyTorch is so flexible, we can initialize parameters by operating on Tensors directly, as above. PyTorch also provides a set of initialization functions to do this quickly: torch.nn.init, which likewise operates at the Tensor level.

from torch.nn import init
w = torch.empty(3, 5)  # an uninitialized 3 x 5 tensor
init.xavier_uniform_(w, gain=1)
print("w:",w)

init.xavier_uniform_(layer.weight, gain=1)  # tensor: an n-dimensional torch.Tensor; gain: an optional scaling factor
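
A common way to combine torch.nn.init with the module traversal shown earlier is Module.apply, which calls a function recursively on every submodule. A minimal sketch on net2 (picking xavier_uniform_ for weights and zeros for biases is just an illustration, not the only sensible choice):

def init_weights(m):
    # apply() calls this on every submodule, so filter for the layers we care about
    if isinstance(m, nn.Linear):
        init.xavier_uniform_(m.weight, gain=1)
        init.zeros_(m.bias)

net2.apply(init_weights)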
