1 ReLU
# Non-linear activation
# ReLU (Rectified Linear Unit) is currently the most commonly used activation function
import torch
import torchvision
from torch import nn
from torch.nn import ReLU
'''
Parameters
inplace – can optionally do the operation in-place. Default: False
inplace = True  : overwrite the input tensor in place with the result
inplace = False : keep the input unchanged and return a new tensor (False is recommended, so the original data is preserved)
'''
class Test_NN(nn.Module):
    def __init__(self):
        super(Test_NN, self).__init__()
        self.relu1 = ReLU()

    def forward(self, input):
        output = self.relu1(input)
        return output
input = torch.tensor([[1, -0.5],
                      [-1, 3]])
# reshape to (batch, channel, height, width) = (1, 1, 2, 2)
input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

testNN = Test_NN()
output = testNN(input)
print(output)
The result is (negative entries are clamped to 0):
[[1., 0.],
 [0., 3.]]
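A minimal sketch of the inplace behavior described above; the tensor values here are illustrative and not part of the original example:

# inplace=False (default): the input tensor is left untouched
x = torch.tensor([1.0, -0.5, -1.0, 3.0])
out = ReLU(inplace=False)(x)
print(x)    # x still contains -0.5 and -1.0, the original data is preserved
print(out)  # tensor([1., 0., 0., 3.])

# inplace=True: the input tensor itself is overwritten with the result
y = torch.tensor([1.0, -0.5, -1.0, 3.0])
ReLU(inplace=True)(y)
print(y)    # tensor([1., 0., 0., 3.]), the original values are gone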
2 Sigmoid
import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
class Test_NN(nn.Module):
    def __init__(self):
        super(Test_NN, self).__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        # output = self.relu1(input)
        output = self.sigmoid1(input)
        return output
dataset = torchvision.datasets.CIFAR10(root="D:/Learn/dataSet/Pytorch", train=False, transform=torchvision.transforms.ToTensor(), download=False)
dataloader = DataLoader(dataset, batch_size=64)
writer = SummaryWriter("logs_NonLinearActivation")
testNN = Test_NN()
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, step)
    # run the batch through the Sigmoid activation and log the result
    output = testNN(imgs)
    writer.add_images("output", output, step)
    step += 1
writer.close()
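To view the logged images, run tensorboard --logdir=logs_NonLinearActivation and compare the "input" and "output" panels; since ToTensor scales pixels into [0, 1], the sigmoid output appears as a washed-out, lower-contrast version of the input (all values end up between about 0.5 and 0.73). As a quick sanity check, nn.Sigmoid can also be compared against the closed-form definition sigmoid(x) = 1 / (1 + exp(-x)); this is a minimal sketch, independent of the original example:

import torch
from torch.nn import Sigmoid

# apply nn.Sigmoid and the elementwise formula to the same tensor
x = torch.tensor([[1.0, -0.5],
                  [-1.0, 3.0]])
expected = 1 / (1 + torch.exp(-x))   # manual sigmoid
actual = Sigmoid()(x)                # module version
print(torch.allclose(actual, expected))  # True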