# Pooling keeps the salient features while shrinking the amount of data.
# 1. Official docs: torch.nn.MaxPool2d
# 2. Max pooling takes the maximum value within the region covered by the
#    pooling kernel.
import torch
from torch import nn
from torch.nn import MaxPool2d
# MaxPool2d needs a floating-point input, hence dtype=torch.float32.
input = torch.tensor(
    [[1, 2, 0, 3, 1],
     [0, 1, 2, 3, 1],
     [1, 2, 1, 0, 0],
     [5, 2, 3, 1, 1],
     [2, 1, 0, 1, 1]],
    dtype=torch.float32,
)
# Add batch and channel dimensions -> (N, C, H, W); -1 lets torch infer N.
input = input.reshape(-1, 1, 5, 5)
print(input.shape)
class PoolingLayer_test(nn.Module):
    """Minimal module wrapping a single 3x3 max-pooling layer."""

    def __init__(self):
        super().__init__()
        # ceil_mode=False (floor mode): windows that don't fully fit are dropped.
        self.maxpool_1 = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        """Return the max-pooled version of *input*."""
        return self.maxpool_1(input)
# Run the 5x5 demo tensor through the pooling module:
# a 3x3 kernel in floor mode reduces 5x5 down to a single 1x1 value.
textPoolingLayer = PoolingLayer_test()
output = textPoolingLayer(input)
print(output)
# Pooling keeps the salient features while shrinking the amount of data.
import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# CIFAR-10 test split as tensors; expects the data to already be on disk
# (download=False), so this won't hit the network.
dataset = torchvision.datasets.CIFAR10(
    root="D:/Learn/dataSet/Pytorch",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False,
)
dataloader = DataLoader(dataset, batch_size=64)
# TensorBoard writer; event files land in ./logs_pooling
writer = SummaryWriter("logs_pooling")
class PoolingLayer_test(nn.Module):
    """Wraps one 3x3 max-pooling layer so it can be applied to image batches."""

    def __init__(self):
        super().__init__()
        # Floor mode (ceil_mode=False): incomplete kernel windows are skipped.
        self.maxpool_1 = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        """Max-pool *input* and return the downsampled tensor."""
        pooled = self.maxpool_1(input)
        return pooled
# Pool every CIFAR-10 batch and log the before/after images to TensorBoard.
textPoolingLayer = PoolingLayer_test()
for step, (imgs, targets) in enumerate(dataloader):
    print(imgs.shape)
    writer.add_images("input", imgs, step)
    # Max pooling preserves the channel count, so the result is still
    # loggable as an image grid — just at a lower spatial resolution.
    output = textPoolingLayer(imgs)
    print(output.shape)
    writer.add_images("output", output, step)
writer.close()