學習記錄:
常用API:
# Convolution layer
conv2d = nn.Conv2d(in_channels,out_channels,kernel_size=3,stride=1,padding=1)
# args: input channels, output channels, kernel size, stride, zero-padding size
# Batch normalization
bn = nn.BatchNorm2d(out_channels) # placed right after a conv layer; argument is that conv layer's output channel count
# Activation layer (inplace=True modifies the input tensor in place to save memory)
relu = nn.ReLU(inplace=True)
# Pooling layer (2x2 max-pool with stride 2 halves each spatial dimension)
pool = nn.MaxPool2d(kernel_size=2,stride=2)
# Transposed convolution ("deconvolution") layer; stride=2 upsamples the spatial dimensions
deconv = nn.ConvTranspose2d(in_channels,out_channels,kernel_size=3,stride=2,padding=1)
訓練大體流程:
# -*- coding:utf-8 -*-
import torch
import torchvision
from torch import nn,optim
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.nn import functional as F
# Defined once, outside the training loop
criterion = nn.NLLLoss().cuda()  # NLLLoss expects log-probabilities; pairs with the log_softmax below
optimizer = optim.Adam(net.parameters(),lr=LR)  # NOTE(review): `net` and `LR` are defined elsewhere — not shown here
# One training step (run per batch, inside the epoch loop)
optimizer.zero_grad() # clear gradients accumulated from the previous step
out = net(imgData)  # forward pass; `imgData` is the input batch (defined elsewhere)
out = F.log_softmax(out,dim=1)  # convert raw logits to log-probabilities along the class dimension
loss = criterion(out,imgLabel)  # compute loss against the target labels
loss.backward() # backpropagation: compute gradients
optimizer.step() # apply the gradient update to the parameters