PyTorch: Loss Functions (Part 1)

1. The Concept of a Loss Function

Loss function: measures the discrepancy between the model output and the ground-truth label.
[Figure: fitting a one-variable linear regression; the green points are training samples and the blue line is the trained model]
The figure above shows the fitting process of a one-variable linear regression. The model does not fit all the data points perfectly, i.e. not every point lies on the line, so each data point produces a loss.

Let's first clarify what the loss function, cost function, and objective function are:

Loss Function: $Loss=f(\hat{y},y)$ computes the loss of a single sample;

Cost Function: $\mathrm{Cost}=\frac{1}{N} \sum_{i}^{N} f(\hat{y}_{i}, y_{i})$ is the average loss over all samples in the training set;

Objective Function:
The objective function is a broader concept; in machine learning it consists of the cost plus a regularization term: $Obj = Cost + Regularization$.
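
To make the three terms concrete, here is a minimal sketch that assumes a squared-error loss and an L2 penalty; the tensors and the 0.01 regularization coefficient are made up purely for illustration:

import torch

y_hat = torch.tensor([1.2, 0.8, 2.1])    # model outputs for three samples (made up)
y     = torch.tensor([1.0, 1.0, 2.0])    # ground-truth values (made up)
w     = torch.tensor([0.5, -0.3])        # model weights (made up)

loss_per_sample = (y_hat - y) ** 2              # Loss: one value per sample
cost = loss_per_sample.mean()                   # Cost: average loss over the data set
objective = cost + 0.01 * (w ** 2).sum()        # Obj = Cost + Regularization (L2 term)

print(loss_per_sample, cost, objective)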

Now let's look at how losses are implemented in PyTorch:

class _Loss(Module):
    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        super(_Loss, self).__init__()
        if size_average is not None or reduce is not None:
            self.reduction = _Reduction.legacy_get_string(size_average, reduce)
        else:
            self.reduction = reduction

PyTorch losses still inherit from Module, so a loss is effectively a network layer. The __init__() method has three parameters; size_average and reduce are about to be deprecated, and their functionality is already covered by reduction.
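
A quick sketch (not part of the tutorial code) to confirm this: a loss object is an nn.Module and stores the chosen reduction mode:

import torch.nn as nn

loss_fn = nn.CrossEntropyLoss()
print(isinstance(loss_fn, nn.Module))   # True: the loss is just another module/layer
print(loss_fn.reduction)                # 'mean' by default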

Next, let's look at how the cross-entropy loss is used and how it works.

2. Cross-Entropy Loss Function

Here we use the RMB binary-classification code to see how the cross-entropy loss is used:

import os
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torch.optim as optim
from PIL import Image
from matplotlib import pyplot as plt
from model.lenet import LeNet
from toolss.my_dataset import RMBDataset
from toolss.common_tools import transform_invert, set_seed

set_seed(1)  # set the random seed
rmb_label = {"1": 0, "100": 1}

# hyper-parameter settings
MAX_EPOCH = 10
BATCH_SIZE = 16
LR = 0.01
log_interval = 10
val_interval = 1

# ============================ step 1/5 data ============================

split_dir = os.path.join("F:/Pytorch框架班/Pytorch-Camp-master/代碼合集/rmb_split")
train_dir = os.path.join(split_dir, "train")
valid_dir = os.path.join(split_dir, "valid")

norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]

train_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomCrop(32, padding=4),
    transforms.RandomGrayscale(p=0.8),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

valid_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# build MyDataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# build DataLoader
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 model ============================

net = LeNet(classes=2)
net.initialize_weights()

# ============================ step 3/5 loss function ============================
loss_function = nn.CrossEntropyLoss()                                                   # choose the loss function

# ============================ step 4/5 optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)                        # choose the optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)     # set the learning-rate decay schedule

# ============================ step 5/5 training ============================
train_curve = list()
valid_curve = list()

for epoch in range(MAX_EPOCH):

    loss_mean = 0.
    correct = 0.
    total = 0.

    net.train()
    for i, data in enumerate(train_loader):

        # forward
        inputs, labels = data
        outputs = net(inputs)

        # backward
        optimizer.zero_grad()
        loss = loss_function(outputs, labels)
        loss.backward()

        # update weights
        optimizer.step()

        # classification statistics
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).squeeze().sum().numpy()

        # print training information
        loss_mean += loss.item()
        train_curve.append(loss.item())
        if (i+1) % log_interval == 0:
            loss_mean = loss_mean / log_interval
            print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
            loss_mean = 0.

    scheduler.step()  # update the learning rate

    # validate the model
    if (epoch+1) % val_interval == 0:

        correct_val = 0.
        total_val = 0.
        loss_val = 0.
        net.eval()
        with torch.no_grad():
            for j, data in enumerate(valid_loader):
                inputs, labels = data
                outputs = net(inputs)
                loss = loss_function(outputs, labels)

                _, predicted = torch.max(outputs.data, 1)
                total_val += labels.size(0)
                correct_val += (predicted == labels).squeeze().sum().numpy()

                loss_val += loss.item()

            valid_curve.append(loss_val)
            print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, correct / total))


train_x = range(len(train_curve))
train_y = train_curve

train_iters = len(train_loader)
valid_x = np.arange(1, len(valid_curve)+1) * train_iters*val_interval  # valid_curve records one loss per epoch, so convert the record points to iterations
valid_y = valid_curve

plt.plot(train_x, train_y, label='Train')
plt.plot(valid_x, valid_y, label='Valid')

plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()

# ============================ inference ============================

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.join(BASE_DIR, "test_data")

test_data = RMBDataset(data_dir=test_dir, transform=valid_transform)
valid_loader = DataLoader(dataset=test_data, batch_size=1)

for i, data in enumerate(valid_loader):
    # forward
    inputs, labels = data
    outputs = net(inputs)
    _, predicted = torch.max(outputs.data, 1)

    rmb = 1 if predicted.numpy()[0] == 0 else 100

    img_tensor = inputs[0, ...]  # C H W
    img = transform_invert(img_tensor, train_transform)
    plt.imshow(img)
    plt.title("LeNet got {} Yuan".format(rmb))
    plt.show()
    plt.pause(0.5)
    plt.close()

Let's analyze what loss_function = nn.CrossEntropyLoss() actually does by single-stepping into it with the debugger.

class CrossEntropyLoss(_WeightedLoss):
    __constants__ = ['weight', 'ignore_index', 'reduction']

    def __init__(self, weight=None, size_average=None, ignore_index=-100,
                 reduce=None, reduction='mean'):
        super(CrossEntropyLoss, self).__init__(weight, size_average, reduce, reduction)
        self.ignore_index = ignore_index

    def forward(self, input, target):
        return F.cross_entropy(input, target, weight=self.weight,
                               ignore_index=self.ignore_index, reduction=self.reduction)

The code shows that CrossEntropyLoss inherits from _WeightedLoss. Stepping into the line:

super(CrossEntropyLoss, self).__init__(weight, size_average, reduce, reduction)

takes us to:

class _WeightedLoss(_Loss):
    def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'):
        super(_WeightedLoss, self).__init__(size_average, reduce, reduction)
        self.register_buffer('weight', weight)

We can see that _WeightedLoss in turn inherits from the _Loss class. Single-stepping into:

super(_WeightedLoss, self).__init__(size_average, reduce, reduction)

takes us to:

class _Loss(Module):
    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        super(_Loss, self).__init__()
        if size_average is not None or reduce is not None:
            self.reduction = _Reduction.legacy_get_string(size_average, reduce)
        else:
            self.reduction = reduction

We have now reached the _Loss class, which inherits from Module. Therefore executing:

loss_function = nn.CrossEntropyLoss() 

constructs a loss_function, and from the construction process we can see that nn.CrossEntropyLoss is a Module. Debugging the program to:

loss = loss_function(outputs, labels)

shows that, since loss_function is a Module, passing it outputs and labels simply executes a forward(); every module must implement a forward(). Stepping in shows how the call is dispatched:

    def __call__(self, *input, **kwargs):
        for hook in self._forward_pre_hooks.values():
            result = hook(self, input)
            if result is not None:
                if not isinstance(result, tuple):
                    result = (result,)
                input = result
        if torch._C._get_tracing_state():
            result = self._slow_forward(*input, **kwargs)
        else:
            result = self.forward(*input, **kwargs)
        for hook in self._forward_hooks.values():
            hook_result = hook(self, input, result)
            if hook_result is not None:
                result = hook_result
        if len(self._backward_hooks) > 0:
            var = result
            while not isinstance(var, torch.Tensor):
                if isinstance(var, dict):
                    var = next((v for v in var.values() if isinstance(v, torch.Tensor)))
                else:
                    var = var[0]
            grad_fn = var.grad_fn
            if grad_fn is not None:
                for hook in self._backward_hooks.values():
                    wrapper = functools.partial(hook, self)
                    functools.update_wrapper(wrapper, hook)
                    grad_fn.register_hook(wrapper)
        return result

The line we care about here is:

result = self.forward(*input, **kwargs)

Stepping into it brings us to:

    def forward(self, input, target):
        return F.cross_entropy(input, target, weight=self.weight,
                               ignore_index=self.ignore_index, reduction=self.reduction)

We can see that it calls cross_entropy(). Stepping further takes us into cross_entropy() in torch.nn.functional; the relevant part is:

    if size_average is not None or reduce is not None:
        reduction = _Reduction.legacy_get_string(size_average, reduce)
    return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)

This code first resolves the reduction argument and then computes the loss by applying nll_loss to the log_softmax of the input.
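
In other words, F.cross_entropy is log_softmax followed by nll_loss. A quick check with arbitrary logits (the shapes and values below are made up):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)               # 4 samples, 3 classes (arbitrary values)
target = torch.tensor([0, 2, 1, 2])

loss_a = F.cross_entropy(logits, target)
loss_b = F.nll_loss(F.log_softmax(logits, dim=1), target)
print(torch.allclose(loss_a, loss_b))    # True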

2.1 nn.CrossEntropyLoss

Purpose: combines nn.LogSoftmax() and nn.NLLLoss() to compute the cross-entropy loss;
Main parameters:

  • weight: sets a per-class weight on the loss;
  • ignore_index: ignores a given class;
  • reduction: the reduction mode, one of none/sum/mean; none computes the loss element by element, sum adds all elements and returns a scalar, mean returns the weighted average as a scalar;
nn.CrossEntropyLoss(weight=None,
					size_average=None,
					ignore_index=-100,
					reduce=None,
					reduction='mean')

The cross-entropy loss is commonly used in classification tasks, where the model output is interpreted as a probability distribution over the classes. Cross entropy measures the difference between two probability distributions, so the lower the cross entropy, the closer the two distributions are.

Why does a lower cross entropy mean the two distributions are closer? This follows from the relationship between cross entropy and relative entropy, which in turn involves information entropy. Let's analyze the relationship among the three.

Cross entropy = information entropy + relative entropy

First, entropy here means information entropy, which describes the uncertainty of an event: the more uncertain the event, the larger the entropy. Entropy is the expectation of self-information.

Self-information measures the uncertainty of a single event: $I(x)=-\log[p(x)]$, where $p(x)$ is the probability of the event. Entropy describes the uncertainty of the whole probability distribution and is the expectation of self-information: $\mathrm{H}(P)=E_{x \sim P}[I(x)]=-\sum_{i}^{N} P(x_{i}) \log P(x_{i})$
The figure below shows the information entropy of a Bernoulli distribution:
[Figure: information entropy of a Bernoulli (two-point) distribution as a function of the event probability]
It shows that the entropy is largest when the event probability is 0.5, i.e. when the uncertainty of the event is greatest; the maximum entropy is about 0.69.
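
A quick numerical sketch of this curve (assuming the natural logarithm, which matches the 0.69 value):

import numpy as np

p = np.linspace(0.01, 0.99, 99)
entropy = -(p * np.log(p) + (1 - p) * np.log(1 - p))   # Bernoulli entropy H(p)
print(p[entropy.argmax()], entropy.max())              # 0.5  0.6931...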

Relative entropy is also called the KL divergence. It measures the difference between two distributions, i.e. a kind of distance between them; however, it is not a true distance metric, because a distance metric must be symmetric (the distance from P to Q equals the distance from Q to P), and relative entropy is not symmetric. Its formula is $D_{K L}(P, Q)=E_{x \sim P}\left[\log \frac{P(x)}{Q(x)}\right]$, where P is the true distribution and Q is the distribution output by the model; Q is used to approximate P, which is why relative entropy is not symmetric.

Now the cross-entropy formula: $\mathrm{H}(P, Q)=-\sum_{i=1}^{N} P(x_{i}) \log Q(x_{i})$. Cross entropy also measures how similar the two probability distributions P and Q are.

Now let's expand the relative-entropy formula to see how it relates to information entropy and cross entropy:

$$D_{K L}(P, Q)=E_{x \sim P}\left[\log \frac{P(x)}{Q(x)}\right]=E_{x \sim P}[\log P(x)-\log Q(x)]=\sum_{i=1}^{N} P(x_{i})\left[\log P(x_{i})-\log Q(x_{i})\right]=\sum_{i=1}^{N} P(x_{i}) \log P(x_{i})-\sum_{i=1}^{N} P(x_{i}) \log Q(x_{i})$$

The first sum, $\sum_{i} P(x_{i}) \log P(x_{i})$, is $-H(P)$, and the second term, $-\sum_{i} P(x_{i}) \log Q(x_{i})$, is the cross entropy $H(P,Q)$, so the relative entropy can be written as $D_{K L}(P, Q)=H(P, Q)-H(P)$, which rearranges to $\mathrm{H}(P, Q)=D_{K L}(P, Q)+\mathrm{H}(P)$. Here P is the true distribution, i.e. the distribution of the training-set samples, and Q is the distribution output by the model. Because the training set is fixed, $H(P)$ is a constant, so minimizing the cross entropy during training is equivalent to minimizing the relative entropy.
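
A small numerical check of this identity with two made-up discrete distributions:

import numpy as np

P = np.array([0.7, 0.2, 0.1])           # "true" distribution (made up)
Q = np.array([0.5, 0.3, 0.2])           # model's distribution (made up)

H_P  = -np.sum(P * np.log(P))           # information entropy H(P)
H_PQ = -np.sum(P * np.log(Q))           # cross entropy H(P, Q)
D_KL =  np.sum(P * np.log(P / Q))       # relative entropy D_KL(P, Q)

print(np.isclose(H_PQ, D_KL + H_P))     # True: H(P,Q) = D_KL(P,Q) + H(P)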

Now that we are familiar with information entropy, relative entropy, and cross entropy, let's formally look at nn.CrossEntropyLoss in PyTorch. The cross entropy is $\mathrm{H}(P, Q)=-\sum_{i=1}^{N} P(x_{i}) \log Q(x_{i})$, and the loss PyTorch computes for a single sample is

$$\operatorname{loss}(x, class)=-\log \left(\frac{\exp (x[class])}{\sum_{j} \exp (x[j])}\right)=-x[class]+\log \left(\sum_{j} \exp (x[j])\right)$$

where x is the vector of class scores (logits) output by the network and class is the index of the target class. The fraction inside the log is the softmax, which normalizes the scores into probabilities that sum to 1.

Compared with the general definition of cross entropy, the term $P(x_i)$ equals 1 here because the label of a training sample is deterministic (a one-hot distribution), and the sum over i disappears because the loss is computed for one sample at a time.

Now let's look at the main parameters of nn.CrossEntropyLoss(). The first is weight, which assigns a weight to the loss of each class. With weights, the cross entropy becomes

$$\operatorname{loss}(x, class)=weight[class]\left(-x[class]+\log \left(\sum_{j} \exp (x[j])\right)\right)$$

For example, to make the model pay more attention to class 0, you can set its weight to 1.2, i.e. scale its loss by a factor of 1.2; this is what weight does.

The second parameter is ignore_index, which specifies a class whose samples should not contribute to the loss. For example, in a 1000-class task indexed 0 to 999, setting ignore_index=999 skips the loss of class 999.
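
A small sketch of ignore_index, reusing the same fake data as the later examples; samples whose target equals the ignored index contribute a loss of 0 and are excluded from the mean:

import torch
import torch.nn as nn

inputs = torch.tensor([[1, 2], [1, 3], [1, 3]], dtype=torch.float)
target = torch.tensor([0, 1, 1], dtype=torch.long)

loss_ignore = nn.CrossEntropyLoss(reduction='none', ignore_index=0)
print(loss_ignore(inputs, target))   # the loss of the first sample (class 0) is zeroed out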

The third parameter is reduction, which selects the reduction mode: none computes the loss element by element; sum adds up the losses of all elements and returns a scalar; mean returns the weighted average of the losses as a scalar. In mean mode, if weight is not set, it is simply the plain average.

Let's study the parameters of CrossEntropyLoss through code:

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# fake data
inputs = torch.tensor([[1, 2], [1, 3], [1, 3]], dtype=torch.float)
target = torch.tensor([0, 1, 1], dtype=torch.long)

# ----------------------------------- CrossEntropy loss: reduction -----------------------------------
# flag = 0
flag = 1
if flag:
    # def loss function
    loss_f_none = nn.CrossEntropyLoss(weight=None, reduction='none')
    loss_f_sum = nn.CrossEntropyLoss(weight=None, reduction='sum')
    loss_f_mean = nn.CrossEntropyLoss(weight=None, reduction='mean')

    # forward
    loss_none = loss_f_none(inputs, target)
    loss_sum = loss_f_sum(inputs, target)
    loss_mean = loss_f_mean(inputs, target)

    # view
    print("Cross Entropy Loss:\n ", loss_none, loss_sum, loss_mean)

The output is:

Cross Entropy Loss:
  tensor([1.3133, 0.1269, 0.1269]) tensor(1.5671) tensor(0.5224)

In the output, tensor([1.3133, 0.1269, 0.1269]) is produced by loss_f_none = nn.CrossEntropyLoss(weight=None, reduction='none');
tensor(1.5671) is produced by loss_f_sum = nn.CrossEntropyLoss(weight=None, reduction='sum');
tensor(0.5224) is produced by loss_f_mean = nn.CrossEntropyLoss(weight=None, reduction='mean');

Now let's verify the formula by hand, computing only the loss of the first sample:

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# fake data
inputs = torch.tensor([[1, 2], [1, 3], [1, 3]], dtype=torch.float)
target = torch.tensor([0, 1, 1], dtype=torch.long)

# ----------------------------------- CrossEntropy loss: reduction -----------------------------------
# flag = 0
flag = 1
if flag:
    # def loss function
    loss_f_none = nn.CrossEntropyLoss(weight=None, reduction='none')
    loss_f_sum = nn.CrossEntropyLoss(weight=None, reduction='sum')
    loss_f_mean = nn.CrossEntropyLoss(weight=None, reduction='mean')

    # forward
    loss_none = loss_f_none(inputs, target)
    loss_sum = loss_f_sum(inputs, target)
    loss_mean = loss_f_mean(inputs, target)

    # view
    print("Cross Entropy Loss:\n ", loss_none, loss_sum, loss_mean)

# --------------------------------- compute by hand
# flag = 0
flag = 1
if flag:

    idx = 0

    input_1 = inputs.detach().numpy()[idx]      # [1, 2]
    target_1 = target.numpy()[idx]              # [0]

    # first term: x[class]
    x_class = input_1[target_1]

    # second term: log(sum(exp(x[j])))
    sigma_exp_x = np.sum(list(map(np.exp, input_1)))
    log_sigma_exp_x = np.log(sigma_exp_x)

    # loss of the first sample
    loss_1 = -x_class + log_sigma_exp_x

    print("Loss of the first sample: ", loss_1)

The output is:

Cross Entropy Loss:
  tensor([1.3133, 0.1269, 0.1269]) tensor(1.5671) tensor(0.5224)
Loss of the first sample:  1.3132617

The hand computation confirms the formula.

Next, let's observe the weight parameter in code; one weight must be provided per class:

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# fake data
inputs = torch.tensor([[1, 2], [1, 3], [1, 3]], dtype=torch.float)
target = torch.tensor([0, 1, 1], dtype=torch.long)

# ----------------------------------- CrossEntropy loss: reduction -----------------------------------
# flag = 0
flag = 1
if flag:
    # def loss function
    loss_f_none = nn.CrossEntropyLoss(weight=None, reduction='none')
    loss_f_sum = nn.CrossEntropyLoss(weight=None, reduction='sum')
    loss_f_mean = nn.CrossEntropyLoss(weight=None, reduction='mean')

    # forward
    loss_none = loss_f_none(inputs, target)
    loss_sum = loss_f_sum(inputs, target)
    loss_mean = loss_f_mean(inputs, target)

    # view
    print("Cross Entropy Loss:\n ", loss_none, loss_sum, loss_mean)

# ----------------------------------- weight -----------------------------------
# flag = 0
flag = 1
if flag:
    # def loss function
    weights = torch.tensor([1, 2], dtype=torch.float)  # class 0 keeps weight 1, class 1 has weight 2 (a weight of 1 leaves the loss unchanged)
    # weights = torch.tensor([0.7, 0.3], dtype=torch.float)

    loss_f_none_w = nn.CrossEntropyLoss(weight=weights, reduction='none')
    loss_f_sum = nn.CrossEntropyLoss(weight=weights, reduction='sum')
    loss_f_mean = nn.CrossEntropyLoss(weight=weights, reduction='mean')

    # forward
    loss_none_w = loss_f_none_w(inputs, target)
    loss_sum = loss_f_sum(inputs, target)
    loss_mean = loss_f_mean(inputs, target)

    # view
    print("\nweights: ", weights)
    print(loss_none_w, loss_sum, loss_mean)

Compare the loss outputs with and without weights:

Cross Entropy Loss:
  tensor([1.3133, 0.1269, 0.1269]) tensor(1.5671) tensor(0.5224)

weights:  tensor([1., 2.])
tensor([1.3133, 0.2539, 0.2539]) tensor(1.8210) tensor(0.3642)

We can see that the loss of the samples labeled 1 has doubled.
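
The mean value also illustrates the weighted average mentioned earlier: the divisor is the total weight of the batch (1 + 2 + 2 = 5), not the number of samples, as the quick arithmetic below shows:

weighted_sum = 1.3133 + 0.2539 + 0.2539   # the 'none' losses printed above
total_weight = 1 + 2 + 2                  # weight of each sample's target class
print(weighted_sum / total_weight)        # ≈ 0.3642, the 'mean' result above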

2.2 nn.NLLLoss

Purpose: implements the negation step of the negative log-likelihood loss; it simply takes the negative of the input value at each sample's target class;
Main parameters:

  • weight: sets a per-class weight on the loss;
  • ignore_index: ignores a given class;
  • reduction: the reduction mode, one of none/sum/mean;
nn.NLLLoss(weight=None,
			size_average=None,
			ignore_index=-100,
			reduce=None,
			reduction='mean')

The formula for nn.NLLLoss is $l_{n}=-w_{y_{n}} x_{n, y_{n}}$, with $\ell(x, y)=L=\left\{l_{1}, \ldots, l_{N}\right\}$, where $w_{y_n}$ is the weight set via the weight argument (it defaults to 1 when weight=None) and $x$ is the output of the network. The other parameters are the same as those of nn.CrossEntropyLoss described above.

The following code shows what nn.NLLLoss does:

weights = torch.tensor([1, 1], dtype=torch.float)

loss_f_none_w = nn.NLLLoss(weight=weights, reduction='none')
loss_f_sum = nn.NLLLoss(weight=weights, reduction='sum')
loss_f_mean = nn.NLLLoss(weight=weights, reduction='mean')

# forward
loss_none_w = loss_f_none_w(inputs, target)
loss_sum = loss_f_sum(inputs, target)
loss_mean = loss_f_mean(inputs, target)

# view
print("\nweights: ", weights)
print("NLL Loss", loss_none_w, loss_sum, loss_mean)

Running the code gives:

weights:  tensor([1., 1.])
NLL Loss tensor([-1., -3., -3.]) tensor(-7.) tensor(-2.3333)

From the output we can see that each loss is just the negative of the input value at the sample's target class: the inputs at the target indices are 1, 3, and 3, and the corresponding losses are -1, -3, and -3.
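
This is exactly the second half of CrossEntropyLoss: feeding NLLLoss with log_softmax outputs reproduces the cross-entropy values from before. A quick check with the same fake data:

import torch
import torch.nn as nn
import torch.nn.functional as F

inputs = torch.tensor([[1, 2], [1, 3], [1, 3]], dtype=torch.float)
target = torch.tensor([0, 1, 1], dtype=torch.long)

log_probs = F.log_softmax(inputs, dim=1)
loss_nll = nn.NLLLoss(reduction='none')(log_probs, target)
loss_ce  = nn.CrossEntropyLoss(reduction='none')(inputs, target)
print(loss_nll)                           # tensor([1.3133, 0.1269, 0.1269])
print(torch.allclose(loss_nll, loss_ce))  # True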

2.3 nn.BCELoss

Purpose: binary cross-entropy loss;
Note: the input values must lie in [0, 1];
Main parameters:

  • weight: sets a per-class weight on the loss;
  • reduction: the reduction mode, one of none/sum/mean;
nn.BCELoss(weight=None,
			size_average=None,
			reduce=None,
			reduction='mean')

Its formula is $l_{n}=-w_{n}\left[y_{n} \cdot \log x_{n}+\left(1-y_{n}\right) \cdot \log \left(1-x_{n}\right)\right]$, where $y_n$ is either 0 or 1.

Let's observe nn.BCELoss through code:

inputs = torch.tensor([[1, 2], [2, 2], [3, 4], [4, 5]], dtype=torch.float)
target = torch.tensor([[1, 0], [1, 0], [0, 1], [0, 1]], dtype=torch.float)

target_bce = target

# BCELoss expects probabilities, so squash the inputs into (0, 1) with sigmoid
inputs = torch.sigmoid(inputs)

weights = torch.tensor([1, 1], dtype=torch.float)

loss_f_none_w = nn.BCELoss(weight=weights, reduction='none')
loss_f_sum = nn.BCELoss(weight=weights, reduction='sum')
loss_f_mean = nn.BCELoss(weight=weights, reduction='mean')

# forward
loss_none_w = loss_f_none_w(inputs, target_bce)
loss_sum = loss_f_sum(inputs, target_bce)
loss_mean = loss_f_mean(inputs, target_bce)

# view
print("\nweights: ", weights)
print("BCE Loss", loss_none_w, loss_sum, loss_mean)

The output of the code is:

weights:  tensor([1., 1.])
BCE Loss tensor([[0.3133, 2.1269],
        [0.1269, 2.1269],
        [3.0486, 0.0181],
        [4.0181, 0.0067]]) tensor(11.7856) tensor(1.4732)
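
We can verify the first entry by hand: the first input element is 1 with target 1, so its loss is -log(sigmoid(1)):

import numpy as np

x = 1 / (1 + np.exp(-1.0))               # sigmoid of the first input element (logit = 1)
y = 1.0                                  # its target
loss_00 = -(y * np.log(x) + (1 - y) * np.log(1 - x))
print(loss_00)                           # ≈ 0.3133, the first element of the output above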

2.4 nn.BCEWithLogitsLoss

Purpose: combines a sigmoid with the binary cross-entropy loss;
Note: do not add a sigmoid at the end of the network, because this loss applies it internally;
Main parameters:

  • pos_weight: weight applied to positive samples;
  • weight: sets a per-class weight on the loss;
  • reduction: the reduction mode, one of none/sum/mean;
nn.BCEWithLogitsLoss(weight=None,
					size_average=None,
					reduce=None,
					reduction='mean',
					pos_weight=None)

The formula for nn.BCEWithLogitsLoss is $l_{n}=-w_{n}\left[y_{n} \cdot \log \sigma\left(x_{n}\right)+\left(1-y_{n}\right) \cdot \log \left(1-\sigma\left(x_{n}\right)\right)\right]$

The pos_weight parameter of nn.BCEWithLogitsLoss balances positive and negative samples: the loss of each positive sample is multiplied by pos_weight. For example, with 100 positive samples and 300 negative samples the ratio is 1:3, so setting pos_weight to 3 multiplies each positive sample's loss by 3, which is equivalent to having 300 positive and 300 negative samples, i.e. a balanced set.

The following code shows what nn.BCEWithLogitsLoss does:

inputs = torch.tensor([[1, 2], [2, 2], [3, 4], [4, 5]], dtype=torch.float)
target = torch.tensor([[1, 0], [1, 0], [0, 1], [0, 1]], dtype=torch.float)

target_bce = target

# inputs = torch.sigmoid(inputs)

weights = torch.tensor([1, 1], dtype=torch.float)

loss_f_none_w = nn.BCEWithLogitsLoss(weight=weights, reduction='none')
loss_f_sum = nn.BCEWithLogitsLoss(weight=weights, reduction='sum')
loss_f_mean = nn.BCEWithLogitsLoss(weight=weights, reduction='mean')

# forward
loss_none_w = loss_f_none_w(inputs, target_bce)
loss_sum = loss_f_sum(inputs, target_bce)
loss_mean = loss_f_mean(inputs, target_bce)

# view
print("\nweights: ", weights)
print(loss_none_w, loss_sum, loss_mean)

The output is:

weights:  tensor([1., 1.])
tensor([[0.3133, 2.1269],
        [0.1269, 2.1269],
        [3.0486, 0.0181],
        [4.0181, 0.0067]]) tensor(11.7856) tensor(1.4732)
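
The snippet above does not exercise pos_weight, so here is a minimal sketch of its effect on the same fake data; every term whose target is 1 is scaled by the given factor (3 in this sketch):

import torch
import torch.nn as nn

inputs = torch.tensor([[1, 2], [2, 2], [3, 4], [4, 5]], dtype=torch.float)
target = torch.tensor([[1, 0], [1, 0], [0, 1], [0, 1]], dtype=torch.float)

# one pos_weight entry per class; 3 means each positive (target == 1) loss counts 3 times
pos_w = torch.tensor([3, 3], dtype=torch.float)

loss_f_pos = nn.BCEWithLogitsLoss(reduction='none', pos_weight=pos_w)
print(loss_f_pos(inputs, target))   # entries with target == 1 are 3x the values above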

That concludes this summary of loss functions.
