Learning PyTorch: Logistic Regression

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
# Set the random seed
torch.manual_seed(2018)
# Read the points from data.txt
# data.txt has 100 rows and 3 columns, e.g.: 35.84740876993872,72.90219802708364,0
#                                            60.18259938620976,86.30855209546826,1
# The first column is x, the second is y, and the third is the label (0 or 1)
with open('./data.txt', 'r') as f:
    data_list = [line.strip().split(',') for line in f.readlines()]
    data = [(float(i[0]), float(i[1]), float(i[2])) for i in data_list]
# Normalize each feature by its maximum value
x0_max = max([i[0] for i in data])
x1_max = max([i[1] for i in data])
data = [(i[0]/x0_max, i[1]/x1_max, i[2]) for i in data]
x0 = list(filter(lambda x: x[-1] == 0.0, data)) # points of the first class
x1 = list(filter(lambda x: x[-1] == 1.0, data)) # points of the second class
plot_x0 = [i[0] for i in x0]
plot_y0 = [i[1] for i in x0]
plot_x1 = [i[0] for i in x1]
plot_y1 = [i[1] for i in x1]
# Visualize the two classes of points
plt.plot(plot_x0, plot_y0, 'ro', label='x_0')
plt.plot(plot_x1, plot_y1, 'bo', label='x_1')
plt.legend(loc='best')
plt.show()
plt.close()

# Plot the decision boundary before any parameter updates
w = Variable(torch.randn(2, 1), requires_grad=True)
b = Variable(torch.zeros(1), requires_grad=True)
w0 = w[0].item()
w1 = w[1].item()
b0 = b.item()
plot_x = np.arange(0.2, 1, 0.01)
plot_y = (-w0 * plot_x - b0) / w1 # boundary: w0*x + w1*y + b = 0  =>  y = (-w0*x - b) / w1
plt.plot(plot_x, plot_y, 'g', label='cutting line')
plt.plot(plot_x0, plot_y0, 'ro', label='x_0')
plt.plot(plot_x1, plot_y1, 'bo', label='x_1')
plt.legend(loc='best')
plt.show()
plt.close()

# A sigmoid in plain numpy, kept for reference (the model below uses torch.sigmoid)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# Define the logistic regression model
def logistic_regression(x):
    return torch.sigmoid(torch.mm(x, w) + b)
# Binary cross-entropy loss, clamped to avoid taking log(0)
def binary_loss(y_pred, y):
    log_likelihood = (y * y_pred.clamp(min=1e-12).log() + (1 - y) * (1 - y_pred).clamp(min=1e-12).log()).mean()
    return -log_likelihood
np_data = np.array(data, dtype='float32') # convert to a numpy array
x_data = torch.from_numpy(np_data[:, 0:2]) # convert to a Tensor of size [100, 2]
y_data = torch.from_numpy(np_data[:, -1]).unsqueeze(1) # convert to a Tensor of size [100, 1]
x_data = Variable(x_data)
y_data = Variable(y_data)

y_pred = logistic_regression(x_data)
loss = binary_loss(y_pred, y_data)
print(loss)
# Backpropagate and update the parameters by hand (one gradient-descent step, lr = 0.1)
loss.backward()
w.data = w.data - 0.1 * w.grad.data
b.data = b.data - 0.1 * b.grad.data
# Compute the loss after one update
y_pred = logistic_regression(x_data)
loss = binary_loss(y_pred, y_data)
print(loss)
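One caveat worth noting: .backward() accumulates gradients, so if the manual update above is repeated in a loop, the gradients must be zeroed before each backward pass. A minimal sketch of such a loop, assuming w, b, x_data and y_data from above (so w.grad and b.grad already exist):

for e in range(100):
    y_pred = logistic_regression(x_data)
    loss = binary_loss(y_pred, y_data)
    w.grad.data.zero_() # clear the gradients left over from the previous backward()
    b.grad.data.zero_()
    loss.backward()
    w.data = w.data - 0.1 * w.grad.data # plain gradient descent, lr = 0.1
    b.data = b.data - 0.1 * b.grad.data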



# Update the parameters with torch.optim
w = nn.Parameter(torch.randn(2, 1)) # Parameter is a subclass of Variable: it requires gradients by default and is registered automatically inside a model
b = nn.Parameter(torch.zeros(1))
def logistic_regression(x):
    return torch.sigmoid(torch.mm(x, w) + b)
optimizer = torch.optim.SGD([w, b], lr=1.)
# Run 1000 updates
import time
start = time.time()
for e in range(1000):
    # Forward pass
    y_pred = logistic_regression(x_data)
    loss = binary_loss(y_pred, y_data) # compute the loss
    # Backward pass
    optimizer.zero_grad() # zero the gradients via the optimizer
    loss.backward()
    optimizer.step() # update the parameters via the optimizer
    # Compute the accuracy
    mask = y_pred.ge(0.5).float()
    acc = (mask == y_data).sum().item() / y_data.shape[0]
    if (e + 1) % 200 == 0:
        print('epoch: {}, Loss: {:.5f}, Acc: {:.5f}'.format(e+1, loss.item(), acc))
during = time.time() - start
print('During Time: {:.3f} s'.format(during))
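The same model can also be written with nn.Linear, which bundles the weight and bias into one module. This sketch is an alternative formulation, not part of the original tutorial:

model = nn.Linear(2, 1) # holds a [1, 2] weight and a [1] bias internally
optimizer = torch.optim.SGD(model.parameters(), lr=1.)
for e in range(1000):
    y_pred = torch.sigmoid(model(x_data))
    loss = binary_loss(y_pred, y_data)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()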



# Use the built-in loss
criterion = nn.BCEWithLogitsLoss() # fuses the sigmoid and the loss into one op: faster and numerically more stable
w = nn.Parameter(torch.randn(2, 1))
b = nn.Parameter(torch.zeros(1))
def logistic_reg(x):
    return torch.mm(x, w) + b # note: returns raw logits, the sigmoid lives inside the loss
optimizer = torch.optim.SGD([w, b], lr=1.)
y_pred = logistic_reg(x_data)
loss = criterion(y_pred, y_data)
print(loss.item()) # initial loss before training
# Run 1000 updates again
start = time.time()
for e in range(1000):
    # Forward pass
    y_pred = logistic_reg(x_data)
    loss = criterion(y_pred, y_data)
    # Backward pass
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Compute the accuracy: y_pred is now a logit, so threshold at 0 (sigmoid(0) = 0.5)
    mask = y_pred.ge(0.).float()
    acc = (mask == y_data).sum().item() / y_data.shape[0]
    if (e + 1) % 200 == 0:
        print('epoch: {}, Loss: {:.5f}, Acc: {:.5f}'.format(e+1, loss.item(), acc))
during = time.time() - start
print('During Time: {:.3f} s'.format(during))
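To convince yourself that BCEWithLogitsLoss really is sigmoid followed by binary cross-entropy, the two can be compared numerically; a quick illustrative check using the definitions above:

z = logistic_reg(x_data) # raw logits
loss_fused = nn.BCEWithLogitsLoss()(z, y_data)
loss_twostep = nn.BCELoss()(torch.sigmoid(z), y_data)
print(loss_fused.item(), loss_twostep.item()) # should agree up to floating-point precision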


# Plot the decision boundary after training
w0 = w[0].item()
w1 = w[1].item()
b0 = b.item()
plot_x = np.arange(0.2, 1, 0.01)
plot_y = (-w0 * plot_x - b0) / w1
plt.plot(plot_x, plot_y, 'g', label='cutting line')
plt.plot(plot_x0, plot_y0, 'ro', label='x_0')
plt.plot(plot_x1, plot_y1, 'bo', label='x_1')
plt.legend(loc='best')
plt.show()
plt.close()
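Finally, the trained parameters can classify a new point. A minimal sketch; the raw coordinates below are made up for illustration, and must be scaled by the same x0_max and x1_max used during training:

new_point = torch.tensor([[50.0 / x0_max, 80.0 / x1_max]], dtype=torch.float32)
prob = torch.sigmoid(logistic_reg(new_point)) # logistic_reg outputs a logit
label = int(prob.item() >= 0.5)
print('probability: {:.3f}, predicted label: {}'.format(prob.item(), label))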
