Linear regression with NumPy; linear regression and neural networks with PyTorch

Linear regression with gradient descent in NumPy

import numpy as np
import matplotlib.pyplot as plt
def get_fake_data(batch_size=8):
    ''' Generate random data: y = 2x + 3, plus some uniform noise '''
    x = np.random.rand(batch_size, 1) * 5
    y = x * 2 + 3 + np.random.rand(batch_size, 1) * 2
    return x, y
 
def get_gradient(theta, x, y):
    m = x.shape[0]
    Y_estimate = np.dot(x, theta)
    assert (Y_estimate.shape == (m,))
    error = Y_estimate - y
    assert (error.shape == (m,))
    cost = 1.0 / (2 * m) * np.sum(error ** 2)
    # grad = (1.0 / m) * np.dot(x.T, error).reshape(-1)  # equivalent, shape (2,)
    grad = (1.0 / m) * np.dot(error, x)  # (2,)
    return grad, cost
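# (Added note) In vector form, with X the (m, 2) design matrix whose second column is all ones,
# the cost and gradient computed above are
#     J(theta)  = 1 / (2m) * || X @ theta - y ||^2
#     dJ/dtheta = 1 / m * X.T @ (X @ theta - y)
# np.dot(error, x) and np.dot(x.T, error) give the same (2,) vector here.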
def gradient_descent(x, y, iterations, alpha):
    theta = np.random.randn(2)
    costs = []
    for i in range(iterations):
        grad, cost = get_gradient(theta, x, y)
        new_theta = theta - alpha * grad
        if i % 100 == 0:
            print('{} iterations cost={}'.format(i, cost))
            costs.append(cost)
        theta = new_theta
    return costs, theta
 
def vis_data():
    # take a look at the generated x-y distribution
    x, y = get_fake_data(batch_size=16)
    print(x.shape)
    print(y.shape)
    plt.scatter(np.squeeze(x), np.squeeze(y))
    plt.show()
if __name__ == '__main__':
    batch_size = 32
    data_x, data_y = get_fake_data(batch_size=batch_size)
    # append a column of ones; the theta entry it multiplies acts as the bias b
    data_x = np.hstack((data_x, np.ones_like(data_x)))  # (m, 2)
    print(data_x)
    print(data_x.shape)

    costs, theta = gradient_descent(data_x, np.squeeze(data_y), iterations=50000, alpha=0.002)
    print(data_y.shape)
 
    # print(theta)
    y_predict = np.dot(data_x, theta)  # equivalent to theta[0] * data_x[:, 0] + theta[1]
    print(y_predict.shape)
    plt.figure()
    # scatter plot of the samples
    print(data_x[:2])
    plt.scatter(data_x[:, 0], np.squeeze(data_y), c='red')
    plt.plot(data_x[:, 0], y_predict)
    plt.show()
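    # (Added sanity check, not in the original) compare the gradient-descent result with the
    # closed-form least-squares solution; the two should agree closely on the same data.
    theta_ls, *_ = np.linalg.lstsq(data_x, np.squeeze(data_y), rcond=None)
    print('least-squares theta:', theta_ls)
    print('gradient-descent theta:', theta)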

Linear regression with gradient descent in PyTorch

%matplotlib inline
import torch
from IPython import display
import numpy as np
from matplotlib import pyplot as plt
import random
torch.set_default_tensor_type('torch.FloatTensor')
#1 Generate the dataset
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.randn(num_examples, num_inputs)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)

print(features[0], labels[0])
def use_svg_display():
    # display plots as vector graphics (SVG)
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    # set the figure size
    plt.rcParams['figure.figsize'] = figsize

# # After adding the two functions above to ../d2lzh_pytorch, they can be imported like this:
# import sys
# sys.path.append("..")
# from d2lzh_pytorch import * 

set_figsize()
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1);

#2 Read the data
# This function is also saved in the d2lzh package for later use
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # samples are read in random order
    for i in range(0, num_examples, batch_size):
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])  # the last batch may contain fewer than batch_size samples
        yield features.index_select(0, j), labels.index_select(0, j)
        
batch_size = 10

for X, y in data_iter(batch_size, features, labels):
    print(X, y)
    break
    
#3 Initialize model parameters
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float)
b = torch.zeros(1)

w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True) 

#4 Define the model
def linreg(X, w, b):  # this function is also saved in the d2lzh_pytorch package for later use
    return torch.mm(X, w) + b
    
#5 Define the loss function
def squared_loss(y_hat, y):  # this function is also saved in the d2lzh_pytorch package for later use
    # note: this returns a vector; also, PyTorch's MSELoss does not divide by 2
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
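
# (Added check, not in the original) squared_loss summed over a batch matches PyTorch's
# built-in MSELoss with reduction='sum' divided by 2; the demo names below are illustrative.
y_hat_demo = torch.tensor([[1.0], [2.0]])
y_demo = torch.tensor([0.5, 2.5])
print(squared_loss(y_hat_demo, y_demo).sum().item())  # 0.25
print((torch.nn.MSELoss(reduction='sum')(y_hat_demo, y_demo.view(y_hat_demo.size())) / 2).item())  # 0.25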

#6 Define the optimization algorithm
def sgd(params, lr, batch_size):  # this function is also saved in the d2lzh_pytorch package for later use
    for param in params:
        param.data -= lr * param.grad / batch_size  # note: update via param.data so autograd does not track the change
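
# (Added note) Since the training loop below calls loss(...).sum() before backward(), param.grad
# holds the sum of per-example gradients; dividing by batch_size makes the step the usual
# mini-batch average: param <- param - lr * (1/|B|) * sum_i grad_i.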
        
        
#7 Train the model
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss

for epoch in range(num_epochs):  # training runs for num_epochs epochs in total
    # In each epoch, every sample in the training set is used once (assuming the number of
    # samples is divisible by the batch size). X and y are the features and labels of a mini-batch.
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # l is the loss on the mini-batch X, y
        l.backward()  # compute gradients of the mini-batch loss w.r.t. the model parameters
        sgd([w, b], lr, batch_size)  # update the parameters with mini-batch stochastic gradient descent
        
        # don't forget to zero the gradients
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
    
# Print the results
print(true_w, '\n', w)
print(true_b, '\n', b)

Implementing a simple neural network in PyTorch

import torch
from torch import nn
import numpy as np
torch.manual_seed(1)

print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')

#1 Generate the dataset
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)

#2 Read the data
import torch.utils.data as Data

batch_size = 10

# combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)

# put the dataset into a DataLoader
data_iter = Data.DataLoader(
    dataset=dataset,      # torch TensorDataset format
    batch_size=batch_size,      # mini batch size
    shuffle=True,               # whether to shuffle the data (shuffling is usually better)
    num_workers=2,              # read the data with multiple worker subprocesses
)
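
# (Added note) In some environments (e.g. Jupyter on Windows), num_workers > 0 can make the
# DataLoader hang; if that happens, set num_workers=0 to load data in the main process.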

for X, y in data_iter:
    print(X, '\n', y)
    break
    
#3 Define the model
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)

    def forward(self, x):
        y = self.linear(x)
        return y
    
net = LinearNet(num_inputs)
print(net)  # print shows the structure of the network

# Method 2: define the model with nn.Sequential
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))

for param in net.parameters():
    print(param)
    
#4 Initialize model parameters
from torch.nn import init

init.normal_(net[0].weight, mean=0.0, std=0.01)
init.constant_(net[0].bias, val=0.0)  # the bias data can also be modified directly: net[0].bias.data.fill_(0)
for param in net.parameters():
    print(param)
#5 Define the loss function
loss = nn.MSELoss()
#6 Define the optimization algorithm
import torch.optim as optim

optimizer = optim.SGD(net.parameters(), lr=0.03)
print(optimizer)
#7 Train the model
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # zero the gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
    
dense = net[0]
print(true_w, dense.weight.data)
print(true_b, dense.bias.data)
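
As a final illustration (added here, not part of the original post), the trained net can be used to predict on new inputs and compared against the true generating function:

with torch.no_grad():
    # evaluate the trained model on a couple of fresh points (illustrative values)
    new_X = torch.tensor([[1.0, 1.0], [2.0, -1.0]])
    print(net(new_X).view(-1))                                          # model predictions
    print(true_w[0] * new_X[:, 0] + true_w[1] * new_X[:, 1] + true_b)   # true values: [2.8, 11.6]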