import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
# Define the model
class LinearModel(nn.Module):
    """A minimal linear regression model: y = x @ W + b."""

    def __init__(self, ndim):
        """Create randomly-initialized weight (ndim, 1) and bias (1,) parameters."""
        super(LinearModel, self).__init__()
        self.ndim = ndim
        self.weight = nn.Parameter(torch.randn(ndim, 1))  # weight matrix
        self.bias = nn.Parameter(torch.randn(1))          # bias term

    def forward(self, x):
        """Apply the affine map y = Wx + b to a (batch, ndim) input."""
        projected = x.mm(self.weight)
        return projected + self.bias
# Generate synthetic data: y = true_w . x + true_b + small gaussian noise.
num_inputs = 2
num_examples = 1600
true_w = [2, -3.1]
true_b = 4.2
# FIX: input data must NOT require gradients -- only the model parameters are
# optimized.  The original requires_grad=True built an unnecessary autograd
# graph, made the in-place noise addition on the derived `labels` fragile,
# and leaked grad tracking into the loss target.
features = torch.randn(num_examples, num_inputs)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
# Add N(0, 0.01) observation noise so the regression problem is realistic.
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()),
                       dtype=torch.float32)
print("labels")
print(labels)
print(true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b)
# Model selection: use the built-in linear layer (equivalent to the
# hand-written LinearModel above).
#model = LinearModel(num_inputs)
model = nn.Linear(2, 1, bias=True)

# Loss function and optimizer.
from torch import optim

# FIX: the original comment mislabeled this as cross-entropy; nn.MSELoss is
# mean-squared-error, the appropriate loss for regression.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# Wrap the tensors in a Dataset and iterate one example at a time,
# reshuffled every epoch.
dataset = torch.utils.data.TensorDataset(features, labels)
trainloader = torch.utils.data.DataLoader(dataset,
                                          batch_size=1,
                                          shuffle=True,
                                          num_workers=0)
# Train: plain SGD loop over the DataLoader for a few epochs.
# FIX: removed the deprecated torch.autograd.Variable wrappers -- since
# PyTorch 0.4 they are no-ops and tensors are used directly.
for epoch in range(3):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # Renamed the batch target to `targets` so it no longer shadows the
        # module-level `labels` tensor.
        inputs, targets = data
        # Match the target shape to the model output (batch, 1) so MSELoss
        # does not broadcast (batch,) against (batch, 1).
        targets = targets.view(-1, 1)
        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        # Backpropagate and update the parameters.
        loss.backward()
        optimizer.step()
        # Accumulate and periodically report the running loss.
        running_loss += loss.item()
        if i % 100 == 99:  # report every 100 mini-batches
            print('{%d, %5d} loss: %.3f' % (epoch+1, i+1 ,running_loss / 100))
            running_loss = 0.0
print('Finished Training')
print(model.named_parameters()) # generator over (name, parameter) pairs
print(list(model.named_parameters()))# materialize the generator into a list
print(model.parameters())# generator over parameters only (no names)