These notes follow Li Mu's "Dive into Deep Learning" (《动手学深度学习》) series; the code is implemented with MXNet.
Code warm-up
A brief summary of each API used below; see the official API docs for details.
gluon.nn.Sequential()
: stacks network layers sequentially
gluon.loss.L2Loss()
: computes the squared (mean squared error) loss
nd.random.normal()
: draws random samples from a normal distribution
gluon.nn.Dense()
: a fully connected layer
Block.initialize()
: initializes the parameters
gluon.data.ArrayDataset()
: builds a dataset from arrays, lists, and the like
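Before the main example, here is a minimal warm-up that exercises these APIs together (the shapes and values are arbitrary, purely for illustration):

from mxnet import gluon, ndarray as nd

# A single fully connected layer, stacked in a Sequential container
net = gluon.nn.Sequential()
net.add(gluon.nn.Dense(1))
net.initialize()  # initialize the layer's parameters

# Toy data: 4 samples with 2 features each, plus random labels
data = nd.random.normal(shape=(4, 2))
label = nd.random.normal(shape=(4,))
dataset = gluon.data.ArrayDataset(data, label)
print(dataset[0])  # (first sample, first label)

# Squared loss between the network's predictions and the labels
loss = gluon.loss.L2Loss()
print(loss(net(data), label))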
The problem
Use a neural network to fit a third-order polynomial,
and use simulated data along the way to observe underfitting and overfitting.
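Concretely, the simulated data below is drawn from the cubic

y = 1.2x - 3.4x^2 + 5.6x^3 + 5.0 + ε,  with ε ~ N(0, 0.1²)

and the model only ever sees noisy samples from it.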
Simulated data
from mxnet import ndarray as nd

num_train = 100
num_test = 100
true_w = [1.2, -3.4, 5.6]
true_b = 5.0
# Generate noisy data from the true parameters; the feature matrix X
# holds [x, x^2, x^3], so a single linear layer over X is a cubic in x
x = nd.random.normal(shape=[num_train + num_test, 1])
X = nd.concat(x, nd.power(x, 2), nd.power(x, 3))
y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_w[2] * X[:, 2] + true_b
y += 0.1 * nd.random.normal(shape=y.shape)  # additive Gaussian noise, std 0.1
y_train, y_test = y[:num_train], y[num_train:]
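It is worth eyeballing the generated data before fitting; a quick scatter plot (this assumes matplotlib, which the training code below uses anyway):

import matplotlib.pyplot as plt

plt.scatter(x.asnumpy().ravel(), y.asnumpy(), s=5)
plt.xlabel("x")
plt.ylabel("y")
plt.show()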
Define the training and testing steps
- Testing
from mxnet import gluon

# The squared loss is shared by both testing and training below
square_loss = gluon.loss.L2Loss()

def test(net, x, y):
    # Average squared loss of the model on the given data
    return square_loss(net(x), y).mean().asscalar()
- Training
from mxnet import autograd
import matplotlib.pyplot as plt

def train(x_train, x_test, y_train, y_test):
    # Model: one fully connected layer over the (polynomial) features
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(1))
    net.initialize()
    # Hyperparameters; cap the batch size at the dataset size so that
    # small-sample (overfitting) runs still step with the right scale
    learning_rate = 0.01
    epochs = 100
    batch_size = min(100, y_train.shape[0])
    dataset_train = gluon.data.ArrayDataset(x_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)
    # Optimizer: plain SGD (square_loss is defined above)
    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": learning_rate})
    # Training loop
    train_loss = []
    test_loss = []
    for e in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)  # step() divides the gradient by batch_size
        # Record the average loss on the full train and test sets each epoch
        train_loss.append(test(net, x_train, y_train))
        test_loss.append(test(net, x_test, y_test))
    plt.plot(train_loss)
    plt.plot(test_loss)
    plt.legend(["train", "test"])
    plt.show()
    print("learned w", net[0].weight.data(), "learned b", net[0].bias.data())
# Fit with all three polynomial features: the model matches the true function
train(X[:num_train], X[num_train:], y_train, y_test)
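With train in place, the promised underfitting and overfitting runs just change what the model sees, following the book's experiments (a sketch; the exact sample counts are illustrative):

# Underfitting: only the linear feature x, so the cubic target
# can at best be approximated by a straight line
train(x[:num_train], x[num_train:], y_train, y_test)

# Overfitting: all three features, but only two training samples
train(X[:2], X[num_train:], y[:2], y_test)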