Image Classification with an RNN
Processing images with an RNN.
How can processing an image be understood as a time series?
The time order can be taken as running from top to bottom.
For MNIST, each image is 28*28 pixels,
so the time order is top to bottom: from row 1 to row 28.
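As a quick sketch of that reading order (using a random tensor in place of a real MNIST image; the names here are only for illustration):

import torch

img = torch.rand(28, 28)     # one 28*28 grayscale image (dummy data)
# read row by row: 28 time steps, each step is one row of 28 pixels
seq = img.view(1, 28, 28)    # (batch=1, time_step=28, input_size=28)
print(seq.shape)             # torch.Size([1, 28, 28])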
# Hyper Parameters
EPOCH = 1
BATCH_SIZE = 64
TIME_STEP = 28          # rnn time step / image height; the input is fed time_step times (sequence length, seq_len)
INPUT_SIZE = 28         # rnn input size / image width; how much is fed per step (input dimension)
LR = 0.01               # learning rate
DOWNLOAD_MNIST = True   # set to True if you haven't downloaded the data yet

self.rnn = nn.LSTM(     # if nn.RNN() is used instead, it hardly learns
    input_size=INPUT_SIZE,
    hidden_size=64,     # rnn hidden unit: number of neurons in the hidden layer
    num_layers=1,       # number of rnn layers
    batch_first=True,
)
https://www.jianshu.com/p/41c15d301542
Why is the RNN input not batch_first=True by default? The default (time_step, batch, input_size) layout makes parallel computation more convenient.
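A small sketch of the two layout conventions (shapes only, random data; not part of the original code):

import torch
from torch import nn

x = torch.rand(64, 28, 28)                               # (batch, time_step, input_size)
lstm_bf = nn.LSTM(input_size=28, hidden_size=64, batch_first=True)
out_bf, _ = lstm_bf(x)
print(out_bf.shape)                                      # (64, 28, 64) -> (batch, time_step, hidden_size)

lstm_default = nn.LSTM(input_size=28, hidden_size=64)    # default layout: time_step first
out_d, _ = lstm_default(x.transpose(0, 1))               # input must be (time_step, batch, input_size)
print(out_d.shape)                                       # (28, 64, 64) -> (time_step, batch, hidden_size)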
r_out, (h_n, h_c) = self.rnn(x, None)
# x shape: (batch, time_step, input_size)
# r_out shape: (batch, time_step, output_size)
# h_n shape: (n_layers, batch, hidden_size) -- the hidden state at t = time_step (the "side plot"); hidden_state = (h_n, h_c)
# h_c shape: (n_layers, batch, hidden_size) -- the cell state at t = time_step (the "main plot")
# After each step the LSTM emits a hidden_state and produces an output; that hidden_state is combined with the next row of the image to produce the next output, and so on in a loop.
# In other words, the computation at each time step also incorporates the state left by the previous time step.
# The hidden_state splits into h_n and h_c, i.e. h_state and c_state.
# Relation between h_n and the output: the output contains the hidden state at every one of the time_step steps,
# while h_n is the hidden state at the last time step, so the last element of the output equals h_n, i.e. output[-1] == h_n.
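That relationship can be checked directly for a single-layer, unidirectional LSTM (a quick sketch with random data):

import torch
from torch import nn

lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)
x = torch.rand(4, 28, 28)                        # (batch, time_step, input_size)
r_out, (h_n, h_c) = lstm(x, None)
# the last time step of r_out is exactly h_n (layer 0) for a one-layer LSTM
print(torch.allclose(r_out[:, -1, :], h_n[0]))   # True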
b_x = b_x.view(-1, 28, 28)    # change the shape; in PyTorch, .view() plays the role of reshape
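For reference, the batch coming out of the DataLoader also has a channel dimension, which .view() folds away (sketch with a dummy tensor):

import torch

b_x = torch.rand(64, 1, 28, 28)    # (batch, channel, height, width), as produced by ToTensor + DataLoader
b_x = b_x.view(-1, 28, 28)         # drop the channel dimension -> (batch, time_step, input_size)
print(b_x.shape)                   # torch.Size([64, 28, 28])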
Complete code:
LSTM for handwritten digit recognition
import torch
from torch import nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 1 # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 64
TIME_STEP = 28          # rnn time step / image height; the input is fed time_step times (sequence length, seq_len)
INPUT_SIZE = 28         # rnn input size / image width; how much is fed per step (input dimension)
LR = 0.01               # learning rate
DOWNLOAD_MNIST = True   # set to True if you haven't downloaded the data yet
# Mnist digital dataset
train_data = dsets.MNIST(
    root='./mnist/',
    train=True,                           # this is training data
    transform=transforms.ToTensor(),      # converts a PIL.Image or numpy.ndarray to a
                                          # torch.FloatTensor of shape (C x H x W) and normalizes it to the range [0.0, 1.0]
    download=DOWNLOAD_MNIST,              # download it if you don't have it
)
# plot one example
print(train_data.train_data.size()) # (60000, 28, 28)
print(train_data.train_labels.size()) # (60000)
plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
plt.title('%i' % train_data.train_labels[0])
plt.show()
# Data Loader for easy mini-batch return in training
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
# convert test data into Variable, pick 2000 samples to speed up testing
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
test_x = test_data.test_data.type(torch.FloatTensor)[:2000]/255. # shape (2000, 28, 28) value in range(0,1)
test_y = test_data.test_labels.numpy()[:2000]    # convert to numpy array
class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        # nn.LSTM takes the same arguments as nn.RNN; the difference is in the outputs:
        # the LSTM additionally carries a cell state, so every recurrent layer also outputs a cell state h_c.
        self.rnn = nn.LSTM(         # if nn.RNN() is used instead, it hardly learns
            input_size=INPUT_SIZE,
            hidden_size=64,         # rnn hidden unit: number of neurons in the hidden layer
            num_layers=1,           # number of rnn layers
            batch_first=True,
            # https://www.jianshu.com/p/41c15d301542 -- why the RNN input is not batch_first=True by default: the default layout makes parallel computation more convenient
            # input & output will have batch size as the 1st dimension, e.g. (batch, time_step, input_size)
            # the default is (time_step, batch, input_size); with batch_first=True it becomes (batch, time_step, input_size)
        )

        self.out = nn.Linear(64, 10)

    def forward(self, x):
        # x shape: (batch, time_step, input_size)
        # r_out shape: (batch, time_step, output_size)
        # h_n shape: (n_layers, batch, hidden_size) -- hidden state at t = time_step (the "side plot"); hidden_state = (h_n, h_c)
        # h_c shape: (n_layers, batch, hidden_size) -- cell state at t = time_step (the "main plot")
        # After each step the LSTM emits a hidden_state and produces an output; that hidden_state feeds into the next step together with the next row, producing the next output, and so on.
        # In other words, the computation at each time step also incorporates the state from the previous time step.
        # The hidden_state splits into h_n and h_c, i.e. h_state and c_state.
        r_out, (h_n, h_c) = self.rnn(x, None)   # None represents a zero initial hidden state
        # Relation between h_n and the output: the output contains the hidden state at every time step,
        # while h_n is the hidden state at the last time step, so the last element of the output equals h_n, i.e. output[-1] == h_n.

        # choose r_out at the last time step
        out = self.out(r_out[:, -1, :])   # r_out shape (batch, time_step, output_size); -1 in the time_step position selects the last time step
        return out
rnn = RNN()
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
loss_func = nn.CrossEntropyLoss() # the target label is not one-hotted
# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):   # gives batch data
        b_x = b_x.view(-1, 28, 28)     # reshape x to (batch, time_step, input_size); in PyTorch, .view() plays the role of reshape

        output = rnn(b_x)               # rnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

        if step % 50 == 0:
            test_output = rnn(test_x)   # (samples, time_step, input_size)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)

# print 10 predictions from test data
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
Run results:
-------------------------------------------------RNN regressor----------------------------------------------------------------------
Goal
Use a sin curve to generate a cos curve.
The forward pass is:
def forward(self, x, h_state):
    # x (batch, time_step, input_size)
    # h_state (n_layers, batch, hidden_size)
    # r_out (batch, time_step, hidden_size)
    # r_out keeps the hidden state of every time_step
    r_out, h_state = self.rnn(x, h_state)

    outs = []    # save all predictions
    for time_step in range(r_out.size(1)):    # calculate output for each time step
        outs.append(self.out(r_out[:, time_step, :]))
    return torch.stack(outs, dim=1), h_state
There is a recursion-like idea here:
h_state is produced over and over and then fed back in as input,
so an initial h_state has to be passed in on the first call later on.
Also note that self.rnn() returns r_out, h_state,
unlike self.lstm(), which would return r_out, (h_n, h_c).
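A side-by-side sketch of the two return signatures (random data, shapes only; not part of the original script):

import torch
from torch import nn

x = torch.rand(1, 10, 1)                          # (batch, time_step, input_size)

rnn = nn.RNN(input_size=1, hidden_size=32, batch_first=True)
r_out, h_state = rnn(x, None)                     # plain RNN: output + one hidden state
print(r_out.shape, h_state.shape)                 # (1, 10, 32) (1, 1, 32)

lstm = nn.LSTM(input_size=1, hidden_size=32, batch_first=True)
r_out, (h_n, h_c) = lstm(x, None)                 # LSTM: output + (hidden state, cell state)
print(r_out.shape, h_n.shape, h_c.shape)          # (1, 10, 32) (1, 1, 32) (1, 1, 32)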
The r_out from every time_step is fed into self.out,
and the results are collected in the list outs[].
Because r_out has shape (batch, time_step, hidden_size),
this is done with:
outs = []    # save all predictions
for time_step in range(r_out.size(1)):    # calculate output for each time step
    outs.append(self.out(r_out[:, time_step, :]))
Finally, outs[] is a Python list; torch.stack packs its elements together into a single Tensor before returning:
return torch.stack(outs, dim=1), h_state
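A minimal sketch of what torch.stack(outs, dim=1) does to that list (dummy tensors):

import torch

outs = [torch.rand(1, 1) for _ in range(10)]   # ten per-time-step outputs, each (batch=1, output_size=1)
stacked = torch.stack(outs, dim=1)             # list of (batch, 1) -> tensor (batch, time_step, 1)
print(stacked.shape)                           # torch.Size([1, 10, 1])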
In the training stage:
step is the index of the training step.
start, end = step * np.pi, (step + 1) * np.pi
takes a short segment of the time axis, then
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)   # float32 for converting to a torch FloatTensor
x_np = np.sin(steps)
y_np = np.cos(steps)
sample points on that segment, generating the training sin curve x_np and the target cos curve y_np.
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])   # shape (batch, time_step, input_size); two extra size-1 dimensions are added, batch and input_size
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
The two added dimensions turn the 1-D signal into the shape PyTorch expects.
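A sketch of the shape change produced by the two np.newaxis insertions:

import numpy as np
import torch

x_np = np.sin(np.linspace(0, np.pi, 10, dtype=np.float32))   # shape (10,)
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])        # (1, 10, 1) = (batch, time_step, input_size)
print(x.shape)                                               # torch.Size([1, 10, 1])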
# !! next step is important !!
h_state = h_state.data # repack the hidden state, break the connection from last iteration
This step is essential:
the h_state produced in each iteration is re-wrapped as h_state.data and assigned back to h_state, which detaches it from the previous iteration's computation graph.
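The reason it matters is that h_state still carries the autograd graph from the previous iteration; re-wrapping it (via .data here, or equivalently .detach()) truncates backpropagation at the iteration boundary. A minimal sketch of the same idea, not the original script:

import torch
from torch import nn

rnn = nn.RNN(input_size=1, hidden_size=32, batch_first=True)
h_state = None
for step in range(3):
    x = torch.rand(1, 10, 1)
    r_out, h_state = rnn(x, h_state)
    h_state = h_state.detach()    # same intent as h_state.data: cut the graph from the last iteration
    loss = r_out.sum()
    loss.backward()               # without the detach, this backward would reach into an already-freed graph and fail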
The complete code follows:
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
# torch.manual_seed(1) # reproducible
# Hyper Parameters
TIME_STEP = 10 # rnn time step
INPUT_SIZE = 1 # rnn input size
LR = 0.02 # learning rate
# show data
steps = np.linspace(0, np.pi*2, 100, dtype=np.float32) # float32 for converting torch FloatTensor
x_np = np.sin(steps)
y_np = np.cos(steps)
plt.plot(steps, y_np, 'r-', label='target (cos)')
plt.plot(steps, x_np, 'b-', label='input (sin)')
plt.legend(loc='best')
plt.show()
class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,     # rnn hidden unit
            num_layers=1,       # number of rnn layers
            batch_first=True,   # input & output will have batch size as the 1st dimension, e.g. (batch, time_step, input_size)
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # x (batch, time_step, input_size)
        # h_state (n_layers, batch, hidden_size)
        # r_out (batch, time_step, hidden_size)
        r_out, h_state = self.rnn(x, h_state)

        outs = []    # save all predictions
        for time_step in range(r_out.size(1)):    # calculate output for each time step
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state

        # instead, for simplicity, you can replace the loop above with:
        # r_out = r_out.view(-1, 32)
        # outs = self.out(r_out)
        # outs = outs.view(-1, TIME_STEP, 1)
        # return outs, h_state

        # or even simpler, since nn.Linear can accept inputs of any dimension
        # and returns outputs with the same dimensions except for the last:
        # outs = self.out(r_out)
        # return outs
rnn = RNN()
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
loss_func = nn.MSELoss()
h_state = None # for initial hidden state
plt.figure(1, figsize=(12, 5))
plt.ion() # continuously plot
for step in range(100):
    start, end = step * np.pi, (step + 1) * np.pi   # time range: take a short segment of the time axis
    # use sin to predict cos
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)   # float32 for converting to a torch FloatTensor
    x_np = np.sin(steps)
    y_np = np.cos(steps)

    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])   # shape (batch, time_step, input_size); two extra size-1 dimensions are added, batch and input_size
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)   # rnn output
    # !! the next step is important !!
    h_state = h_state.data   # repack the hidden state, break the connection from the last iteration

    loss = loss_func(prediction, y)   # calculate loss
    optimizer.zero_grad()             # clear gradients for this training step
    loss.backward()                   # backpropagation, compute gradients
    optimizer.step()                  # apply gradients

    # plotting
    plt.plot(steps, y_np.flatten(), 'r-')
    plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
    plt.draw(); plt.pause(0.05)

plt.ioff()
plt.show()
Run results: