Image Classification with an RNN
Using an RNN to process images
How can processing an image be understood as a time series?
The temporal order can be read as running from top to bottom.
An MNIST image is 28*28 pixels,
so the time order runs from the top down, from row 1 to row 28.
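A minimal sketch of this reading (the tensor names here are illustrative, not from the code below): each of the 28 rows becomes the input vector for one time step.

import torch

img = torch.rand(28, 28)   # one MNIST image: 28 rows of 28 pixels
seq = img.view(1, 28, 28)  # (batch=1, time_step=28, input_size=28)
row_0 = seq[:, 0, :]       # what the RNN reads at the first time step: the top row
print(row_0.shape)         # torch.Size([1, 28])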
# Hyper Parameters
EPOCH = 1
BATCH_SIZE = 64
TIME_STEP = 28 # rnn time step / image height; the image is fed in over TIME_STEP steps (sequence length, seq_len)
INPUT_SIZE = 28 # rnn input size / image width; how much is fed in per step (input dimensionality)
LR = 0.01 # learning rate
DOWNLOAD_MNIST = True # set to True if you haven't downloaded the data yet
self.rnn = nn.LSTM( # if use nn.RNN(), it hardly learns
input_size=INPUT_SIZE,
    hidden_size=64, # rnn hidden unit: number of neurons in the hidden layer
    num_layers=1, # number of stacked rnn layers
batch_first=True,
)
See https://www.jianshu.com/p/41c15d301542 on why the RNN input is not batch_first=True by default: the time-major layout makes parallel computation more convenient.
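A small sketch of the two layouts, using the MNIST shapes from this section (the variable names are illustrative); as the linked article argues, the time-major default makes each per-step slice a single contiguous batch:

import torch

x_bt = torch.rand(64, 28, 28)  # batch_first layout: (batch, time_step, input_size)
x_tb = x_bt.permute(1, 0, 2)   # default layout:     (time_step, batch, input_size)
# in the default layout, x_tb[t] is the whole batch at time step t,
# the slice the recurrence consumes in one parallel operation
print(x_tb[0].shape)           # torch.Size([64, 28])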
r_out, (h_n, h_c) = self.rnn(x, None)
# x shape (batch, time_step, input_size)
# r_out shape (batch, time_step, hidden_size)
# h_n has shape (n_layers, batch, hidden_size): the hidden state at t = time_step (the "side plot"); hidden_state = (h_n, h_c)
# h_c has shape (n_layers, batch, hidden_size): the cell state at t = time_step (the "main plot")
# After each slice is processed, the RNN emits a hidden_state and produces an output; the next slice is then read
# together with that hidden_state to produce the next output, and so on in a loop.
# In other words, each step's computation incorporates the hidden state from the previous step.
# The hidden_state splits into h_n, h_c == h_state, c_state.
# Relation between h_n and output: output contains the hidden state at every point in time_step,
# while h_n is the hidden state at the final time step, so the last slice of output equals h_n;
# with batch_first=True, that is r_out[:, -1, :] == h_n[-1].
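This relationship is easy to verify with a throwaway LSTM (a sketch, assuming a single-layer, unidirectional network like the one below):

import torch
from torch import nn

lstm = nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)
x = torch.rand(5, 28, 28)                        # (batch, time_step, input_size)
r_out, (h_n, h_c) = lstm(x, None)
# the last time step of r_out coincides with the (only layer's) h_n
print(torch.allclose(r_out[:, -1, :], h_n[-1]))  # True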
b_x = b_x.view(-1, 28, 28) # reshape; in PyTorch, .view() plays the role of reshape
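The MNIST DataLoader yields batches of shape (batch, 1, 28, 28), so .view(-1, 28, 28) simply folds away the channel dimension; a quick check (with random data standing in for a batch):

import torch

b_x = torch.rand(64, 1, 28, 28)  # a DataLoader batch: (batch, channel, height, width)
b_x = b_x.view(-1, 28, 28)       # drop the channel dim -> (batch, time_step, input_size)
print(b_x.shape)                 # torch.Size([64, 28, 28])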
Full code:
Handwritten digit recognition with an LSTM
import torch
from torch import nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 1 # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 64
TIME_STEP = 28 # rnn time step / image height; the image is fed in over TIME_STEP steps (sequence length, seq_len)
INPUT_SIZE = 28 # rnn input size / image width; how much is fed in per step (input dimensionality)
LR = 0.01 # learning rate
DOWNLOAD_MNIST = True # set to True if you haven't downloaded the data yet
# Mnist digital dataset
train_data = dsets.MNIST(
root='./mnist/',
train=True, # this is training data
    transform=transforms.ToTensor(), # Converts a PIL.Image or numpy.ndarray to a
    # torch.FloatTensor of shape (C x H x W) and normalizes values to the range [0.0, 1.0]
download=DOWNLOAD_MNIST, # download it if you don't have it
)
# plot one example
print(train_data.train_data.size()) # (60000, 28, 28)
print(train_data.train_labels.size()) # (60000)
plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
plt.title('%i' % train_data.train_labels[0])
plt.show()
# Data Loader for easy mini-batch return in training
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
# convert test data into Variable, pick 2000 samples to speed up testing
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
test_x = test_data.test_data.type(torch.FloatTensor)[:2000]/255. # shape (2000, 28, 28) value in range(0,1)
test_y = test_data.test_labels.numpy()[:2000] # convert to numpy array
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
        # nn.LSTM takes the same constructor arguments as nn.RNN; the difference is in the inputs and outputs:
        # an LSTM also carries a cell state, so each recurrent layer additionally outputs the cell state h_c.
self.rnn = nn.LSTM( # if use nn.RNN(), it hardly learns
input_size=INPUT_SIZE,
            hidden_size=64, # rnn hidden unit: number of neurons in the hidden layer
            num_layers=1, # number of stacked rnn layers
batch_first=True,
            # https://www.jianshu.com/p/41c15d301542 explains why the RNN input is not batch_first=True by default: the time-major layout eases parallel computation.
            # with batch_first=True, input & output have batch size as the first dimension, e.g. (batch, time_step, input_size);
            # the default layout is (time_step, batch, input_size).
)
self.out = nn.Linear(64, 10)
def forward(self, x):
        # x shape (batch, time_step, input_size)
        # r_out shape (batch, time_step, hidden_size)
        # h_n has shape (n_layers, batch, hidden_size): the hidden state at t = time_step (the "side plot"); hidden_state = (h_n, h_c)
        # h_c has shape (n_layers, batch, hidden_size): the cell state at t = time_step (the "main plot")
        # After each slice is processed, the RNN emits a hidden_state and produces an output; the next slice is then read
        # together with that hidden_state to produce the next output, and so on in a loop:
        # each step's computation incorporates the hidden state from the previous step.
        # The hidden_state splits into h_n, h_c == h_state, c_state.
        r_out, (h_n, h_c) = self.rnn(x, None) # None represents zero initial hidden state
        # Relation between h_n and output: output contains the hidden state at every point in time_step,
        # while h_n is the hidden state at the final time step, so the last slice of output equals h_n;
        # with batch_first=True, that is r_out[:, -1, :] == h_n[-1].
# choose r_out at the last time step
        out = self.out(r_out[:, -1, :]) # r_out shape (batch, time_step, hidden_size); -1 in the time_step position picks the last time step
return out
rnn = RNN()
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all rnn parameters
loss_func = nn.CrossEntropyLoss() # the target label is not one-hotted
# training and testing
for epoch in range(EPOCH):
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data
        b_x = b_x.view(-1, 28, 28) # reshape x to (batch, time_step, input_size); in PyTorch, .view() plays the role of reshape
output = rnn(b_x) # rnn output
loss = loss_func(output, b_y) # cross entropy loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if step % 50 == 0:
test_output = rnn(test_x) # (samples, time_step, input_size)
pred_y = torch.max(test_output, 1)[1].data.numpy()
accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)
# print 10 predictions from test data
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
Output:
-------------------------------------------------RNN regressor----------------------------------------------------------------------
Goal: take a sin curve and generate the corresponding cos curve from it.
The core is the forward method:
def forward(self, x, h_state):
    # x (batch, time_step, input_size)
    # h_state (n_layers, batch, hidden_size)
    # r_out (batch, time_step, hidden_size)
    # r_out keeps the hidden state of every time_step
    r_out, h_state = self.rnn(x, h_state)
    outs = []  # save all predictions
    for time_step in range(r_out.size(1)):  # calculate output for each time step
        outs.append(self.out(r_out[:, time_step, :]))
    return torch.stack(outs, dim=1), h_state
There is a recursion-like idea here:
the network keeps producing h_state and feeding it back in as input,
so the first call has to supply an initial h_state (passing None gives a zero state).
Also note that self.rnn() (an nn.RNN) returns r_out, h_state,
unlike an LSTM, which returns r_out, (h_n, h_c); the sketch below shows the two signatures side by side.
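A side-by-side sketch of the two return signatures (toy shapes, not tied to the code below):

import torch
from torch import nn

x = torch.rand(1, 10, 1)                      # (batch, time_step, input_size)

rnn = nn.RNN(input_size=1, hidden_size=32, batch_first=True)
r_out, h_state = rnn(x, None)                 # plain RNN: a single hidden tensor
print(r_out.shape, h_state.shape)             # (1, 10, 32) (1, 1, 32)

lstm = nn.LSTM(input_size=1, hidden_size=32, batch_first=True)
r_out, (h_n, h_c) = lstm(x, None)             # LSTM: hidden state plus cell state
print(h_n.shape, h_c.shape)                   # (1, 1, 32) (1, 1, 32)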
The r_out of each time_step is fed into self.out
and each result is appended to outs[].
Since r_out has shape (batch, time_step, hidden_size),
the loop is:
outs = []  # save all predictions
for time_step in range(r_out.size(1)):  # calculate output for each time step
    outs.append(self.out(r_out[:, time_step, :]))
For the final return value, outs is a Python list; torch.stack turns it into a tensor, pressing its elements together along dim=1:
return torch.stack(outs, dim=1), h_state
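What torch.stack does here, in isolation (toy shapes for illustration):

import torch

outs = [torch.rand(1, 1) for _ in range(10)]  # 10 per-step predictions, each (batch, 1)
stacked = torch.stack(outs, dim=1)            # pressed together along a new time axis
print(stacked.shape)                          # torch.Size([1, 10, 1])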
During training,
step is the training step index, and
start, end = step * np.pi, (step + 1) * np.pi
carves out one short interval.
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32,endpoint=False) # float32 for converting torch FloatTensor
x_np = np.sin(steps)
y_np = np.cos(steps)
Scattering TIME_STEP points over each interval produces the training sin curve x_np and the prediction target, the cos curve y_np.
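A tiny check of how consecutive windows tile the axis (a sketch with TIME_STEP=10, as in the code below); endpoint=False keeps each window from repeating the start of the next:

import numpy as np

TIME_STEP = 10
for step in range(2):
    start, end = step * np.pi, (step + 1) * np.pi
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)
    print(steps[0], steps[-1])  # each window starts where the previous one left off
x_np, y_np = np.sin(steps), np.cos(steps)  # input curve and target curve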
x = torch.from_numpy(
    x_np[np.newaxis, :, np.newaxis]) # shape (batch, time_step, input_size); adds two size-1 dimensions, batch and input_size
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
Adding these two dimensions turns the array into the shape PyTorch expects.
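A quick shape check of the np.newaxis trick (illustrative values):

import numpy as np
import torch

x_np = np.sin(np.linspace(0, np.pi, 10, dtype=np.float32))  # shape (10,)
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])       # -> (1, 10, 1)
print(x.shape)  # torch.Size([1, 10, 1]) = (batch, time_step, input_size)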
# !! next step is important !!
h_state = h_state.data # repack the hidden state, break the connection from last iteration
This step is essential:
after each training step, h_state.data is assigned back to h_state, which keeps the values but detaches them from the previous iteration's graph, so backward() does not try to propagate through all earlier iterations.
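A toy illustration of what .data does here (w merely stands in for whatever produced the state):

import torch

w = torch.zeros(1, 1, 32, requires_grad=True)
h_state = w * 2               # h_state is still attached to w's graph
h_state = h_state.data        # same values, cut loose from the graph
print(h_state.requires_grad)  # False: backward() stops at this boundary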
The complete code:
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
# torch.manual_seed(1) # reproducible
# Hyper Parameters
TIME_STEP = 10 # rnn time step
INPUT_SIZE = 1 # rnn input size
LR = 0.02 # learning rate
# show data
steps = np.linspace(0, np.pi*2, 100, dtype=np.float32) # float32 for converting torch FloatTensor
x_np = np.sin(steps)
y_np = np.cos(steps)
plt.plot(steps, y_np, 'r-', label='target (cos)')
plt.plot(steps, x_np, 'b-', label='input (sin)')
plt.legend(loc='best')
plt.show()
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
self.rnn = nn.RNN(
input_size=INPUT_SIZE,
hidden_size=32, # rnn hidden unit
num_layers=1, # number of rnn layer
            batch_first=True, # input & output have batch size as the first dimension, e.g. (batch, time_step, input_size)
)
self.out = nn.Linear(32, 1)
def forward(self, x, h_state):
# x (batch, time_step, input_size)
# h_state (n_layers, batch, hidden_size)
# r_out (batch, time_step, hidden_size)
r_out, h_state = self.rnn(x, h_state)
outs = [] # save all predictions
for time_step in range(r_out.size(1)): # calculate output for each time step
outs.append(self.out(r_out[:, time_step, :]))
return torch.stack(outs, dim=1), h_state
# instead, for simplicity, you can replace above codes by follows
# r_out = r_out.view(-1, 32)
# outs = self.out(r_out)
# outs = outs.view(-1, TIME_STEP, 1)
# return outs, h_state
        # or even simpler, since nn.Linear accepts inputs of any number of dimensions
        # and returns outputs with the same dimensions except for the last:
# outs = self.out(r_out)
# return outs
rnn = RNN()
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all rnn parameters
loss_func = nn.MSELoss()
h_state = None # for initial hidden state
plt.figure(1, figsize=(12, 5))
plt.ion() # continuously plot
for step in range(100):
    start, end = step * np.pi, (step+1)*np.pi # time range: carve out one short interval
# use sin predicts cos
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False) # float32 for converting torch FloatTensor
x_np = np.sin(steps)
y_np = np.cos(steps)
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis]) # shape (batch, time_step, input_size); adds two size-1 dimensions, batch and input_size
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
prediction, h_state = rnn(x, h_state) # rnn output
# !! next step is important !!
h_state = h_state.data # repack the hidden state, break the connection from last iteration
loss = loss_func(prediction, y) # calculate loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
# plotting
plt.plot(steps, y_np.flatten(), 'r-')
plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
plt.draw(); plt.pause(0.05)
plt.ioff()
plt.show()
Output: