A Rough Understanding of RNNs

A good resource for learning RNNs: [link]

 

1. What is an RNN

Simply put, an RNN feeds the output of the previous time step back in as part of the input to the current time step, as sketched below.
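A minimal sketch of that recurrence in NumPy (the weight names W_x and W_h and the toy sizes are my own, not taken from the code later in this post):

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

rng = np.random.default_rng(0)
W_x = rng.standard_normal((2, 16))   # input -> hidden weights
W_h = rng.standard_normal((16, 16))  # hidden -> hidden weights (the recurrence)

h = np.zeros(16)                     # hidden state carried between steps
for x_t in rng.standard_normal((8, 2)):  # a toy sequence of 8 two-feature inputs
    h = sigmoid(x_t @ W_x + h @ W_h)     # previous step's output feeds this step

The only difference from an ordinary feed-forward layer is the extra h @ W_h term, which is what lets information flow from one step to the next.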

2. When should you use an RNN

a. You start with a set of individual samples.

b. Some samples are related to one another: for example, sample a's label depends not only on sample a's own features but also on the output for sample b.

c. All the related samples can be grouped into clusters; samples within a cluster are linked, while samples in different clusters have no connection at all (see the sketch below).
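In data terms, condition (c) says the dataset splits into independent sequences. A hedged sketch of that layout (the shapes and names here are illustrative only):

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

rng = np.random.default_rng(0)
W_x = rng.standard_normal((2, 16))
W_h = rng.standard_normal((16, 16))

# 100 independent clusters, each a sequence of 8 related samples
# with 2 features apiece; only samples inside a cluster interact.
data = rng.standard_normal((100, 8, 2))

for sequence in data:        # clusters are processed independently...
    h = np.zeros(16)         # ...so the hidden state is reset per cluster
    for x_t in sequence:     # within a cluster, state flows step to step
        h = sigmoid(x_t @ W_x + h @ W_h)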

3. A simple example

Using a neural network, compute the sum of two integers a and b, where a and b are each represented as an 8-bit binary number. An RNN suits this task because the carry at each bit position depends on the bits processed at earlier steps.
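To make the encoding concrete: each integer becomes a row of 8 bits, most significant bit first, via the same np.unpackbits call the code below uses (the sample values 11, 42, and 53 are my own):

import numpy as np

# every integer 0..255 as a row of 8 bits, most significant bit first
binary = np.unpackbits(np.array([range(256)], dtype=np.uint8).T, axis=1)

print(binary[11])  # [0 0 0 0 1 0 1 1]
print(binary[42])  # [0 0 1 0 1 0 1 0]
print(binary[53])  # [0 0 1 1 0 1 0 1]  <- 11 + 42, the target the RNN must learn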


4. Network structure:

(The figure from the original post is not available.) Reading the structure off the code below: at each time step the network takes a 2-dimensional input (one bit of a and one bit of b), feeds it into a 16-unit hidden layer that also receives its own activation from the previous time step, and emits a single output unit, the corresponding bit of the sum.
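As a quick shape check on that structure (a sketch reusing the weight names synapse_0, synapse_h, and synapse_1 from the code in the next section):

import numpy as np

np.random.seed(0)
input_dim, hidden_dim, output_dim = 2, 16, 1

synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1   # input -> hidden
synapse_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1  # hidden -> hidden
synapse_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1  # hidden -> output

X = np.zeros((1, input_dim))             # one time step: [bit of a, bit of b]
prev_hidden = np.zeros((1, hidden_dim))  # hidden state from the previous step

hidden = np.dot(X, synapse_0) + np.dot(prev_hidden, synapse_h)
output = np.dot(hidden, synapse_1)
print(hidden.shape, output.shape)        # (1, 16) (1, 1)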
5. Code:

# -*- coding: utf-8 -*-
import copy
import numpy as np

np.random.seed(0)

# compute sigmoid nonlinearity
def sigmoid(x):
    output = 1 / (1 + np.exp(-x))
    return output

# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
    return output * (1 - output)

# training dataset generation: map every integer 0..255 to its 8-bit form
int2binary = {}
binary_dim = 8

largest_number = pow(2, binary_dim)
binary = np.unpackbits(
    np.array([range(largest_number)], dtype=np.uint8).T, axis=1)
for i in range(largest_number):
    int2binary[i] = binary[i]

# input variables
alpha = 0.1        # learning rate
input_dim = 2      # one bit of a and one bit of b per time step
hidden_dim = 16
output_dim = 1     # one bit of the sum per time step

# initialize neural network weights, uniform in [-1, 1)
synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1
synapse_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1
synapse_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1

synapse_0_update = np.zeros_like(synapse_0)
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)

# train logic
for j in range(10000):

    # generate a simple addition problem (a + b = c); operands stay
    # below largest_number/2 so that c still fits in 8 bits
    a_int = np.random.randint(largest_number // 2)  # integer in [0, 128)
    a = int2binary[a_int]

    b_int = np.random.randint(largest_number // 2)  # integer in [0, 128)
    b = int2binary[b_int]

    # true answer
    c_int = a_int + b_int
    c = int2binary[c_int]

    # where we'll store our best guess (binary encoded)
    d = np.zeros_like(c)

    overallError = 0

    layer_2_deltas = list()
    layer_1_values = list()
    layer_1_values.append(np.zeros(hidden_dim))

    # moving along the positions in the binary encoding,
    # least significant bit first
    for position in range(binary_dim):

        # generate input and output
        X = np.array([[a[binary_dim - position - 1],
                       b[binary_dim - position - 1]]])
        y = np.array([[c[binary_dim - position - 1]]]).T

        # hidden layer (input + prev_hidden)
        layer_1 = sigmoid(np.dot(X, synapse_0) +
                          np.dot(layer_1_values[-1], synapse_h))

        # output layer (new binary representation)
        layer_2 = sigmoid(np.dot(layer_1, synapse_1))

        # did we miss? ... if so, by how much?
        layer_2_error = y - layer_2
        layer_2_deltas.append(layer_2_error * sigmoid_output_to_derivative(layer_2))
        overallError += np.abs(layer_2_error[0])

        # decode our estimate for this bit so we can print it later
        d[binary_dim - position - 1] = np.round(layer_2[0][0])

        # store hidden layer so we can use it in the next timestep
        layer_1_values.append(copy.deepcopy(layer_1))

    future_layer_1_delta = np.zeros(hidden_dim)

    # backpropagate through time, walking the stored time steps in reverse
    for position in range(binary_dim):

        X = np.array([[a[position], b[position]]])
        layer_1 = layer_1_values[-position - 1]
        prev_layer_1 = layer_1_values[-position - 2]

        # error at output layer
        layer_2_delta = layer_2_deltas[-position - 1]

        # error at hidden layer
        layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) +
                         layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)

        # accumulate weight updates so we can apply them after the pass
        synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        synapse_0_update += X.T.dot(layer_1_delta)

        future_layer_1_delta = layer_1_delta

    synapse_0 += synapse_0_update * alpha
    synapse_1 += synapse_1_update * alpha
    synapse_h += synapse_h_update * alpha

    synapse_0_update *= 0
    synapse_1_update *= 0
    synapse_h_update *= 0

    # print out progress
    if j % 1000 == 0:
        print("Error:" + str(overallError))
        print("Pred:" + str(d))
        print("True:" + str(c))
        out = 0
        for index, x in enumerate(reversed(d)):
            out += x * pow(2, index)
        print(str(a_int) + " + " + str(b_int) + " = " + str(out))
        print("____________")
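As a small follow-up that is not in the original post: once the loop above finishes, the trained weights can be reused to add two fresh integers. A minimal sketch, assuming the variables above are still in scope (rnn_add is a name I'm introducing):

def rnn_add(a_int, b_int):
    # one forward pass with the trained weights, least significant bit first
    a, b = int2binary[a_int], int2binary[b_int]
    hidden = np.zeros(hidden_dim)
    out_bits = np.zeros(binary_dim, dtype=np.uint8)
    for position in range(binary_dim):
        x = np.array([[a[binary_dim - position - 1],
                       b[binary_dim - position - 1]]])
        hidden = sigmoid(np.dot(x, synapse_0) + np.dot(hidden, synapse_h))
        bit = sigmoid(np.dot(hidden, synapse_1))
        out_bits[binary_dim - position - 1] = np.round(bit[0][0])
    return int(np.packbits(out_bits)[0])

print(rnn_add(11, 42))  # should print 53 once training has converged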
