Learning Fully-Connected Layer Parameters with TensorFlow

The algorithm first constructs triplet data: placeholder X holds the anchor (original) feature points, Xp the matching (positive) feature points, and Xn the non-matching (negative) feature points.

The goal of training is to make the Euclidean distance PDis between the anchor and its positive sample, plus a given margin threshold, smaller than the Euclidean distance NDis between the anchor and its negative sample; that is, PDis + threshold < NDis.
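As a minimal sketch of this criterion (NumPy only; the 3-dimensional feature vectors are made up for illustration, while the real features below have 882 dimensions):

import numpy as np

# Hypothetical 3-dimensional features, for illustration only
x  = np.array([1.0, 2.0, 3.0])   # anchor
xp = np.array([1.1, 2.1, 2.9])   # positive (matching) sample
xn = np.array([5.0, 0.0, 7.0])   # negative (non-matching) sample

PDis = np.sum((x - xp) ** 2)     # squared distance, anchor vs. positive
NDis = np.sum((x - xn) ** 2)     # squared distance, anchor vs. negative

threshold = 10.0                 # margin (the real code uses thresh = 2500000)
print(PDis + threshold < NDis)   # True; this is what training drives toward

Note that the code below, like this sketch, works with squared differences (tf.pow(..., 2)) rather than true Euclidean distances.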

The full code follows:
from __future__ import division, print_function, absolute_import

import tensorflow as tf
import numpy as np

# Training Parameters
learning_rate = 0.005
n_epoch = 5
batch_size = 250

display_step = 5

# Network Parameters
num_hidden_2 = 20  # embedding dimension (the latent dim)
num_input = 882    # dimension of each input feature vector

# tf Graph input: anchor, positive (matching), and negative (non-matching) features
X = tf.placeholder("float", [None, num_input])
Xp = tf.placeholder("float", [None, num_input])
Xn = tf.placeholder("float", [None, num_input])
thresh = 2500000  # margin; its scale matches the squared differences of the raw integer features

weights = {
    'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_2]))
}


# Building the encoder: one shared fully-connected layer (linear, no activation)
def encoder(x, xp, xn):
    layer_1 = tf.matmul(x, weights['encoder_h1'])     # anchor features
    layer_1p = tf.matmul(xp, weights['encoder_h1'])   # matching (positive) features
    layer_1n = tf.matmul(xn, weights['encoder_h1'])   # non-matching (negative) features
    return layer_1, layer_1p, layer_1n


# Construct model
encoder_x, encoder_yp, encoder_yn = encoder(X, Xp, Xn)

# Define loss and optimizer: the mean of (PDis - NDis + thresh) over all
# samples and embedding dimensions; it drops below zero once the average
# positive distance plus the margin is smaller than the average negative distance
pos_dist = tf.pow(encoder_x - encoder_yp, 2)
neg_dist = tf.pow(encoder_x - encoder_yn, 2)
loss = tf.reduce_mean(pos_dist - neg_dist + thresh)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
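# Note (added, not in the original): a standard triplet loss hinges at zero,
# e.g. tf.reduce_mean(tf.maximum(0.0, pos_dist - neg_dist + thresh)), so its
# gradient vanishes once the margin is met; this code instead lets the loss
# go negative and uses the 'l < 0' check in the training loop to stop early.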

# Initialize the variables (i.e. assign their default value);
# the TF session itself is created in the training block below
init = tf.global_variables_initializer()



data = []   # anchor features
datap = []  # matching (positive) features
datan = []  # non-matching (negative) features

# The input file cycles through groups of four lines:
# i%4 == 0 is a label, i%4 == 1 the anchor features,
# i%4 == 2 the matching features, i%4 == 3 the non-matching features
i = 0
with open("D:/data.txt", "r", encoding='utf-8') as f:
    for line in f:
        if i % 4 == 1:
            data.append(list(map(int, line.split())))
        elif i % 4 == 2:
            datap.append(list(map(int, line.split())))
        elif i % 4 == 3:
            datan.append(list(map(int, line.split())))
        i += 1


num_example = i / 4

ratio = 1  # fraction used for training; with ratio = 1 the validation sets below are empty
s = int(num_example * ratio)
o_train = data[:s]
p_train = datap[:s]
n_train = datan[:s]
o_val = data[s:]  # validation set
p_val = datap[s:]
n_val = datan[s:]


# Yield the three aligned datasets in batches
def minibatches(inputs=None, inputsp=None, inputsn=None, batch_size=None, shuffle=False):
    # '&' binds tighter than '==' in Python, so the original assert using '&'
    # did not test what it meant to; 'and' gives the intended check
    assert len(inputs) == len(inputsp) and len(inputs) == len(inputsn)
    inputs, inputsp, inputsn = np.array(inputs), np.array(inputsp), np.array(inputsn)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], inputsp[excerpt], inputsn[excerpt]



breakflag = 0

# Start training in a new TF session
with tf.Session() as sess:
    sess.run(init)
    for _ in range(n_epoch):
        n_batch = 0
        for o_train_a, p_train_a, n_train_a in minibatches(o_train, p_train, n_train, batch_size, shuffle=True):
            o_train_a = o_train_a.reshape([-1, num_input])
            p_train_a = p_train_a.reshape([-1, num_input])
            n_train_a = n_train_a.reshape([-1, num_input])

            _, l = sess.run([optimizer, loss],
                            feed_dict={X: o_train_a, Xp: p_train_a, Xn: n_train_a})
            n_batch += 1
            print("batches:%d  loss:%f" % (n_batch, l))

            # (the original also tested 'i == 1' here, but that i was a
            # leftover counter from the file-reading loop above)
            if n_batch % display_step == 0:
                print('Step %i: Minibatch Loss: %f' % (n_batch, l))
                if l < 0:  # margin criterion met on average: stop and dump the weights
                    np.set_printoptions(suppress=True)  # no scientific notation

                    # Scale the learned weights by 100, round to integers,
                    # transpose, and write them out as comma-separated rows
                    result2 = np.array(weights['encoder_h1'].eval())
                    result2 = np.round(result2 * 100)
                    result3 = result2.T

                    print(result3)
                    np.savetxt('D:/data/w11.txt', result3,
                               fmt=['%d,'] * result3.shape[1], newline='\r\n')
                    print(weights['encoder_h1'].eval())
                    breakflag = 1
                    break

        if breakflag == 1:
            break
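For reference, the i % 4 logic above implies (this layout is inferred from the loader, not documented in the original) that D:/data.txt repeats groups of four lines: a label line followed by three whitespace-separated integer vectors of 882 values each, in anchor / positive / negative order. A hypothetical excerpt:

1
12 7 0 ... 3
11 9 0 ... 2
90 0 4 ... 77

(label; anchor; matching; non-matching, each feature line carrying 882 integers). On success, D:/data/w11.txt holds the transposed weight matrix: 20 rows of 882 comma-separated integers, each value 100 times the learned weight, rounded.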
