Saving TensorFlow models

import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()

# Fetch the data
housing = fetch_california_housing()
m, n = housing.data.shape
print(housing.keys())
print(housing.feature_names)
print(housing.target)
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]  # unscaled version, not used below

# Standardize the features, then prepend a bias column of ones
scaled_data = scaler.fit_transform(housing.data)
data = np.c_[np.ones((m, 1)), scaled_data]

# Hyperparameters
n_epoch = 1000
learning_rate = 0.01

# Define the inputs; this first version uses constants (placeholders come later)
X = tf.constant(data, tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), tf.float32, name="y")

theta = tf.Variable(tf.random_uniform([n + 1, 1], -1, 1), name="theta")
y_pred = tf.matmul(X, theta, name="prediction")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")

# Gradient of the MSE, written out by hand: grad = (2/m) * X^T (X theta - y)
gradient = 2 / m * tf.matmul(tf.transpose(X), error)
training_op = tf.assign(theta, theta - learning_rate * gradient)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epoch):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)  # apply one gradient-descent step per epoch
    print("Best theta:", theta.eval())
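
The gradient above is derived by hand from the MSE formula. As a hedged aside (my addition, not part of the original listing), TF 1.x can produce the same update through automatic differentiation with tf.gradients, which scales better to models where the closed-form derivative is tedious:

# Autodiff variant: let TensorFlow compute d(mse)/d(theta) itself
auto_gradient = tf.gradients(mse, [theta])[0]
training_op = tf.assign(theta, theta - learning_rate * auto_gradient)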

# Ways of feeding data during gradient descent
# The traditional way: placeholders plus feed_dict
A = tf.placeholder(tf.float32, shape=[None, 3])
B = A + 5
with tf.Session() as sess:
    test_b_1 = B.eval(feed_dict={A: [[1, 2, 3]]})
    test_b_2 = B.eval(feed_dict={A: [[4, 5, 6], [7, 8, 9]]})
    print(test_b_1)
    print(test_b_2)
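
A side note (my addition): B.eval(feed_dict=...) is shorthand for sess.run(B, feed_dict=...) on the session that is currently the default, so the same feed works in either form:

with tf.Session() as sess:
    # identical result to B.eval(feed_dict={A: [[1, 2, 3]]})
    print(sess.run(B, feed_dict={A: [[1, 2, 3]]}))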

# The mini-batch way
X = tf.placeholder(tf.float32, shape=[None, n + 1], name="X")
y = tf.placeholder(tf.float32, shape=[None, 1], name="y")

n_epochs = 100
display_step = 10
learning_rate = 0.01
batch_size = 100
n_batches = int(np.ceil(m / batch_size))

theta = tf.Variable(tf.random_uniform([n + 1, 1], -1, 1, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="prediction")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
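
The optimizer is a drop-in component. As one hedged variant (my addition; the momentum value is an assumed hyperparameter, not from the original), plain gradient descent can be swapped for momentum without changing anything else in the graph:

# Alternative optimizer: momentum usually converges faster than plain SGD
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)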

# Define how mini-batches are fetched
def fetch_batch(epoch, batch_index, batch_size):
    # Seed per (epoch, batch) so the sampling is reproducible
    np.random.seed(epoch * n_batches + batch_index)
    indices = np.random.randint(m, size=batch_size)  # sampling with replacement
    X_batch = data[indices]
    y_batch = housing.target.reshape(-1, 1)[indices]
    return X_batch, y_batch
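
Because fetch_batch draws indices with replacement, a given epoch can repeat some rows and miss others entirely. A common alternative (a sketch I'm adding, not from the original) slices a per-epoch permutation so each row is visited exactly once per epoch:

def fetch_batch_permuted(epoch, batch_index, batch_size):
    # Same permutation for every batch of one epoch (seeded by epoch),
    # sliced so each row appears exactly once per epoch
    permutation = np.random.RandomState(epoch).permutation(m)
    indices = permutation[batch_index * batch_size:(batch_index + 1) * batch_size]
    return data[indices], housing.target.reshape(-1, 1)[indices]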

# Mini-batch training loop
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})

        if epoch % display_step == 0:
            # Report the MSE of the last batch of this epoch
            batch_loss = mse.eval(feed_dict={X: X_batch, y: y_batch})
            print("Epoch:", epoch, "Batch id:", batch_index, "Batch loss:", batch_loss)

# Saving and restoring the model
# Create a Saver node while building the graph; during execution, call its save()
# method wherever the model should be written to disk.
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % display_step == 0:
            # X and y are placeholders here, so evaluation needs a feed;
            # this version trains on the full data set each step
            print("Epoch:", epoch, "mse =",
                  mse.eval(feed_dict={X: data, y: housing.target.reshape(-1, 1)}))
            save_path = saver.save(sess, "/tmp/my_model.ckpt")  # checkpoint mid-training
        sess.run(training_op, feed_dict={X: data, y: housing.target.reshape(-1, 1)})

    best_theta = theta.eval()
    save_path = saver.save(sess, "/tmp/my_model_final.ckpt")  # save the final result
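
The comment above promises both saving and restoring, so here is a minimal restore sketch (my addition), reusing the Saver and the final checkpoint path from the listing; saver.restore() takes the place of running the init node:

with tf.Session() as sess:
    saver.restore(sess, "/tmp/my_model_final.ckpt")  # restores all saved variables
    print("Restored theta:", theta.eval())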
