本文爲轉載,原博客地址:https://blog.csdn.net/lengguoxing/article/details/78456279
- import tensorflow as tf
- node1 = tf.constant(3.0, dtype=tf.float32)
- node2 = tf.constant(4.0)# also tf.float32 implicitly
- print(node1, node2)
- Tensor("Const:0", shape=(), dtype=float32) Tensor("Const_1:0", shape=(), dtype=float32)
- sess = tf.Session()
- print(sess.run([node1, node2]))
- node3 = tf.add(node1, node2)
- print("node3:", node3)
- print("sess.run(node3):", sess.run(node3))
- node3: Tensor("Add:0", shape=(), dtype=float32)
- sess.run(node3): 7.0
- a = tf.placeholder(tf.float32)
- b = tf.placeholder(tf.float32)
- adder_node = a + b # + provides a shortcut for tf.add(a, b)
- print(sess.run(adder_node, {a:3, b:4.5}))
- print(sess.run(adder_node, {a: [1,3], b: [2,4]}))
- 7.5
- [3. 7.]
- add_and_triple = adder_node * 3.
- print(sess.run(add_and_triple, {a:3, b:4.5}))
- 輸出結果是:
- 22.5
- W = tf.Variable([.3], dtype=tf.float32)
- b = tf.Variable([-.3], dtype=tf.float32)
- x = tf.placeholder(tf.float32)
- linear_model = W*x + b
- init = tf.global_variables_initializer()
- sess.run(init)
- print(sess.run(linear_model, {x: [1,2,3,4]}))
- 求值linear_model
- 輸出爲
- [0. 0.30000001 0.60000002 0.90000004]
- y = tf.placeholder(tf.float32)
- squared_deltas = tf.square(linear_model - y)
- loss = tf.reduce_sum(squared_deltas)
- print(sess.run(loss, {x: [1,2,3,4], y: [0, -1, -2, -3]}))
- 輸出的結果爲
- 23.66
- fixW = tf.assign(W, [-1.])
- fixb = tf.assign(b, [1.])
- sess.run([fixW, fixb])
- print(sess.run(loss, {x: [1,2,3,4], y: [0, -1, -2, -3]}))
- 最終打印的結果是:
- 0.0
- optimizer = tf.train.GradientDescentOptimizer(0.01)
- train = optimizer.minimize(loss)
- sess.run(init)# reset values to incorrect defaults.
- for i in range(1000):
- sess.run(train, {x: [1,2,3,4], y: [0, -1, -2, -3]})
- print(sess.run([W, b]))
- 輸出結果爲
- [array([-0.9999969], dtype=float32), array([ 0.99999082], dtype=float32)]
- running training loops 運行訓練循環
- running evaluation loops 運行求值循環
- managing data sets 管理數據集合
- import tensorflow as tf
- node1 = tf.constant(3.0, dtype=tf.float32)
- node2 = tf.constant(4.0) # also tf.float32 implicitly
- print(node1, node2)
- sess = tf.Session()
- print(sess.run([node1, node2]))
- # from __future__ import print_function
- node3 = tf.add(node1, node2)
- print("node3:", node3)
- print("sess.run(node3):", sess.run(node3))
- # 佔位符
- a = tf.placeholder(tf.float32)
- b = tf.placeholder(tf.float32)
- adder_node = a + b # + provides a shortcut for tf.add(a, b)
- print(sess.run(adder_node, {a: 3, b: 4.5}))
- print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))
- add_and_triple = adder_node * 3.
- print(sess.run(add_and_triple, {a: 3, b: 4.5}))
- # 多個變量求值
- W = tf.Variable([.3], dtype=tf.float32)
- b = tf.Variable([-.3], dtype=tf.float32)
- x = tf.placeholder(tf.float32)
- linear_model = W*x + b
- # 變量初始化
- init = tf.global_variables_initializer()
- sess.run(init)
- print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
- # loss function
- y = tf.placeholder(tf.float32)
- squared_deltas = tf.square(linear_model - y)
- loss = tf.reduce_sum(squared_deltas)
- print("loss function", sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
- ss = (0-0)*(0-0) + (0.3+1)*(0.3+1) + (0.6+2)*(0.6+2) + (0.9+3)*(0.9+3) # 真實算法
- print("真實算法ss", ss)
- print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, 0.3, 0.6, 0.9]})) # 測試參數
- # tf.assign 變量重新賦值
- fixW = tf.assign(W, [-1.])
- fixb = tf.assign(b, [1.])
- sess.run([fixW, fixb])
- print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
- print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
- # tf.train API
- optimizer = tf.train.GradientDescentOptimizer(0.01) # 梯度下降優化器
- train = optimizer.minimize(loss) # 最小化損失函數
- sess.run(init) # reset values to incorrect defaults.
- for i in range(1000):
- sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
- print(sess.run([W, b]))
- print("------------------------------------1")
- # Complete program:The completed trainable linear regression model is shown here:完整的訓練線性迴歸模型代碼
- # Model parameters
- W = tf.Variable([.3], dtype=tf.float32)
- b = tf.Variable([-.3], dtype=tf.float32)
- # Model input and output
- x = tf.placeholder(tf.float32)
- linear_model = W*x + b
- y = tf.placeholder(tf.float32)
- # loss
- loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
- # optimizer
- optimizer = tf.train.GradientDescentOptimizer(0.01)
- train = optimizer.minimize(loss)
- # training data
- x_train = [1, 2, 3, 4]
- y_train = [0, -1, -2, -3]
- # training loop
- init = tf.global_variables_initializer()
- sess = tf.Session()
- sess.run(init) # reset values to wrong
- for i in range(1000):
- sess.run(train, {x: x_train, y: y_train})
- # evaluate training accuracy
- curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
- print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
- print("------------------------------------2")
- # tf.estimator 使用tf.estimator實現上述訓練
- # Notice how much simpler the linear regression program becomes with tf.estimator:
- # NumPy is often used to load, manipulate and preprocess data.
- import numpy as np
- import tensorflow as tf
- # Declare list of features. We only have one numeric feature. There are many
- # other types of columns that are more complicated and useful.
- feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
- # An estimator is the front end to invoke training (fitting) and evaluation
- # (inference). There are many predefined types like linear regression,
- # linear classification, and many neural network classifiers and regressors.
- # The following code provides an estimator that does linear regression.
- estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
- # TensorFlow provides many helper methods to read and set up data sets.
- # Here we use two data sets: one for training and one for evaluation
- # We have to tell the function how many batches
- # of data (num_epochs) we want and how big each batch should be.
- x_train = np.array([1., 2., 3., 4.])
- y_train = np.array([0., -1., -2., -3.])
- x_eval = np.array([2., 5., 8., 1.])
- y_eval = np.array([-1.01, -4.1, -7, 0.])
- input_fn = tf.estimator.inputs.numpy_input_fn(
- {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
- train_input_fn = tf.estimator.inputs.numpy_input_fn(
- {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
- eval_input_fn = tf.estimator.inputs.numpy_input_fn(
- {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)
- # We can invoke 1000 training steps by invoking the method and passing the
- # training data set.
- estimator.train(input_fn=input_fn, steps=1000)
- # Here we evaluate how well our model did.
- train_metrics = estimator.evaluate(input_fn=train_input_fn)
- eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
- print("train metrics: %r"% train_metrics)
- print("eval metrics: %r"% eval_metrics)
- print("------------------------------------3")
- # A custom model:客戶自定義實現訓練
- # Declare list of features, we only have one real-valued feature
- def model_fn(features, labels, mode):
- # Build a linear model and predict values
- W = tf.get_variable("W", [1], dtype=tf.float64)
- b = tf.get_variable("b", [1], dtype=tf.float64)
- y = W*features['x'] + b
- # Loss sub-graph
- loss = tf.reduce_sum(tf.square(y - labels))
- # Training sub-graph
- global_step = tf.train.get_global_step()
- optimizer = tf.train.GradientDescentOptimizer(0.01)
- train = tf.group(optimizer.minimize(loss),
- tf.assign_add(global_step, 1))
- # EstimatorSpec connects subgraphs we built to the
- # appropriate functionality.
- return tf.estimator.EstimatorSpec(
- mode=mode,
- predictions=y,
- loss=loss,
- train_op=train)
- estimator = tf.estimator.Estimator(model_fn=model_fn)
- # define our data sets
- x_train = np.array([1., 2., 3., 4.])
- y_train = np.array([0., -1., -2., -3.])
- x_eval = np.array([2., 5., 8., 1.])
- y_eval = np.array([-1.01, -4.1, -7., 0.])
- input_fn = tf.estimator.inputs.numpy_input_fn(
- {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
- train_input_fn = tf.estimator.inputs.numpy_input_fn(
- {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
- eval_input_fn = tf.estimator.inputs.numpy_input_fn(
- {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)
- # train
- estimator.train(input_fn=input_fn, steps=1000)
- # Here we evaluate how well our model did.
- train_metrics = estimator.evaluate(input_fn=train_input_fn)
- eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
- print("train metrics: %r"% train_metrics)
- print("eval metrics: %r"% eval_metrics)