import tensorflow as tf
import sklearn.datasets as datasets
import numpy as np
## 1. Load and prepare the iris dataset
# Load once and reuse (the original called load_iris() twice, loading the data redundantly).
iris = datasets.load_iris()
x_data = iris.data      # features, shape (150, 4)
y_data = iris.target    # class labels, shape (150,), integer values 0..2
# Shuffle features and labels in place with the SAME seed so that
# row i of x_data still corresponds to row i of y_data afterwards.
np.random.seed(168)
np.random.shuffle(x_data)
np.random.seed(168)
np.random.shuffle(y_data)
# Hold out the last 30 samples as the test split; the rest is training data.
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]
# Features arrive as float64; cast to float32 so later matmul with the
# float32 weight matrix does not raise a dtype error.
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)
### 1.3 Pair features with labels and batch them for iteration.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_data = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
## 2. Model training
### 2.1 Hyperparameters and trainable parameters
lr = 0.1       # learning rate for gradient descent
epochs = 500   # number of full passes over the training set
classes = 3    # number of iris species (output dimension)
# Single dense layer: 4 input features -> 3 class scores.
# Weights and bias start from a small truncated normal so softmax
# begins near-uniform.
w = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1, dtype=tf.float32))
b = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, dtype=tf.float32))
train_loss_result = []  # mean training loss recorded once per epoch
accuracy_result = []    # test accuracy recorded once per epoch
loss_all = 0            # running sum of batch losses within an epoch
# Training loop: one gradient-descent step per batch, then evaluate on the
# test split at the end of every epoch.
# NOTE(review): the pasted source had lost all indentation; the nesting below
# is reconstructed from the code's logic (tape scope, per-batch update,
# per-epoch loss/accuracy bookkeeping) — matches the standard version of this
# tutorial, but worth confirming against the original.
for epoch in range(epochs):
    # Batch-local names: the original reused x_train/y_train as loop
    # variables, clobbering the module-level tensors of the same name.
    for x_batch, y_batch in train_data:
        with tf.GradientTape() as tape:
            y = tf.matmul(x_batch, w) + b
            y = tf.nn.softmax(y)  # logits -> class probabilities
            y_ = tf.one_hot(y_batch, depth=classes)  # labels -> one-hot
            loss = tf.reduce_mean(tf.square(y - y_))  # MSE loss
        loss_all += loss.numpy()
        grads = tape.gradient(loss, [w, b])  # d(loss)/dw, d(loss)/db
        # Plain gradient descent: param -= lr * grad
        w.assign_sub(lr * grads[0])
        b.assign_sub(lr * grads[1])
    # 120 training samples / batch 32 -> 4 batches; average their losses.
    train_loss_result.append(loss_all / 4)
    loss_all = 0
    # Evaluate on the held-out test data.
    total_correct, total_num = 0, 0
    for x_batch, y_batch in test_data:
        y = tf.matmul(x_batch, w) + b
        y = tf.nn.softmax(y)
        pred = tf.argmax(y, axis=1)
        # Cast to the labels' own dtype (argmax yields int64; the original
        # hard-cast to int32, which makes tf.equal fail when the labels are
        # int64, as sklearn's target typically is on 64-bit platforms).
        pred = tf.cast(pred, dtype=y_batch.dtype)
        # booleans -> 0/1 so tf.reduce_sum counts the correct predictions
        correct = tf.cast(tf.equal(pred, y_batch), dtype=tf.int32)
        correct = tf.reduce_sum(correct)
        # accumulate correct counts across all test batches
        total_correct += correct
        total_num += x_batch.shape[0]
    accuracy = total_correct / total_num
    accuracy_result.append(accuracy)
# Source article: "A beginner's first attempt — TensorFlow 2.0 iris in practice"
# (blog comment-widget boilerplate removed; it was not part of the program.)