【深度學習入門】Paddle實現人臉檢測和表情識別(基於YOLO和ResNet18)
博主主頁:https://blog.csdn.net/weixin_44936889
未經博主允許,本文禁止轉載!
一、先看效果:
本項目在 AI Studio 上進行,項目地址:
https://aistudio.baidu.com/aistudio/projectdetail/443545
訓練及測試結果:
UI 界面及其可視化:
二、AI Studio 簡介:
地址:https://aistudio.baidu.com/aistudio/index
平臺簡介:
爲了給廣大開發者提供更加完善自由的編程環境,幫助入門小白進行項目學習和體驗,幫助開發者更快捷簡便的完成深度學習項目,並持續提供更多的增值服務,百度設計研發了百度AI Studio一站式開發平臺。此平臺集合了AI教程、代碼環境、算法算力和數據集,併爲開發者提供了免費的在線雲計算編程環境,並且無需再進行環境配置和依賴包等繁瑣步驟,隨時隨地可以上線AI Studio開展深度學習項目。
創建項目:
開發者可以自由創建項目,並選擇 AI Studio 平臺提供的 GPU 和訓練集:
三、創建AI Studio項目:
創建並啓動環境:
進入項目地址並 Fork:
運行項目:
啓動並進入環境:
下載數據:
數據集地址:
鏈接:https://pan.baidu.com/s/1H7x2HSL_WV6oB-16enffmw
提取碼:cllz
下載後查看一下,包含7個不同情緒:
將壓縮包上傳到項目:
添加該命令並運行進行解壓:
!unzip images.zip
下載預訓練模型:
鏈接:https://pan.baidu.com/s/1LnISJdMVY1B2xCBG0tzF8A
提取碼:6wap
下載後同樣上傳到項目:
同樣解壓:
解壓完刪除命令行命令:
四、代碼講解:
該項目使用 Tiny-YOLO 進行人臉檢測,使用 ResNet 進行表情識別;
這一步設置並查看工作路徑:
# Make the AI Studio work directory importable and switch the CWD into it.
import os
import sys

sys.path.append('/home/aistudio/work')
os.chdir('/home/aistudio/work')
print(os.getcwd())
導入需要的 Python 庫:
#導入需要的包
import paddle as paddle
import paddle.fluid as fluid
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import os
import cv2
定義一個數據迭代器並加載數據:
def reader_createor(im_list, label_list):
    """Build a Paddle-style data reader over paired image paths and labels.

    The returned callable yields (image, label) samples: each image is
    loaded as grayscale, resized to 128x128, and horizontally flipped
    with probability ~0.5 as a light augmentation.
    """
    def reader():
        for img_path, target in zip(im_list, label_list):
            img = cv2.imread(img_path, 0)  # flag 0 -> single-channel grayscale
            img = cv2.resize(img, (128, 128))
            # Random horizontal mirror for data augmentation.
            if np.random.random() > 0.5:
                img = cv2.flip(img, 1)
            yield img, target
    return reader
# Scan ./images: one sub-directory per emotion class; class index is the
# enumeration order of os.listdir.
base_pt = './images'
datas = []
labels = []
label_list = []
for idx, cls in enumerate(os.listdir(base_pt)):
    cls_dir = os.path.join(base_pt, cls)
    label_list.append(cls)
    for fname in os.listdir(cls_dir):
        datas.append(os.path.join(cls_dir, fname))
        labels.append(idx)

# Re-seeding with the same value before each shuffle applies the identical
# permutation to paths and labels, keeping the pairs aligned.
np.random.seed(10)
np.random.shuffle(datas)
np.random.seed(10)
np.random.shuffle(labels)

print(len(datas))
print(datas[0], labels[0])
print(datas[600], labels[600])
定義網絡結構,使用一個具有 5 組殘差結構(每組 3 個 bottleneck 塊)的 ResNet,使用 elu 激活:
class DistResNet():
    """ResNet-style image classifier built on the (deprecated) paddle.fluid
    layers API: a stem conv, 5 stages of 3 bottleneck blocks each, global
    average pooling, and a softmax FC head.

    NOTE(review): indentation was flattened in the source paste and has been
    reconstructed here (final batch_norm/print placed after the residual
    stages) — confirm against the original notebook.
    """

    def __init__(self, is_train=True):
        # is_train toggles batch-norm between train and inference mode.
        self.is_train = is_train
        # L2 regularization strength applied to conv and FC parameters.
        self.weight_decay = 1e-4

    def net(self, input, class_dim=10):
        """Assemble the network graph and return the softmax output variable.

        NOTE(review): uses `math`, which is not imported in this snippet's
        header — the notebook imports it later, before net() runs; importing
        it at the top of the file would be safer.
        """
        depth = [3, 3, 3, 3, 3]            # 5 stages, 3 bottleneck blocks each
        num_filters = [16, 16, 32, 32, 64]  # base width per stage (x4 inside blocks)
        # Stem: 3x3 conv + BN with ELU, then 3x3/2 max-pool.
        conv = self.conv_bn_layer(
            input=input, num_filters=16, filter_size=3, act='elu')
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')
        for block in range(len(depth)):
            for i in range(depth[block]):
                # Downsample (stride 2) at the first block of every stage
                # except the first stage.
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters[block],
                    stride=2 if i == 0 and block != 0 else 1)
        conv = fluid.layers.batch_norm(input=conv, act='elu')
        print(conv.shape)
        pool = fluid.layers.pool2d(
            input=conv, pool_size=4, pool_type='avg', global_pooling=True)
        # Uniform init range scaled by fan-in, as in classic ResNet FC heads.
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(input=pool,
                              size=class_dim,
                              act="softmax",
                              param_attr=fluid.param_attr.ParamAttr(
                                  initializer=fluid.initializer.Uniform(-stdv,
                                                                        stdv),
                                  regularizer=fluid.regularizer.L2Decay(self.weight_decay)),
                              bias_attr=fluid.ParamAttr(
                                  regularizer=fluid.regularizer.L2Decay(self.weight_decay))
                              )
        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      bn_init_value=1.0):
        """Conv2d (no bias, 'same'-style padding) followed by batch-norm.

        The activation is applied by the batch-norm layer; bn_init_value lets
        callers zero-init the BN scale (used on the last BN of each block).
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            bias_attr=False,
            param_attr=fluid.ParamAttr(regularizer=fluid.regularizer.L2Decay(self.weight_decay)))
        return fluid.layers.batch_norm(
            input=conv, act=act, is_test=not self.is_train,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(bn_init_value),
                regularizer=None))

    def shortcut(self, input, ch_out, stride):
        """Identity shortcut, or a 1x1 conv when channels/stride change."""
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            return self.conv_bn_layer(input, ch_out, 1, stride)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride):
        """1x1 -> 3x3 -> 1x1 bottleneck (output channels = num_filters * 4)
        with an additive shortcut and ELU on the sum."""
        conv0 = self.conv_bn_layer(
            input=input, num_filters=num_filters, filter_size=1, act='elu')
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='elu')
        # Last BN scale zero-initialized so the block starts as (near) identity.
        conv2 = self.conv_bn_layer(
            input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, bn_init_value=0.0)
        short = self.shortcut(input, num_filters * 4, stride)
        return fluid.layers.elementwise_add(x=short, y=conv2, act='elu')
定義輸入輸出的佔位符:
# Define the network inputs: a single-channel 128x128 image and its
# integer class label.
data_shape = [1, 128, 128]
images = fluid.layers.data(name='images', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
定義模型:
# Build the classifier network.
# NOTE(review): net() defaults to class_dim=10 while the dataset has 7
# emotion classes — the extra logits are unused; passing class_dim=7 would
# be tighter.
import math
model = DistResNet()
predict = model.net(images)
print(predict.shape, label.shape)
定義損失函數:
# Loss and metric: per-sample cross-entropy, its mean, and batch accuracy.
cost = fluid.layers.cross_entropy(input=predict, label=label)  # cross-entropy
avg_cost = fluid.layers.mean(cost)  # mean over all elements of cost
acc = fluid.layers.accuracy(input=predict, label=label)  # accuracy from predictions and labels
定義優化方法:
# Adam optimizer minimizing the mean cross-entropy loss.
optimizer = fluid.optimizer.Adam(learning_rate=2e-4)
optimizer.minimize(avg_cost)

# Run on the first GPU; initialize all parameters once.
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# Feeder maps raw (image, label) batches onto the graph's input variables.
feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
定義繪製loss和accuracy變化曲線的函數:
# Running iteration counter and history buffers for the curves below.
# NOTE: `iter` shadows the builtin; the name is kept because the training
# loop later in the file updates it.
iter = 0
iters = []
train_costs = []
train_accs = []


def draw_train_process(iters, train_costs, train_accs):
    """Plot training cost and accuracy curves against iteration count."""
    plt.title("training costs/training accs", fontsize=24)
    plt.xlabel("iter", fontsize=14)
    plt.ylabel("cost/acc", fontsize=14)
    plt.plot(iters, train_costs, color='red', label='training costs')
    plt.plot(iters, train_accs, color='green', label='training accs')
    plt.legend()
    plt.grid()
    plt.show()
定義迭代次數及模型保存路徑:
EPOCH_NUM = 20  # number of full passes over the training data
model_save_dir = "/home/aistudio/data/emotion.inference.model"  # inference-model output dir
訓練ing:
# NOTE(review): `train_reader`, `test_reader` and `BATCH_SIZE` are not defined
# in this excerpt — presumably created elsewhere in the notebook (e.g. via
# paddle.batch over reader_createor); verify before running. Indentation was
# reconstructed from a flattened paste.
for pass_id in range(EPOCH_NUM):
    # --- training pass ---
    train_cost = 0
    for batch_id, data in enumerate(train_reader()):
        train_cost, train_acc = exe.run(program=fluid.default_main_program(),
                                        feed=feeder.feed(data),
                                        fetch_list=[avg_cost, acc])
        if batch_id % 100 == 0:
            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %
                  (pass_id, batch_id, np.mean(train_cost), np.mean(train_acc)))
        # Record the curves at every batch, indexed by samples seen so far.
        iter = iter + BATCH_SIZE
        iters.append(iter)
        train_costs.append(np.mean(train_cost))
        train_accs.append(np.mean(train_acc))
    # --- evaluation pass ---
    test_costs = []   # per-batch test losses
    test_accs = []    # per-batch test accuracies
    for batch_id, data in enumerate(test_reader()):
        test_cost, test_acc = exe.run(program=fluid.default_main_program(),  # run the test program
                                      feed=feeder.feed(data),                # feed one batch
                                      fetch_list=[avg_cost, acc])            # fetch loss and accuracy
        test_costs.append(test_cost[0])  # record this batch's loss
        test_accs.append(test_acc[0])    # record this batch's accuracy
    test_cost = (sum(test_costs) / len(test_costs))  # mean test loss
    test_acc = (sum(test_accs) / len(test_accs))     # mean test accuracy
    print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))
# --- persist the trained model for inference ---
if not os.path.exists(model_save_dir):
    os.makedirs(model_save_dir)
fluid.io.save_inference_model(model_save_dir,
                              ['images'],
                              [predict],
                              exe)
print('訓練模型保存完成!')
draw_train_process(iters, train_costs, train_accs)
定義測試域:
# Separate executor and variable scope for inference, isolated from the
# training program's state.
infer_exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
導入人臉檢測模型:
from yolo3tiny.detection import Detector, draw_bbox, recover_img
# Everything after this line can be deleted (leftover from the original notebook)
定義圖片讀取函數:
def load_image(im):
    """Convert a BGR face crop into the (1, 1, 128, 128) float32 array the
    classifier expects (grayscale, resized, with batch and channel axes)."""
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (128, 128))
    arr = np.array(gray).astype(np.float32)
    # Prepend channel and batch axes: (128, 128) -> (1, 1, 128, 128),
    # matching the training-time input layout.
    arr = arr[np.newaxis, np.newaxis, :, :]
    print('im_shape的維度:', arr.shape)
    return arr
讀取測試數據並進行人臉檢測:
# Run face detection on a test image.
# NOTE(review): `det` (a Detector instance) and `scale_img` are not defined in
# this excerpt — presumably set up earlier from the yolo3tiny module; verify
# before running.
im = cv2.imread('a.png')
im = scale_img(im, 416*2)
bboxes_pre = det.detect(
    im, confidence_threshold=0.99, nms_threshold=0.3)[0]
# The *2 presumably rescales boxes back to the displayed image — TODO confirm.
result, rois = draw_bbox(im, bboxes_pre*2)
print()
plt.imshow(result[:, :, [2,1,0]])  # reorder BGR -> RGB for matplotlib
進行表情識別:
with fluid.scope_guard(inference_scope):
    # Load the inference model saved during training.
    [inference_program,   # the program used for prediction
     feed_target_names,   # names of the variables that must be fed
     fetch_targets] = fluid.io.load_inference_model(model_save_dir,  # fetch_targets: variables holding the inference results
                                                    infer_exe)       # executor that runs the inference model
    # Classify the first detected face crop.
    img = load_image(rois[0])
    results = infer_exe.run(inference_program,                 # run the inference program
                            feed={feed_target_names[0]: img},  # feed the image to predict
                            fetch_list=fetch_targets)          # fetch the prediction
    plt.imshow(rois[0][:, :, [2,1,0]])  # BGR -> RGB for display
    plt.title("infer results: %s" % label_list[np.argmax(results[0])])
    plt.show()
五、算法詳解:
YOLO 算法詳解:
這個我之前寫過:
【論文閱讀筆記】YOLO v1——You Only Look Once: Unified, Real-Time Object Detection:
https://blog.csdn.net/weixin_44936889/article/details/104384273
【論文閱讀筆記】YOLO9000: Better, Faster, Stronger:
https://blog.csdn.net/weixin_44936889/article/details/104387529
【論文閱讀筆記】YOLOv3: An Incremental Improvement:
https://blog.csdn.net/weixin_44936889/article/details/104390227
ResNet 算法詳解:
這個之前也寫過:
殘差神經網絡ResNet系列網絡結構詳解:從ResNet到DenseNet:https://blog.csdn.net/weixin_44936889/article/details/103774753