Face Recognition Based on pyqt + OpenCV + a Neural Network Algorithm

The article 《基於pyqt+OpenCV設計的用戶密碼或人臉識別登錄GUI程序設計》 introduced face recognition built with pyqt + opencv. That article relied purely on the face-recognition interfaces opencv provides: face image collection, model parameter learning, and face inference (recognition).

Building on that article, we made the following changes:

  • Face photos are still collected (and saved) with opencv.
  • The Python imaging library PIL resizes each picture (for example, to 150x150).
  • Face data collection is implemented by ourselves (a minimal sketch of this step follows the list):

       (1) PIL converts each picture into one-dimensional pixel data (0–255); the pixel data of one picture form one sample.

       (2) All image data are packed into a pkl file, which becomes the input data for neural network learning.

  • Model parameter learning is implemented by ourselves: a two-layer neural network whose weight parameters are updated by numerical differentiation or error backpropagation.
  • Face inference (recognition) is implemented by ourselves: softmax serves as the activation function of the output layer, and the index of the largest output value is the recognition result.
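
As a minimal sketch of this data-preparation idea (an editor's illustration, using the same file naming the programs below use), one 150x150 grayscale picture becomes one flattened sample of 22500 pixel values:

# coding: utf-8
#--------Sketch: one picture -> one sample----------
import numpy as np
from PIL import Image

img = Image.open("Facedata/User.0.1.jpg").resize((150, 150)).convert('L')  # 150x150 grayscale
sample = np.array(img, 'uint8').flatten()  # shape (22500,), pixel values 0-255
print(sample.shape, sample.min(), sample.max())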

After reading 《基於pyqt+OpenCV設計的用戶密碼或人臉識別登錄GUI程序設計》 and this article, you will take away the following:

  1. Build GUI programs with pyqt
  2. Implement face recognition with the opencv library
  3. Convert pictures to data and fix the picture size
  4. Apply your own neural network, or another machine learning algorithm, to a real face recognition application
  5. Turn plain grayscale pictures into the input data a machine learning algorithm needs
  6. Convert between class labels (integer indices) and one-hot encoding (see the sketch after this list)
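
For point 6, the conversion between integer labels and one-hot vectors amounts to a few lines of numpy (a minimal sketch; the same idea appears inside TwoLayerNet.gradient below):

# coding: utf-8
#--------Sketch: labels <-> one-hot----------
import numpy as np

labels = np.array([0, 2, 1])                 # integer class labels for 3 samples
one_hot = np.zeros((labels.size, 3))         # 3 classes
one_hot[np.arange(labels.size), labels] = 1  # label -> one-hot
back = np.argmax(one_hot, axis=1)            # one-hot -> label
print(one_hot)
print(back)                                  # [0 2 1]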

The source code is given directly below. It is commented extensively, so no extra walkthrough of the programs is provided. Readers can locate whatever code is useful to them and analyze and reuse it as needed.

# coding: utf-8
#----------Main program (also the entry point)-------------------------
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox,QWidget
from faces_input_frame import Ui_Dialog
import os

class Ui_Form(QWidget):  # changed from object to QWidget so that message boxes can pop up
    def __init__(self):
        super(Ui_Form,self).__init__()  # user-added code
    def setupUi(self, Form):
        self.form=Form  # user-added code
        Form.setObjectName("Form")
        Form.setMinimumSize(QtCore.QSize(329, 230))
        Form.setMaximumSize(QtCore.QSize(400, 230))
        Form.setStyleSheet("")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(63, 43, 64, 16))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(63, 80, 48, 16))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.lineEdit_2 = QtWidgets.QLineEdit(Form)
        self.lineEdit_2.setGeometry(QtCore.QRect(121, 80, 133, 20))
        self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEdit_2.setCursorPosition(0)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(70, 150, 75, 23))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(170, 150, 75, 23))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setObjectName("pushButton_2")
        self.checkBox = QtWidgets.QCheckBox(Form)
        self.checkBox.setGeometry(QtCore.QRect(63, 110, 151, 20))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.checkBox.setFont(font)
        self.checkBox.setObjectName("checkBox")
        self.lineEdit_3 = QtWidgets.QLineEdit(Form)
        self.lineEdit_3.setGeometry(QtCore.QRect(121, 41, 133, 20))
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.pushButton_face_pass = QtWidgets.QPushButton(Form)
        self.pushButton_face_pass.setGeometry(QtCore.QRect(279, 100, 104, 41))
        self.pushButton_face_pass.setMaximumSize(QtCore.QSize(16777215, 16777213))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton_face_pass.setFont(font)
        self.pushButton_face_pass.setObjectName("pushButton_face_pass")
        self.pushButton_face_input = QtWidgets.QPushButton(Form)
        self.pushButton_face_input.setGeometry(QtCore.QRect(280, 31, 104, 41))
        self.pushButton_face_input.setMaximumSize(QtCore.QSize(16777215, 16777213))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton_face_input.setFont(font)
        self.pushButton_face_input.setObjectName("pushButton_face_input")

        self.retranslateUi(Form)
        self.pushButton.clicked.connect(self.close)
        self.pushButton_2.clicked.connect(self.open)
        self.pushButton_face_input.clicked.connect(self.faceinput)
        self.pushButton_face_pass.clicked.connect(self.facepass)

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "User name:"))
        self.label_2.setText(_translate("Form", "Password:"))
        self.pushButton.setText(_translate("Form", "Cancel"))
        self.pushButton_2.setText(_translate("Form", "OK"))
        self.checkBox.setText(_translate("Form", "Remember user name and password"))
        self.pushButton_face_pass.setText(_translate("Form", "Face recognition login"))
        self.pushButton_face_input.setText(_translate("Form", "Face enrollment"))

    def open(self):
        # -------- check whether the user exists --------------
        fl = open('user_names.txt', 'r+', encoding='utf-8')
        pre_name = fl.read()
        print(pre_name)

        names = pre_name.split(',')
        fl.close()
        if self.lineEdit_3.text() in names:
            fl = open('password.txt', 'r+')
            password = fl.read()
            fl.close()
            if self.lineEdit_2.text() == password:
                reply = QMessageBox.information(self, 'Notice', 'Please write the main program', QMessageBox.Close)
            else:
                reply = QMessageBox.information(self, 'Notice', 'Wrong password', QMessageBox.Close)
        else:
            reply = QMessageBox.information(self, 'Notice', 'User does not exist', QMessageBox.Close)

    def close(self):
        self.form.close()  # close the main window; calling self.close() here would recurse forever
    def faceinput(self,event):
        self.form.hide()
        Form1=QtWidgets.QDialog()
        ui=Ui_Dialog()
        ui.setupUi(Form1)
        Form1.show()
        Form1.exec_()
        self.form.show()   # show the main window again after the child window closes

    def facepass(self,event):
        import face_recognize
        get_name=face_recognize.recognize_face()  # returns the recognized name
        if get_name=="unknown":
            reply = QMessageBox.information(self, 'Notice', 'Face recognition failed', QMessageBox.Close)
        else:
            reply = QMessageBox.information(self, 'Notice', "Welcome: "+get_name, QMessageBox.Close)
            print("write the follow-up program here")


if __name__=="__main__":
    import sys
    app=QtWidgets.QApplication(sys.argv)
    widget=QtWidgets.QWidget()
    ui=Ui_Form()
    ui.setupUi(widget)
    widget.show()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
#------------faces_input_frame.py-----------
# Form implementation generated from reading ui file 'faces_input_frame.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
import face_recognize
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox,QWidget
class Ui_Dialog(QWidget):
    def setupUi(self, Dialog):
        self.form=Dialog  # user-added code
        Dialog.setObjectName("Dialog")
        Dialog.resize(315, 104)
        Dialog.setMinimumSize(QtCore.QSize(315, 104))
        Dialog.setMaximumSize(QtCore.QSize(315, 104))
        Dialog.setAutoFillBackground(False)
        self.Button_Enter = QtWidgets.QPushButton(Dialog)
        self.Button_Enter.setGeometry(QtCore.QRect(221, 21, 75, 27))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Button_Enter.setFont(font)
        self.Button_Enter.setObjectName("Button_Enter")
        self.Button_Exit=QtWidgets.QPushButton(Dialog)
        self.Button_Exit.setGeometry(QtCore.QRect(221, 54, 75, 27))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Button_Exit.setFont(font)
        self.Button_Exit.setObjectName("Button_Exit")
        self.face_name=QtWidgets.QLabel(Dialog)
        self.face_name.setGeometry(QtCore.QRect(40, 20, 131, 16))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.face_name.setFont(font)
        self.face_name.setObjectName("face_name")
        self.face_name_frame=QtWidgets.QLineEdit(Dialog)
        self.face_name_frame.setGeometry(QtCore.QRect(30, 40, 167, 31))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.face_name_frame.setFont(font)
        self.face_name_frame.setText("")
        self.face_name_frame.setObjectName("face_name_frame")

        self.retranslateUi(Dialog)
        self.Button_Enter.clicked.connect(self.Enter)
        self.Button_Exit.clicked.connect(self.ext)
    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.Button_Enter.setText(_translate("Dialog", "OK"))
        self.Button_Exit.setText(_translate("Dialog", "Exit"))
        self.face_name.setText(_translate("Dialog", "Please enter your name:"))

    def Enter(self):
        if self.face_name_frame.text()=="":
            # empty input
            reply = QMessageBox.information(self, 'Notice', 'Please enter a valid user name', QMessageBox.Ok)
        else:
            fl = open('user_names.txt', 'a+', encoding='utf-8')  # open only when there is a name to append
            fl.write(self.face_name_frame.text()+',')
            fl.close()
            print("Collecting face photos")
            face_recognize.Collect_faces()
            print("Collecting face data")
            face_recognize.Training_faces()
            print("Learning model parameters")
            face_recognize.faces_parms()
            print("Learning finished")

    def ext(self,event):
        self.form.close()

if __name__ == "__main__":
    import sys
    app=QtWidgets.QApplication(sys.argv)
    Dialog=QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
#---------face_recognize.py-----------------
from functions import *
def Collect_faces():
    # This function only captures face images and saves them as 150x150 pictures
    import cv2
    from PIL import Image
    # Use the laptop's built-in camera (index 0); change the index to 1, 2, ... for other cameras
    cap = cv2.VideoCapture(0)

    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    fl = open('user_names.txt', 'r+', encoding='utf-8')
    pre_name=fl.read()
    name = pre_name.split(',')
    face_id=len(name) - 2  # the trailing comma leaves an empty entry; this is the zero-based id of the user just added, whose series of photos follows
    fl.close()
    #face_id = input('\n enter user id:')  # alternatively, type the id identifying this person's series of photos

    print('\n Initializing face capture. Look at the camera and wait ...')

    count = 0

    while True:

        # read a frame from the camera
        success, img = cap.read()

        # convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # detect faces
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0))
            count += 1
            # save the image: crop the face from the original frame, then re-save it at 150x150
            cv2.imwrite("Facedata/User." + str(face_id) + '.' + str(count) + '.jpg', gray[y: y + h, x: x + w])  # original face image
            PIL_img = Image.open("Facedata/User." + str(face_id) + '.' + str(count) + '.jpg').resize((150, 150), Image.LANCZOS).convert('L')  # 150x150 grayscale (LANCZOS was called ANTIALIAS in older Pillow)
            PIL_img.save("Facedata/User." +str(face_id) + '.' + str(count) + '.jpg')  # overwrite the original face image with the 150x150 version

            cv2.imshow('image', img)

        # keep the preview window refreshing
        k = cv2.waitKey(1)

        if k == 27:  # press Esc to stop capturing
            break

        elif count >=150:  # stop after collecting 150 samples
            break

    # release the camera
    cap.release()
    cv2.destroyAllWindows()


def Training_faces():
    from PIL import Image
    import numpy as np
    import pickle
    faceSamples = []  # list holding the face samples
    faceLables = []   # face labels (integer ids, not one-hot)
    fl = open('user_names.txt', 'r+', encoding='utf-8')
    pre_name = fl.read()
    name = pre_name.split(',')
    face_id = len(name) - 1  # the trailing comma leaves an empty entry; this is the number of users
    fl.close()
    print(face_id)
    for j in range(face_id):   # number of users
        for i in range(150):   # 150 photos per user (count=150)
            PIL_img = Image.open("Facedata/User." + str(j) + '.' + str(i+1) + '.jpg').resize((150, 150), Image.LANCZOS).convert('L')  # 150x150 grayscale
            pre_img_numpy = np.array(PIL_img, 'uint8')   # picture to numpy array
            flatten_img_numpy = pre_img_numpy.flatten()  # flatten to one dimension
            faceSamples.append(flatten_img_numpy)  # face data of all pictures
            faceLables.append(j)                   # one label per picture


    array_faces_data=np.array(faceSamples)   # list to numpy array
    array_faces_labels=np.array(faceLables)  # as many labels as there are pictures
    data = {'train_img': array_faces_data, 'train_label': array_faces_labels}  # dictionary form (renamed from dict to avoid shadowing the builtin)
    pickle.dump(data, open('faces_data.pkl','wb'))  # save the face data


def faces_parms():
    # model parameter learning
    import pickle
    import numpy as np
    from Two_layer_net import TwoLayerNet
    # load the data
    origin_data = pickle.load(open("faces_data.pkl", 'rb'))
    train_img = origin_data['train_img']      # samples
    train_label = origin_data['train_label']  # labels

    # get the number of users (number of classes)
    fl = open('user_names.txt', 'r+', encoding='utf-8')
    pre_name = fl.read()
    name = pre_name.split(',')
    user_num = len(name) - 1  # the trailing comma leaves an empty entry; this is the number of users
    fl.close()
    # input layer size = length of one flattened image; hidden layer has 5 neurons; output layer has one neuron per user
    network = TwoLayerNet(input_size=train_img.shape[1], hidden_size=5, output_size=user_num)

    iters_num = 10000  # number of gradient-descent updates
    train_size = train_img.shape[0]  # number of samples
    batch_size = 5  # mini-batch size
    learning_rate = 0.1
    train_loss_list = []

    for i in range(iters_num):
        batch_mask = np.random.choice(train_size, batch_size)  # pick batch_size random indices
        x_batch = train_img[batch_mask]    # the corresponding samples
        t_batch = train_label[batch_mask]  # and their correct labels
        # compute the gradients
        import time
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        # grad = network.numerical_gradient(x_batch, t_batch)  # numerical differentiation (much slower)
        grad = network.gradient(x_batch, t_batch)  # error backpropagation
        end = time.perf_counter()

        # update the parameters
        for key in ('W1', 'b1', 'W2', 'b2'):
            network.params[key] -= learning_rate * grad[key]

        # record the learning progress
        loss = network.loss(x_batch, t_batch)
        train_loss_list.append(loss)

    # save the model parameters
    pickle.dump(network.params, open('faces_params.pkl', 'wb'))
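
    # Optional sketch (editor's addition, assuming matplotlib is installed; not in the
    # original program): uncomment to visualize the recorded loss curve.
    # import matplotlib.pyplot as plt
    # plt.plot(train_loss_list)
    # plt.xlabel('iteration')
    # plt.ylabel('loss')
    # plt.show()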

def recognize_face():
    import cv2
    import numpy as np
    from PIL import Image
    import pickle
    # Use the laptop's built-in camera (index 0); change the index to 1, 2, ... for other cameras
    cap = cv2.VideoCapture(0)

    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    fl = open('user_names.txt', 'r+', encoding='utf-8')
    pre_name = fl.read()
    name = pre_name.split(',')  # name[i] is the user whose class label is i
    fl.close()

    count = 0
    result = "unknown"

    while True:

        # read a frame from the camera
        success, img = cap.read()

        # convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # detect faces
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0))
            count += 1
            # save the face to be recognized as a 150x150 picture
            cv2.imwrite("Recognize_img/" + str(count) + '.jpg', gray[y: y + h, x: x + w])  # original face image
            PIL_img = Image.open("Recognize_img/" + str(count) + '.jpg').resize((150, 150), Image.LANCZOS).convert('L')  # 150x150 grayscale (LANCZOS was called ANTIALIAS in older Pillow)
            PIL_img.save("Recognize_img/" + str(count) + '.jpg')  # overwrite the original face image with the 150x150 version

            # --------------------- run the recognition ------------------------------
            pre_img_numpy = np.array(PIL_img, 'uint8')   # picture to numpy array
            flatten_img_numpy = pre_img_numpy.flatten()  # flatten to one dimension: the data to infer on
            # load the model parameters
            faces_params = pickle.load(open("faces_params.pkl", 'rb'))
            # predict
            W1, W2 = faces_params['W1'], faces_params['W2']
            b1, b2 = faces_params['b1'], faces_params['b2']
            a1 = np.dot(flatten_img_numpy, W1) + b1  # weighted sum: xw+b
            z1 = sigmoid(a1)   # activation: outputs in (0, 1)
            a2 = np.dot(z1, W2) + b2
            y = softmax(a2)    # output-layer activation
            print(y)
            if y.max() > 0.55:  # probability above 0.55
                p = np.argmax(y)  # index of the highest probability
                result = name[p]  # the recognized name
                cap.release()
                cv2.destroyAllWindows()  # close the camera window

                return result

        # keep the preview window refreshing
        cv2.imshow('image', img)
        k = cv2.waitKey(1)

        if k == 27:  # press Esc to stop capturing
            break

        elif count >= 5:  # give up after 5 recognition attempts
            break

    # release the camera
    cap.release()
    cv2.destroyAllWindows()
    return result   # return the recognition result: a name or "unknown"
# coding: utf-8
#--------functions.py----------
import numpy as np
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_grad(x):
    return (1.0 - sigmoid(x)) * sigmoid(x)

def softmax(x):
    # an earlier article discussed improving softmax so that large values cannot overflow
    if x.ndim == 2:   # 2-D case: overflow countermeasure
        x = x.T       # transpose
        x = x - np.max(x, axis=0)  # axis=0: subtract each column's maximum
        y = np.exp(x) / np.sum(np.exp(x), axis=0)  # axis=0: normalize by each column's sum
        return y.T     # transpose back

    x = x - np.max(x)  # 1-D case: overflow countermeasure
    return np.exp(x) / np.sum(np.exp(x))

def cross_entropy_error(y, t):
    if y.ndim == 1:  # a single sample
        t = t.reshape(1, t.size)  # reshape (n,) to (1, n)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]   # batch size
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size  # cross-entropy error: the larger the output at the correct label, the smaller the error
def numerical_gradient(f, x):
    # gradient by numerical differentiation, visiting every element of x
    # (np.nditer is needed so that 2-D weight matrices are perturbed element by element;
    # looping over x.shape[0] alone would shift whole rows at once and give wrong gradients)
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = tmp_val + h
        fxh1 = f(x)  # f(x+h)
        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the value
        it.iternext()
    return grad
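
A quick check of the softmax overflow countermeasure above (an editor's illustration, not one of the original files): subtracting the maximum changes nothing mathematically, but it keeps np.exp within range.

# coding: utf-8
#--------Sketch: checking the softmax overflow countermeasure----------
import numpy as np
from functions import softmax

big = np.array([1000.0, 1001.0, 1002.0])
print(softmax(big))  # [0.09003057 0.24472847 0.66524096]; np.exp(1000) alone would overflow to inf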
# coding: utf-8
#----------Two_layer_net.py-----------------
import numpy as np
from functions import *

class TwoLayerNet:
# A two-layer network: three layers of neurons, but only two layers carry weights. See the author's earlier articles for neural network background.
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # initialize the weights: input size, hidden layer size, output layer size
        self.params = {}  # holds the network's parameters
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)  # biases start at 0
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
    def predict(self, x):
        # inference; the argument x is image data such as train_img
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1  # weighted sum: xw+b
        z1 = sigmoid(a1)         # activation: outputs in (0, 1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)          # output-layer activation

        return y                 # one score per class
        
    # x: input data, t: ground-truth labels
    def loss(self, x, t):
        y = self.predict(x)

        return cross_entropy_error(y, t)  # value of the loss function

    def accuracy(self, x, t):
        # note: this method expects one-hot labels t
        y = self.predict(x)
        y = np.argmax(y, axis=1)  # index of the largest output
        t = np.argmax(t, axis=1)  # position of the correct label

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy           # recognition accuracy
        
    # x: input data, t: ground-truth labels
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        
        return grads


    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = x.shape[0]

        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward

        one_hot_t = np.zeros_like(y)  # zeros with the same shape as y
        for j, i in enumerate(t):
            one_hot_t[j][i] = 1       # one-hot labels: row j is the sample, column i the class index
        dy = (y - one_hot_t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
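
Because TwoLayerNet provides both numerical_gradient and the backpropagation-based gradient, the two can be checked against each other, which is the usual way to confirm a backprop implementation. Below is a minimal sketch (an editor's illustration, not one of the original files); it assumes Two_layer_net.py is importable and uses tiny sizes so that numerical differentiation stays fast.

# coding: utf-8
#----------Sketch: gradient check----------
import numpy as np
from Two_layer_net import TwoLayerNet

net = TwoLayerNet(input_size=12, hidden_size=5, output_size=3)  # deliberately tiny network
x = np.random.rand(4, 12)                # 4 fake "images" of 12 pixels each
t = np.random.randint(0, 3, size=4)      # 4 integer class labels

grad_num = net.numerical_gradient(x, t)  # slow, definition-based gradients
grad_bp = net.gradient(x, t)             # fast backpropagation gradients
for key in ('W1', 'b1', 'W2', 'b2'):
    diff = np.average(np.abs(grad_bp[key] - grad_num[key]))
    print(key, diff)                     # should be tiny, around 1e-8 or smaller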

Welcome to follow the WeChat official account "Py生活": learn something, enjoy life.
