def backward_propagation(w, b, X, Y, num, learning_rate, print_cost=False):
    """Optimize w and b by running gradient descent for `num` iterations.

    Each iteration calls `forward_propagate` to obtain the gradients and the
    current cost, then takes one gradient-descent step.

    Args:
        w: weights (updated in place via reassignment each iteration).
        b: bias.
        X: input data.
        Y: true labels.
        num: number of gradient-descent iterations.
        learning_rate: step size for the parameter updates.
        print_cost: if True, print the cost every 100 iterations.

    Returns:
        params: dict with final "w" and "b".
        grads: dict with the last iteration's "dw" and "db".
        costs: list of the cost recorded every 100 iterations.
    """
    costs = []  # cost history, sampled every 100 iterations
    for i in range(num):
        grad, cost = forward_propagate(w, b, X, Y)
        dw = grad['dw']  # weight gradient from this iteration
        db = grad['db']  # bias gradient from this iteration
        w = w - learning_rate * dw  # weight update
        b = b - learning_rate * db  # bias update
        if i % 100 == 0:
            # NOTE(fix): the original clobbered the history list with the
            # scalar cost and then called .append on the scalar (crash).
            # Keep the scalar and the list as separate names.
            costs.append(cost)
            if print_cost:
                print("cost after iteration %i:%f" % (i, cost))
    params = {"w": w,
              "b": b}  # pack final weights and bias
    grads = {"dw": dw,
             "db": db}  # pack last weight/bias gradients
    return params, grads, costs