Deep Learning with MXNet: Linear Regression (Implemented from Scratch)

from IPython import display
print("IPython ok")
from matplotlib import pyplot as plt
print("plt ok")
from mxnet import autograd, nd
print("mxnet ok")
import random
print("random ok")
import os, sys

IPython ok
plt ok
mxnet ok
random ok
# Model: y = Xw + b + ε

# Number of features and number of samples
num_inputs = 2
num_examples = 1000
# Ground-truth weights w and bias b
true_w = [2, -3.4]
true_b = 4.2
# Randomly generate a 1000x2 matrix; each row of features is a length-2 feature vector (this is X)
features = nd.random.normal(scale=1, shape=(num_examples, num_inputs))
# Generate the labels; each entry of labels is a scalar (this is y)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
# Add Gaussian noise with standard deviation 0.01
labels += nd.random.normal(scale=0.01, shape=labels.shape)
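
Written out, the data-generating process above is

$$y = Xw + b + \epsilon, \qquad w = [2,\ -3.4]^\top, \quad b = 4.2, \quad \epsilon \sim \mathcal{N}(0,\ 0.01^2)$$

where X is the 1000x2 feature matrix.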

# Print the first sample to check
print(features[0], labels[0])
[-0.48362762 -0.921403  ]
<NDArray 2 @cpu(0)> 
[6.353317]
<NDArray 1 @cpu(0)>

# Helper functions for plotting
def use_svg_display():
    # Render figures as vector graphics (SVG)
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    # Set the figure size
    plt.rcParams['figure.figsize'] = figsize

# Scatter plot of the second feature against the labels
set_figsize()
plt.scatter(features[:, 1].asnumpy(), labels.asnumpy(), 1);


[Figure: scatter plot of features[:, 1] against labels]

Syntax tip


1. About range()

In Python 3, range() returns a range object (a lazy sequence), not a list. For example, a = range(5) gives range(0, 5), and for i in a: iterates over 0 through 4. If you want an actual list, wrap it with list(range(5)) to get [0, 1, 2, 3, 4]; this is one of the differences between Python 2 and Python 3. The short snippet below demonstrates both forms.
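
A quick check in plain Python:

a = range(5)
print(a)           # range(0, 5) -- a lazy sequence, not a list
print(list(a))     # [0, 1, 2, 3, 4]
for i in a:
    print(i)       # prints 0, 1, 2, 3, 4, one per line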


# Data-reading function: iterates over the dataset in random mini-batches
# Arguments: (mini-batch size, input features, output labels)
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    # list(range(num_examples)) gives the index list [0, 1, 2, ..., num_examples-1]
    indices = list(range(num_examples))
    # random.shuffle() reorders the indices in place so samples are read in random order
    random.shuffle(indices)
    # i runs from 0 to num_examples with step batch_size
    for i in range(0, num_examples, batch_size):
        # j is an NDArray holding indices[i : i+batch_size]
        # (min() guards the last batch, which may be smaller than batch_size)
        j = nd.array(indices[i:min(i + batch_size, num_examples)])
        # Instead of yielding samples one by one, index with the whole array j:
        # take() returns the rows of features and the entries of labels selected by j
        yield features.take(j), labels.take(j)

batch_size = 10

# Fetch just one batch directly:
#next(data_iter(batch_size, features, labels))

# This loop also reads only the first batch, because of the break below
for X, y in data_iter(batch_size, features, labels):
    print(X, y)
    break
[[-0.5459386  -1.8447278 ]
 [-0.2551912  -1.3350252 ]
 [ 0.38828224 -0.21730624]
 [-0.41434413  0.79045075]
 [-0.9193256  -0.59021354]
 [ 0.10230371  0.56393176]
 [-0.63922304 -0.4232461 ]
 [-0.8403067  -1.2707146 ]
 [-0.23893945 -0.86148673]
 [ 0.3051429  -1.6782148 ]]
<NDArray 10x2 @cpu(0)> 
[ 9.398996    8.23137     5.7183204   0.67691046  4.360369    2.4872274
  4.3754888   6.833427    6.65252    10.516858  ]
<NDArray 10 @cpu(0)>
# Initialize the parameters to be estimated
# Why does w have shape (2, 1)?
# Answer: each input x is a row vector of length 2, and the model is y_hat = Xw + b,
# so w must be a 2x1 column vector for the matrix product to be defined
w = nd.random.normal(scale=0.01, shape=(num_inputs, 1))
b = nd.zeros(shape=(1,))

print(w)
print(true_w)

[[ 0.00059183]
 [-0.00173846]]
<NDArray 2x1 @cpu(0)>
[2, -3.4]
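
As a quick shape check (illustrative, not in the original post): features has shape (1000, 2) and w has shape (2, 1), so the product Xw has shape (1000, 1), one prediction per sample.

print(features.shape, w.shape)       # (1000, 2) (2, 1)
print(nd.dot(features, w).shape)     # (1000, 1)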
# Allocate gradient buffers for the parameters to be estimated
# attach_grad() requests the memory needed to store their gradients
w.attach_grad()
b.attach_grad()

# Define the model
def linreg(X, w, b):    # this function is also provided by the d2lzh package
    # nd.dot() performs the matrix multiplication Xw
    return nd.dot(X, w) + b

# Define the loss function: squared loss
def squared_loss(y_hat, y):
    # reshape y to the shape of y_hat so the subtraction is element-wise
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
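
The reshape guards against a broadcasting pitfall. A tiny illustration with toy shapes (not part of the training code): if y_hat has shape (3, 1) and y has shape (3,), subtracting them directly broadcasts to a (3, 3) matrix instead of the per-sample difference.

y_hat = nd.ones((3, 1))
y = nd.zeros(3)
print((y_hat - y).shape)                        # (3, 3) -- broadcasting, not what we want
print((y_hat - y.reshape(y_hat.shape)).shape)   # (3, 1) -- element-wise difference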

# Define the optimization algorithm: mini-batch stochastic gradient descent
def sgd(params, lr, batch_size):
    # For each parameter in the list: param <- param - lr * param.grad / batch_size
    # Why divide by batch_size?
    # Answer: MXNet's autograd computes the gradient as the sum over the samples
    # in the mini-batch, so dividing by batch_size gives the average gradient
    for param in params:
        param[:] = param - lr * param.grad / batch_size
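
To see why the gradient is a sum over the mini-batch, here is a minimal sketch with toy values (x and w_toy are hypothetical, separate from the training code): calling backward() on a non-scalar loss in MXNet is equivalent to calling backward() on its sum, so param.grad accumulates the sum of the per-sample gradients.

x = nd.array([[1.0], [2.0], [3.0]])    # a "mini-batch" of 3 samples
w_toy = nd.array([[2.0]])
w_toy.attach_grad()
with autograd.record():
    l = (nd.dot(x, w_toy)) ** 2 / 2    # per-sample losses, shape (3, 1)
l.backward()                           # equivalent to l.sum().backward()
print(w_toy.grad)                      # sum of x_i^2 * w = (1 + 4 + 9) * 2 = [[28.]]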
# Train the model

lr=0.03
num_epochs=3
net=linreg
loss=squared_loss

# Number of passes (epochs) over the training data
for epoch in range(num_epochs):
    # In each epoch, every sample in the training set is used exactly once
    # X and y are the features and labels of one mini-batch
    for X, y in data_iter(batch_size, features, labels):
        # Record the computations so that autograd can compute gradients
        with autograd.record():
            # l is the loss of the mini-batch predictions against the true labels
            l = loss(net(X, w, b), y)
        l.backward()                  # compute the gradient of the loss w.r.t. w and b
        sgd([w, b], lr, batch_size)   # update the parameters
    train_l = loss(net(features, w, b), labels)  # training loss after this epoch
    print("epoch %d, loss %f" % (epoch + 1, train_l.mean().asnumpy()))
epoch 1, loss 0.032353
epoch 2, loss 0.000115
epoch 3, loss 0.000052
print("model w is:",w.asnumpy())

print("true w is:",true_w)

print("model b is:",b)

print("true b is:",true_b)

model w is: [[ 2.0001972]
 [-3.399224 ]]
true w is: [2, -3.4]
model b is: 
[4.1994486]
<NDArray 1 @cpu(0)>
true b is: 4.2
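
For a more direct comparison, one could also print the estimation errors; this small addition is not in the original post:

print("error in estimating w:", nd.array(true_w).reshape(w.shape) - w)
print("error in estimating b:", true_b - b)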