pytorch-1

Tensor

import torch
# create a matrix
a=torch.Tensor([[1,2],[3,4],[5,6],[7,8]])
print(a)
print('{}'.format(a))
print('{}'.format(a.size()))
# create an all-zeros matrix
b=torch.zeros((4,2))
print(b)
# create matrices of different dtypes
c=torch.IntTensor([[1,2],[3,4],[5,6],[7,8]])
print(c)
d=torch.LongTensor([[1,2],[3,4],[5,6],[7,8]])
print(d)
e=torch.DoubleTensor([[1,2],[3,4],[5,6],[7,8]])
print(e)
# access or modify an element
print(e[1,1])
e[1,1]=0
print(e[1,1])
# convert between Tensor and NumPy
f=e.numpy()
print(f)

g=torch.from_numpy(f)
print(g)
# move the Tensor to the GPU if one is available
if torch.cuda.is_available():
    h=g.cuda()
    print(h)
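
As an aside, on PyTorch 0.4 and later the more idiomatic, device-agnostic way to do this is torch.device together with .to(); a minimal sketch (an addition, not from the original post):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
t = torch.zeros((4, 2)).to(device)  # moves to the GPU only when one is available
print(t)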

Variable

Creating and Using Variable

1. First, we create an empty Variable:

import torch
# create a Variable
a = torch.autograd.Variable()
print(a)

As the output shows, the default type is Tensor.
2. Therefore, to assign a value to a Variable, we must pass in a Tensor:

b = torch.autograd.Variable(torch.Tensor([[1, 2], [3, 4],[5, 6], [7, 8]]))
print(b)

3. Chapter 1 mentioned the three attributes of a Variable (data, grad, grad_fn); let's print each of them in turn:

import torch
# create a Variable
a = torch.autograd.Variable()
print(a)
b = torch.autograd.Variable(torch.Tensor([[1, 2], [3, 4],[5, 6], [7, 8]]))
print(b)
print(b.data)
print(b.grad)
print(b.grad_fn)
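
For a freshly created leaf Variable, both grad and grad_fn print as None: no backward pass has run yet, and a leaf is not the result of any operation. A small sketch (an addition, not from the original post) showing when they get populated:

v = torch.autograd.Variable(torch.Tensor([2]), requires_grad=True)
y = v * 3            # y is produced by an operation, so it carries a grad_fn
y.backward()         # populates v.grad
print(v.grad)        # tensor([3.])
print(y.grad_fn)     # something like <MulBackward0 object at ...>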

III. Scalar Gradient Computation Graph

1. For convenience, we can abbreviate torch.autograd.Variable as Variable:

from torch.autograd import Variable

2. Next, we declare a variable x. Here requires_grad=True controls whether a gradient is computed for this variable; the default is False:

x = Variable(torch.Tensor([2]),requires_grad = True)
print(x)

3. We declare two more variables, w and b:

w = Variable(torch.Tensor([3]),requires_grad = True)
print(w)
b = Variable(torch.Tensor([4]),requires_grad = True)
print(b)

4. We then define two expressions, y1 and y2:

y1 = w * x + b
print(y1)
y2 = w * x + b * x
print(y2)

5. Now let's compute the gradients of the variables, starting with y1:

# compute gradients
y1.backward()
print(x.grad)
print(w.grad)
print(b.grad)

where:

y1 = 3 * 2 + 4 = 10,

y2 = 3 * 2 + 4 * 2 = 14,

the gradient of x is 3, because y1 = 3 * x + 4, so dy1/dx = 3,

the gradient of w is 2, because dy1/dw = x = 2,

the gradient of b is 1, because dy1/db = 1 (the implicit coefficient in b * 1)

6. Next is y2. Comment out the y1 backward section first, so its gradients are not accumulated on top, and run again:

#y1.backward()
#print(x.grad)
#print(w.grad)
#print(b.grad)
y2.backward()
print(x.grad)
print(w.grad)
print(b.grad)

where:

the gradient of x is 7, because y2 = 3 * x + 4 * x, so dy2/dx = 3 + 4 = 7,

the gradient of w is 2, because dy2/dw = x = 2,

the gradient of b is 2, because dy2/db = x = 2

7. backward() can take a gradient argument; for example, we can pass in a variable a:

a = Variable(torch.Tensor([5]),requires_grad = True)
y2.backward(a)
print(x.grad)
print(w.grad)
print(b.grad)
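
On a fresh run where only y2.backward(a) executes (the y1 part stays commented out), the argument a = 5 scales every gradient by 5:

# expected values:
# x.grad = 5 * (w + b) = 5 * 7 = 35
# w.grad = 5 * x       = 5 * 2 = 10
# b.grad = 5 * x       = 5 * 2 = 10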

IV. Matrix Gradient Computation Graph

# matrix gradients
c = torch.randn(3)
print(c)
c = Variable(c,requires_grad = True)
print(c)
y3 = c * 2
print(y3)
y3.backward(torch.FloatTensor([1, 0.1, 0.01]))
print(c.grad)

As we can see, c is a 1 x 3 matrix. Since y3 = c * 2, if the argument passed to backward() were:

torch.FloatTensor([1, 1, 1])

each component's gradient would simply be dy3/dc = 2. But what we actually pass in is:

torch.FloatTensor([1, 0.1, 0.01])

so the components' gradients are multiplied by 1, 0.1, and 0.01 respectively, giving [2, 0.2, 0.02].
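
To see the difference, here is a small comparison sketch (an addition; it uses a fixed input instead of randn so the result is reproducible):

import torch
from torch.autograd import Variable

c = Variable(torch.ones(3), requires_grad=True)
y3 = c * 2
# an all-ones gradient argument yields the plain per-component gradient dy3/dc = 2
y3.backward(torch.FloatTensor([1, 1, 1]))
print(c.grad)  # tensor([2., 2., 2.])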

V. Running Variable on the GPU

1. The same idea as with Tensor; the code is as follows:

# put the Variable on the GPU
if torch.cuda.is_available():
    d = c.cuda()
    print(d)

VI. Converting Variable to NumPy and Back

1. Note that requires_grad is usually set to False here; if it is True, as in this code:

# Variable to NumPy
e = Variable(torch.Tensor([4]),requires_grad = True)
f = e.numpy()
print(f)

the following error is raised:

Can't call numpy() on Variable that requires grad. Use var.detach().numpy() instead.

2. Solution 1: change requires_grad to False, after which the last line prints the NumPy array [4.]:
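
A minimal sketch of this one-line fix:

# Variable to NumPy, with gradient tracking disabled
e = Variable(torch.Tensor([4]), requires_grad=False)
f = e.numpy()
print(f)  # [4.]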

3. Solution 2: replace numpy() with detach().numpy(); the last line again prints the NumPy array [4.]:

# Variable to NumPy
e = Variable(torch.Tensor([4]),requires_grad = True)
f = e.detach().numpy()
print(f)

4. To convert NumPy back to a Variable, first convert to a Tensor, then wrap it in a Variable:

# convert to a Tensor
g = torch.from_numpy(f)
print(g)
# convert to a Variable
g = Variable(g,requires_grad = True)
print(g)
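
One caveat worth knowing (an addition, not from the original post): torch.from_numpy shares memory with the source array, so mutating f also changes the data seen through g:

f[0] = 100.0  # mutate the NumPy array in place...
print(g)      # ...and the tensor wrapped by g reflects the change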

Dataset and DataLoader

I. Concepts

1. torch.utils.data.dataset provides the abstract class Dataset for building datasets. As anyone who has studied object-oriented programming knows, an abstract class cannot be instantiated, so we construct a subclass of it to create our dataset, inheriting and overriding its methods as needed.

2. The two most important methods are __len__ and __getitem__: the former returns the size of the dataset, the latter retrieves a sample and its label by index.

3. torch.utils.data.DataLoader is an iterator that makes it easy to read data with multiple worker processes, with support for batching, shuffling, and so on.

II. Creating and Using Dataset

1. First we import the dataset module; we also need NumPy:

import torch.utils.data.dataset as Dataset
import numpy as np

2. We create a subclass of Dataset:

(1) Initialization: store the data and the labels:

# initialization: store data and labels
def __init__(self, Data, Label):
    self.Data = Data
    self.Label = Label

(2) Return the size of the dataset:

# return the dataset size
def __len__(self):
    return len(self.Data)

(3) Retrieve a sample and its label:

# retrieve a sample and its label
def __getitem__(self, index):
    data = torch.Tensor(self.Data[index])
    label = torch.IntTensor(self.Label[index])
    return data, label

(4) The complete subclass definition is then:

import torch
import torch.utils.data.dataset as Dataset
import numpy as np
# create the subclass
class subDataset(Dataset.Dataset):
    # initialization: store data and labels
    def __init__(self, Data, Label):
        self.Data = Data
        self.Label = Label
    # return the dataset size
    def __len__(self):
        return len(self.Data)
    # retrieve a sample and its label
    def __getitem__(self, index):
        data = torch.Tensor(self.Data[index])
        label = torch.IntTensor(self.Label[index])
        return data, label


Note the following:

class subDataset(Dataset.Dataset):

If you write only Dataset instead of Dataset.Dataset, you get the error: module.__init__() takes at most 2 arguments (3 given).

This is because Dataset here is the module, not the class, so you must reference the class inside the module: hence Dataset.Dataset!
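
A common alternative (an aside, assuming a reasonably recent PyTorch) is to import the class directly, so the subclass line reads naturally:

from torch.utils.data import Dataset  # import the class itself, not the module

class subDataset(Dataset):  # no Dataset.Dataset needed with this import style
    pass  # define __init__, __len__, __getitem__ as above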
3. Outside the class, assign Data and Label:


Data = np.asarray([[1, 2], [3, 4],[5, 6], [7, 8]])
Label = np.asarray([[0], [1], [0], [2]])

4. Declare the main block; it creates an instance of the subclass, passing in Data and Label:

if __name__ == '__main__':
    dataset = subDataset(Data, Label)

5. Print the dataset's size and contents:

    print(dataset)
    print('dataset size:', dataset.__len__())
    print(dataset.__getitem__(0))
    print(dataset[0])

import torch
import torch.utils.data.dataset as Dataset
import numpy as np
 
Data = np.asarray([[1, 2], [3, 4],[5, 6], [7, 8]])
Label = np.asarray([[0], [1], [0], [2]])
# create the subclass
class subDataset(Dataset.Dataset):
    # initialization: store data and labels
    def __init__(self, Data, Label):
        self.Data = Data
        self.Label = Label
    # return the dataset size
    def __len__(self):
        return len(self.Data)
    # retrieve a sample and its label
    def __getitem__(self, index):
        data = torch.Tensor(self.Data[index])
        label = torch.IntTensor(self.Label[index])
        return data, label
 
if __name__ == '__main__':
    dataset = subDataset(Data, Label)
    print(dataset)
    print('dataset size:', dataset.__len__())
    print(dataset.__getitem__(0))
    print(dataset[0])

III. Creating and Using DataLoader

1. Import DataLoader:

import torch.utils.data.dataloader as DataLoader

2. Create the DataLoader, with batch_size=2, shuffle=False (keep the data order), and num_workers=4 (four worker subprocesses):

    # create the DataLoader iterator
    dataloader = DataLoader.DataLoader(dataset, batch_size=2, shuffle=False, num_workers=4)

3. Use enumerate to loop over the iterable:

for i, item in enumerate(dataloader):
        print('i:', i)
        data, label = item
        print('data:', data)
        print('label:', label)


4. The final code is as follows:

import torch
import torch.utils.data.dataset as Dataset
import torch.utils.data.dataloader as DataLoader
import numpy as np
 
Data = np.asarray([[1, 2], [3, 4],[5, 6], [7, 8]])
Label = np.asarray([[0], [1], [0], [2]])
# create the subclass
class subDataset(Dataset.Dataset):
    # initialization: store data and labels
    def __init__(self, Data, Label):
        self.Data = Data
        self.Label = Label
    # return the dataset size
    def __len__(self):
        return len(self.Data)
    # retrieve a sample and its label
    def __getitem__(self, index):
        data = torch.Tensor(self.Data[index])
        label = torch.IntTensor(self.Label[index])
        return data, label
 
if __name__ == '__main__':
    dataset = subDataset(Data, Label)
    print(dataset)
    print('dataset size:', dataset.__len__())
    print(dataset.__getitem__(0))
    print(dataset[0])
 
    # create the DataLoader iterator
    dataloader = DataLoader.DataLoader(dataset,batch_size= 2, shuffle = False, num_workers= 4)
    for i, item in enumerate(dataloader):
        print('i:', i)
        data, label = item
        print('data:', data)
        print('label:', label)
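
With four samples and batch_size=2, the loop yields two batches; roughly the following output is expected (exact formatting varies by PyTorch version):

# i: 0
# data:  tensor([[1., 2.], [3., 4.]])
# label: tensor([[0], [1]], dtype=torch.int32)
# i: 1
# data:  tensor([[5., 6.], [7., 8.]])
# label: tensor([[0], [2]], dtype=torch.int32)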



IV. Putting the Dataset Data and Labels on the GPU (a bug appears if this is done in the wrong place)

1. Modify the __getitem__ method:


        if torch.cuda.is_available():
            data = data.cuda()
            label = label.cuda()

The code becomes:


    # retrieve a sample and its label
    def __getitem__(self, index):
        data = torch.Tensor(self.Data[index])
        label = torch.IntTensor(self.Label[index])
        if torch.cuda.is_available():
            data = data.cuda()
            label = label.cuda()
        return data, label


2. An error is raised:

THCudaCheck FAIL file=c:\new-builder_3\win-wheel\pytorch\torch\csrc\generic\StorageSharing.cpp line=231 error=71 : operation not supported
THCudaCheck FAIL file=c:\new-builder_3\win-wheel\pytorch\torch\csrc\generic\StorageSharing.cpp line=231 error=71 : operation not supported
Process Process-2:
Traceback (most recent call last):
  File "D:\Anaconda3\lib\multiprocessing\process.py", line 258, in _bootstrap
    self.run()
  File "D:\Anaconda3\lib\multiprocessing\process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "D:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 110, in _worker_loop
    data_queue.put((idx, samples))
  File "D:\Anaconda3\lib\multiprocessing\queues.py", line 341, in put
    obj = _ForkingPickler.dumps(obj)
  File "D:\Anaconda3\lib\multiprocessing\reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
  File "D:\Anaconda3\lib\site-packages\torch\multiprocessing\reductions.py", line 109, in reduce_tensor
    (device, handle, storage_size, storage_offset) = storage._share_cuda_()
RuntimeError: cuda runtime error (71) : operation not supported at c:\new-builder_3\win-wheel\pytorch\torch\csrc\generic\StorageSharing.cpp:231
Process Process-1:
Traceback (most recent call last):
  File "D:\Anaconda3\lib\multiprocessing\process.py", line 258, in _bootstrap
    self.run()
  File "D:\Anaconda3\lib\multiprocessing\process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "D:\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 110, in _worker_loop
    data_queue.put((idx, samples))
  File "D:\Anaconda3\lib\multiprocessing\queues.py", line 341, in put
    obj = _ForkingPickler.dumps(obj)
  File "D:\Anaconda3\lib\multiprocessing\reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
  File "D:\Anaconda3\lib\site-packages\torch\multiprocessing\reductions.py", line 109, in reduce_tensor
    (device, handle, storage_size, storage_offset) = storage._share_cuda_()
RuntimeError: cuda runtime error (71) : operation not supported at c:\new-builder_3\win-wheel\pytorch\torch\csrc\generic\StorageSharing.cpp:231
 

3. So what do we do? There are two approaches:

(1) Simply change num_workers to 0:

dataloader = DataLoader.DataLoader(dataset,batch_size= 2, shuffle = False, num_workers= 0)
import torch
import torch.utils.data.dataset as Dataset
import torch.utils.data.dataloader as DataLoader
import numpy as np
 
Data = np.asarray([[1, 2], [3, 4],[5, 6], [7, 8]])
Label = np.asarray([[0], [1], [0], [2]])
# create the subclass
class subDataset(Dataset.Dataset):
    # initialization: store data and labels
    def __init__(self, Data, Label):
        self.Data = Data
        self.Label = Label
    # return the dataset size
    def __len__(self):
        return len(self.Data)
    # retrieve a sample and its label
    def __getitem__(self, index):
        data = torch.Tensor(self.Data[index])
        label = torch.IntTensor(self.Label[index])
        if torch.cuda.is_available():
            data = data.cuda()
            label = label.cuda()
        return data, label
 
if __name__ == '__main__':
    dataset = subDataset(Data, Label)
    print(dataset)
    print('dataset size:', dataset.__len__())
    print(dataset.__getitem__(0))
    print(dataset[0][0])
 
    # create the DataLoader iterator
    dataloader = DataLoader.DataLoader(dataset,batch_size= 2, shuffle = False, num_workers= 0)
    for i, item in enumerate(dataloader):
        print('i:', i)
        data, label = item
        print('data:', data)
        print('label:', label)


The result: each tensor now carries an extra device='cuda:0'.
(2) Move the Tensors to the GPU after the DataLoader step, i.e. delete the following from __getitem__:

if torch.cuda.is_available():
   data = data.cuda()
   label = label.cuda()

and add the deleted statements inside the for loop in the main block. The code becomes:

import torch
import torch.utils.data.dataset as Dataset
import torch.utils.data.dataloader as DataLoader
import numpy as np
 
Data = np.asarray([[1, 2], [3, 4],[5, 6], [7, 8]])
Label = np.asarray([[0], [1], [0], [2]])
# create the subclass
class subDataset(Dataset.Dataset):
    # initialization: store data and labels
    def __init__(self, Data, Label):
        self.Data = Data
        self.Label = Label
    # return the dataset size
    def __len__(self):
        return len(self.Data)
    # retrieve a sample and its label
    def __getitem__(self, index):
        data = torch.Tensor(self.Data[index])
        label = torch.IntTensor(self.Label[index])
        return data, label
 
if __name__ == '__main__':
    dataset = subDataset(Data, Label)
    print(dataset)
    print('dataset size:', dataset.__len__())
    print(dataset.__getitem__(0))
    print(dataset[0][0])
 
    # create the DataLoader iterator
    dataloader = DataLoader.DataLoader(dataset,batch_size= 2, shuffle = False, num_workers= 8)
    for i, item in enumerate(dataloader):
        print('i:', i)
        data, label = item
        if torch.cuda.is_available():
            data = data.cuda()
            label = label.cuda()
        print('data:', data)
        print('label:', label)


V. Summary of Dataset and DataLoader

1. Dataset is an abstract class; to build a dataset, derive a subclass and override methods such as __init__ and __getitem__.

2. DataLoader is an iterator for conveniently accessing the objects in a Dataset. Pay attention to the num_workers parameter: on the CPU it can be left alone, but if tensors are moved to the GPU inside the Dataset it must be 0; alternatively, move the Tensors to the GPU after the DataLoader step (a sketch of that pattern follows this list).

3. Data and labels come back as (data, label) tuples; iterate over the DataLoader with enumerate to access them.
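
A minimal sketch of the second pattern, keeping the Dataset on the CPU and moving each batch in the main process (pin_memory is an addition here; it speeds up host-to-GPU copies of CPU tensors):

# __getitem__ keeps returning CPU tensors, then:
dataloader = DataLoader.DataLoader(dataset, batch_size=2, shuffle=False,
                                   num_workers=4, pin_memory=True)
for data, label in dataloader:
    if torch.cuda.is_available():
        data = data.cuda()    # move each batch to the GPU in the main process
        label = label.cuda()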
