Comparing numpy and torch data operations

A side-by-side comparison of numpy and torch data operations, kept as a reference so the details aren't forgotten.

ndarray and tensor

import torch
import numpy as np

np_data = np.arange(6).reshape((2, 3))
torch_data = torch.arange(6)  # tensor (left 1-D, unlike the reshaped np_data)
tensor2array = torch_data.numpy()

print(
    '\nnumpy array:\n', np_data,
    '\ntorch tensor\n', torch_data,
    '\ntensor to array\n', tensor2array
)
"""
numpy array:
 [[0 1 2]
 [3 4 5]] 
torch tensor
 tensor([0, 1, 2, 3, 4, 5]) 
tensor to array
 [0 1 2 3 4 5]
"""

numpy and tensor dimensions

import torch
import numpy as np

np_data = np.array([[i for i in range(r * 4, (r + 1) * 4)] for r in range(5)], dtype=np.float32)
np_data = np_data.reshape((2, 5, 2))  # reshape 5x4 into 2x5x2
print('numpy shape', np_data.shape)  # shape of the array
print('numpy ndim', np_data.ndim)  # number of axes (the rank), i.e. the length of shape
print('numpy size', np_data.size)  # number of elements, i.e. the product of shape
print('numpy dtype', np_data.dtype)  # data type of the elements
print('numpy itemsize', np_data.itemsize)  # bytes per element: float32 -> 32/8 = 4

np_data = np_data[:, :, np.newaxis, :]  # add a dimension
print('numpy add dim, np.newaxis', np_data.shape)

np_data = np_data.transpose((2, 0, 1, 3))  # transpose
print('numpy transpose', np_data.shape)

np_data = np_data.squeeze(0)  # remove dim 0; only size-1 dims can be removed
print('numpy remove dim, np.squeeze', np_data.shape)

#######################
print()
#######################

torch_data = torch.Tensor(
    [[i for i in range(r * 4, (r + 1) * 4)] for r in range(5)]
)
torch_data = torch_data.view(2, 5, 2)  # the method is view here: view 5x4 as 2x5x2
print('torch shape', torch_data.shape)
print('torch ndim', torch_data.ndim)
print('torch size', torch_data.size())  # size is a method call
print('torch dtype', torch_data.dtype)  # no numpy-style itemsize; element_size() gives bytes per element

torch_data = torch_data.unsqueeze(2)  # add a dimension
print('torch add dim, torch.unsqueeze', torch_data.shape)

torch_data = torch_data.permute((2, 0, 1, 3))  # transpose via permute
print('torch permute', torch_data.shape)

torch_data = torch_data.squeeze(0)  # remove dim 0; only size-1 dims can be removed
print('torch remove dim, torch.squeeze', torch_data.shape)

"""
numpy shape (2, 5, 2)
numpy ndim 3
numpy size 20
numpy dtype float32
numpy itemsize 4
numpy add dim, np.newaxis (2, 5, 1, 2)
numpy transpose (1, 2, 5, 2)
numpy remove dim, np.squeeze (2, 5, 2)

torch shape torch.Size([2, 5, 2])
torch ndim 3
torch size torch.Size([2, 5, 2])
torch dtype torch.float32
torch add dim, torch.unsqueeze torch.Size([2, 5, 1, 2])
torch permute torch.Size([1, 2, 5, 2])
torch remove dim, torch.squeeze torch.Size([2, 5, 2])
"""

Computation comparison

import torch
import numpy as np

np_data = np.array([[1, 2], [1, 2]])
torch_data = torch.tensor(np_data)

np_t = np.array([0, np.pi / 4., np.pi / 2.])
torch_t = torch.tensor(np_t)

# mean
np_x = np.arange(5)
torch_x = torch.FloatTensor([i for i in range(5)])
# torch.mean only works on floating-point values, not ints, so a FloatTensor is required

print(
    '\nnumpy.abs:\n', np.abs(np_data),
    '\ntorch.abs:\n', torch.abs(torch_data),
    '\nnumpy.sin:\n', np.sin(np_t),
    '\ntorch.sin:\n', torch.sin(torch_t),
    '\nnumpy.mean\n', np.mean(np_x),
    '\ntorch.mean\n', torch.mean(torch_x)
)
"""
numpy.abs:
 [[1 2]
 [1 2]] 
torch.abs:
 tensor([[1, 2],
        [1, 2]]) 
numpy.sin:
 [0.         0.70710678 1.        ] 
torch.sin:
 tensor([0.0000, 0.7071, 1.0000], dtype=torch.float64) 
numpy.mean
 2.0 
torch.mean
 tensor(2.)
"""

Matrix multiplication comparison

import torch
import numpy as np

# numpy matrix multiplication
na = np.array([[1, 2], 
               [3, 4]])
nb = np.array([[1, 1], 
               [0, 1]])
nc = np.matmul(na, nb)  # matrix product
nd = na * nb  # element-wise product
print(nc, nd, sep='\n')

# torch matrix multiplication
ta = torch.FloatTensor([[1, 2], 
                        [3, 4]])
tb = torch.FloatTensor([[1, 1], 
                        [0, 1]])
tc = torch.mm(ta, tb)  # matrix product
td = ta * tb  # element-wise product
print(tc, td, sep='\n')

"""
[[1 3]
 [3 7]]
[[1 2]
 [0 4]]
tensor([[1., 3.],
        [3., 7.]])
tensor([[1., 2.],
        [0., 4.]])
"""

Computing gradients of tensors

import torch
from torch.autograd import Variable

# Variable (deprecated since PyTorch 0.4 in favor of requires_grad=True tensors; see the sketch after this block)
tensor = torch.FloatTensor([[1, 2], [3, 4]])
variable = Variable(tensor, requires_grad=True)
print(tensor)
print(variable)
t_mean = torch.mean(tensor * tensor)
v_mean = torch.mean(variable * variable)
print(t_mean)
print(v_mean)

# backpropagate to compute the gradients
v_mean.backward()
print(variable.grad)  # d(mean(x^2))/dx = x / 2 -> [[0.5, 1.0], [1.5, 2.0]]

print(variable)  # the Variable form
print(variable.data)  # the tensor form
print(variable.data.numpy())  # the numpy form; final results are usually taken out as numpy
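
Variable has been deprecated since PyTorch 0.4; a plain tensor built with requires_grad=True does the same job. A minimal modern equivalent of the block above:

import torch

x = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)
mean = torch.mean(x * x)  # mean of x^2 over 4 elements
mean.backward()  # d(mean)/dx = 2x / 4 = x / 2
print(x.grad)  # tensor([[0.5000, 1.0000], [1.5000, 2.0000]])
print(x.detach().numpy())  # detach() is the modern replacement for .data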

Tensor pooling operations

import torch
import torch.nn as nn
from torch.autograd import Variable


# max pooling and unpooling
pool = nn.MaxPool2d(
    kernel_size=2,
    stride=2,
    return_indices=True,
    ceil_mode=True
)
unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
in_data = Variable(
    torch.Tensor([[[[1, 2, 3, 4],
                    [5, 6, 7, 8],
                    [9, 10, 11, 12],
                    [13, 14, 15, 16]]]])
)
print('\nin_data\n', in_data)
print(in_data.shape)

out_data, indices = pool(in_data)
print('\nout_data\n', out_data)
print('\nindices\n', indices)

un_data = unpool(out_data, indices)
print('\nun_data\n', un_data)

"""
in_data
 tensor([[[[ 1.,  2.,  3.,  4.],
          [ 5.,  6.,  7.,  8.],
          [ 9., 10., 11., 12.],
          [13., 14., 15., 16.]]]])
torch.Size([1, 1, 4, 4])

out_data
 tensor([[[[ 6.,  8.],
          [14., 16.]]]])

indices
 tensor([[[[ 5,  7],
          [13, 15]]]])

un_data
 tensor([[[[ 0.,  0.,  0.,  0.],
          [ 0.,  6.,  0.,  8.],
          [ 0.,  0.,  0.,  0.],
          [ 0., 14.,  0., 16.]]]])
"""

 
