Understanding multi-dimensional arrays and numpy/torch matrix operations

Beginners can grasp 1-D and 2-D arrays easily, but 3-D and 4-D arrays are harder. In fact, arrays of up to four dimensions can be understood by mapping them onto an image.

1-D: W, dim = 0

2-D: (H, W), dims = (0, 1)

3-D: (C, H, W), dims = (0, 1, 2)

4-D: (B, C, H, W), dims = (0, 1, 2, 3)

This is also why deep learning frameworks default to the N, C, H, W layout, as the short sketch below illustrates.
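To make the mapping concrete, here is a minimal sketch (the shapes 4, 3, 32, 32 are made-up example values): indexing a (B, C, H, W) tensor peels off one dimension at a time.

import torch

batch = torch.rand(4, 3, 32, 32)   # B=4 images, C=3 channels, H=32, W=32
print(batch.shape)                 # torch.Size([4, 3, 32, 32])
print(batch[0].shape)              # one image: (C, H, W) = (3, 32, 32)
print(batch[0, 0].shape)           # one channel: (H, W) = (32, 32)
print(batch[0, 0, 0].shape)        # one row: (W,) = (32,)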

With this picture in mind, many multi-dimensional matrix operations become much easier to follow.

import torch

Transformation operations

torch.repeat

a = torch.tensor([1, 2, 3, 1.1, 2.1, 3.1]).view(2, 3)
print(a)
print(a.repeat(1, 3))  # tile 3 copies along dim 1 (width) -> shape (2, 9)
print(a.repeat(3, 1))  # tile 3 copies along dim 0 (height) -> shape (6, 3)
tensor([[1.0000, 2.0000, 3.0000],
        [1.1000, 2.1000, 3.1000]])
tensor([[1.0000, 2.0000, 3.0000, 1.0000, 2.0000, 3.0000, 1.0000, 2.0000, 3.0000],
        [1.1000, 2.1000, 3.1000, 1.1000, 2.1000, 3.1000, 1.1000, 2.1000, 3.1000]])
tensor([[1.0000, 2.0000, 3.0000],
        [1.1000, 2.1000, 3.1000],
        [1.0000, 2.0000, 3.0000],
        [1.1000, 2.1000, 3.1000],
        [1.0000, 2.0000, 3.0000],
        [1.1000, 2.1000, 3.1000]])
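Building on the image analogy, repeat can also prepend new dimensions when it is given more repeat factors than the tensor has; a small sketch with made-up shapes:

a = torch.rand(3, 32, 32)        # a single (C, H, W) image, example shape
b = a.repeat(8, 1, 1, 1)         # prepend a batch dim and tile 8 copies -> (B, C, H, W) = (8, 3, 32, 32)
print(b.shape)
print(torch.equal(b[0], b[7]))   # True: every batch entry is a copy of a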

torch.view

a = torch.tensor([1, 2, 3, 1.1, 2.1, 3.1]).view(2, 3)
print(a)
print(a.view(1, 6))
print(a.view(3, 2))
# view first flattens the matrix to 1-D (in memory order), then reshapes it to the new height and width
tensor([[1.0000, 2.0000, 3.0000],
        [1.1000, 2.1000, 3.1000]])
tensor([[1.0000, 2.0000, 3.0000, 1.1000, 2.1000, 3.1000]])
tensor([[1.0000, 2.0000],
        [3.0000, 1.1000],
        [2.1000, 3.1000]])
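A small follow-on sketch: view only rearranges the same elements, so the target shape must preserve the total element count, and -1 lets PyTorch infer one dimension.

a = torch.tensor([1, 2, 3, 1.1, 2.1, 3.1]).view(2, 3)
print(a.view(-1))       # flatten to shape (6,); -1 infers the size
print(a.view(6, 1))     # any shape with 6 elements works
# a.view(4, 2)          # would raise a RuntimeError: 8 elements requested, but a only has 6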

torch.permute

a = torch.rand(2, 3, 3)
print(a)
print(a.permute(2, 0, 1))  # (c, h, w) -> (w, c, h)
print(a.permute(0, 2, 1).permute(1, 0, 2))
# The two operations above are equivalent: first swap h and w with the channel dim fixed,
# then swap the channel dim and h with w fixed. Picturing the tensor as a rotating cuboid also helps.
tensor([[[0.9450, 0.7652, 0.4687],
         [0.1275, 0.3541, 0.1216],
         [0.5265, 0.2152, 0.9015]],

        [[0.0413, 0.0178, 0.5594],
         [0.5511, 0.9967, 0.9560],
         [0.2367, 0.6877, 0.4822]]])
tensor([[[0.9450, 0.1275, 0.5265],
         [0.0413, 0.5511, 0.2367]],

        [[0.7652, 0.3541, 0.2152],
         [0.0178, 0.9967, 0.6877]],

        [[0.4687, 0.1216, 0.9015],
         [0.5594, 0.9560, 0.4822]]])
tensor([[[0.9450, 0.1275, 0.5265],
         [0.0413, 0.5511, 0.2367]],

        [[0.7652, 0.3541, 0.2152],
         [0.0178, 0.9967, 0.6877]],

        [[0.4687, 0.1216, 0.9015],
         [0.5594, 0.9560, 0.4822]]])
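To tie this back to the image layout above, a common use of permute is converting an (H, W, C) image to (C, H, W); a minimal sketch with made-up shapes:

img_hwc = torch.rand(32, 32, 3)                     # an (H, W, C) image, example shape
img_chw = img_hwc.permute(2, 0, 1)                  # reorder axes to (C, H, W) without copying data
print(img_chw.shape)                                # torch.Size([3, 32, 32])
print(torch.equal(img_chw[0], img_hwc[:, :, 0]))    # True: channel 0 stays a whole plane
flat = img_chw.contiguous().view(3, -1)             # permute makes the tensor non-contiguous, so call .contiguous() before .view()
print(flat.shape)                                   # torch.Size([3, 1024])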

Implementing a for loop with tensor operations

import torch
totalClass = 3
feature_dim = 2
centers = torch.tensor([1, 2, 3, 1.1, 2.1, 3.1]).view(feature_dim, totalClass).float()

# centers_inter[d, i, j] == centers[d, j]: the row of class centers repeated down each slice -> (feature_dim, totalClass, totalClass) = (2, 3, 3)
centers_inter = centers.repeat(1, totalClass).view(feature_dim, totalClass, totalClass)
# centers_self_rep[d, i, j] == centers[d, i]: each center value repeated across each row -> (2, 3, 3)
centers_self_rep = centers.repeat(totalClass, 1).permute(1, 0).view(totalClass, totalClass, feature_dim).permute(2, 0, 1)
# pairwise differences centers_self_rep - centers_inter; alpha=-1 replaces the deprecated torch.add(x, -1, y) form
center_diff = torch.add(centers_self_rep, centers_inter, alpha=-1)
print(centers)
print(centers_inter)
print(centers_self_rep)
print(center_diff)
tensor([[1.0000, 2.0000, 3.0000],
        [1.1000, 2.1000, 3.1000]])
tensor([[[1.0000, 2.0000, 3.0000],
         [1.0000, 2.0000, 3.0000],
         [1.0000, 2.0000, 3.0000]],

        [[1.1000, 2.1000, 3.1000],
         [1.1000, 2.1000, 3.1000],
         [1.1000, 2.1000, 3.1000]]])
tensor([[[1.0000, 1.0000, 1.0000],
         [2.0000, 2.0000, 2.0000],
         [3.0000, 3.0000, 3.0000]],

        [[1.1000, 1.1000, 1.1000],
         [2.1000, 2.1000, 2.1000],
         [3.1000, 3.1000, 3.1000]]])
tensor([[[ 0.0000, -1.0000, -2.0000],
         [ 1.0000,  0.0000, -1.0000],
         [ 2.0000,  1.0000,  0.0000]],

        [[ 0.0000, -1.0000, -2.0000],
         [ 1.0000,  0.0000, -1.0000],
         [ 2.0000,  1.0000,  0.0000]]])
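For comparison, here is the same computation written with explicit Python loops; the tensor ops above simply vectorize it (this sketch reuses the variables from the block above):

center_diff_loop = torch.zeros(feature_dim, totalClass, totalClass)
for d in range(feature_dim):
    for i in range(totalClass):
        for j in range(totalClass):
            center_diff_loop[d, i, j] = centers[d, i] - centers[d, j]
print(torch.allclose(center_diff_loop, center_diff))  # True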
