PyTorch Parallel Training Example

A PyTorch multi-GPU training example using nn.DataParallel.
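nn.DataParallel replicates the model on each visible GPU, splits every input batch along the first (batch) dimension, runs the replicas in parallel, and gathers the outputs back on the default device. The print statements inside and outside the model below make this batch splitting visible.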

import torch
import torch.nn as nn
from torch.utils.data import DataLoader,Dataset

# Parameters for the model and DataLoader
input_size = 5
output_size = 2
batch_size = 30
data_size = 100

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class RandomDataset(Dataset):

    def __init__(self, size, length):
        self.len = length
        self.data = torch.randn(length, size)

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len


rand_loader = DataLoader(dataset=RandomDataset(input_size, data_size),
                         batch_size=batch_size, shuffle=True)


class Model(nn.Module):

    # Our model
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        print("\tIn Model:input size", input.size(), "output size", output.size())
        return output


model = Model(input_size, output_size)

# Wrap the model with DataParallel only when more than one GPU is available
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)


model.to(device)  # falls back to CPU when CUDA is unavailable


for data in rand_loader:
    input = data.to(device)
    output = model(input)
    print("Outside: input size", input.size(), "output size", output.size())
