How do I fully utilize the GPU when training a model on FloydHub?


I'm training the following model on FloydHub from a Jupyter Notebook. However, every time I train the model it takes a long time (1 minute), and the stats shown below the notebook say only 2% of the GPU is being used. I've run torch.cuda.is_available() and it returns True.

import torch
from torch import nn,optim
import torch.nn.functional as F
from torchvision import datasets,transforms

transform = transforms.Compose([transforms.ToTensor(),
                                # MNIST images have a single channel, so mean/std are one-element tuples
                                transforms.Normalize((0.5,), (0.5,)),
                                ])
trainset = datasets.MNIST('~/.pytorch/MNIST_data/',download=True,train=True,transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,batch_size=64,shuffle=True)

testset = datasets.MNIST('~/.pytorch/MNIST_data/',download=True,train=False,transform=transform)
testloader = torch.utils.data.DataLoader(testset,batch_size=64,shuffle=True)

class Classifier(nn.Module):

    def __init__(self):
        super().__init__()

        self.hidden = nn.Linear(784,256).cuda()
        self.output = nn.Linear(256,10).cuda()
        self.dropout = nn.Dropout(p=0.2).cuda()

    def forward(self,x):
        x = x.view(x.shape[0],-1).cuda()    
        x = self.hidden(x).cuda()
        x = torch.sigmoid(x).cuda()
        x = self.dropout(x).cuda()
        x = self.output(x).cuda()
        x = F.log_softmax(x,dim=1).cuda()
        return x.cuda()

model = Classifier()
model.cuda()
criterion = nn.NLLLoss().cuda()
optimizer = optim.SGD(model.parameters(),lr=0.5)

epochs = 30
training_losses = []
test_losses = []


for e in range(epochs):
    train_loss = 0
    test_loss = 0
    accuracy = 0
    for images,labels in trainloader:
        optimizer.zero_grad()
        output = model(images)
        labels = labels.cuda()
        loss = criterion(output,labels)
        loss.backward()
        optimizer.step()
        train_loss+=loss.item()

    with torch.no_grad():
        # set the model to testing mode
        model.eval()
        for images,labels in testloader:
            output = model(images)
            labels = labels.cuda()
            test_loss += criterion(output, labels).item()
            ps = torch.exp(output)
            # get the class with the highest probability
            _,top_class = ps.topk(1,dim=1)
            equals = top_class == labels.view(*top_class.shape)
            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()

    model.train()

    training_losses.append(train_loss/len(trainloader))
    test_losses.append(test_loss/len(testloader))

    if((e+1)%5 == 0):
        print(f"Epoch:{e+1}\n",
                f"Training Loss:{train_loss/len(trainloader)}\n",
                f"Test Loss:{test_loss/len(testloader)}\n",
                f"Test Accuracy:{(accuracy/len(testloader)*100)}\n\n")
machine-learning deep-learning nvidia pytorch
1 Answer

Three suggestions, given that you are using MNIST (a small dataset):

- Preload your data: that is, instead of using the standard dataloader, move the dataset onto the GPU up front with .to(cuda) and iterate over that (see the first sketch below).
- Increase the batch size.
- Instead of an MLP (linear layers), try a CNN (see the second sketch below).
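A minimal sketch of the first two suggestions combined, assuming the same MNIST setup as in the question; the batch size of 512 and the tensor names are illustrative choices, not something the answer prescribes:

import torch
from torchvision import datasets, transforms

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)

# Stack the whole dataset into two tensors and copy them to the GPU once,
# instead of paying a host-to-device transfer for every 64-image batch.
train_images = torch.stack([img for img, _ in trainset]).cuda()   # [60000, 1, 28, 28]
train_labels = torch.tensor([lbl for _, lbl in trainset]).cuda()  # [60000]

batch_size = 512  # a larger batch keeps the GPU busier per step
for i in range(0, len(train_images), batch_size):
    images = train_images[i:i + batch_size]
    labels = train_labels[i:i + batch_size]
    # ... forward/backward exactly as in the question, with no per-batch .cuda() calls

MNIST as float32 tensors is only about 180 MB, so it fits comfortably in GPU memory; this trick stops being viable once a dataset no longer fits on the device.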
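And a sketch of the third suggestion, swapping the MLP for a small CNN; this particular architecture (two conv blocks and a linear head) is just one illustrative choice, the answer does not specify one:

import torch
from torch import nn
import torch.nn.functional as F

class ConvClassifier(nn.Module):

    def __init__(self):
        super().__init__()
        # Each max-pool halves the 28x28 input, leaving 64 feature
        # maps of size 7x7 in front of the linear head.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.fc = nn.Linear(64 * 7 * 7, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(x.shape[0], -1)
        return F.log_softmax(self.fc(x), dim=1)

model = ConvClassifier().cuda()

Unlike the MLP, this model consumes the [N, 1, 28, 28] batches directly, so the flatten happens only after the convolutions.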
