【问题标题】:Neural networks pytorch神经网络 pytorch
【发布时间】:2018-12-16 10:59:46
【问题描述】:

我是 pytorch 的新手,正在实现自己的图像分类器网络。然而,我看到每个 epoch 的训练准确度都非常好,但验证准确度为 0。我注意到直到第 5 个 epoch。我正在使用 Adam 优化器并且学习率是 0.001。还在每个 epoch 之后将整个数据集重新采样到训练 n 个验证集。请帮助我哪里出错了。

这是我的代码:

### where is data?
data_dir_train = '/home/sup/PycharmProjects/deep_learning/CNN_Data/training_set'
data_dir_test = '/home/sup/PycharmProjects/deep_learning/CNN_Data/test_set'

# Define your batch_size
batch_size = 64

allData = datasets.ImageFolder(root=data_dir_train,transform=transformArr)


# We need to further split our training dataset into training and validation sets.
def split_train_validation():
    # Define the indices
    num_train = len(allData)
    indices = list(range(num_train)) # start with all the indices in training set
    split = int(np.floor(0.2 * num_train)) # define the split size
    #train_idx, valid_idx = indices[split:], indices[:split]

    # Random, non-contiguous split
    validation_idx = np.random.choice(indices, size=split, replace=False)
    train_idx = list(set(indices) - set(validation_idx))
    # define our samplers -- we use a SubsetRandomSampler because it will return
    # a random subset of the split defined by the given indices without replacement
    train_sampler = SubsetRandomSampler(train_idx)
    validation_sampler = SubsetRandomSampler(validation_idx)

    #train_loader = DataLoader(allData,batch_size=batch_size,sampler=train_sampler,shuffle=False,num_workers=4)
    #validation_loader = DataLoader(dataset=allData,batch_size=1, sampler=validation_sampler)

    return (train_sampler,validation_sampler)

培训

from torch.optim import Adam
import torch
import createNN
import torch.nn as nn
import loadData as ld
from torch.autograd import  Variable
from torch.utils.data import DataLoader

# check if cuda - GPU support available
cuda = torch.cuda.is_available()

#create model, optimizer and loss function
model = createNN.ConvNet(class_num=2)
optimizer = Adam(model.parameters(),lr=.001,weight_decay=.0001)
loss_func = nn.CrossEntropyLoss()

if cuda:
    model.cuda()

# function to save model
def save_model(epoch):
    torch.save(model.load_state_dict(),'imageClassifier_{}.model'.format(epoch))
    print('saved model at epoch',epoch)

def exp_lr_scheduler (  epoch , init_lr = args.lr, weight_decay = args.weight_decay, lr_decay_epoch = cf.lr_decay_epoch):
 lr = init_lr * ( 0.5 ** (epoch // lr_decay_epoch))

def train(num_epochs):
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('\n\nEpoch {}'.format(epoch))
        train_sampler, validation_sampler = ld.split_train_validation()
        train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
        validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)
        model.train()
        acc = 0.0
        loss = 0.0
        total = 0
        # train model with training data
        for i,(images,labels) in enumerate(train_loader):
            # if cuda then move to GPU
            if cuda:
                images = images.cuda()
                labels = labels.cuda()
            # Variable class wraps a tensor and we can calculate grad
            images = Variable(images)
            labels = Variable(labels)
            # reset accumulated gradients for each batch
            optimizer.zero_grad()
            # pass images to model which returns preiction
            output = model(images)
            #calculate the loss based on prediction and actual
            loss = loss_func(output,labels)
            # backpropagate the loss and compute gradient
            loss.backward()
            # update weights as per the computed gradients
            optimizer.step()

            # prediction class
            predVal , predClass = torch.max(output.data, 1)
            acc += torch.sum(predClass == labels.data)
            loss += loss.cpu().data[0]
            total += labels.size(0)
        # print the statistics
        train_acc = acc/total
        train_loss = loss / total
        print('Mean train acc = {} over epoch = {}'.format(epoch,acc))
        print('Mean train loss = {} over epoch = {}'.format(epoch, loss))

        # Valid model with validataion data
        model.eval()
        acc = 0.0
        loss = 0.0
        total = 0
        for i,(images,labels) in enumerate(validation_loader):
            # if cuda then move to GPU
            if cuda:
                images = images.cuda()
                labels = labels.cuda()
            # Variable class wraps a tensor and we can calculate grad
            images = Variable(images)
            labels = Variable(labels)
            # reset accumulated gradients for each batch
            optimizer.zero_grad()
            # pass images to model which returns preiction
            output = model(images)
            #calculate the loss based on prediction and actual
            loss = loss_func(output,labels)
            # backpropagate the loss and compute gradient
            loss.backward()
            # update weights as per the computed gradients
            optimizer.step()

            # prediction class
            predVal, predClass = torch.max(output.data, 1)
            acc += torch.sum(predClass == labels.data)
            loss += loss.cpu().data[0]
            total += labels.size(0)
        # print the statistics
        valid_acc = acc / total
        valid_loss = loss / total
        print('Mean train acc = {} over epoch = {}'.format(epoch, valid_acc))
        print('Mean train loss = {} over epoch = {}'.format(epoch, valid_loss))

        if(best_acc<valid_acc):
            best_acc = valid_acc
            save_model(epoch)

        # at 30th epoch we save the model
        if (epoch == 30):
            save_model(epoch)


train(20)

【问题讨论】:

    标签: python machine-learning conv-neural-network pytorch


    【解决方案1】:

    我认为您没有考虑到 acc += torch.sum(predClass == labels.data) 返回张量而不是浮点值。根据您使用的 pytorch 版本,我认为您应该将其更改为:

    acc += torch.sum(predClass == labels.data).cpu().data[0] #pytorch 0.3
    acc += torch.sum(predClass == labels.data).item() #pytorch 0.4
    

    虽然您的代码似乎适用于旧 pytorch 版本,但我建议您升级到 0.4 版本。

    另外,我在您的代码中提到了其他问题/错别字。

    您正在为每个时期加载数据集。

    for epoch in range(num_epochs):
        print('\n\nEpoch {}'.format(epoch))
        train_sampler, validation_sampler = ld.split_train_validation()
        train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
        validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)
        ...
    

    这不应该发生,加载一次就足够了

    train_sampler, validation_sampler = ld.split_train_validation()
    train_loader = DataLoader(ld.allData, batch_size=30, sampler=train_sampler, shuffle=False)
    validation_loader = DataLoader(dataset=ld.allData, batch_size=1, sampler=validation_sampler)
    for epoch in range(num_epochs):
        print('\n\nEpoch {}'.format(epoch))
        ...
    

    在你拥有的训练部分(这不会发生在验证中):

    train_acc = acc/total
    train_loss = loss / total
    print('Mean train acc = {} over epoch = {}'.format(epoch,acc))
    print('Mean train loss = {} over epoch = {}'.format(epoch, loss))
    

    您在哪里打印 acc 而不是 train_acc

    另外,在验证部分我提到你正在打印print('Mean train acc = {} over epoch = {}'.format(epoch, valid_acc)),而它应该是'Mean val acc'

    更改这行代码,使用我创建的标准模型和 CIFAR 数据集,训练似乎收敛,准确性在每个时期都会增加,而平均损失值会降低。

    希望能帮到你!

    【讨论】:

      猜你喜欢
      • 2021-03-11
      • 2019-03-08
      • 1970-01-01
      • 1970-01-01
      • 2019-05-16
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      相关资源
      最近更新 更多