【问题标题】:RuntimeError: CUDA out of memory in training with pytorch "Pose2Seg"（RuntimeError：使用 pytorch "Pose2Seg" 进行训练时 CUDA 内存不足）
【发布时间】:2020-06-23 07:04:26
【问题描述】:

当我运行这段代码时https://github.com/erezposner/Pose2Seg

我完成了本教程中的所有步骤https://towardsdatascience.com/detection-free-human-instance-segmentation-using-pose2seg-and-pytorch-72f48dc4d23e

但我在 cuda 中有这个错误:

RuntimeError: CUDA out of memory. Tried to allocate 128.00 MiB (GPU 0; 4.00 GiB total capacity; 2.57 GiB already allocated; 74.77 MiB free; 2.85 GiB reserved in total by PyTorch) (malloc at ..\c10\cuda\CUDACachingAllocator.cpp:289) (no backtrace available)

我该如何解决这个问题?

(base) C:\Users\ASUS\Pose2Seg>python train.py
06-23 07:30:01 ===========> loading model <===========
total params in model is 334, in pretrained model is 336, init 334
06-23 07:30:03 ===========> loading data <===========
loading annotations into memory...
Done (t=4.56s)
creating index...
index created!
06-23 07:30:08 ===========> set optimizer <===========
06-23 07:30:08 ===========>   training    <===========
C:\Users\ASUS\Anaconda3\Anaconda\lib\site-packages\torch\nn\functional.py:2796: UserWarning: nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.
  warnings.warn("nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.")
C:\Users\ASUS\Anaconda3\Anaconda\lib\site-packages\torch\nn\functional.py:2973: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
  "See the documentation of nn.Upsample for details.".format(mode))
C:\Users\ASUS\Anaconda3\Anaconda\lib\site-packages\torch\nn\functional.py:3289: UserWarning: Default grid_sample and affine_grid behavior has changed to align_corners=False since 1.3.0. Please specify align_corners=True if the old behavior is desired. See the documentation of grid_sample for details.
  warnings.warn("Default grid_sample and affine_grid behavior has changed "
C:\Users\ASUS\Anaconda3\Anaconda\lib\site-packages\torch\nn\functional.py:3226: UserWarning: Default grid_sample and affine_grid behavior has changed to align_corners=False since 1.3.0. Please specify align_corners=True if the old behavior is desired. See the documentation of grid_sample for details.
  warnings.warn("Default grid_sample and affine_grid behavior has changed "
06-23 07:30:13 Epoch: [0][0/56599]      Lr: [6.68e-05]  Time 4.228 (4.228)      Data 0.028 (0.028)      loss 0.85738 (0.85738)
06-23 07:30:22 Epoch: [0][10/56599]     Lr: [6.813333333333334e-05]     Time 0.847 (1.280)      Data 0.012 (0.051)      loss 0.44195 (0.71130)
06-23 07:30:33 Epoch: [0][20/56599]     Lr: [6.946666666666667e-05]     Time 0.882 (1.180)      Data 0.045 (0.037)      loss 0.41523 (0.60743)
Traceback (most recent call last):
  File "train.py", line 157, in <module>
    optimizer, epoch, iteration)
  File "train.py", line 74, in train
    loss.backward()
  File "C:\Users\ASUS\Anaconda3\Anaconda\lib\site-packages\torch\tensor.py", line 198, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "C:\Users\ASUS\Anaconda3\Anaconda\lib\site-packages\torch\autograd\__init__.py", line 100, in backward
    allow_unreachable=True)  # allow_unreachable flag
RuntimeError: CUDA out of memory. Tried to allocate 128.00 MiB (GPU 0; 4.00 GiB total capacity; 2.57 GiB already allocated; 74.77 MiB free; 2.85 GiB reserved in total by PyTorch) (malloc at ..\c10\cuda\CUDACachingAllocator.cpp:289)
(no backtrace available)

cudatoolkit == 10.1.243

python3.6.5

库的版本:

>>> import tensorflow
2020-06-23 09:45:01.840827: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudart64_101.dll
>>> tensorflow.__version__
'2.2.0'
>>> import keras
Using TensorFlow backend.
>>> keras.__version__
'2.3.1'
>>> import torch
>>> torch.__version__
'1.5.1'
>>> import torchvision
>>> torchvision.__version__
'0.6.1'
>>> import pycocotools

train.py 代码

import os
import sys
import time
import logging
import argparse
import numpy as np
from tqdm import tqdm

import torch
import torch.utils.data

from lib.averageMeter import AverageMeters
from lib.logger import colorlogger
from lib.timer import Timers
from lib.averageMeter import AverageMeters
from lib.torch_utils import adjust_learning_rate
import os
from modeling.build_model import Pose2Seg
from datasets.CocoDatasetInfo import CocoDatasetInfo, annToMask
from test import test

NAME = "release_base"  # experiment tag; used to build log/snapshot directory names

# Set `LOG_DIR` and `SNAPSHOT_DIR`


def setup_logdir():
    """Create timestamped log and snapshot directories under the CWD.

    Directory names combine the global experiment ``NAME`` with the
    current local time, so every run gets its own pair of folders.

    Returns:
        tuple[str, str]: ``(LOGDIR, SNAPSHOTDIR)`` paths, both created.
    """
    timestamp = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
    run_name = '%s_%s' % (NAME, timestamp)
    logdir = os.path.join(os.getcwd(), 'logs', run_name)
    snapshotdir = os.path.join(os.getcwd(), 'snapshot', run_name)
    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(logdir, exist_ok=True)
    os.makedirs(snapshotdir, exist_ok=True)
    return logdir, snapshotdir


# Create this run's output directories once at import time.
LOGDIR, SNAPSHOTDIR = setup_logdir()

# Set logging: colorized console + file logger writing into LOGDIR.
logger = colorlogger(log_dir=LOGDIR, log_name='train_logs.txt')

# Set global timer (shared across train/test helpers).
timers = Timers()

# Set global average meters for data/batch time and loss tracking.
averMeters = AverageMeters()


def train(model, dataloader, optimizer, epoch, iteration):
    """Train `model` for one full pass over `dataloader`.

    Args:
        model: Pose2Seg network; its forward returns the scalar training loss.
        dataloader: yields kwargs dicts consumed as ``model(**inputs)``.
        optimizer: torch optimizer stepping ``model.parameters()``.
        epoch (int): current epoch index (used for logging/snapshots only).
        iteration (int): global iteration counter carried across epochs.

    Returns:
        int: the updated global iteration counter.
    """
    # switch to train mode
    model.train()

    averMeters.clear()
    end = time.time()
    for i, inputs in enumerate(dataloader):
        averMeters['data_time'].update(time.time() - end)
        iteration += 1

        # warm-up + step-decay schedule (STEPS are global iteration counts)
        lr = adjust_learning_rate(optimizer, iteration, BASE_LR=0.0002,
                                  WARM_UP_FACTOR=1.0/3, WARM_UP_ITERS=1000,
                                  STEPS=(0, 14150*15, 14150*20), GAMMA=0.1)

        # forward: the model directly returns the scalar loss
        loss = model(**inputs)

        # Record via loss.item() instead of the deprecated loss.data.item();
        # .item() extracts the python float without touching .data.
        averMeters['loss'].update(loss.item())

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Drop the reference to the loss (and thus the freed graph) before
        # the next forward pass so the CUDA caching allocator can reuse the
        # memory — helps on small-memory GPUs.
        del loss

        # measure elapsed time
        averMeters['batch_time'].update(time.time() - end)
        end = time.time()

        if i % 10 == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'Lr: [{3}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        'loss {loss.val:.5f} ({loss.avg:.5f})\t'
                        .format(
                            epoch, i, len(dataloader), lr,
                            batch_time=averMeters['batch_time'], data_time=averMeters['data_time'],
                            loss=averMeters['loss'])
                        )

        if i % 10000 == 0:
            # periodic numbered snapshot plus a rolling 'last.pkl'
            torch.save(model.state_dict(), os.path.join(
                SNAPSHOTDIR, '%d_%d.pkl' % (epoch, i)))
            torch.save(model.state_dict(), os.path.join(
                SNAPSHOTDIR, 'last.pkl'))

    return iteration


class Dataset(torch.utils.data.Dataset):
    """COCO-person training dataset for Pose2Seg.

    Each item is one image with its ground-truth keypoints and
    per-instance binary masks. Paths are parameterized (defaults keep
    the original hard-coded locations) so the class works outside the
    original machine.
    """

    def __init__(self,
                 ImageRoot=r'C:\Users\ASUS\Pose2Seg\data\coco2017\train2017',
                 AnnoFile=r'C:\Users\ASUS\Pose2Seg\data\coco2017\annotations\person_keypoints_train2017_pose2seg.json'):
        self.datainfos = CocoDatasetInfo(
            ImageRoot, AnnoFile, onlyperson=True, loadimg=True)

    def __len__(self):
        return len(self.datainfos)

    def __getitem__(self, idx):
        rawdata = self.datainfos[idx]
        img = rawdata['data']

        height, width = img.shape[0:2]
        # (N, 3, 17) -> (N, 17, 3): one row per keypoint (x, y, visibility)
        gt_kpts = np.float32(rawdata['gt_keypoints']).transpose(0, 2, 1)
        gt_segms = rawdata['segms']
        # Rasterize each COCO polygon/RLE segment into a binary mask.
        gt_masks = np.array([annToMask(segm, height, width)
                             for segm in gt_segms])

        return {'img': img, 'kpts': gt_kpts, 'masks': gt_masks}

    def collate_fn(self, batch):
        """Group per-sample dicts into lists (images vary in size, so no
        tensor stacking is possible here)."""
        batchimgs = [data['img'] for data in batch]
        batchkpts = [data['kpts'] for data in batch]
        batchmasks = [data['masks'] for data in batch]
        return {'batchimgs': batchimgs, 'batchkpts': batchkpts, 'batchmasks': batchmasks}


if __name__ == '__main__':
    logger.info('===========> loading model <===========')
    model = Pose2Seg().cuda()
    # model.init("")
    model.train()

    logger.info('===========> loading data <===========')
    datasetTrain = Dataset()
    dataloaderTrain = torch.utils.data.DataLoader(
        datasetTrain,
        batch_size=1,
        shuffle=True,
        num_workers=0,
        pin_memory=False,
        collate_fn=datasetTrain.collate_fn)

    logger.info('===========> set optimizer <===========')
    # Set your optimizer here; Adam or SGD are the usual choices.
    # SGD alternative kept for reference:
    # optimizer = torch.optim.SGD(model.parameters(), 0.0002,
    #                             momentum=0.9, weight_decay=0.0005)
    optimizer = torch.optim.Adam(model.parameters(), 0.0002,
                                 weight_decay=0.0000)

    max_iters = 14150 * 25  # total global iterations to train for
    iteration, epoch = 0, 0
    try:
        while iteration < max_iters:
            logger.info('===========>   training    <===========')
            iteration = train(model, dataloaderTrain,
                              optimizer, epoch, iteration)
            epoch += 1

            logger.info('===========>   testing    <===========')
            test(model, dataset='cocoVal', logger=logger.info)
            test(model, dataset='OCHumanVal', logger=logger.info)

    except KeyboardInterrupt:
        # Persist a checkpoint when the user interrupts training.
        logger.info('Save ckpt on exception ...')
        ckpt_path = os.path.join(
            SNAPSHOTDIR, 'interrupt_%d_%d.pkl' % (epoch, iteration))
        torch.save(model.state_dict(), ckpt_path)
        logger.info('Save ckpt done.')

【问题讨论】:

    标签: tensorflow pytorch gpu


    【解决方案1】:

    您的 GPU 内存不足。尝试减少批量大小。如果仍然相同,请尝试减小输入图像大小。那么它应该可以正常工作。

    顺便说一句，对于这种类型的模型，建议使用 8GB 的 GPU 内存。

    【讨论】:

    • 你的意思是拆分训练数据并将每个部分放在文件夹中并单独运行吗?
    • 不,在您的训练代码中,您可以找到批量大小。 batch_size=您的模型将一起处理的图像数量。更多数量的图像需要更多内存。减少批量大小并再次训练。
    • 但是 batch_size 已经是 1 了，还能再减少吗？请查看 train.py 代码
    • 如果是 1,那是可能的最低值。你不能减少它。输入图像大小是多少?
    • coco 数据集中的训练数据 = 18 GB
    猜你喜欢
    • 2021-08-04
    • 2021-08-08
    • 1970-01-01
    • 2021-05-20
    • 1970-01-01
    • 1970-01-01
    • 2020-09-25
    • 1970-01-01
    • 2020-12-06
    相关资源
    最近更新 更多