【问题标题】:Error:RuntimeError: An attempt has been made to start a new process before the current process has finished its bootstrapping phase错误:RuntimeError:在当前进程完成其引导阶段之前尝试启动一个新进程
【发布时间】:2021-01-28 13:36:12
【问题描述】:

运行以下脚本后出现错误:

-- 编码:utf-8 --

导入资料

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms

import cv2

import numpy as np

import csv

Step1:从日志文件中读取

samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)
    for line in reader:
        samples.append(line)
    

Step2:将数据划分为训练集和验证集

train_len = int(0.8*len(samples))
valid_len = len(samples) - train_len
train_samples, validation_samples = data.random_split(samples, lengths=[train_len, valid_len])

Step3a:定义数据加载器的扩充、转换过程、参数和数据集

def augment(imgName, angle):
  name = 'data/IMG/' + imgName.split('/')[-1]
  current_image = cv2.imread(name)
  current_image = current_image[65:-25, :, :]
  if np.random.rand() < 0.5:
    current_image = cv2.flip(current_image, 1)
    angle = angle * -1.0  
  return current_image, angle

class Dataset(data.Dataset):

    def __init__(self, samples, transform=None):

        self.samples = samples
        self.transform = transform

    def __getitem__(self, index):
      
        batch_samples = self.samples[index]
        
        steering_angle = float(batch_samples[3])
        
        center_img, steering_angle_center = augment(batch_samples[0], steering_angle)
        left_img, steering_angle_left = augment(batch_samples[1], steering_angle + 0.4)
        right_img, steering_angle_right = augment(batch_samples[2], steering_angle - 0.4)

        center_img = self.transform(center_img)
        left_img = self.transform(left_img)
        right_img = self.transform(right_img)

        return (center_img, steering_angle_center), (left_img, steering_angle_left), (right_img, steering_angle_right)
      
    def __len__(self):
        return len(self.samples)

Step3b:使用数据加载器创建生成器以并行化进程

def _my_normalization(x):
    return x/255.0 - 0.5
transformations = transforms.Compose([transforms.Lambda(_my_normalization)])

params = {'batch_size': 32,
          'shuffle': True,
          'num_workers': 4}

training_set = Dataset(train_samples, transformations)
training_generator = data.DataLoader(training_set, **params)

validation_set = Dataset(validation_samples, transformations)
validation_generator = data.DataLoader(validation_set, **params)

第四步:定义网络

class NetworkDense(nn.Module):

    def __init__(self):
        super(NetworkDense, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 36, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(36, 48, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(48, 64, 3),
            nn.ELU(),
            nn.Conv2d(64, 64, 3),
            nn.Dropout(0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=64 * 2 * 33, out_features=100),
            nn.ELU(),
            nn.Linear(in_features=100, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )
        
    def forward(self, input):  
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output


class NetworkLight(nn.Module):

    def __init__(self):
        super(NetworkLight, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 3, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 48, 3, stride=2),
            nn.MaxPool2d(4, stride=4),
            nn.Dropout(p=0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=48*4*19, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )
        

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output

Step5:定义优化器

model = NetworkLight()
optimizer = optim.Adam(model.parameters(), lr=0.0001)

criterion = nn.MSELoss()

Step6:检查设备并定义函数将张量移动到该设备

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device is: ', device)

def toDevice(datas, device):
  
  imgs, angles = datas
  return imgs.float().to(device), angles.float().to(device)

Step7:根据定义的最大时期训练和验证网络

max_epochs = 22

for epoch in range(max_epochs):
    
    model.to(device)
    
    # Training
    train_loss = 0
    model.train()
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
        # Transfer to GPU
        centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)
        
        # Model computations
        optimizer.zero_grad()
        datas = [centers, lefts, rights]        
        for data in datas:
            imgs, angles = data
#             print("training image: ", imgs.shape)
            outputs = model(imgs)
            loss = criterion(outputs, angles.unsqueeze(1))
            loss.backward()
            optimizer.step()

            train_loss += loss.data[0].item()
            
        if local_batch % 100 == 0:
            print('Loss: %.3f '
                 % (train_loss/(local_batch+1)))

    
    # Validation
    model.eval()
    valid_loss = 0
    with torch.set_grad_enabled(False):
        for local_batch, (centers, lefts, rights) in enumerate(validation_generator):
            # Transfer to GPU
            centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)
        
            # Model computations
            optimizer.zero_grad()
            datas = [centers, lefts, rights]        
            for data in datas:
                imgs, angles = data
#                 print("Validation image: ", imgs.shape)
                outputs = model(imgs)
                loss = criterion(outputs, angles.unsqueeze(1))
                
                valid_loss += loss.data[0].item()

            if local_batch % 100 == 0:
                print('Valid Loss: %.3f '
                     % (valid_loss/(local_batch+1)))

Step8:定义状态并将模型保存到状态

state = {
        'model': model.module if device == 'cuda' else model,
        }

torch.save(state, 'model.h5')

这是错误信息:

"D:\VICO\Back up\venv\Scripts\python.exe" "D:/VICO/Back up/venv/Scripts/self_driving_car.py" 设备是:cpu 设备是:cpu 回溯(最近一次通话最后): 文件“”,第 1 行,在 文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py”,第 105 行,在 spawn_main 退出代码 = _main(fd) _main 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py”,第 114 行 准备(preparation_data) 准备中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py”,第 225 行 _fixup_main_from_path(数据['init_main_from_path']) _fixup_main_from_path 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py”,第 277 行 run_name="mp_main") 文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\runpy.py”,第 263 行,在 run_path 回溯(最近一次通话最后): 文件“D:/VICO/Back up/venv/Scripts/self_driving_car.py”,第 165 行,在 pkg_name=pkg_name, script_name=fname) _run_module_code 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\runpy.py”,第 96 行 对于 enumerate(training_generator) 中的 local_batch,(中心、左侧、右侧): iter 中的文件“D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py”,第 291 行 mod_name、mod_spec、pkg_name、script_name) _run_code 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\runpy.py”,第 85 行 执行(代码,run_globals) 文件“D:\VICO\Back up\venv\Scripts\self_driving_car.py”,第 165 行,在 返回 _MultiProcessingDataLoaderIter(self) init 中的文件“D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py”,第 737 行 对于 enumerate(training_generator) 中的 local_batch,(中心、左侧、右侧): iter 中的文件“D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py”,第 291 行 返回 _MultiProcessingDataLoaderIter(self) init 中的文件“D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py”,第 737 行 w.start() 文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py”,第 112 行,开始 self._popen = self._Popen(self) _Popen 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py”,第 223 行 w.start() 文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py”,第 112 行,开始 返回 _default_context.get_context().Process._Popen(process_obj) _Popen 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py”,第 322 行 self._popen = self._Popen(self) _Popen 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py”,第 223 行 返回 Popen(process_obj) init 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py”,第 89 行 返回 _default_context.get_context().Process._Popen(process_obj) _Popen 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py”,第 322 行 减少。转储(process_obj,to_child) 转储中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\reduction.py”,第 60 行 返回 Popen(process_obj) init 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py”,第 46 行 ForkingPickler(文件,协议).dump(obj) BrokenPipeError:[Errno 32] 损坏的管道 prep_data = spawn.get_preparation_data(process_obj._name) 文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py”,第 143 行,在 get_preparation_data _check_not_importing_main() _check_not_importing_main 中的文件“C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py”,第 136 行 不会被冻结以生成可执行文件。''') 运行时错误: 已尝试在 当前进程已完成其引导阶段。

    This probably means that you are not using fork to start your
    child processes and you have forgotten to use the proper idiom
    in the main module:

        if __name__ == '__main__':
            freeze_support()
            ...

    The "freeze_support()" line can be omitted if the program
    is not going to be frozen to produce an executable.

进程以退出代码 1 结束

我不确定解决问题的下一步

【问题讨论】:

    标签: python deep-learning torchvision


    【解决方案1】:

    解决了,简单地说:

    if __name__ == "__main__":
            main()
    

    为了避免在每个循环中重新加载模块。

    【讨论】:

      猜你喜欢
      • 2021-12-30
      • 2023-01-20
      • 2019-07-30
      • 1970-01-01
      • 2022-11-29
      • 2017-12-18
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      相关资源
      最近更新 更多