[Title]: TypeError: forward() missing 1 required positional argument: 'hidden' - LSTM Model
[Posted]: 2021-01-23 13:26:50
[Question]:

I am a beginner with LSTMs and PyTorch. I am trying to build a model for a bursty-traffic prediction scenario. It is deliberately an overfitting model: first it uses x_data as both input and target, fitting each next value (you can see the shape of x_data in the main block), and then it tries to predict the whole traffic shape from the first 100 seed values. But the model gives me an error at the line outputs, hidden = model(inputs). Please help me fix this error.

Here is my full code:

import numpy as np
import torch
import torch.nn as nn
from torch import Tensor 
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from scipy.signal import savgol_filter


is_cuda = torch.cuda.is_available()

if is_cuda:
    device = torch.device("cuda")
    print("GPU is available")
else:
    device = torch.device("cpu")
    print("GPU not available, CPU used")

class FPredRNN(nn.Module):         
  def __init__(self, input_size, hidden_size, num_layers, dropout_val = 0.1):
    super(FPredRNN, self).__init__()
    self.input_size = input_size
    self.nh = hidden_size
    self.nl = num_layers

    self.lstm = nn.LSTM(self.input_size, self.nh, self.nl, dropout = dropout_val) 
    self.dropout = nn.Dropout(dropout_val)
    self.linear = nn.Linear(self.nh, 1)

  def forward(self, x, hidden, steps = 1000, eval = False): 
    predictions = []
    batch_size = x.size(0)
    if(hidden.size(0) != batch_size):
      self.init_hidden(batch_size)
    
    l_out, hidden = self.lstm(x, hidden)
    l_out = l_out.contiguous().view(-1, self.nh)

    out = self.dropout(l_out)
    out = self.linear(out)
    #out = out.view(batch_size, -1)
    #out = out[:,-1]
    
    if(eval):
      eval_input = out[-1:]
      for i in range(steps):
        lstm_out, hidden = self.lstm(eval_input, hidden)
        linear_out = self.linear(lstm_out)
        predictions += [linear_out]
        eval_input = linear_out
      out = torch.stack(predictions).squeeze()
      
    return out, hidden
            
  def init_hidden(self, batch_size):
    weight = next(self.parameters()).data
    hidden = (weight.new(self.nl, batch_size, self.nh).zero_().to(device),
              weight.new(self.nl, batch_size, self.nh).zero_().to(device))
    #hidden = ((self.nl, batch_size, self.nh).zero_().to(device), (self.nl, batch_size, self.nh).zero_().to(device))
    return hidden

if __name__ == "__main__":
  x_data = np.empty((1, 2000))
  y_data = np.empty((1, 1))

  for n in [30000]:
    traffic_generator = GenerateTraffic()
    bursty_traffic, a_t = traffic_generator.create_bursty_traffic(n_d=n)
    detected, attempted = traffic_generator.simulate_bursty_traffic_arrivals(bursty_traffic, backoff_bool= True)
    smooth_x = savgol_filter(detected, 97, 2)
    x_data[(n//10000)-3] = smooth_x 

  inputs = x_data[:, :1999]
  targets = x_data[:, 1:2000]
  
  inputs = torch.from_numpy(inputs)
  targets = torch.from_numpy(targets)
 
  print(inputs.size(1))
  print(inputs.size(0))
  #print(hidden.size(0))
  
  model = FPredRNN(input_size = inputs.size(1), hidden_size = 1100, num_layers = 2, dropout_val = 0.1)
  model.to(device)   

  criterion = nn.MSELoss()
  optimizer = torch.optim.Adam(model.parameters(), lr = 0.0001)
  
  
  # Train Model
  n_epochs = 2
  for epoch in range(1, n_epochs + 1):
    optimizer.zero_grad()
    inputs.to(device)
    outputs, hidden = model(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()

# Test Model
  seed_length = 100
  seed = inputs[:seed_length]
  outt = model(seed, steps=1000, eval = True)
  test_out = torch.cat((seed.squeeze(), outt))

The error:

TypeError                                 Traceback (most recent call last)
<ipython-input-2-0e63f8c64103> in <module>()
   101     optimizer.zero_grad()
   102     inputs.to(device)
--> 103     outputs, hidden = model(inputs)
   104     loss = criterion(outputs, targets)
   105     loss.backward()

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   725             result = self._slow_forward(*input, **kwargs)
   726         else:
--> 727             result = self.forward(*input, **kwargs)
   728         for hook in itertools.chain(
   729                 _global_forward_hooks.values(),

TypeError: forward() missing 1 required positional argument: 'hidden'

[Comments]:

    Tags: python machine-learning pytorch lstm


    [Solution 1]:

    The forward method expects another input, hidden. I think what you want to do is:

    hidden = model.init_hidden(inputs.size(0))
    outputs, hidden = model(inputs, hidden)
    

    This way, the first hidden input will just be a tensor full of zeros, and each subsequent hidden input will carry over the state from the previous step.
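    For reference, a minimal training-loop sketch of that idea (assumptions: the FPredRNN class from the question; since an LSTM's hidden state is a tuple (h, c) of tensors of shape (num_layers, batch, hidden_size), the hidden.size(0) check in the question's forward would also need to become something like hidden[0].size(1)):

    # Sketch only: create the zero state once, then reuse and detach it
    batch_size = inputs.size(0)
    hidden = model.init_hidden(batch_size)  # (h_0, c_0), both zeros

    for epoch in range(1, n_epochs + 1):
        optimizer.zero_grad()
        # Detach so gradients do not flow back into earlier iterations.
        hidden = tuple(h.detach() for h in hidden)
        outputs, hidden = model(inputs, hidden)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()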

    [Discussion]:

      [Solution 2]:

      You don't have to provide hidden to recurrent layers; if you leave it out, PyTorch initializes both the hidden state and the cell state to zeros for you.
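
      A quick standalone illustration of that default behaviour:

      import torch
      import torch.nn as nn

      lstm = nn.LSTM(input_size=4, hidden_size=8, num_layers=2)
      x = torch.randn(5, 3, 4)     # (seq_len, batch, input_size)
      out, (h_n, c_n) = lstm(x)    # no hidden argument: h_0 and c_0 default to zeros
      print(out.shape, h_n.shape)  # torch.Size([5, 3, 8]) torch.Size([2, 3, 8])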

      Given that, your model can become the following (and it could probably be simplified further; what are you trying to achieve with nn.Linear?):

      class FPredRNN(nn.Module):
          def __init__(self, input_size, hidden_size, num_layers, dropout_val=0.1):
              super(FPredRNN, self).__init__()
              self.input_size = input_size
              self.nh = hidden_size
              self.nl = num_layers
      
              self.lstm = nn.LSTM(self.input_size, self.nh, self.nl, dropout=dropout_val)
              self.dropout = nn.Dropout(dropout_val)
              self.linear = nn.Linear(self.nh, 1)
      
          def forward(self, x, steps=1000):
              predictions = []
              l_out, hidden = self.lstm(x)
              l_out = l_out.contiguous().view(-1, self.nh)
      
              out = self.dropout(l_out)
              out = self.linear(out)
              # Note: eval() is a method on nn.Module, so `self.eval` is always
              # truthy; use self.training to check train/eval mode instead.
              if not self.training:
                  eval_input = out[-1:]
                  for i in range(steps):
                      lstm_out, hidden = self.lstm(eval_input, hidden)
                      linear_out = self.linear(lstm_out)
                      predictions += [linear_out]
                      eval_input = linear_out
                  out = torch.stack(predictions).squeeze()
      
              return out
      

      Please read the nn.LSTM documentation thoroughly; it will help a lot with recurrent layers.
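
      For completeness, a short usage sketch of the simplified model (the shapes and hyperparameters here are assumptions; without batch_first=True, nn.LSTM expects input of shape (seq_len, batch, input_size), and the free-running branch would still need eval_input reshaped to (1, 1, 1) before it is fed back into the LSTM):

      # Usage sketch (hyperparameters and shapes are illustrative only)
      model = FPredRNN(input_size=1, hidden_size=64, num_layers=2)

      x = torch.randn(1999, 1, 1)  # one series, one batch, one feature

      model.train()
      out = model(x)               # one-step-ahead outputs, shape (1999, 1)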

      [Discussion]:
