【发布时间】:2017-12-18 10:45:15
【问题描述】:
我正在尝试在 Keras 中为 RNN (LSTM) 实现我自己的自定义损失函数。这是我的代码。
import sys
sys.path.insert(0, "C:\\Users\\skaul\\AppData\\Local\\Continuum\\Anaconda3\\envs\\tensorflow\\Lib\\site-packages")
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense
import keras.backend as K
timesteps = 10
data_dim = 5
timesteps = 10
num_classes = 2
# expected input data shape: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
input_shape=(timesteps, data_dim))) # returns a sequence of vectors of dimension 32
model.add(LSTM(32, return_sequences=True)) # returns a sequence of vectors of dimension 32
model.add(LSTM(32)) # return a single vector of dimension 32
model.add(Dense(2, activation='softmax'))
def custom_loss(y_true, y_pred):
ytrue = K.argmax(y_true, axis = 1)
ypred = K.argmax(y_pred, axis = 1)
true1 = ytrue
pred1 = ypred
pred0 = ypred - K.cast(K.variable(1),dtype = 'int64')
pred0 = pred0 * K.cast(K.variable(-1),dtype = 'int64')
tp = K.sum(true1*pred1) #true positives
fn = K.sum(true1*pred0) #false negatives
return K.cast(fn/tp,dtype = 'float32')
model.compile(loss = custom_loss,
optimizer='adam',
metrics=['accuracy'])
# Generate dummy training data
x_train = np.random.random((1000, timesteps, data_dim))
y_train = np.random.random((1000, num_classes))
# Generate dummy validation data
x_val = np.random.random((100, timesteps, data_dim))
y_val = np.random.random((100, num_classes))
y_a = np.random.random(y_train.shape)
y_b = np.random.random(y_train.shape)
out1 = K.eval(custom_loss(K.variable(y_a), K.variable(y_b)))
print(out1)
model.fit(x_train, y_train, batch_size=64, epochs=5, validation_data=(x_val, y_val))
我收到以下错误
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-0551e4a8e8ed> in <module>()
52 print(out1)
53
---> 54 model.fit(x_train, y_train, batch_size=64, epochs=5, validation_data=(x_val, y_val))
~\AppData\Local\Continuum\Anaconda3\envs\tensorflow\Lib\site-packages\keras\models.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
868 class_weight=class_weight,
869 sample_weight=sample_weight,
--> 870 initial_epoch=initial_epoch)
871
872 def evaluate(self, x, y, batch_size=32, verbose=1,
~\AppData\Local\Continuum\Anaconda3\envs\tensorflow\Lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
1488 else:
1489 ins = x + y + sample_weights
-> 1490 self._make_train_function()
1491 f = self.train_function
1492
~\AppData\Local\Continuum\Anaconda3\envs\tensorflow\Lib\site-packages\keras\engine\training.py in _make_train_function(self)
1012 self._collected_trainable_weights,
1013 self.constraints,
-> 1014 self.total_loss)
1015 updates = self.updates + training_updates
1016 # Gets loss and metrics. Updates weights at each call.
~\AppData\Local\Continuum\Anaconda3\envs\tensorflow\Lib\site-packages\keras\optimizers.py in get_updates(self, params, constraints, loss)
420
421 for p, g, m, v in zip(params, grads, ms, vs):
--> 422 m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
423 v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
424 p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
c:\users\skaul\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\ops\math_ops.py in binary_op_wrapper(x, y)
827 if not isinstance(y, sparse_tensor.SparseTensor):
828 try:
--> 829 y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
830 except TypeError:
831 # If the RHS is not a tensor, it might be a tensor aware object
c:\users\skaul\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\framework\ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)
674 name=name,
675 preferred_dtype=preferred_dtype,
--> 676 as_ref=False)
677
678
c:\users\skaul\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\framework\ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
739
740 if ret is None:
--> 741 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
742
743 if ret is NotImplemented:
c:\users\skaul\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\framework\constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
111 as_ref=False):
112 _ = as_ref
--> 113 return constant(v, dtype=dtype, name=name)
114
115
c:\users\skaul\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\framework\constant_op.py in constant(value, dtype, shape, name, verify_shape)
100 tensor_value = attr_value_pb2.AttrValue()
101 tensor_value.tensor.CopyFrom(
--> 102 tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape, verify_shape=verify_shape))
103 dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
104 const_tensor = g.create_op(
c:\users\skaul\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\framework\tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape)
362 else:
363 if values is None:
--> 364 raise ValueError("None values not supported.")
365 # if dtype is provided, forces numpy array to be the type
366 # provided if possible.
ValueError: None values not supported.
这让我相信我的损失函数正在返回一个“无”值,但是,我的代码输出 0.941634
在显示上述错误之前。这来自语句 print(out1),它在 RNN 之外测试损失函数。关于可能出现什么问题的任何想法?
【问题讨论】:
-
我无法找出问题所在,但关于 pred1 和 pred0 的问题有些可疑。 Pred1 将始终为 1,Pred0 将始终为 0。(您将之前的值相加,并且将 'softmax' 结果相加将始终为 1,这意味着 ytrue 和 ypred 由 1 组成。)。
-
但是您可以尝试不使用强制转换方法(无论如何它都会带来结果,如果 float64 部分对您很重要,可能会有更大的精度错误)
-
@Daniel 感谢您了解这一点 - 我现在使用 argmax 而不是 sum 来生成 ytrue ypred。我相信 keras 期望损失函数有一个 float32 输出,这就是我在那里投射的原因(如果我不这样做,我会得到一个错误)。我仍然收到无值错误。
-
这是您的完整代码吗?你可以用你的改变来更新它:)
-
好的,我测试了你的代码,它可以运行。问题肯定出在你没有表现出来的地方。
标签: python tensorflow keras loss rnn