[Posted]: 2018-10-26 20:41:36
[Problem description]:
I think I am doing something wrong with batch normalization when switching between training and validation, because the validation loss never decreases and the validation error stays at 1.0.
I would really appreciate any help getting this right.
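For reference, this is the batch-norm usage pattern I believe I should be following, shown as a minimal self-contained sketch (the toy layers, `is_training`, `labels`, and the MomentumOptimizer below are illustrative only, not taken from my model):
import tensorflow as tf

# Boolean placeholder that switches batch norm between batch statistics
# (training) and the accumulated moving averages (inference).
is_training = tf.placeholder(tf.bool)

x = tf.placeholder(tf.float32, [None, 32])
labels = tf.placeholder(tf.int64, [None])

h = tf.layers.dense(x, 64, activation=tf.nn.relu)
h = tf.layers.batch_normalization(h, training=is_training)
logits = tf.layers.dense(h, 10)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

# The moving mean/variance are only updated when the UPDATE_OPS collection
# is run, so the train op is made to depend on it.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss)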
My TensorFlow model is declared as follows:
import tensorflow as tf
class OverFeatAccurateBase(object):
def __init__(self, minibatch, numclasses):
self._numclasses = numclasses
self._trainmode = tf.placeholder(tf.bool)
self._logits = self._buildmodel(minibatch)
@property
def numclasses(self):
return self._numclasses
@property
def mode(self):
return self._trainmode
@property
def logits(self):
return self._logits
def _bn(self, input, is_training, name):
out = tf.layers.batch_normalization(input, fused=True, renorm=True, training=is_training,
reuse=tf.AUTO_REUSE,
name=name)
return out
def _buildmodel(self, minibatch):
out = tf.layers.conv2d(minibatch, filters=96,
kernel_size=[7, 7],
strides=[2, 2],
padding='valid',
data_format='channels_first',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='conv1')
out = tf.layers.batch_normalization(out, axis=1, renorm=True, fused=True, name='batchnorm1', training=self.mode)
out = tf.layers.max_pooling2d(out, pool_size=[3, 3],
strides=[3, 3],
padding='valid',
data_format='channels_first',
name='pool1')
out = tf.layers.conv2d(out, filters=256,
kernel_size=[7, 7],
strides=[1, 1],
padding='valid',
data_format='channels_first',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='conv2')
out = tf.layers.batch_normalization(out, axis=1, renorm=True, fused=True, name='batchnorm2', training=self.mode)
out = tf.layers.max_pooling2d(out, pool_size=[2, 2],
strides=[2, 2],
padding='valid',
data_format='channels_first',
name='pool2')
out = tf.layers.conv2d(out, filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
data_format='channels_first',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='conv3')
out = tf.layers.batch_normalization(out, axis=1, renorm=True, fused=True, name='batchnorm3', training=self.mode)
out = tf.layers.conv2d(out, filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
data_format='channels_first',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='conv4')
out = tf.layers.batch_normalization(out, axis=1, renorm=True, fused=True, name='batchnorm4', training=self.mode)
out = tf.layers.conv2d(out, filters=1024,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
data_format='channels_first',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='conv5')
out = tf.layers.batch_normalization(out, axis=1, renorm=True, fused=True, name='batchnorm5', training=self.mode)
out = tf.layers.conv2d(out, filters=1024,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
data_format='channels_first',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='conv6')
out = tf.layers.batch_normalization(out, axis=1, renorm=True, fused=True, name='batchnorm6', training=self.mode)
out = tf.layers.max_pooling2d(out, pool_size=[3, 3],
strides=[3, 3],
padding='valid',
data_format='channels_first',
name='pool3')
out = tf.layers.flatten(out, name='flatten')
out = tf.layers.dense(out, units=4096, activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='full1'
)
out = tf.layers.batch_normalization(out, axis=-1, renorm=True, fused=True, name='batchnorm7', training=self.mode)
out = tf.layers.dense(out, units=4096, activation=tf.nn.relu,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
scale=0.00001),
reuse=tf.AUTO_REUSE,
name='full2'
)
out = tf.layers.batch_normalization(out, axis=-1, renorm=True, fused=True, name='batchnorm8', training=self.mode)
logits = tf.layers.dense(out, units=self.numclasses,
kernel_initializer=tf.initializers.random_normal(
stddev=0.01,
seed=0),
bias_initializer=tf.initializers.constant(0),
reuse=tf.AUTO_REUSE,
name='output'
)
return logits
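The class above is then wired into a loss roughly like this (a simplified sketch; `minibatch`, `labels`, and the class count are stand-ins here, the exact wiring is in the full code linked below):
net = OverFeatAccurateBase(minibatch, numclasses=1000)
# Cross-entropy on the logits plus the L2 terms collected by the kernel_regularizers.
xent = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=net.logits)
loss = xent + tf.losses.get_regularization_loss()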
To execute the graph, I do the following (to see the full code you can go here):
Validation:
sess.run(
    [loss, top1, top5, epoch, summaries_val,
     top1_update, top5_update], feed_dict={net.mode: False, netmode: False})
Training:
_, loss_value, top1_err, top5_err, eph, summaries, _, _, _ = sess.run(
[update_ops, loss, top1, top5, epoch, summaries_train, train_op,
top1_update,
top5_update], feed_dict={net.mode: True, netmode: True})
In the lines above, net is an instance of the OverFeatAccurateBase class,
and netmode is a placeholder whose value determines whether data is read from the training set or from the validation set.
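Concretely, the selection works roughly along these lines (a sketch; `train_iterator` and `val_iterator` are hypothetical names for the two tf.data iterators, not my actual variables):
netmode = tf.placeholder(tf.bool)
# When netmode is fed True, the next minibatch comes from the training
# pipeline; when False, it comes from the validation pipeline.
minibatch, labels = tf.cond(netmode,
                            lambda: train_iterator.get_next(),
                            lambda: val_iterator.get_next())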
[Discussion]:
-
Could you post the data you used, or at least some of it? I'm willing to give it a try, but I can't run it without data.
-
@PeterSzoldan You can find a subset of the data at drive.google.com/open?id=1QIPsJDjKlNz0wGUUd9OGFTFkR3G1pXi-
-
Managed to download and run it. After 123 epochs the training loss is 2.83, the validation loss is 8.214, and the error is 1.0 for both:
INFO:tensorflow:TRAIN : Epoch[123], Iter[6027] Time for 100 iterations[9.120sec]- Loss=2.830, Top1 error=1.00, Top5 error=1.00 INFO:tensorflow:VALIDATION : Epoch[123], Iter[6076] Time for 100 iterations[0.976sec] - Loss=8.214, Top1 error=1.00, Top5 error=1.00
Is that similar to what you are seeing? At which epoch does your training loss start to decrease?
-
I can fairly confidently rule out BN as the source of the problem. When I validate on the training data, with the network otherwise unchanged, it gives sane values (exactly the same as during training). So the network itself seems to be working correctly. I can confirm, however, that with the validation set the loss actually increases. I printed the images fed to the network and they look fine, and so do the labels. I don't have any good ideas right now on how to proceed; I'll come back if I think of something.
-
@End-2-End Please take a look at reddit.com/r/MachineLearning/comments/67gonq/…
Tags: tensorflow