【问题标题】:How to add a dropout layer in TensorFlow? And how to augment a NumPy array in Python 3.x? 如何在 TensorFlow 中添加 dropout 层？以及如何在 Python 3.x 中对 NumPy 数组做数据增强？
【发布时间】:2023-03-30 14:10:01
【问题描述】:

首先,我是这个领域的新手,我正在尝试添加 dropout 层以查看模型中性能的变化。我无法弄清楚我应该在下面的代码中在哪里以及如何添加一个 dropout 层。 此外,我想对大小为 39*200 的 numpy 数组进行数据扩充(移位),以使第一列转移到第二列,第二列转移到第三列,依此类推。最后一个转移第一个。这就像剪切图像的最后一部分并将其粘贴到第一部分。

def conv2d(x, W, b, strides=1):
    """Convolution -> bias add -> ReLU (TF 1.x graph op).

    Args:
        x: input tensor in NHWC layout.
        W: filter tensor of shape (height, width, in_channels, out_channels).
        b: bias vector of shape (out_channels,).
        strides: spatial stride applied to both height and width.

    Returns:
        The activated feature map ('SAME' padding keeps spatial size / stride).
    """
    # NOTE: indentation was lost in the original paste; restored here so the
    # function body is actually inside the def.
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    """2-D max pooling with a k x k window and stride k ('SAME' padding).

    Halves (for k=2, rounding up under SAME padding) each spatial dimension.
    Indentation restored: the original paste had the body at column 0.
    """
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

# Convolution / dense weights, Xavier-initialised (TF 1.x tf.contrib API).
# Conv filter shapes are (height, width, in_channels, out_channels).
weights = {
'wc1': tf.get_variable('W0', shape=(3,3,1,32), initializer=tf.contrib.layers.xavier_initializer()), 
'wc2': tf.get_variable('W1', shape=(3,3,32,64), initializer=tf.contrib.layers.xavier_initializer()), 
'wc3': tf.get_variable('W2', shape=(3,3,64,32), initializer=tf.contrib.layers.xavier_initializer()), 
'wc4': tf.get_variable('W3', shape=(3,3,32,128), initializer=tf.contrib.layers.xavier_initializer()),
'wc5': tf.get_variable('W4', shape=(3,3,128,64), initializer=tf.contrib.layers.xavier_initializer()),
# NOTE(review): 4*4*56 = 896, which happens to equal 2*7*64 — the flattened
# conv5 output for a 39x200 input after five k=2 max-pools. The factoring is
# misleading but the total is presumably correct for that input size; confirm
# before changing the input shape.
'wd1': tf.get_variable('W7', shape=(4*4*56,64), initializer=tf.contrib.layers.xavier_initializer()), 
'out': tf.get_variable('W8', shape=(64,n_classes), initializer=tf.contrib.layers.xavier_initializer()), 

}

# Per-layer bias vectors; shapes match each layer's out_channels.
# NOTE(review): Xavier initialisation for biases is unusual (zeros is the
# common choice) but is preserved here to keep behaviour unchanged.
biases = {
'bc1': tf.get_variable('B0', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),
'bc2': tf.get_variable('B1', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
'bc3': tf.get_variable('B2', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),
'bc4': tf.get_variable('B3', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),
'bc5': tf.get_variable('B4', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
'bd1': tf.get_variable('B7', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
# FIX: was hard-coded shape=(2); use n_classes so the output bias stays
# consistent with weights['out'] (identical behaviour when n_classes == 2).
'out': tf.get_variable('B8', shape=(n_classes), initializer=tf.contrib.layers.xavier_initializer()),

}

def conv_net(x, weights, biases, keep_prob=1.0):
    """Five conv+maxpool stages, one ReLU dense layer, linear class readout.

    Args:
        x: input batch, NHWC layout.
        weights: dict of conv/dense weight variables (see module level).
        biases: dict of matching bias variables.
        keep_prob: probability of keeping each unit in the dropout applied
            after the dense layer (TF 1.x keep_prob semantics). The default
            1.0 disables dropout, preserving the original behaviour.

    Returns:
        Unscaled logits of shape (batch, n_classes).
    """
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    conv1 = maxpool2d(conv1, k=2)

    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)

    conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
    conv3 = maxpool2d(conv3, k=2)

    conv4 = conv2d(conv3, weights['wc4'], biases['bc4'])
    conv4 = maxpool2d(conv4, k=2)

    conv5 = conv2d(conv4, weights['wc5'], biases['bc5'])
    conv5 = maxpool2d(conv5, k=2)

    # Flatten conv5 to (batch, wd1_rows) to match the dense weight matrix.
    fc1 = tf.reshape(conv5, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Dropout belongs after the dense activation; a no-op when keep_prob=1.0.
    # Feed keep_prob < 1.0 during training only, 1.0 at evaluation time.
    fc1 = tf.nn.dropout(fc1, keep_prob)

    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out

# Build the training graph: logits, softmax cross-entropy loss, Adam step,
# and an arg-max accuracy metric over one-hot labels.
pred = conv_net(x, weights, biases)

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y),
    name='Cost')

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Fraction of samples whose predicted class matches the one-hot label.
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# BUG FIX: the original line ended in a stray comma, making `init` a
# one-element tuple instead of the initializer op itself.
init = tf.global_variables_initializer()


with tf.Session() as sess:
    # NOTE: indentation of this whole body was lost in the original paste;
    # restored so the statements actually run inside the session context.
    sess.run(init)
    train_loss = []
    test_loss = []
    train_accuracy = []
    test_accuracy = []

    # Ensure the TensorBoard summary directory exists before writing.
    if not os.path.exists('summaries'):
        os.mkdir('summaries')
    if not os.path.exists(os.path.join('summaries', 'first')):
        os.mkdir(os.path.join('summaries', 'first'))

    summary_writer = tf.summary.FileWriter(os.path.join('summaries', 'first'),
                                           sess.graph)

    for i in range(training_iters):
        for batch in range(len(X_train) // batch_size):
            batch_x = X_train[batch * batch_size:
                              min((batch + 1) * batch_size, len(X_train))]
            batch_y = Y_train[batch * batch_size:
                              min((batch + 1) * batch_size, len(Y_train))]
            # One optimisation step; the unused `opt =` binding was dropped.
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            # Second pass reads the post-update loss/accuracy on this batch
            # (kept as two runs to preserve the original reported values).
            loss, acc = sess.run([cost, accuracy],
                                 feed_dict={x: batch_x, y: batch_y})
        print("Iter " + str(i) + ", Loss= " +
              "{:.6f}".format(loss) + ", Training Accuracy= " +
              "{:.5f}".format(acc))
        print("Optimization Finished!")

        # Evaluate on the held-out set once per outer iteration.
        test_acc, valid_loss = sess.run([accuracy, cost],
                                        feed_dict={x: X_test, y: Y_test})
        train_loss.append(loss)
        test_loss.append(valid_loss)
        train_accuracy.append(acc)
        test_accuracy.append(test_acc)

        print("Testing Accuracy:", "{:.5f}".format(test_acc))
        print("Accuracy:", accuracy.eval({x: X_test, y: Y_test}))

代码链接:[1]:https://drive.google.com/file/d/1BcbLAlVG0QR8QKToyij9gniQ7E9gvaCc/view?usp=sharing

【问题讨论】:

    标签: python numpy tensorflow machine-learning deep-learning


    【解决方案1】:

    Dropout（随机失活）

    你可以将 dropout 层放在 max-pooling 之后,类似这样:

    # _____________ FIRST MAX POOLING LAYER _____________________
    A_pool1 = tf.nn.max_pool(A_conv1)
    
    # _____________ FIRST DROPOUT LAYER _____________________
    A_out1 = tf.nn.dropout(x=A_pool1, rate=dropout_prob)
    
    # _____________ SECOND CONVOLUTIONAL LAYER _____________________
    A_conv2 = tf.nn.relu(tf.nn.conv2d(A_out1, W_conv2))
    

    其中 dropout_prob 是 x 的每个元素被丢弃的概率。

    另一个例子是here,他们将 dropout 层放在密集层之后(最后)。

    在您的具体情况下,您可以这样做:

    conv2 = maxpool2d(conv2, k=2)
    A_out1 = tf.nn.dropout(x=conv2, rate=0.5)
    conv3 = conv2d(A_out1, weights['wc3'], biases['bc3'])
    

    数据增强

    要做到这一点,你可以使用 Numpy 函数 roll ,这里解释为 Numpy roll

    >>> x = np.arange(10)
    >>> np.roll(x, 2)
    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
    
    >>> x2 = np.reshape(x, (2, 5))
    >>> x2
    array([[0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9]])
    
    >>> np.roll(x2, 1, axis=1)
    array([[4, 0, 1, 2, 3],
           [9, 5, 6, 7, 8]])
    

    【讨论】:

    • 它不工作。我不应该在开始时定义 dropout 层吗?由于我是初学者,请更具体。请在我的代码中添加这些行。谢谢你:)
    • @Virtuall.Kingg 我可以有代码吗?共享 Colab 笔记本或 .py 文件,因为从 pdf 复制它不是最佳解决方案。
    • 谢谢,@Virtual.Kingg 你能附上你的部分数据文件吗?否则,我无法测试它是否正常工作。
    • 我的数据太大了。我在上传时遇到问题。请在代码中添加 dropout 层。谢谢你:)
    猜你喜欢
    • 1970-01-01
    • 2020-07-03
    • 2021-03-27
    • 1970-01-01
    • 2021-12-04
    • 2019-10-19
    • 1970-01-01
    • 2017-01-18
    • 2019-08-22
    相关资源
    最近更新 更多