【发布时间】:2023-03-30 14:10:01
【问题描述】:
首先,我是这个领域的新手,我正在尝试添加 dropout 层以查看模型性能的变化,但无法确定应该在下面的代码中的哪个位置、以何种方式添加 dropout 层。此外,我想对大小为 39*200 的 numpy 数组做数据扩充(按列循环移位):第一列移到第二列的位置,第二列移到第三列,依此类推,最后一列则移回第一列的位置——就像把图像的最后一部分剪下来拼接到最前面一样。
def conv2d(x, W, b, strides=1):
    """Convolution block: 2-D conv with 'SAME' padding, bias add, ReLU.

    Args:
        x: Input feature map in NHWC layout (per the strides ordering used).
        W: Convolution kernel variable, shape (kh, kw, in_ch, out_ch).
        b: Bias variable, one value per output channel.
        strides: Spatial stride applied in both height and width.

    Returns:
        The ReLU-activated feature map.
    """
    stride_spec = [1, strides, strides, 1]
    convolved = tf.nn.conv2d(x, W, strides=stride_spec, padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(convolved, b))
def maxpool2d(x, k=2):
    """Downsample `x` with k x k max pooling (stride k, 'SAME' padding)."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Model parameters: five 3x3 conv kernels, one fully-connected layer, and the
# output projection, all created with Xavier/Glorot initialization.
# Kernel shapes are (height, width, in_channels, out_channels).
weights = {
'wc1': tf.get_variable('W0', shape=(3,3,1,32), initializer=tf.contrib.layers.xavier_initializer()),
'wc2': tf.get_variable('W1', shape=(3,3,32,64), initializer=tf.contrib.layers.xavier_initializer()),
'wc3': tf.get_variable('W2', shape=(3,3,64,32), initializer=tf.contrib.layers.xavier_initializer()),
'wc4': tf.get_variable('W3', shape=(3,3,32,128), initializer=tf.contrib.layers.xavier_initializer()),
'wc5': tf.get_variable('W4', shape=(3,3,128,64), initializer=tf.contrib.layers.xavier_initializer()),
# NOTE(review): 'wd1' assumes conv5 flattens to 4*4*56 = 896 features, but
# conv5 produces 64 channels -- confirm this against the actual input image
# size; a mismatch will break the reshape/matmul in conv_net at runtime.
'wd1': tf.get_variable('W7', shape=(4*4*56,64), initializer=tf.contrib.layers.xavier_initializer()),
# Output projection: 64 hidden units -> n_classes logits.
'out': tf.get_variable('W8', shape=(64,n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}
# Per-layer bias vectors; each shape matches the out_channels of the
# corresponding entry in `weights`.
# NOTE(review): Xavier initialization is an unusual choice for biases
# (zero initialization is conventional); kept as-is to preserve behavior.
biases = {
    'bc1': tf.get_variable('B0', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),
    'bc2': tf.get_variable('B1', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
    'bc3': tf.get_variable('B2', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),
    'bc4': tf.get_variable('B3', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),
    'bc5': tf.get_variable('B4', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
    'bd1': tf.get_variable('B7', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
    # Fixed: was hard-coded shape=(2). The output bias must match
    # weights['out'].shape[1] == n_classes, otherwise the bias add on the
    # logits fails whenever n_classes != 2.
    'out': tf.get_variable('B8', shape=(n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}
def conv_net(x, weights, biases, dropout_keep_prob=1.0):
    """Forward pass of the 5-conv-layer CNN.

    Each stage is conv (3x3, 'SAME') -> ReLU -> 2x2 max-pool, followed by a
    fully-connected ReLU layer, optional dropout, and a linear output
    projection (logits).

    Args:
        x: Input batch in NHWC layout; spatial size must be consistent with
           the flattened size weights['wd1'].shape[0].
        weights: Dict of kernel/weight variables ('wc1'..'wc5', 'wd1', 'out').
        biases: Dict of the matching bias variables.
        dropout_keep_prob: Keep probability for dropout applied after the
            fully-connected ReLU (the standard placement). Defaults to 1.0
            (dropout disabled), so existing callers are unaffected; feed a
            value like 0.75 during training and 1.0 at evaluation time.

    Returns:
        Logits tensor of shape (batch, n_classes).
    """
    conv1 = maxpool2d(conv2d(x, weights['wc1'], biases['bc1']), k=2)
    conv2 = maxpool2d(conv2d(conv1, weights['wc2'], biases['bc2']), k=2)
    conv3 = maxpool2d(conv2d(conv2, weights['wc3'], biases['bc3']), k=2)
    conv4 = maxpool2d(conv2d(conv3, weights['wc4'], biases['bc4']), k=2)
    conv5 = maxpool2d(conv2d(conv4, weights['wc5'], biases['bc5']), k=2)

    # Flatten to (batch, features) for the fully-connected layer.
    fc1 = tf.reshape(conv5, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.nn.relu(tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']))
    # Dropout belongs after the dense ReLU; with the default keep
    # probability of 1.0 this is a no-op, preserving prior behavior.
    fc1 = tf.nn.dropout(fc1, keep_prob=dropout_keep_prob)

    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
# Build the training graph: forward pass, softmax cross-entropy loss, Adam
# optimizer, and an accuracy metric over one-hot labels.
pred = conv_net(x, weights, biases)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y),
    name='Cost')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Compare the argmax over the class axis of predictions vs. one-hot labels.
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Fixed: the original line ended with a stray trailing comma, which bound
# `init` to a 1-tuple containing the op rather than the op itself
# (sess.run happens to accept a tuple, but the comma was surely unintended).
init = tf.global_variables_initializer()
# Training/evaluation driver. NOTE(review): the block indentation below was
# lost in the paste; the intended structure appears to be: per epoch, run all
# mini-batches, print training stats, then evaluate on the test set -- confirm
# against the original script before running.
with tf.Session() as sess:
sess.run(init)
# Per-epoch history lists (presumably for plotting learning curves later).
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
# Ensure summaries/first exists for the TensorBoard event files.
if not os.path.exists('summaries'):
os.mkdir('summaries')
if not os.path.exists(os.path.join('summaries','first')):
os.mkdir(os.path.join('summaries','first'))
summary_writer = tf.summary.FileWriter(os.path.join('summaries','first'), sess.graph)
for i in range(training_iters):
for batch in range(len(X_train)//batch_size):
# Slice the next mini-batch; min() clips the final, possibly short batch.
batch_x = X_train[batch*batch_size:min((batch+1)*batch_size,len(X_train))]
batch_y = Y_train[batch*batch_size:min((batch+1)*batch_size,len(Y_train))]
# One optimization step on this batch.
opt = sess.run(optimizer, feed_dict={x: batch_x,
y: batch_y})
# Second forward pass to report loss/accuracy AFTER the update; could be
# fused into the run above, but that would report pre-update values.
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y})
# NOTE(review): `loss`/`acc` here come from the LAST batch of the epoch
# only, not an epoch average.
print("Iter " + str(i) + ", Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
print("Optimization Finished!")
# Evaluate loss/accuracy on the entire test set in one run.
test_acc,valid_loss = sess.run([accuracy,cost], feed_dict={x: X_test,y : Y_test})
train_loss.append(loss)
test_loss.append(valid_loss)
train_accuracy.append(acc)
test_accuracy.append(test_acc)
print("Testing Accuracy:","{:.5f}".format(test_acc))
print("Accuracy:", accuracy.eval({x: X_test, y: Y_test}))
代码链接:[1]:https://drive.google.com/file/d/1BcbLAlVG0QR8QKToyij9gniQ7E9gvaCc/view?usp=sharing
【问题讨论】:
标签: python numpy tensorflow machine-learning deep-learning