【Posted】: 2018-10-08 01:34:07
【Problem description】:
I am running RNN and LSTM models with a custom predefined function, trainDNN:
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
import h5py
import time
from sklearn.utils import shuffle

def trainDNN(path, n_days, n_features, n_neurons,
             train_sequences, train_lengths, train_y,
             test_sequences, test_y, test_lengths,
             lstm=False, n_epochs=50, batch_size=256,
             learning_rate=0.0003, TRAIN_REC=8, TEST_REC=8):
    # we're doing binary classification
    n_outputs = 2

    # this is the initial learning rate
    # the Adam optimizer decays the learning rate automatically
    # learning_rate = 0.0001
    # learning rate decay is determined by epsilon
    epsilon = 0.001

    # set up the graph
    tf.reset_default_graph()

    # inputs to the network
    X = tf.placeholder(tf.float32, [None, n_days, n_features])
    y = tf.placeholder(tf.int32, [None])
    seq_length = tf.placeholder(tf.int32, [None])

    # the network itself
    cell = tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) if lstm else tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
    outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32, sequence_length=seq_length)
    # for the LSTM, states is a (c, h) tuple, so states[-1] is the hidden state h
    logits = fully_connected(states[-1] if lstm else states, n_outputs)

    # the training process (minimize loss), including the training operation itself
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=epsilon)
    training_op = optimizer.minimize(loss)

    # hold onto the accuracy for the log writer
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    # this saves the network for later querying
    # currently it only saves after all epochs are complete,
    # but we could, for example, save checkpoints on a regular basis
    saver = tf.train.Saver()

    # this is where we save the log files for TensorBoard
    now = int(time.time())
    name = 'lstm' if lstm else 'rnn'
    root_logdir = path + "tensorflow_logs/{}/{}-{}/".format(name.upper(), name, now)
    train_logdir = "{}train".format(root_logdir)
    eval_logdir = "{}eval".format(root_logdir)
    print('train_logdir', train_logdir)
    print('eval_logdir', eval_logdir)

    # scalars that are written to the log files
    loss_summary = tf.summary.scalar('loss', loss)
    acc_summary = tf.summary.scalar('accuracy', accuracy)

    # summary operation and writer for the training data
    train_summary_op = tf.summary.merge([loss_summary, acc_summary])
    train_writer = tf.summary.FileWriter(train_logdir, tf.get_default_graph())

    # summary operation and writer for the validation data
    eval_summary_op = tf.summary.merge([loss_summary, acc_summary])
    eval_writer = tf.summary.FileWriter(eval_logdir, tf.get_default_graph())

    # initialize variables
    init = tf.global_variables_initializer()

    n_batches = len(train_sequences) // batch_size
    print(n_batches, 'batches of size', batch_size, n_epochs, 'epochs,', n_neurons, 'neurons')

    with tf.Session() as sess:
        # actually run the initialization
        init.run()
        start_time = time.time()
        for epoch in range(n_epochs):
            # at the beginning of each epoch, shuffle the training data
            train_sequences, train_y, train_lengths = shuffle(train_sequences, train_y, train_lengths)
            for iteration in range(n_batches):
                # extract the batch of training data for this iteration
                start = iteration * batch_size
                end = start + batch_size
                X_batch = train_sequences[start:end]
                y_batch = train_y[start:end]
                y_batch = y_batch.ravel()
                seq_length_batch = train_lengths[start:end]

                # every TRAIN_REC steps, save a summary of training accuracy & loss
                if iteration % TRAIN_REC == 0:
                    train_summary_str = train_summary_op.eval(
                        feed_dict={X: X_batch, y: y_batch, seq_length: seq_length_batch}
                    )
                    step = epoch * n_batches + iteration
                    train_writer.add_summary(train_summary_str, step)
                    # without this flush, TensorBoard isn't always current
                    train_writer.flush()

                # every TEST_REC steps, save a summary of validation accuracy & loss
                # TODO: this runs all validation data at once. if validation is
                # sufficiently large, this will fail. better would be to either
                # pick a random subset of validation data, or even better, run
                # validation in multiple batches and save the validation accuracy
                # & loss based on the aggregation of all of the validation batches.
                if iteration % TEST_REC == 0:
                    summary_str = eval_summary_op.eval(
                        feed_dict={X: test_sequences, y: test_y.ravel(), seq_length: test_lengths}
                    )
                    step = epoch * n_batches + iteration
                    eval_writer.add_summary(summary_str, step)
                    # without this flush, TensorBoard isn't always current
                    eval_writer.flush()

                # run training. this is where the network learns.
                sess.run(
                    training_op,
                    feed_dict={X: X_batch, y: y_batch, seq_length: seq_length_batch}
                )

            # after every epoch, calculate the accuracy of the last seen training batch
            acc_train = accuracy.eval(
                feed_dict={X: X_batch, y: y_batch, seq_length: seq_length_batch}
            )
            # after each epoch, calculate the accuracy of the test data
            acc_test = accuracy.eval(
                feed_dict={X: test_sequences, y: test_y.ravel(), seq_length: test_lengths}
            )
            # print the training & validation accuracy to the console
            print(epoch, time.strftime('%m/%d %H:%M:%S'), "Accuracy train:", acc_train, "test:", acc_test)

        # save the model (for more training or inference) after all training is complete
        save_path = saver.save(sess, root_logdir + "model_final.ckpt")

        # close the writers
        train_writer.close()
        eval_writer.close()

        # log and percent are helper functions defined elsewhere
        log(["{}-{} model score".format(name.upper(), now), percent(acc_test)])
The function above trains RNN and LSTM models on time-series data and outputs a binary classification score. The training and test scores are printed, but I want to figure out how to compute the AUC and generate ROC curves for the RNN and LSTM binary classifiers.
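For reference, here is a minimal sketch of the post-hoc computation with scikit-learn, assuming a flat vector of 0/1 test labels and one score per test case for the positive class have already been extracted from each network (the names rnn_scores and lstm_scores are hypothetical placeholders; roc_curve and roc_auc_score are from sklearn.metrics):

import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score

def plot_roc(y_true, positive_scores, label):
    # y_true: flat array of 0/1 labels
    # positive_scores: the model's score for class 1, one per test case
    fpr, tpr, thresholds = roc_curve(y_true, positive_scores)
    auc_value = roc_auc_score(y_true, positive_scores)
    plt.plot(fpr, tpr, label='{} (AUC = {:.3f})'.format(label, auc_value))

# hypothetical usage, one curve per model on the same axes:
# plot_roc(test_y.ravel(), rnn_scores, 'RNN')
# plot_roc(test_y.ravel(), lstm_scores, 'LSTM')
# plt.plot([0, 1], [0, 1], linestyle='--')  # chance diagonal
# plt.xlabel('False positive rate')
# plt.ylabel('True positive rate')
# plt.legend()
# plt.show()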
Update:
I evaluated the logits and predictions with the following script:
n_epochs = 2
batch_size = 2000
n_batches = len(train_sequences) // batch_size
print(n_batches)

with tf.Session() as sess:
    init.run()
    # sess.run(tf.local_variables_initializer())
    for epoch in range(n_epochs):
        train_sequences, train_y, train_lengths = shuffle(train_sequences, train_y, train_lengths)
        for iteration in range(n_batches):
            start = iteration * batch_size
            end = start + batch_size
            X_batch = train_sequences[start:end]
            y_batch = train_y[start:end]
            seq_length_batch = train_lengths[start:end]
            if iteration % 20 == 0:
                train_summary_str = train_summary_op.eval(
                    feed_dict={X: X_batch, y: y_batch, seq_length: seq_length_batch}
                )
                step = epoch * n_batches + iteration
            if iteration % 200 == 0:
                summary_str = eval_summary_op.eval(
                    feed_dict={X: test_sequences, y: test_y, seq_length: test_lengths}
                )
                step = epoch * n_batches + iteration
            sess.run(
                training_op,
                feed_dict={X: X_batch, y: y_batch, seq_length: seq_length_batch}
            )
        acc_train = accuracy.eval(
            feed_dict={X: X_batch, y: y_batch, seq_length: seq_length_batch}
        )
        acc_test = accuracy.eval(
            feed_dict={X: test_sequences, y: test_y, seq_length: test_lengths}
        )
        probs = logits.eval(feed_dict={X: test_sequences, y: test_y, seq_length: test_lengths})
        predictions = correct.eval(feed_dict={logits: probs, y: test_y})
        print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)  # "Manual score:", score
This returns probs, which is essentially a matrix with one row per test case and 2 columns holding the scores for each of the 2 binary classes. The predictions object records whether each prediction was correct. I'm skeptical, because ReLU scores aren't as intuitive as sigmoid scores: they are no longer based on the default 0.5 cutoff separating positive from negative predictions. Instead, the prediction is whichever class has the larger score. Can an ROC curve really be generated from ReLU outputs?
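One observation on that concern, as a sketch assuming the graph above is unchanged: roc_curve only needs a ranking score, not calibrated probabilities, and for two classes the softmax probability of class 1 is a monotone function of the difference logits[:, 1] - logits[:, 0], so feeding either to roc_curve gives the same curve. If proper probabilities in [0, 1] are wanted anyway, the raw outputs can be pushed through tf.nn.softmax (available in TF 1.x) before evaluation:

# convert the network's raw output scores to class probabilities (rows sum to 1)
probas = tf.nn.softmax(logits)

# then, inside the session after training (sketch, reusing the feeds above):
# probs = probas.eval(feed_dict={X: test_sequences, seq_length: test_lengths})
# positive_scores = probs[:, 1]  # P(class == 1) per test case
# fpr, tpr, _ = roc_curve(test_y.ravel(), positive_scores)
# auc_value = roc_auc_score(test_y.ravel(), positive_scores)

Note also that tf.contrib.layers.fully_connected applies a ReLU activation by default, which is why the outputs here are ReLU scores; passing activation_fn=None would make them unconstrained logits, which is what sparse_softmax_cross_entropy_with_logits expects.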
【Discussion】:
Tags: python tensorflow deep-learning lstm rnn