【问题标题】:Test a tensorflow cnn model after the training训练后测试一个 tensorflow cnn 模型
【发布时间】:2019-09-02 01:37:00
【问题描述】:

我创建了一个卷积神经网络模型,我实现了训练,现在我必须创建一个函数以在测试模式下运行该模型,但我不知道该怎么做。

Ho due dataset, uno per l'allenamento e uno per il test quindi dovrei trovare un modo per testare il modello nel dataset di test。

我可以以与训练数据集相同的方式加载测试数据集,但我不知道如何对已训练的模型进行测试。

这是模型函数

import tensorflow as tf

def cnn_model_fn(X, MODE, log=False):

    # INPUT LAYER
    with tf.name_scope('input_layer') as scope:
        input_layer = tf.reshape(X, [-1, 1000, 48, 1])

    # CONVOLUTIONAL LAYER #1
    with tf.name_scope('Conv1') as scope:
        conv1 = tf.layers.conv2d(
            inputs=input_layer,
            filters=4,
            kernel_size=[10, 10],
            strides=(2, 2),
            padding="valid",
        )
        if log==True:
            print('[LOG:conv1]: ' + str(conv1.shape))

        # apply the relu function
        conv1_relu = tf.nn.relu(conv1)
        if log==True:
            print('[LOG:conv1_relu]: ' + str(conv1_relu.shape))

    # POOLING LAYER #1
    with tf.name_scope('Pool1'):
        pool1 = tf.layers.max_pooling2d(
            inputs=conv1_relu,
            pool_size=[2, 2],
            strides=2
        )
        if log==True:
            print('[LOG:pool1]: ' + str(pool1.shape))

    # CONVOLUTIONAL LAYER #2
    with tf.name_scope('Conv2'):
        conv2 = tf.layers.conv2d(
            inputs=pool1,
            filters=64,
            kernel_size=[5, 5],
            padding="same",
        )
        if log==True:
            print('[LOG:conv2]: ' + str(conv2.shape))

        # apply the relu function
        conv2_relu = tf.nn.relu(conv2)
        if log==True:
            print('[LOG:conv2_relu]: ' + str(conv2_relu.shape))


    # POOLING LAYER #2
    with tf.name_scope('Pool2'):
        pool2 = tf.layers.max_pooling2d(
            inputs=conv2_relu,
            pool_size=[2, 2],
            strides=2
        )
        if log==True:
            print('[LOG:pool2]: ' + str(pool2.shape))

        # create a variable with the pool2 size because I need it to calculate the pool2_flat size
        x = tf.TensorShape.as_list(pool2.shape)

    # REDENSIFY POOL2 TO REDUCE COMPUTATIONAL LOAD
    with tf.name_scope('Reshape'):
        pool2_flat = tf.reshape(pool2, [-1, x[1] * x[2] * x[3]])
        if log==True:
            print('[LOG:pool2_flat]: ' + str(pool2_flat.shape))

    # DENSE LAYER
    with tf.name_scope('Dense_layer'):
        dense = tf.layers.dense(
            inputs=pool2_flat,
            units=1024,
        )
        if log==True:
            print('[LOG:dense]: ' + str(dense.shape))

        # apply the relu function
        dense_relu = tf.nn.relu(dense)
        if log==True:
            print('[LOG:dense_relu]: ' + str(dense_relu.shape))

    # add the dropout function
    with tf.name_scope('Dropout'):
        dropout = tf.layers.dropout(
            inputs=dense_relu,
            rate=0.4,
            training=MODE == tf.estimator.ModeKeys.TRAIN
        )
        if log==True:
            print('[LOG:dropout]: ' + str(dropout.shape))

    # LOGIT LAYER
    with tf.name_scope('Logit_layer'):
        logits = tf.layers.dense(
            inputs=dropout,
            units=2
        )
        if log==True:
            print('[LOG:logits]: ' + str(logits.shape))

    return logits

这是主程序

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


# IMPORTS
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import sys
from tqdm import tqdm
import load_dataset
import datetime
import time
get_images = load_dataset.get_images
next_batch = load_dataset.next_batch

import cnn_model_fn
cnn_model_fn = cnn_model_fn.cnn_model_fn

os.system('clear')

local_path = os.getcwd()
save_path = local_path + '/.Checkpoints/model.ckpt'
TensorBoard_path = local_path + "/.TensorBoard"
dataset_path = local_path + '/DATASET/'

#Training Parameters
learning_rate = 0.001
batch_size = 5
epochs = 2

MODE = 'TRAIN'

len_X, X, Y = get_images(
    files_path=dataset_path,
    img_size_h=1000,
    img_size_w=48,
    mode='TRAIN',
    randomize=True
)

X_batch, Y_batch = next_batch(
    total=len_X,
    images=X,
    labels=Y,
    batch_size=batch_size,
    index=0
)

logits = cnn_model_fn(X_batch, MODE)
prediction = tf.nn.softmax(logits)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y_batch))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
correct_predict = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y_batch, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

init = tf.global_variables_initializer()
best_acc=0

with tf.Session() as sess:

    sess.run(init)
    saver = tf.train.Saver()

    if MODE == 'TRAIN':
        os.system('clear')
        print("TRAINING MODE")
        print('\n[epoch, iter]\t\tAccuracy\tProgress\tTime')

        for step in range(1, epochs+1):
            for i in range(0, int(len_X/batch_size)+1):
                t0 = time.time()

                X_batch, Y_batch = next_batch(
                    total=len_X,
                    images=X,
                    labels=Y,
                    batch_size=batch_size,
                    index=i
                )

                sess.run(train_op)
                los, acc= sess.run([loss, accuracy])

                t1 = time.time()
                t = t1-t0

                check = '[ ]'
                if acc >= best_acc:
                    check = '[X]'
                    best_acc = acc
                    print('[e:' + str(step) + ', i:' + str(i) + ']\t\t' + '%.4f' % acc + '\t\t' + check + '\t\t' + '%.3f' % t + 's')
                    saver.save(sess,save_path)
                else:
                    print('[e:' + str(step) + ', i:' + str(i) + ']\t\t' + '%.4f' % acc + '\t\t' + check + '\t\t' + '%.3f' % t + 's')

        writer = tf.summary.FileWriter(TensorBoard_path, sess.graph)

    elif MODE=='TEST':
        os.system('clear')
        print("TESTING MODE")
        saver.restore(sess, save_path)
        # here I need to test the model 


sess.close()

非常感谢您的帮助和时间。

编辑:我解决了这个问题

saver.restore(sess, save_path)
print("Initialization Complete")

len_X_test, X_test, Y_test = get_images(
    files_path=dataset_path,
    img_size_h=img_size_h,
    img_size_w=img_size_w,
    mode='TEST',
    randomize=True
)

train_feed = {x: X_test, y: Y_test}

print("Testing Accuracy:"+str(sess.run(accuracy, feed_dict=train_feed)))

【问题讨论】:

    标签: python tensorflow machine-learning deep-learning tensorflow-estimator


    【解决方案1】:

    我解决了这个问题:

    saver.restore(sess, save_path)
    print("Initialization Complete")
    
    len_X_test, X_test, Y_test = get_images(
        files_path=dataset_path,
        img_size_h=img_size_h,
        img_size_w=img_size_w,
        mode='TEST',
        randomize=True
    )
    
    train_feed = {x: X_test, y: Y_test}
    
    # test the model
    print("Testing Accuracy:"+str(sess.run(accuracy, feed_dict=train_feed)))
    

    【讨论】:

      【解决方案2】:

      您可以按照与模型训练相同的方式进行操作。将测试分成多个批次,并独立计算每个批次的损失和准确度。假设测试集长度可以被批量大小整除:

      accuracies = []
      losses = []
      for i in range(0, len_X // batch_size + 1:
          X_batch, Y_batch = next_batch(
               total=len_X,
               images=X,
               labels=Y,
               batch_size=batch_size,
               index=i
          )
      
          los, acc= sess.run([loss, accuracy])
          accuracies.append(acc)
          losses.append(loss)
      test_acc = np.mean(accuracies)
      test_loss = np.mean(losses)
      

      【讨论】:

      • 感谢您的回复。我有一个问题,如果我以与训练相同的方式进行测试,我会去改变模型的权重吗?我希望在不改变权重的情况下完成测试。也许如果我在这里改变模式,我可以做测试? python dropout = tf.layers.dropout( inputs=dense_relu, rate=0.4, training=MODE == tf.estimator.ModeKeys.TRAIN )
      • 如果您不运行训练操作 sess.run(train_op),而仅运行 lossaccuracy,则权重不会改变。
      • 啊,好吧,不是 lo sapevo。 Ma allora qual è la differentenza tra usare tf.estimator.ModeKeys.TRAIN oppure tf.estimator.ModeKeys.EVAL all'interno della mia funzione? Se volessi fare una valutazione durante l'allenamento su un dataset di valutazione dovrei usare tf.estimator.ModeKeys.EVAL o posso usare tf.estimator.ModeKeys.TRAIN?有什么不同吗?
      • 这仅适用于辍学。 Dropout 在训练过程中引入了噪声,一方面可以防止模型过度拟合,另一方面会损害准确性。因此,在测试时,您希望关闭 dropout。
      • 嗯,好的。但是对于测试我有一个Accuracy函数,那不是更容易使用吗?
      猜你喜欢
      • 2018-09-26
      • 1970-01-01
      • 2016-01-11
      • 1970-01-01
      • 2018-03-15
      • 2017-06-17
      • 2016-11-01
      • 1970-01-01
      • 1970-01-01
      相关资源
      最近更新 更多