【问题标题】:Actually printing values from tensor object实际上从张量对象打印值
【发布时间】:2019-02-26 09:42:40
【问题描述】:

我目前正在尝试使用 Keras 实现一个基本的自动编码器,并且我已经到了想要第二个隐藏层的输出的阶段。我认为我能够获得正确的对象,问题是我将它作为张量对象获得,我一直试图运行的代码如下:

from keras.layers import Input, Dense, initializers
import numpy as np
from Dataset import Dataset
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Dense, Activation
import tensorflow as tf
import time

#global variables
d = Dataset()
num_features = d.X_train.shape[1]
#input = [784, 400, 100, 10, 100, 400]
#output = [400, 100, 10, 100, 400, 784]
names = ['hidden1', 'hidden2', 'hidden3', 'hidden4', 'hidden5', 'hidden6']

list_of_nodes = [784, 400, 144, 10]

def generate_hidden_nodes(list_of_nodes):
    input = []
    for j in range(len(list_of_nodes)):
        input.append(list_of_nodes[j])
    for i in range(len(list_of_nodes)-2):
        input.append(list_of_nodes[-2-i])
    output = input[::-1]
    return input, output

input,output = generate_hidden_nodes(list_of_nodes)





def autoencoder(epochs):
    w = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
    model = Sequential()
    input, output = generate_hidden_nodes(list_of_nodes)
    for j in range(len(input)):
        if j == (len(input)-1):
            model.add(Dense(output[j], activation='sigmoid', kernel_initializer=w, input_dim=input[j], name=names[j]))
            #model.add(Dropout(0.45))
        else:
            model.add(Dense(output[j], activation='relu', kernel_initializer=w, input_dim=input[j],
                            name = names[j]))
            #model.add(Dropout(0.45))
    model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=['acc'])
    history = model.fit(d.X_train, d.X_train,
                        epochs=epochs,
                        batch_size=50,
                        shuffle=True,
                        validation_split = 0.2)
                        #validation_data=(d.X_test, d.X_test))
    #print(history.history.keys())
    #plt.plot(history.history['val_acc'])
    #print(history.history['val_acc'])
    plt.show()
    return model

def cv():
    accuracy = 0
    size = 5
    epochs = 20
    variance = 0
    storage = np.zeros((size, epochs))
    for j in range(size):
        ae = autoencoder(epochs)
        #print(ae.history.history['val_acc'])
        storage[j] = ae.history.history['val_acc']
    for i in range(size):
        accuracy += storage[i][-1]
    mean = accuracy/size
    for k in range(size):
        variance += ((storage[k][-1] - mean)**2)
    variance = variance/size
    return mean, variance

#mean, variance = cv()
#print(mean)
#print(variance)
#time.sleep(10)

def finding_index():
    elements, index = np.unique(d.Y_test, return_index=True)
    return elements, index

def plotting():
    ae = autoencoder(20)
    elements, index = finding_index()
    y_proba = ae.predict(d.X_test)
    plt.figure(figsize=(20, 4))
    # size = 20
    for i in range(len(index)):
        ax = plt.subplot(2, len(index), i + 1)
        plt.imshow(d.X_test[index[i]].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        ax = plt.subplot(2, len(index), i + 1 + len(index))
        plt.imshow(y_proba[index[i]].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()

def plotting_weights(epochs):
    ae = autoencoder(epochs)
    output_layer = ae.get_layer('hidden2')
    weights = output_layer.get_weights()[0]
    print(weights.shape)
    size = 20
    plt.figure(figsize=(20, 4))
    for j in range(3):
        plt.gray()
        plt.imshow(weights[j].reshape(12, 12))
        plt.show()

def get_output():
    w = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
    new_model = Sequential()
    new_model.add(Dense(400, activation='relu', kernel_initializer=w, input_dim = 784))
    new_model.add(Dense(144, activation='sigmoid', kernel_initializer=w, input_dim = 400))
    #new_model.add(Dense(784, activation='sigmoid', kernel_initializer=w, input_dim = 144))
    new_model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=['acc'])
    history = new_model.fit(d.X_train, d.X_train,
                        epochs=20,
                        batch_size=50,
                        shuffle=True,
                        validation_split=0.2)
    y = new_model.predict(d.X_test)
    elements, index = finding_index()

    #return y.shape

def get_output2():
    ae = autoencoder(5)
    a =ae.layers[1].output()
    init_op = tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init_op)  # execute init_op
        # print the random values that we sample
        print(a)

get_output2()

我也尝试过 print(a),但正如我所说,这会返回一个张量对象。有人可以向我提供一些信息,我可以如何实际打印这些值吗?提前致谢!

【问题讨论】:

  • 我认为tf.enable_eager_execution() 也会有所帮助。

标签: keras


【解决方案1】:

最简单的:

import keras.backend as K
print(K.eval(ae.layers[1].output()))

这相当于:

with tf.Session() as sess:
  print(sess.run(a))

我发现简单地使用 keras.backend 接口更具可读性。

【讨论】:

  • 非常感谢您的评论!让我心烦意乱了一段时间
猜你喜欢
  • 2016-02-11
  • 2017-01-14
  • 2019-08-13
  • 1970-01-01
  • 1970-01-01
  • 2021-12-16
  • 2023-01-11
  • 1970-01-01
相关资源
最近更新 更多