【问题标题】:trying to pickle ML model can't pickle _thread.RLock objects in google colab(在 Google Colab 中尝试用 pickle 序列化 ML 模型时报错 can't pickle _thread.RLock objects)
【发布时间】:2020-12-31 04:44:32
【问题描述】:

我正在谷歌 colab 中使用 CNN 训练 MNIST 数据集,并希望使用 pickle 保存模型,当我尝试保存模型时出现错误 can't pickle _thread.RLock objects

我的代码

import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D , MaxPooling2D, Dense, Flatten,Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split


# --- Hyper-parameters -------------------------------------------------
testRatio = 0.2          # unused below: mnist.load_data() ships its own test split
valRatio = 0.2           # fraction of the training set held out for validation
imageDimensions = (28, 28, 1)   # MNIST digits are 28x28 grayscale (1 channel, not 3)

batchSizeVal = 50
EPOCHS = 2
stepsPerEpoch = 2000


# Load MNIST and carve a validation split out of the training data.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_validation, y_train, y_validation = train_test_split(
    X_train, y_train, test_size=valRatio)

# Add the trailing channel axis that Conv2D expects.  Using -1 lets numpy
# infer the sample count, so these lines keep working if valRatio changes
# (the old hard-coded 48000/12000 only held for valRatio == 0.2).
X_train = X_train.reshape((-1, 28, 28, 1))
X_test = X_test.reshape((-1, 28, 28, 1))
X_validation = X_validation.reshape((-1, 28, 28, 1))




# Light geometric augmentation: small shifts, shear, zoom and rotation.
dataGen = ImageDataGenerator(
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.2,
)
dataGen.fit(X_train)

# One-hot encode the digit labels (classes 0-9).
y_train = to_categorical(y_train, 10)
y_validation = to_categorical(y_validation, 10)
y_test = to_categorical(y_test, 10)


def myModel():
    """Build and compile the CNN used for MNIST digit classification.

    Architecture: 2x Conv(5x5) -> pool -> 2x Conv(3x3) -> pool -> dropout
    -> dense(500) -> dropout -> softmax over the 10 digit classes.

    Returns:
        A compiled keras ``Sequential`` model.
    """
    noOfFiters = 60
    sizeOfFilter1 = (5, 5)
    sizeOfFilter2 = (3, 3)
    sizeOfPool = (2, 2)
    noOfNode = 500

    model = Sequential()
    # Input is a single-channel 28x28 image (channel count hard-coded to 1).
    model.add(Conv2D(noOfFiters, sizeOfFilter1,
                     input_shape=(imageDimensions[0], imageDimensions[1], 1),
                     activation="relu"))
    model.add(Conv2D(noOfFiters, sizeOfFilter1, activation="relu"))
    model.add(MaxPooling2D(pool_size=sizeOfPool))
    model.add(Conv2D(noOfFiters // 2, sizeOfFilter2, activation="relu"))
    model.add(Conv2D(noOfFiters // 2, sizeOfFilter2, activation="relu"))
    model.add(MaxPooling2D(pool_size=sizeOfPool))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(noOfNode, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # `lr` is deprecated in modern keras; `learning_rate` is the supported name.
    model.compile(Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model

model = myModel()


# Train on augmented batches drawn from the generator.
history = model.fit(dataGen.flow(X_train, y_train,
                                 batch_size=batchSizeVal),
                    steps_per_epoch=stepsPerEpoch,
                    epochs=EPOCHS,
                    validation_data=(X_validation, y_validation),
                    shuffle=True)

# Loss curves for the training and validation sets.
plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training ', 'validation'])
plt.title("Loss")
plt.xlabel('epoch')

# Accuracy curves.
plt.figure(2)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training ', 'validation'])
plt.title("Accuracy")
plt.xlabel('epoch')
plt.show()

score = model.evaluate(X_test, y_test, verbose=0)
print("Test Score = ", score[0])
print("Test Accuracy = ", score[1])


# A keras Model holds thread locks, so `pickle.dump(model, ...)` raises
# "TypeError: can't pickle _thread.RLock objects" (the original code also
# rebound `model` to pickle.dump's None return value).  Use keras' own
# serialization instead; reload later with keras.models.load_model().
model.save("model_trained.h5")

我应该怎么做才能让它正常工作?我曾尝试把运行时切换为 CPU(以为这个错误是 GPU 引起的),但即便如此仍然报同样的错误。

【问题讨论】:

    标签: python tensorflow machine-learning


    【解决方案1】:

    Keras 不支持用 pickle 序列化其对象(模型)。基本上,如果一个对象实现了 __getstate__ 和 __setstate__ 方法,pickle 就会使用它们来序列化该对象;问题在于 Keras 的 Model 类并没有实现这两个方法。

    @Zach Moshe 提出了解决此问题的修补程序(hotfix),更多详情请参考他的博客。

    # Hotfix function
    def make_keras_picklable():
        def __getstate__(self):
            model_str = ""
            with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
                save_model(self, fd.name, overwrite=True)
                model_str = fd.read()
            d = {'model_str': model_str}
            return d
    
        def __setstate__(self, state):
            with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
                fd.write(state['model_str'])
                fd.flush()
                model = load_model(fd.name)
            self.__dict__ = model.__dict__
    
    
        cls = Model
        cls.__getstate__ = __getstate__
        cls.__setstate__ = __setstate__
    
    # Run the function
    make_keras_picklable()
    

    请参考下面的工作代码

    import pickle
    import numpy as np
    import tensorflow as tf
    import matplotlib.pyplot as plt
    import keras
    from keras.datasets import mnist
    from keras.utils import to_categorical
    from tensorflow.keras.models import Sequential, load_model, save_model, Model
    from keras.layers import Conv2D , MaxPooling2D, Dense, Flatten,Dropout
    from keras.optimizers import Adam
    from keras.preprocessing.image import ImageDataGenerator
    from sklearn.model_selection import train_test_split
    import tempfile
    
    
    # --- Hyper-parameters ------------------------------------------------
    testRatio = 0.2          # unused below: mnist.load_data() ships its own test split
    valRatio = 0.2           # fraction of the training set held out for validation
    imageDimensions = (28, 28, 1)   # MNIST digits are 28x28 grayscale (1 channel, not 3)

    batchSizeVal = 50
    EPOCHS = 2
    stepsPerEpoch = 2000


    # Load MNIST and split off a validation set.
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train, X_validation, y_train, y_validation = train_test_split(
        X_train, y_train, test_size=valRatio)

    # Add the trailing channel axis Conv2D expects; -1 infers the sample
    # count, so the reshape stays valid if valRatio changes (the hard-coded
    # 48000/12000 only held for valRatio == 0.2).
    X_train = X_train.reshape((-1, 28, 28, 1))
    X_test = X_test.reshape((-1, 28, 28, 1))
    X_validation = X_validation.reshape((-1, 28, 28, 1))
    
     # Hotfix function
    def make_keras_picklable():
        """Monkey-patch keras' Model with __getstate__/__setstate__.

        pickle uses these hooks when a class defines them; the patched
        hooks round-trip the model through a temporary HDF5 file.
        """

        def __getstate__(self):
            # Save the model to a temp .hdf5 file and capture its raw bytes.
            with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
                save_model(self, tmp.name, overwrite=True)
                payload = tmp.read()
            return {'model_str': payload}

        def __setstate__(self, state):
            # Write the bytes back to a temp file, rebuild the model from
            # it, and adopt the rebuilt model's attributes.
            with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
                tmp.write(state['model_str'])
                tmp.flush()
                restored = load_model(tmp.name)
            self.__dict__ = restored.__dict__

        Model.__getstate__ = __getstate__
        Model.__setstate__ = __setstate__

    # Apply the patch before any pickling happens.
    make_keras_picklable()
    
    
    # Light geometric augmentation: small shifts, shear, zoom and rotation.
    dataGen = ImageDataGenerator(
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.1,
        zoom_range=0.2,
    )
    dataGen.fit(X_train)

    # One-hot encode the digit labels (classes 0-9).
    y_train = to_categorical(y_train, 10)
    y_validation = to_categorical(y_validation, 10)
    y_test = to_categorical(y_test, 10)
    
    
    def myModel():
        """Build and compile the CNN used for MNIST digit classification.

        Architecture: 2x Conv(5x5) -> pool -> 2x Conv(3x3) -> pool ->
        dropout -> dense(500) -> dropout -> softmax over 10 digit classes.

        Returns:
            A compiled keras ``Sequential`` model.
        """
        noOfFiters = 60
        sizeOfFilter1 = (5, 5)
        sizeOfFilter2 = (3, 3)
        sizeOfPool = (2, 2)
        noOfNode = 500

        model = Sequential()
        # Input is a single-channel 28x28 image (channel count hard-coded to 1).
        model.add(Conv2D(noOfFiters, sizeOfFilter1,
                         input_shape=(imageDimensions[0], imageDimensions[1], 1),
                         activation="relu"))
        model.add(Conv2D(noOfFiters, sizeOfFilter1, activation="relu"))
        model.add(MaxPooling2D(pool_size=sizeOfPool))
        model.add(Conv2D(noOfFiters // 2, sizeOfFilter2, activation="relu"))
        model.add(Conv2D(noOfFiters // 2, sizeOfFilter2, activation="relu"))
        model.add(MaxPooling2D(pool_size=sizeOfPool))
        model.add(Dropout(0.5))

        model.add(Flatten())
        model.add(Dense(noOfNode, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        # `lr` is deprecated in modern keras; `learning_rate` is the supported name.
        model.compile(Adam(learning_rate=0.001),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        return model
    
    model = myModel()

    # Train on augmented batches; validate on the held-out split each epoch.
    history = model.fit(
        dataGen.flow(X_train, y_train, batch_size=batchSizeVal),
        epochs=EPOCHS,
        steps_per_epoch=X_train.shape[0] // batchSizeVal,
        validation_data=(X_validation, y_validation),
        shuffle=True,
    )

    score = model.evaluate(X_test, y_test, verbose=0)
    print("Test Score = ", score[0])
    print("Test Accuracy = ", score[1])

    # Pickling succeeds now that Model carries __getstate__/__setstate__.
    with open('model.pkl', 'wb') as f:
        pickle.dump(model, f)
    

    输出:

    Epoch 1/2
    960/960 [==============================] - 338s 352ms/step - loss: 1.0066 - accuracy: 0.6827 - val_loss: 0.1417 - val_accuracy: 0.9536
    Epoch 2/2
    960/960 [==============================] - 338s 352ms/step - loss: 0.3542 - accuracy: 0.8905 - val_loss: 0.0935 - val_accuracy: 0.9719
    
    
    Test Score =  0.07476004958152771
    Test Accuracy =  0.9761999845504761
    

    【讨论】:

      猜你喜欢
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 2019-03-03
      • 2018-09-18
      • 2019-05-24
      • 1970-01-01
      相关资源
      最近更新 更多