【发布时间】:2019-01-21 23:16:57
【问题描述】:
我在 np.save 上收到此错误。请让我知道原因以及如何解决此问题。 以下是我的代码:
import cv2
import numpy as np
import os
from random import shuffle
from tqdm import tqdm
import tflearn
import tensorflow as tf
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout,fully_connected
from tflearn.layers.estimator import regression
TRAIN_DIR = "/Users/preetisingh/Documents/PlantsDataset/train"
TEST_DIR = "/Users/preetisingh/Documents/PlantsDataset/test"
IMG_SIZE = 50
LR = 1e-3
MODEL_NAME = "plants-{}-{}.model".format(LR, "2conv-basic")
label_dict = {
"Abiotic Sunburn": 0,
"Alternaria Brown Spot": 1,
"Anthracnose of Banana": 2,
"Anthracnose of Cotton": 3,
"Bacterial Blight of Rice": 4,
"Bacterial Blight of Cotton": 5,
"Anthracnose of Grape": 6,
"Bacterial Canker": 7,
"Banana Bract Mosaic": 8,
}
def label_img(img):
label = label_dict[img]
return label
def create_train_data():
training_data = []
for direc in tqdm(os.listdir(TRAIN_DIR)):
j = 0
label = direc
disease = TRAIN_DIR + "/" + label
if label == ".DS_Store" or label == ".git":
continue
for img in os.listdir(disease):
labeldata = label_img(label)
labeldata = tf.one_hot(labeldata, 9)
if img == ".DS_Store":
continue
path = os.path.join(disease, img)
img = cv2.resize(
cv2.imread(path, cv2.IMREAD_GRAYSCALE), (IMG_SIZE, IMG_SIZE)
)
training_data.append([np.array(img), np.array(labeldata)])
j += 1
shuffle(training_data)
np.save("plants_data.npy", training_data)
return training_data
def create_test_data():
testing_data = []
for img in tqdm(os.listdir(TEST_DIR)):
path = os.path.join(TEST_DIR, img)
img_num = img.split(".")[0]
img = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE), (IMG_SIZE, IMG_SIZE))
testing_data.append([np.array(img), img_num])
return testing_data
train_data = create_train_data()
test_data = create_test_data()
train = train_data[:-11]
test = train_data[-11:]
X = (
np.array([i[0] for i in train])
.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
.astype(np.float32)
)
Y = [i[1] for i in train]
test_x = (
np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 1).astype(np.float32)
)
test_y = [i[1] for i in test]
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name="input")
input_layer = tf.reshape(X, [-1, IMG_SIZE, IMG_SIZE, 1])
convnet = conv_2d(input_layer, 32, 2, activation="relu")
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation="relu")
convnet = max_pool_2d(convnet, 2)
convnet = fully_connected(convnet, 32, activation="relu")
convnet = dropout(convnet, 0.4)
logits = tf.layers.dense(inputs=convnet, units=9)
convnet = fully_connected(logits, 9, activation="softmax")
convnet = regression(
logits,
optimizer="adam",
learning_rate=LR,
loss="categorical_crossentropy",
name="targets",
)
model = tflearn.DNN(logits, tensorboard_dir="log")
if os.path.exists("{}.meta".format(MODEL_NAME)):
model.load(MODEL_NAME)
print("model loaded")
try:
model.fit(
{"input": X},
{"targets": Y},
n_epoch=3,
validation_set=({"input": test_x}, {"targets": test_y}),
snapshot_step=500,
show_metric=False,
run_id=MODEL_NAME,
)
except ValueError as e:
print(e)
我是机器学习的新手,正在尝试对 9 种植物病害进行图像分类。 我在 X 中有图像张量,在 Y 中有来自 label_dict 字典的目标标签值。
这是我得到的错误:
Traceback (most recent call last):
File "hello.py", line 72, in <module>
train_data = create_train_data()
File "hello.py", line 58, in create_train_data
np.save("plants_data.npy", training_data)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/numpy/lib/npyio.py", line 521, in save
pickle_kwargs=pickle_kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/numpy/lib/format.py", line 593, in write_array
pickle.dump(array, fp, protocol=2, **pickle_kwargs)
TypeError: can't pickle _thread.lock objects
谁能帮我解决这个错误?
【问题讨论】:
-
这段代码有多少是你自己的,什么是有效的例子?
traininf_data是包含在数组中的列表。 pickle 用于保存的非数组部分。 -
我在 youtube 上为这段代码推荐了一个视频。我不明白保存training_data的目的。 @hpaulj
标签: numpy tensorflow machine-learning