【发布时间】:2023-03-21 15:34:01
【问题描述】:
我已经尝试过多次修复,我也使用了来自functional.py 的示例代码,然后我得到了相同的“损失”值。我该如何解决这个问题?
我的图书馆
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
import matplotlib
import pandas as pd
from torch.autograd import Variable
from torch.utils.data import DataLoader,TensorDataset
from sklearn.model_selection import train_test_split
import warnings
import os
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as transforms
Mnis 的数据集
train=pd.read_csv("train.csv",dtype=np.float32)
targets_numpy = train.label.values
features_numpy = train.loc[:,train.columns != "label"].values/255 # normalization
features_train, features_test, targets_train, targets_test = train_test_split(features_numpy,
targets_numpy,test_size = 0.2,
random_state = 42)
featuresTrain=torch.from_numpy(features_train)
targetsTrain=torch.from_numpy(targets_train)
featuresTest=torch.from_numpy(features_test)
targetsTest=torch.from_numpy(targets_test)
batch_size=100
n_iterations=10000
num_epochs=n_iterations/(len(features_train)/batch_size)
num_epochs=int(num_epochs)
train=torch.utils.data.TensorDataset(featuresTrain,targetsTrain)
test=torch.utils.data.TensorDataset(featuresTest,targetsTest)
print(type(train))
train_loader=DataLoader(train,batch_size=batch_size,shuffle=False)
test_loader=DataLoader(test,batch_size=batch_size,shuffle=False)
print(type(train_loader))
plt.imshow(features_numpy[226].reshape(28,28))
plt.axis("off")
plt.title(str(targets_numpy[226]))
plt.show()
这是我的模型
class ANNModel(nn.Module):
def __init__(self,input_dim,hidden_dim,output_dim):
super(ANNModel,self).__init__()
self.fc1=nn.Linear(input_dim,hidden_dim)
self.relu1=nn.ReLU()
self.fc2=nn.Linear(hidden_dim,hidden_dim)
self.tanh2=nn.Tanh()
self.fc4=nn.Linear(hidden_dim,output_dim)
def forward (self,x): #forward ile elde edilen layer lar bağlanır
out=self.fc1(x)
out=self.relu1(out)
out=self.fc2(out)
out=self.tanh2(out)
out=self.fc4(out)
return out
input_dim=28*28
hidden_dim=150
output_dim=10
model=ANNModel(input_dim,hidden_dim,output_dim)
error=nn.CrossEntropyLoss()
learning_rate=0.02
optimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)
问题出在哪里
count=0
loss_list=[]
iteration_list=[]
accuracy_list = []
for epoch in range(num_epochs):
for i,(images,labels) in enumerate(train_loader):
train=Variable(images.view(-1,28*28))
labels=Variable(labels)
#print(labels)
#print(outputs)
optimizer.zero_grad()
#forward propagation
outputs=model(train)
#outputs=torch.randn(784,10,requires_grad=True)
##labels=torch.randn(784,10).softmax(dim=1)
loss=error(outputs,labels)
loss.backward()
optimizer.step()
count+=1
if count %50 ==0:
correct=0
total=0
for images,labels in test_loader:
test=Variable(images.view(-1,28*28))
outputs=model(test)
predicted=torch.max(outputs.data,1)[1] #mantık???
total+= len(labels)
correct+=(predicted==labels).sum()
accuracy=100 *correct/float(total)
loss_list.append(loss.data)
iteration_list.append(count)
accuracy_list.append(accuracy)
if count %500 ==0 :
print('Iteration: {} Loss: {} Accuracy: {} %'.format(count, loss.data, accuracy))
这给了
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-9-9e53988ad250> in <module>()
26 #outputs=torch.randn(784,10,requires_grad=True)
27 ##labels=torch.randn(784,10).softmax(dim=1)
---> 28 loss=error(outputs,labels)
29
30
2 frames
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
2844 if size_average is not None or reduce is not None:
2845 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2846 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
2847
2848
RuntimeError: expected scalar type Long but found Float
【问题讨论】:
-
网络应该做什么?跨 10 个类对图像进行分类?
-
没错,我们有10个类需要分类。
-
在这种情况下,您的网络缺少使用
softmax函数的非常重要的最终激活层,因为您对属于 10 个互斥的任何样本的概率感兴趣> 类。 -
但是教练说; “PyTorch 会自动应用 softmax”,我们不需要任何代码块。如果不是,我可以写什么?
-
你可以定义像
self.softmax = nn.Softmax(dim=1)这样的softmax函数。现在在forward函数中的最终输出之后应用它。所以out = self.softmax(out)并返回这个值。
标签: python machine-learning deep-learning pytorch artificial-intelligence