【发布时间】:2020-09-08 19:27:57
【问题描述】:
我想在 vae 重建损失中使用具有 reduction='mean' 的 L1loss 和 BCELoss
但它对所有不同的输入产生相同的结果（例如 landmark 的重建结果对不同输入完全一样）。
所以我使用 reduction='sum' 它会产生正确的结果,即不同的输出对应不同的输入。
如何才能正确使用 reduction='mean'（均值缩减）？
# Reconstruction and KL criteria, all with element-wise mean reduction.
# NOTE(review): KLDLoss is a project-defined class (not torch.nn) — confirm
# its 'mean' reduction averages over the same number of elements as the
# reconstruction losses, otherwise the loss weights below are miscalibrated.
L1Loss = nn.L1Loss(reduction='mean').to(device)
BCELoss = nn.BCELoss(reduction='mean').to(device)
kld_criterion = KLDLoss(reduction='mean').to(device)
训练中
# One training step: the model returns the mask reconstruction plus
# (reconstruction, mean, logvar) tuples for the face and landmark branches.
rec_m, (rec_f, mean_f, logvar_f), (rec_l, mean_l, logvar_l) = model(origin)
# NOTE(review): CELoss is not defined in the snippet above (only L1Loss,
# BCELoss and kld_criterion are) — presumably BCELoss was intended; confirm.
lm_loss = CELoss(rec_l, lm)
f_loss = L1Loss(rec_f, f)
m_loss = CELoss(rec_m, m)
# KL terms take (mean, logvar) from each encoder branch.
lm_kld_loss = kld_criterion(mean_l, logvar_l)
f_kld_loss = kld_criterion(mean_f, logvar_f)
# Weighted total: reconstruction terms dominate; KL terms are lightly weighted.
loss = 4000*(f_loss + m_loss) + 30 * (lm_kld_loss + f_kld_loss) + 2000 * lm_loss
和型号代码
class VAE_NET(nn.Module):
    """Two-branch VAE.

    Separate encoders produce latents for the landmark branch (``l``) and the
    face branch (``f``); the mask (``m``) and face decoders consume the
    concatenation of both latents (size ``2 * nz``), while the landmark
    decoder uses only its own latent.
    """

    def __init__(self, nc=3, ndf=32, nef=32, nz=128, isize=128,
                 device=torch.device("cuda:0"), is_train=True):
        super(VAE_NET, self).__init__()
        self.nz = nz
        # Encoders: one latent branch for landmarks, one for the face image.
        self.l_encoder = Encoder(nc=nc, nef=nef, nz=nz, isize=isize, device=device)
        self.f_encoder = Encoder(nc=nc, nef=nef, nz=nz, isize=isize, device=device)
        # Decoders: mask/face decoders take the concatenated (2 * nz) latent.
        self.l_decoder = Decoder(nc=nc, ndf=ndf, nz=nz, isize=isize)
        self.m_decoder = Decoder(nc=nc, ndf=ndf, nz=nz * 2, isize=isize)
        self.f_decoder = Decoder(nc=nc, ndf=ndf, nz=nz * 2, isize=isize)
        if not is_train:
            # BUG FIX: the original iterated self.encoder / self.decoder,
            # which do not exist on this module (AttributeError at runtime).
            # Freeze every registered submodule's parameters instead.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, x):
        latent_l, mean_l, logvar_l = self.l_encoder(x)
        latent_f, mean_f, logvar_f = self.f_encoder(x)
        concat_latent = torch.cat((latent_l, latent_f), 1)
        rec_l = self.l_decoder(latent_l)
        rec_m = self.m_decoder(concat_latent)
        rec_f = self.f_decoder(concat_latent)
        # BUG FIX: return logvar_*, not latent_*. The training loop unpacks
        # (rec, mean, logvar) and feeds mean/logvar into the KLD criterion;
        # returning the sampled latent in that slot silently corrupts the
        # KL term (a likely cause of the identical-output symptom).
        return rec_m, (rec_f, mean_f, logvar_f), (rec_l, mean_l, logvar_l)
l 代表人脸地标
m 代表口罩
f 代表面部
【问题讨论】:
标签: machine-learning pytorch loss