debug

This commit is contained in:
parent 8a081af0a3
commit b4f00f4378
@@ -44,3 +44,5 @@
 ================ Training Loss (Sun Feb 23 22:29:52 2025) ================
 ================ Training Loss (Sun Feb 23 22:30:40 2025) ================
 ================ Training Loss (Sun Feb 23 22:33:48 2025) ================
+================ Training Loss (Sun Feb 23 22:39:16 2025) ================
+================ Training Loss (Sun Feb 23 22:39:48 2025) ================
Binary file not shown.
@@ -284,10 +284,10 @@ class RomaUnsbModel(BaseModel):
             for nce_layer in self.nce_layers:
                 self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
             self.criterionIdt = torch.nn.L1Loss().to(self.device)
-            self.optimizer_G1 = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizer_D1 = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizer_E1 = torch.optim.Adam(self.netE.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizers = [self.optimizer_G1, self.optimizer_D1, self.optimizer_E1]
+            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_E = torch.optim.Adam(self.netE.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizers = [self.optimizer_G, self.optimizer_D, self.optimizer_E]

             self.cao = ContentAwareOptimization(opt.lambda_inc, opt.eta_ratio)  # loss function
             self.ctn = ContentAwareTemporalNorm()  # generated pseudo optical flow
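The rename above matters beyond readability: in CUT/CycleGAN-style codebases the base model builds its learning-rate schedulers by iterating over self.optimizers, so the list has to reference the surviving optimizer_G / optimizer_D / optimizer_E attributes. A minimal sketch of that convention, assuming the usual linear-decay rule; the helper below and the opt fields it takes are illustrative assumptions, not this repository's code.

import torch

def build_schedulers(optimizers, n_epochs, n_epochs_decay):
    """Hypothetical helper: one LambdaLR scheduler per registered optimizer."""
    def lambda_rule(epoch):
        # Constant LR for n_epochs, then linear decay to zero over n_epochs_decay.
        return 1.0 - max(0, epoch - n_epochs) / float(n_epochs_decay + 1)
    return [torch.optim.lr_scheduler.LambdaLR(o, lr_lambda=lambda_rule) for o in optimizers]

# e.g. self.schedulers = build_schedulers(self.optimizers, opt.n_epochs, opt.n_epochs_decay)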
@@ -483,28 +483,28 @@ class RomaUnsbModel(BaseModel):
         # [[1,576,768],[1,576,768],[1,576,768]]
         # [3,576,768]

-        # gradient of the generated image
-        fake_gradient = torch.autograd.grad(self.mutil_fake_B0_tokens.sum(), self.mutil_fake_B0_tokens, create_graph=True)[0]
-
-        # gradient map
-        self.weight_fake = self.cao.generate_weight_map(fake_gradient)
-        # CTN optical-flow map of the generated image
-        self.f_content = self.ctn(self.weight_fake)
-        # warped images
-        self.warped_real_A_noisy2 = warp(self.real_A_noisy, self.f_content)
-        self.warped_fake_B0 = warp(self.fake_B0,self.f_content)
-
-        # pass through the generator a second time
-        self.warped_fake_B0_2 = self.netG(self.warped_real_A_noisy2, self.time, z_in)
-
-        warped_fake_B0_2=self.warped_fake_B0_2
-        warped_fake_B0=self.warped_fake_B0
-        self.warped_fake_B0_2_resize = self.resize(warped_fake_B0_2)
-        self.warped_fake_B0_resize = self.resize(warped_fake_B0)
-        self.mutil_warped_fake_B0_tokens = self.netPreViT(self.warped_fake_B0_resize, self.atten_layers, get_tokens=True)
-        self.mutil_fake_B0_2_tokens = self.netPreViT(self.warped_fake_B0_2_resize, self.atten_layers, get_tokens=True)
+        ## gradient of the generated image
+        #fake_gradient = torch.autograd.grad(self.mutil_fake_B0_tokens.sum(), self.mutil_fake_B0_tokens, create_graph=True)[0]
+        #
+        ## gradient map
+        #self.weight_fake = self.cao.generate_weight_map(fake_gradient)
+        #
+        ## CTN optical-flow map of the generated image
+        #self.f_content = self.ctn(self.weight_fake)
+        #
+        ## warped images
+        #self.warped_real_A_noisy2 = warp(self.real_A_noisy, self.f_content)
+        #self.warped_fake_B0 = warp(self.fake_B0,self.f_content)
+        #
+        ## pass through the generator a second time
+        #self.warped_fake_B0_2 = self.netG(self.warped_real_A_noisy2, self.time, z_in)
+
+        #warped_fake_B0_2=self.warped_fake_B0_2
+        #warped_fake_B0=self.warped_fake_B0
+        #self.warped_fake_B0_2_resize = self.resize(warped_fake_B0_2)
+        #self.warped_fake_B0_resize = self.resize(warped_fake_B0)
+        #self.mutil_warped_fake_B0_tokens = self.netPreViT(self.warped_fake_B0_resize, self.atten_layers, get_tokens=True)
+        #self.mutil_fake_B0_2_tokens = self.netPreViT(self.warped_fake_B0_2_resize, self.atten_layers, get_tokens=True)


     def compute_D_loss(self):
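The block commented out above is the content-aware branch: take a gradient with respect to the ViT tokens of the first generated frame, turn it into a weight map (ContentAwareOptimization), derive a pseudo optical flow from it (ContentAwareTemporalNorm), warp the noisy input and the first output with that flow, and run the generator a second time on the warped input. For reference, a minimal sketch of the warp step under standard grid_sample semantics; this is a generic stand-in, not the repository's warp implementation, and the [B, 2, H, W] flow layout is an assumption.

import torch
import torch.nn.functional as F

def warp(img, flow):
    # img:  [B, C, H, W] image to be warped
    # flow: [B, 2, H, W] per-pixel displacement in pixels (dx, dy) -- assumed layout
    _, _, h, w = img.shape
    ys, xs = torch.meshgrid(
        torch.arange(h, device=img.device, dtype=img.dtype),
        torch.arange(w, device=img.device, dtype=img.dtype),
        indexing="ij",
    )
    grid_x = xs.unsqueeze(0) + flow[:, 0]   # where to sample along x
    grid_y = ys.unsqueeze(0) + flow[:, 1]   # where to sample along y
    # grid_sample expects coordinates normalized to [-1, 1], ordered (x, y)
    grid_x = 2.0 * grid_x / max(w - 1, 1) - 1.0
    grid_y = 2.0 * grid_y / max(h - 1, 1) - 1.0
    grid = torch.stack((grid_x, grid_y), dim=-1)  # [B, H, W, 2]
    return F.grid_sample(img, grid, align_corners=True)

The disabled code then fed self.warped_real_A_noisy2 back through self.netG and extracted ViT tokens from both outputs; with this commit those tensors are simply not produced.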
@@ -563,9 +563,10 @@ class RomaUnsbModel(BaseModel):
         else:
             loss_global = 0.0

-        if self.opt.lambda_ctn > 0.0:
-            wapped_fake_B = warp(self.fake_B, self.f_content) # use updated self.f_content
-            self.l2_loss = F.mse_loss(self.fake_B_2, wapped_fake_B) # complete the loss calculation
+        self.l2_loss = 0.0
+        #if self.opt.lambda_ctn > 0.0:
+        #    wapped_fake_B = warp(self.fake_B, self.f_content) # use updated self.f_content
+        #    self.l2_loss = F.mse_loss(self.fake_B_2, wapped_fake_B) # complete the loss calculation

         self.loss_G = self.loss_G_GAN + self.opt.lambda_SB * self.loss_SB + self.opt.lambda_ctn * self.l2_loss + loss_global * self.opt.lambda_global
         return self.loss_G
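The net effect of this hunk is that self.l2_loss is pinned to 0.0, so the lambda_ctn term contributes nothing to loss_G while the expression for the total loss stays untouched. For reference, a sketch of the consistency term as it reads when re-enabled; the names mirror the diff, and passing the warp helper in as a parameter is an assumption made only to keep the sketch self-contained.

import torch.nn.functional as F

def ctn_consistency_loss(fake_B, fake_B_2, f_content, warp_fn):
    # Warp the first-pass output with the content-aware flow and penalize its
    # L2 distance to the second-pass output. The debug commit skips this and
    # uses l2_loss = 0.0 instead.
    wapped_fake_B = warp_fn(fake_B, f_content)
    return F.mse_loss(fake_B_2, wapped_fake_B)

# loss_G = loss_G_GAN + lambda_SB * loss_SB + lambda_ctn * l2_loss + lambda_global * loss_global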