diff --git a/checkpoints/ROMA_UNSB_001/loss_log.txt b/checkpoints/ROMA_UNSB_001/loss_log.txt
index 2bc6f9f..d27cb3a 100644
--- a/checkpoints/ROMA_UNSB_001/loss_log.txt
+++ b/checkpoints/ROMA_UNSB_001/loss_log.txt
@@ -44,3 +44,5 @@
 ================ Training Loss (Sun Feb 23 22:29:52 2025) ================
 ================ Training Loss (Sun Feb 23 22:30:40 2025) ================
 ================ Training Loss (Sun Feb 23 22:33:48 2025) ================
+================ Training Loss (Sun Feb 23 22:39:16 2025) ================
+================ Training Loss (Sun Feb 23 22:39:48 2025) ================
diff --git a/models/__pycache__/roma_unsb_model.cpython-39.pyc b/models/__pycache__/roma_unsb_model.cpython-39.pyc
index e819f44..bd981a8 100644
Binary files a/models/__pycache__/roma_unsb_model.cpython-39.pyc and b/models/__pycache__/roma_unsb_model.cpython-39.pyc differ
diff --git a/models/roma_unsb_model.py b/models/roma_unsb_model.py
index ec2fe8d..2c275a7 100644
--- a/models/roma_unsb_model.py
+++ b/models/roma_unsb_model.py
@@ -284,10 +284,10 @@ class RomaUnsbModel(BaseModel):
             for nce_layer in self.nce_layers:
                 self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
             self.criterionIdt = torch.nn.L1Loss().to(self.device)
-            self.optimizer_G1 = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizer_D1 = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizer_E1 = torch.optim.Adam(self.netE.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
-            self.optimizers = [self.optimizer_G1, self.optimizer_D1, self.optimizer_E1]
+            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_E = torch.optim.Adam(self.netE.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizers = [self.optimizer_G, self.optimizer_D, self.optimizer_E]
         self.cao = ContentAwareOptimization(opt.lambda_inc, opt.eta_ratio) #损失函数
         self.ctn = ContentAwareTemporalNorm() #生成的伪光流
 
@@ -483,28 +483,28 @@ class RomaUnsbModel(BaseModel):
         # [[1,576,768],[1,576,768],[1,576,768]]
         # [3,576,768]
 
-        # 生成图像的梯度
-        fake_gradient = torch.autograd.grad(self.mutil_fake_B0_tokens.sum(), self.mutil_fake_B0_tokens, create_graph=True)[0]
+        ## 生成图像的梯度
+        #fake_gradient = torch.autograd.grad(self.mutil_fake_B0_tokens.sum(), self.mutil_fake_B0_tokens, create_graph=True)[0]
+        #
+        ## 梯度图
+        #self.weight_fake = self.cao.generate_weight_map(fake_gradient)
+        #
+        ## 生成图像的CTN光流图
+        #self.f_content = self.ctn(self.weight_fake)
+        #
+        ## 变换后的图片
+        #self.warped_real_A_noisy2 = warp(self.real_A_noisy, self.f_content)
+        #self.warped_fake_B0 = warp(self.fake_B0,self.f_content)
+        #
+        ## 经过第二次生成器
+        #self.warped_fake_B0_2 = self.netG(self.warped_real_A_noisy2, self.time, z_in)
 
-        # 梯度图
-        self.weight_fake = self.cao.generate_weight_map(fake_gradient)
-
-        # 生成图像的CTN光流图
-        self.f_content = self.ctn(self.weight_fake)
-
-        # 变换后的图片
-        self.warped_real_A_noisy2 = warp(self.real_A_noisy, self.f_content)
-        self.warped_fake_B0 = warp(self.fake_B0,self.f_content)
-
-        # 经过第二次生成器
-        self.warped_fake_B0_2 = self.netG(self.warped_real_A_noisy2, self.time, z_in)
-
-        warped_fake_B0_2=self.warped_fake_B0_2
-        warped_fake_B0=self.warped_fake_B0
-        self.warped_fake_B0_2_resize = self.resize(warped_fake_B0_2)
-        self.warped_fake_B0_resize = self.resize(warped_fake_B0)
-        self.mutil_warped_fake_B0_tokens = self.netPreViT(self.warped_fake_B0_resize, self.atten_layers, get_tokens=True)
-        self.mutil_fake_B0_2_tokens = self.netPreViT(self.warped_fake_B0_2_resize, self.atten_layers, get_tokens=True)
+        #warped_fake_B0_2=self.warped_fake_B0_2
+        #warped_fake_B0=self.warped_fake_B0
+        #self.warped_fake_B0_2_resize = self.resize(warped_fake_B0_2)
+        #self.warped_fake_B0_resize = self.resize(warped_fake_B0)
+        #self.mutil_warped_fake_B0_tokens = self.netPreViT(self.warped_fake_B0_resize, self.atten_layers, get_tokens=True)
+        #self.mutil_fake_B0_2_tokens = self.netPreViT(self.warped_fake_B0_2_resize, self.atten_layers, get_tokens=True)
 
 
     def compute_D_loss(self):
@@ -563,9 +563,10 @@ class RomaUnsbModel(BaseModel):
         else:
             loss_global = 0.0
 
-        if self.opt.lambda_ctn > 0.0:
-            wapped_fake_B = warp(self.fake_B, self.f_content) # use updated self.f_content
-            self.l2_loss = F.mse_loss(self.fake_B_2, wapped_fake_B) # complete the loss calculation
+        self.l2_loss = 0.0
+        #if self.opt.lambda_ctn > 0.0:
+        #    wapped_fake_B = warp(self.fake_B, self.f_content) # use updated self.f_content
+        #    self.l2_loss = F.mse_loss(self.fake_B_2, wapped_fake_B) # complete the loss calculation
 
         self.loss_G = self.loss_G_GAN + self.opt.lambda_SB * self.loss_SB + self.opt.lambda_ctn * self.l2_loss + loss_global * self.opt.lambda_global
         return self.loss_G