From 378825d5e049e70897587fd82204afef68d401d4 Mon Sep 17 00:00:00 2001
From: atticuszz <1831768457@qq.com>
Date: Wed, 3 Jul 2024 11:37:01 +0800
Subject: [PATCH] fix: failed to show lr in wandb

---
 src/eval/logger.py | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/src/eval/logger.py b/src/eval/logger.py
index b34cbd3..411b187 100644
--- a/src/eval/logger.py
+++ b/src/eval/logger.py
@@ -60,17 +60,13 @@ def log_LR(self, model: torch.nn.Module, schedulers: list, step: int):
         :param schedulers: List of schedulers corresponding to the optimizers
         :param step: Current step number
         """
-        lr_info = {}
-        scheduler_info = {}
         for i, (optimizer, scheduler) in enumerate(zip(model.optimizers, schedulers)):
             for j, param_group in enumerate(optimizer.param_groups):
                 lr = param_group["lr"]
-                name = param_group.get("name", f"optimizer_{i}_group_{j}")
-                lr_info[f"Learning Rate/{name}"] = lr
-                scheduler_info[f"Scheduler Type/{name}"] = scheduler.__class__.__name__
-
-                wandb.log(lr_info, step=step)
-                wandb.log(scheduler_info, step=step)
+                param_name = param_group.get("name", f"optimizer_{i}_group_{j}")
+                s_type = f"Sch:{scheduler.__class__.__name__}"
+                l_name = s_type + f" LR: {param_name}"
+                wandb.log({l_name: lr}, step=step)  # BUG: failed to show in wandb

     def log_gradients(self, model: torch.nn.Module, step: int):
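
For reference, one plausible cause of the `# BUG` note above is how wandb handles explicit steps: `wandb.log` drops (with a warning) any call whose `step` is lower than the run's current step, so many small per-group calls interleaved with other logging are easy to lose. The sketch below is a minimal, self-contained illustration of the batched alternative, collecting every learning rate into one dict and issuing a single `wandb.log` call per step. `log_learning_rates` and the throwaway optimizer/scheduler in the demo are hypothetical stand-ins, not code from this repo.

```python
import torch
import wandb


def log_learning_rates(optimizers, schedulers, step):
    """Collect every param group's LR into one dict and log it once.

    Hypothetical helper, not from src/eval/logger.py: batching the
    entries into a single wandb.log call per step avoids losing metrics
    when `step` is reused or other log calls advance the run's step.
    """
    lr_info = {}
    for i, (optimizer, scheduler) in enumerate(zip(optimizers, schedulers)):
        sched_name = scheduler.__class__.__name__
        for j, param_group in enumerate(optimizer.param_groups):
            group_name = param_group.get("name", f"optimizer_{i}_group_{j}")
            # Slash-separated keys render as a grouped section in the wandb UI.
            lr_info[f"Learning Rate/{sched_name}/{group_name}"] = param_group["lr"]
    wandb.log(lr_info, step=step)


if __name__ == "__main__":
    # Minimal demo with a throwaway parameter; offline mode needs no login.
    wandb.init(project="lr-logging-demo", mode="offline")
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    for step in range(5):
        optimizer.step()
        scheduler.step()
        log_learning_rates([optimizer], [scheduler], step=step)
    wandb.finish()
```

Folding the scheduler class name into the key path, rather than logging it as a separate metric as the pre-patch code did, keeps each chart self-describing while still producing exactly one logged entry per param group per step.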