From 49a30f768d75618990bc1e27ab4e26be019e17e7 Mon Sep 17 00:00:00 2001
From: Fangjun Kuang
Date: Thu, 24 Oct 2024 17:46:43 +0800
Subject: [PATCH] Avoid using lr from checkpoint.

---
 egs/librispeech/ASR/zipformer/optim.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/egs/librispeech/ASR/zipformer/optim.py b/egs/librispeech/ASR/zipformer/optim.py
index 6f5180e29e..f99b4e31fe 100644
--- a/egs/librispeech/ASR/zipformer/optim.py
+++ b/egs/librispeech/ASR/zipformer/optim.py
@@ -787,7 +787,9 @@ def state_dict(self):
         is not the optimizer.
         """
         return {
-            "base_lrs": self.base_lrs,
+            # The user might try to override the base_lr, so don't include it in the state.
+            # Previously it was included.
+            # "base_lrs": self.base_lrs,
             "epoch": self.epoch,
             "batch": self.batch,
         }
@@ -799,7 +801,12 @@ def load_state_dict(self, state_dict):
             state_dict (dict): scheduler state. Should be an object
                 returned from a call to :meth:`state_dict`.
         """
+        # Restoring base_lrs below is a work-around for a previous problem
+        # where base_lrs was written to the state dict.
+        base_lrs = self.base_lrs
         self.__dict__.update(state_dict)
+        self.base_lrs = base_lrs
+
     def get_last_lr(self) -> List[float]:
         """Return last computed learning rate by current scheduler. Will be
         a list of float."""
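
Below is a minimal, self-contained sketch of the behaviour this patch targets (illustrative only; ToyScheduler and its fields are not part of optim.py): base_lrs is kept from the constructor, so a base_lr supplied when resuming training (e.g. via a --base-lr option) is not silently overwritten by the value stored in an old checkpoint.

from typing import Dict, List


class ToyScheduler:
    """Toy stand-in for an LR scheduler; only the checkpoint logic matters here."""

    def __init__(self, base_lr: float):
        self.base_lrs: List[float] = [base_lr]
        self.epoch = 0
        self.batch = 0

    def state_dict(self) -> Dict:
        # As in the patch: base_lrs is intentionally not saved.
        return {"epoch": self.epoch, "batch": self.batch}

    def load_state_dict(self, state_dict: Dict) -> None:
        # Work-around for old checkpoints that still contain "base_lrs":
        # keep the value from the constructor, not the one in the checkpoint.
        base_lrs = self.base_lrs
        self.__dict__.update(state_dict)
        self.base_lrs = base_lrs


# Old checkpoint written before this patch (it still carries base_lrs).
old_ckpt = {"base_lrs": [0.045], "epoch": 3, "batch": 1000}
sched = ToyScheduler(base_lr=0.02)  # user overrides the lr when resuming
sched.load_state_dict(old_ckpt)
assert sched.base_lrs == [0.02]  # the new lr survives; epoch/batch are restored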