
Commit 1b81cca

Fix TypeError: LRScheduler.__init__() with PyTorch 2.7

PyTorch 2.7 removed the `verbose` keyword argument from `LRScheduler.__init__()`, so every example scheduler that still forwarded it failed to construct with:

`TypeError: LRScheduler.__init__() got an unexpected keyword argument 'verbose'`
Parent commit: 13e3043

6 files changed: 7 additions (+), 17 deletions (−)


examples/asr/emformer_rnnt/common.py

Lines changed: 1 addition & 1 deletion

@@ -93,7 +93,7 @@ def forward(self, input):
 
 
 class WarmupLR(torch.optim.lr_scheduler._LRScheduler):
-    def __init__(self, optimizer, warmup_updates, last_epoch=-1, verbose=False):
+    def __init__(self, optimizer, warmup_updates, last_epoch=-1):
         self.warmup_updates = warmup_updates
         super().__init__(optimizer, last_epoch=last_epoch)
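For reference, a minimal runnable sketch of the fixed pattern on PyTorch 2.7. The class mirrors the WarmupLR above, but the linear warmup rule in get_lr is an illustrative assumption, since this hunk does not show the repository's actual schedule:

import torch
from torch.optim.lr_scheduler import _LRScheduler

class WarmupLR(_LRScheduler):
    """Linearly warms the learning rate up to each base LR over ``warmup_updates`` steps."""

    def __init__(self, optimizer, warmup_updates, last_epoch=-1):
        self.warmup_updates = warmup_updates
        # No ``verbose`` kwarg: PyTorch 2.7's LRScheduler.__init__ rejects it.
        super().__init__(optimizer, last_epoch=last_epoch)

    def get_lr(self):
        # Illustrative warmup rule (an assumption, not the repo's exact formula).
        scale = min(1.0, self._step_count / self.warmup_updates)
        return [base_lr * scale for base_lr in self.base_lrs]

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = WarmupLR(optimizer, warmup_updates=100)  # constructs cleanly on 2.7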

examples/asr/librispeech_conformer_rnnt/lightning.py

Lines changed: 1 addition & 4 deletions

@@ -27,8 +27,6 @@ class WarmupLR(torch.optim.lr_scheduler._LRScheduler):
         force_anneal_step (int): scheduler step at which annealing of learning rate begins.
         anneal_factor (float): factor to scale base learning rate by at each annealing step.
         last_epoch (int, optional): The index of last epoch. (Default: -1)
-        verbose (bool, optional): If ``True``, prints a message to stdout for
-            each update. (Default: ``False``)
     """
 
     def __init__(
@@ -38,12 +36,11 @@ def __init__(
         force_anneal_step: int,
         anneal_factor: float,
         last_epoch=-1,
-        verbose=False,
     ):
         self.warmup_steps = warmup_steps
         self.force_anneal_step = force_anneal_step
         self.anneal_factor = anneal_factor
-        super().__init__(optimizer, last_epoch=last_epoch, verbose=verbose)
+        super().__init__(optimizer, last_epoch=last_epoch)
 
     def get_lr(self):
         if self._step_count < self.force_anneal_step:

examples/asr/librispeech_conformer_rnnt_biasing/lightning.py

Lines changed: 1 addition & 4 deletions

@@ -26,8 +26,6 @@ class WarmupLR(torch.optim.lr_scheduler._LRScheduler):
         force_anneal_step (int): scheduler step at which annealing of learning rate begins.
         anneal_factor (float): factor to scale base learning rate by at each annealing step.
         last_epoch (int, optional): The index of last epoch. (Default: -1)
-        verbose (bool, optional): If ``True``, prints a message to stdout for
-            each update. (Default: ``False``)
     """
 
     def __init__(
@@ -37,12 +35,11 @@ def __init__(
         force_anneal_step: int,
         anneal_factor: float,
         last_epoch=-1,
-        verbose=False,
     ):
         self.warmup_steps = warmup_steps
         self.force_anneal_step = force_anneal_step
         self.anneal_factor = anneal_factor
-        super().__init__(optimizer, last_epoch=last_epoch, verbose=verbose)
+        super().__init__(optimizer, last_epoch=last_epoch)
 
     def get_lr(self):
         if self._step_count < self.force_anneal_step:

examples/avsr/schedulers.py

Lines changed: 1 addition & 2 deletions

@@ -11,11 +11,10 @@ def __init__(
         total_epochs: int,
         steps_per_epoch: int,
         last_epoch=-1,
-        verbose=False,
     ):
         self.warmup_steps = warmup_epochs * steps_per_epoch
         self.total_steps = total_epochs * steps_per_epoch
-        super().__init__(optimizer, last_epoch=last_epoch, verbose=verbose)
+        super().__init__(optimizer, last_epoch=last_epoch)
 
     def get_lr(self):
         if self._step_count < self.warmup_steps:
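Callers that relied on verbose=True printing each update can log the learning rate explicitly instead. A minimal sketch, with LambdaLR standing in for the schedulers touched by this commit; scheduler.get_last_lr() is the supported way to read the current values:

import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = LambdaLR(optimizer, lr_lambda=lambda step: min(1.0, (step + 1) / 100))

for step in range(3):
    optimizer.step()
    scheduler.step()
    # Replacement for the removed verbose=True behavior: print updates yourself.
    print(f"step {step}: lr={scheduler.get_last_lr()}")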

examples/hubert/lightning_modules.py

Lines changed: 2 additions & 4 deletions

@@ -34,11 +34,10 @@ def __init__(
         warmup_updates: int,
         max_updates: int,
         last_epoch: int = -1,
-        verbose: bool = False,
     ):
         self.warmup_updates = warmup_updates
         self.max_updates = max_updates
-        super().__init__(optimizer, last_epoch=last_epoch, verbose=verbose)
+        super().__init__(optimizer, last_epoch=last_epoch)
 
     def get_lr(self):
         if self._step_count <= self.warmup_updates:
@@ -62,15 +61,14 @@ def __init__(
         init_lr_scale: float = 0.01,
         final_lr_scale: float = 0.05,
         last_epoch: int = -1,
-        verbose: bool = False,
     ):
         self.warmup_updates = warmup_updates
         self.hold_updates = hold_updates
         self.decay_updates = decay_updates
         self.init_lr_scale = init_lr_scale
         self.final_lr_scale = final_lr_scale
 
-        super().__init__(optimizer, last_epoch=last_epoch, verbose=verbose)
+        super().__init__(optimizer, last_epoch=last_epoch)
 
     def get_lr(self):
         if self._step_count <= self.warmup_updates:
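Downstream code that must run on both pre-2.7 and 2.7+ PyTorch could forward verbose conditionally rather than dropping it. A sketch of one such shim; the helper name scheduler_verbose_kwargs is hypothetical and not part of this commit:

import inspect
from torch.optim.lr_scheduler import LRScheduler

def scheduler_verbose_kwargs(verbose: bool = False) -> dict:
    # Forward ``verbose`` only if the installed LRScheduler still accepts it.
    params = inspect.signature(LRScheduler.__init__).parameters
    return {"verbose": verbose} if "verbose" in params else {}

A subclass would then call super().__init__(optimizer, last_epoch=last_epoch, **scheduler_verbose_kwargs(verbose)). This commit takes the simpler route of removing the argument outright.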

examples/self_supervised_learning/lr_schedulers/_linear_decay.py

Lines changed: 1 addition & 2 deletions

@@ -11,11 +11,10 @@ def __init__(
         warmup_updates: int,
         max_updates: int,
         last_epoch: int = -1,
-        verbose: bool = False,
     ):
         self.warmup_updates = warmup_updates
         self.max_updates = max_updates
-        super().__init__(optimizer, last_epoch=last_epoch, verbose=verbose)
+        super().__init__(optimizer, last_epoch=last_epoch)
 
     def get_lr(self):
         if self._step_count <= self.warmup_updates:
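A quick smoke test for the fix, exercising the WarmupLR from common.py above; the import assumes the examples tree is importable from the working directory, which may not hold in every checkout:

import torch
from examples.asr.emformer_rnnt.common import WarmupLR  # import path is an assumption

def test_warmup_lr_builds_without_verbose():
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    # Before this commit, construction raised the TypeError on PyTorch 2.7.
    scheduler = WarmupLR(optimizer, warmup_updates=10)
    for _ in range(5):
        optimizer.step()
        scheduler.step()

test_warmup_lr_builds_without_verbose()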
