
Commit b543100

Remove dupe test
1 parent cfad43f commit b543100


2 files changed: +2 -35 lines


CHANGELOG.md

Lines changed: 1 addition & 1 deletion
@@ -77,8 +77,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Added `opt_idx` to scheduler config if not assigned by user ([#11247](https://github.com/PyTorchLightning/pytorch-lightning/pull/11247))
 
-- Added support for optimizer step progress tracking with manual optimization ([#11848](https://github.com/PyTorchLightning/pytorch-lightning/pull/11848))
 
+- Added support for optimizer step progress tracking with manual optimization ([#11848](https://github.com/PyTorchLightning/pytorch-lightning/pull/11848))
 
 
 - Return the output of the `optimizer.step`. This can be useful for `LightningLite` users, manual optimization users, or users overriding `LightningModule.optimizer_step` ([#11711](https://github.com/PyTorchLightning/pytorch-lightning/pull/11711))
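
For context, the two CHANGELOG entries above describe manual optimization features. Below is a minimal sketch (not part of this commit; the module, layer size, and LBFGS choice are illustrative assumptions) of how the value returned by `optimizer.step` can be consumed by a manual optimization user after [#11711](https://github.com/PyTorchLightning/pytorch-lightning/pull/11711):

import torch
from pytorch_lightning import LightningModule


class ManualLBFGSModel(LightningModule):
    """Hypothetical example module; not taken from the repository."""

    def __init__(self):
        super().__init__()
        self.automatic_optimization = False  # manual optimization
        self.layer = torch.nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()

        def closure():
            loss = self.layer(batch).sum()
            opt.zero_grad()
            self.manual_backward(loss)
            return loss

        # After #11711, `step` returns whatever the wrapped optimizer's
        # `step(closure)` returns, e.g. the closure's loss for LBFGS.
        loss = opt.step(closure=closure)
        self.log("train_loss", loss)

    def configure_optimizers(self):
        return torch.optim.LBFGS(self.parameters(), lr=0.1)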

tests/trainer/optimization/test_manual_optimization.py

Lines changed: 1 addition & 34 deletions
@@ -591,43 +591,9 @@ def optimizer_closure():
     assert trainer.global_step == limit_train_batches
 
 
-@patch("torch.optim.SGD.step")
-def test_step_with_optimizer_closure_and_extra_arguments(step_mock, tmpdir):
-    """Tests that `step` works with optimizer_closure and extra arguments."""
-
-    class TestModel(BoringModel):
-        def __init__(self):
-            super().__init__()
-            self.automatic_optimization = False
-
-        def on_train_start(self) -> None:
-            step_mock.reset_mock()
-
-        def training_step(self, batch, batch_idx):
-            opt = self.optimizers()
-            opt.step(closure=lambda: ..., foo=123)
-
-    model = TestModel()
-    model.training_epoch_end = None
-
-    limit_train_batches = 2
-    trainer = Trainer(
-        default_root_dir=tmpdir,
-        limit_train_batches=limit_train_batches,
-        limit_val_batches=0,
-        max_epochs=1,
-    )
-
-    trainer.fit(model)
-    assert step_mock.mock_calls == [call(closure=ANY, foo=123) for _ in range(limit_train_batches)]
-    assert trainer.global_step == limit_train_batches
-
-
 @patch("torch.optim.Adam.step")
 @patch("torch.optim.SGD.step")
 def test_step_with_optimizer_closure_with_different_frequencies(mock_sgd_step, mock_adam_step, tmpdir):
-    """Tests that `step` works with optimizer_closure and different accumulated_gradient frequency."""
-
     class TestModel(BoringModel):
         def __init__(self):
             super().__init__()
@@ -666,6 +632,7 @@ def dis_closure():
             # this will accumulate gradients for 2 batches and then call opt_gen.step()
             gen_closure()
             if batch_idx % 2 == 0:
+                # passing a custom kwarg
                 opt_gen.step(closure=gen_closure, optim="sgd")
                 opt_gen.zero_grad()
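
The comment added in the hunk above marks where the remaining test forwards a custom keyword argument through `optimizer.step`. A minimal sketch of that pattern, assuming a hypothetical `CustomSGD` optimizer that accepts the extra `optim` kwarg (in the real test, `torch.optim.SGD.step` is simply mocked):

import torch
from pytorch_lightning import LightningModule


class CustomSGD(torch.optim.SGD):
    """Hypothetical optimizer whose `step` accepts an extra keyword."""

    def step(self, closure=None, optim=None):
        # `optim` arrives via `opt.step(closure=..., optim="sgd")`
        assert optim == "sgd"
        return super().step(closure)


class KwargModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.automatic_optimization = False
        self.layer = torch.nn.Linear(32, 2)  # feature size is an assumption

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()

        def closure():
            loss = self.layer(batch).sum()
            opt.zero_grad()
            self.manual_backward(loss)
            return loss

        # Extra kwargs are forwarded to the wrapped optimizer's `step`.
        opt.step(closure=closure, optim="sgd")

    def configure_optimizers(self):
        return CustomSGD(self.parameters(), lr=0.1)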
