Commit 0f8fd20

Remove epoch from trainer.logged_metrics (#9904)
1 parent 4610fdd commit 0f8fd20
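
In practice, the convenience `"epoch"` key that the logger connector injects for loggers no longer appears in `trainer.logged_metrics`; only the values logged via `self.log(..., logger=True)` remain. Below is a minimal sketch of the observable difference — the model, data, and metric name are made up for illustration and assume a Lightning version around this commit with the default (TensorBoard) logger available:

import torch
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl


class TinyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        loss = self.layer(batch[0]).sum()
        # logger=True routes the value to the logger and into trainer.logged_metrics
        self.log("a_val", loss, logger=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)


train_data = DataLoader(TensorDataset(torch.randn(8, 32)), batch_size=4)
trainer = pl.Trainer(max_epochs=1, log_every_n_steps=1, enable_checkpointing=False)
trainer.fit(TinyModel(), train_data)

# before this commit: {"a_val", "epoch"}
# after this commit:  {"a_val"}
print(set(trainer.logged_metrics))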

File tree

8 files changed: +23 -19 lines changed

  CHANGELOG.md
  docs/source/common/trainer.rst
  pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
  tests/accelerators/test_multi_nodes_gpu.py
  tests/loops/batch/test_truncated_bptt.py
  tests/trainer/logging_/test_eval_loop_logging.py
  tests/trainer/logging_/test_train_loop_logging.py
  tests/trainer/optimization/test_manual_optimization.py

CHANGELOG.md

Lines changed: 3 additions & 0 deletions

@@ -448,6 +448,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed a redundant warning with `ModelCheckpoint(monitor=None)` callback ([#9875](https://github.com/PyTorchLightning/pytorch-lightning/pull/9875))


+- Remove `epoch` from `trainer.logged_metrics` ([#9904](https://github.com/PyTorchLightning/pytorch-lightning/pull/9904))
+
+
 ### Fixed

docs/source/common/trainer.rst

Lines changed: 1 addition & 1 deletion

@@ -1706,7 +1706,7 @@ The metrics sent to the logger (visualizer).
 .. code-block:: python

     def training_step(self, batch, batch_idx):
-        self.log("a_val", 2, log=True)
+        self.log("a_val", 2, logger=True)


     logged_metrics = trainer.logged_metrics

pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py

Lines changed: 3 additions & 2 deletions

@@ -103,6 +103,9 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None:

         if step is None:
             step = scalar_metrics.pop("step", None)
+
+        self._logged_metrics.update(scalar_metrics)
+
         if step is None:
             # added metrics for convenience
             scalar_metrics.setdefault("epoch", self.trainer.current_epoch)
@@ -112,8 +115,6 @@ def log_metrics(self, metrics: _OUT_DICT, step: Optional[int] = None) -> None:
             self.trainer.logger.agg_and_log_metrics(scalar_metrics, step=step)
             self.trainer.logger.save()

-        self._logged_metrics.update(scalar_metrics)
-
    """
    Evaluation metric updates
    """

tests/accelerators/test_multi_nodes_gpu.py

Lines changed: 1 addition & 1 deletion

@@ -109,7 +109,7 @@ def backward(self, loss, optimizer, optimizer_idx):
     trainer.fit(model)

     # make sure all the metrics are available for callbacks
-    assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch", "epoch"}
+    assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch"}

     # we don't want to enable val metrics during steps because it is not something that users should do
     # on purpose DO NOT allow b_step... it's silly to monitor val step metrics

tests/loops/batch/test_truncated_bptt.py

Lines changed: 1 addition & 1 deletion

@@ -169,4 +169,4 @@ def training_step(self, *args, **kwargs):
         enable_checkpointing=False,
     )
     trainer.fit(model)
-    assert set(trainer.logged_metrics) == {"loss_step", "loss_epoch", "epoch"}
+    assert set(trainer.logged_metrics) == {"loss_step", "loss_epoch"}

tests/trainer/logging_/test_eval_loop_logging.py

Lines changed: 5 additions & 5 deletions

@@ -55,7 +55,7 @@ def validation_step(self, batch, batch_idx):
     )
     trainer.fit(model)

-    assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch", "epoch"}
+    assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch"}

     # we don't want to enable val metrics during steps because it is not something that users should do
     # on purpose DO NOT allow b_step... it's silly to monitor val step metrics
@@ -94,7 +94,7 @@ def validation_epoch_end(self, outputs):
     trainer.fit(model)

     # make sure all the metrics are available for loggers
-    assert set(trainer.logged_metrics) == {"epoch", "a", "b_step", "b_epoch", "c", "d_step", "d_epoch", "g"}
+    assert set(trainer.logged_metrics) == {"a", "b_step", "b_epoch", "c", "d_step", "d_epoch", "g"}

     assert not trainer.progress_bar_metrics

@@ -123,14 +123,14 @@ def validation_epoch_end(self, outputs):

     # assert the loggers received the expected number
     logged_metrics = set(trainer.logged_metrics)
-    assert logged_metrics == {"c", "d/e/f", "epoch"}
+    assert logged_metrics == {"c", "d/e/f"}

     pbar_metrics = set(trainer.progress_bar_metrics)
     assert pbar_metrics == {"c"}

     # make sure all the metrics are available for callbacks
     callback_metrics = set(trainer.callback_metrics)
-    assert callback_metrics == (logged_metrics | pbar_metrics) - {"epoch"}
+    assert callback_metrics == (logged_metrics | pbar_metrics)


 def test_eval_float_logging(tmpdir):
@@ -153,7 +153,7 @@ def validation_step(self, batch, batch_idx):
     )
     trainer.fit(model)

-    assert set(trainer.logged_metrics) == {"a", "epoch"}
+    assert set(trainer.logged_metrics) == {"a"}


 def test_eval_logging_auto_reduce(tmpdir):

tests/trainer/logging_/test_train_loop_logging.py

Lines changed: 7 additions & 7 deletions

@@ -84,12 +84,12 @@ def training_step(self, batch, batch_idx):
     trainer.fit(model)

     logged_metrics = set(trainer.logged_metrics)
-    assert logged_metrics == {"epoch", "default", "l_e", "l_s", "l_se_step", "l_se_epoch"}
+    assert logged_metrics == {"default", "l_e", "l_s", "l_se_step", "l_se_epoch"}

     pbar_metrics = set(trainer.progress_bar_metrics)
     assert pbar_metrics == {"p_e", "p_s", "p_se_step", "p_se_epoch"}

-    assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"p_se", "l_se"}) - {"epoch"}
+    assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"p_se", "l_se"})


 def test__training_step__epoch_end__log(tmpdir):
@@ -121,12 +121,12 @@ def training_epoch_end(self, outputs):
     trainer.fit(model)

     logged_metrics = set(trainer.logged_metrics)
-    assert logged_metrics == {"epoch", "a_step", "a_epoch", "b", "b1", "a1", "a2"}
+    assert logged_metrics == {"a_step", "a_epoch", "b", "b1", "a1", "a2"}

     pbar_metrics = set(trainer.progress_bar_metrics)
     assert pbar_metrics == {"b"}

-    assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a"}) - {"epoch"}
+    assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a"})


 @pytest.mark.parametrize(["batches", "log_interval", "max_epochs"], [(1, 1, 1), (64, 32, 2)])
@@ -162,12 +162,12 @@ def training_epoch_end(self, outputs):

     # make sure all the metrics are available for callbacks
     logged_metrics = set(trainer.logged_metrics)
-    assert logged_metrics == {"a_step", "a_epoch", "b_step", "b_epoch", "c", "d/e/f", "epoch"}
+    assert logged_metrics == {"a_step", "a_epoch", "b_step", "b_epoch", "c", "d/e/f"}

     pbar_metrics = set(trainer.progress_bar_metrics)
     assert pbar_metrics == {"c", "b_epoch", "b_step"}

-    assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a", "b"}) - {"epoch"}
+    assert set(trainer.callback_metrics) == (logged_metrics | pbar_metrics | {"a", "b"})


 @pytest.mark.parametrize(
@@ -237,7 +237,7 @@ def val_dataloader(self):
     )
     trainer.fit(model)

-    assert set(trainer.logged_metrics) == {"a_step", "a_epoch", "n_step", "n_epoch", "epoch"}
+    assert set(trainer.logged_metrics) == {"a_step", "a_epoch", "n_step", "n_epoch"}


 def test_log_works_in_train_callback(tmpdir):

tests/trainer/optimization/test_manual_optimization.py

Lines changed: 2 additions & 2 deletions

@@ -193,7 +193,7 @@ def training_epoch_end(self, outputs) -> None:
     with mock.patch.object(Accelerator, "backward", wraps=trainer.accelerator.backward) as bwd_mock:
         trainer.fit(model)
     assert bwd_mock.call_count == limit_train_batches * 3
-    assert set(trainer.logged_metrics) == {"a_step", "a_epoch", "epoch"}
+    assert set(trainer.logged_metrics) == {"a_step", "a_epoch"}


 @RunIf(min_gpus=1)
@@ -1055,7 +1055,7 @@ def configure_optimizers(self):

     trainer.fit(model)

-    assert set(trainer.logged_metrics) == {"epoch", "loss_d", "loss_g"}
+    assert set(trainer.logged_metrics) == {"loss_d", "loss_g"}
     assert set(trainer.progress_bar_metrics) == {"loss_d", "loss_g"}