Commit dd47518

Removed flush_logs_every_n_steps argument from Trainer (#13074)
Co-authored-by: Carlos Mocholí <[email protected]>
Parent: c5938f8

7 files changed: +13 lines, -58 lines

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -120,6 +120,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Removed
 
+- Removed the deprecated `flush_logs_every_n_steps` argument from the `Trainer` constructor ([#13074](https://github.com/PyTorchLightning/pytorch-lightning/pull/13074))
+
+
 - Removed the deprecated `process_position` argument from the `Trainer` constructor ([13071](https://github.com/PyTorchLightning/pytorch-lightning/pull/13071))
 
 
docs/source/common/trainer.rst

Lines changed: 0 additions & 24 deletions
@@ -695,30 +695,6 @@ impact to subsequent runs. These are the changes enabled:
 - Disables the Tuner.
 - If using the CLI, the configuration file is not saved.
 
-flush_logs_every_n_steps
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. warning:: ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.
-    Please configure flushing directly in the logger instead.
-
-.. raw:: html
-
-    <video width="50%" max-width="400px" controls
-    poster="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/thumb/flush_logs%E2%80%A8_every_n_steps.jpg"
-    src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/flush_logs_every_n_steps.mp4"></video>
-
-|
-
-Writes logs to disk this often.
-
-.. testcode::
-
-    # default used by the Trainer
-    trainer = Trainer(flush_logs_every_n_steps=100)
-
-See Also:
-    - :doc:`logging <../extensions/logging>`
-
 .. _gpus:
 
 gpus
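
The section removed above documented a step-based flushing interval set on the Trainer itself. Below is a rough migration sketch, not the project's official guidance: it assumes the CSV logger exposes its own flush_logs_every_n_steps parameter (the per-logger knob the deprecation message points users toward), which may vary by release.

# Hypothetical migration sketch: the step-based flushing interval now lives on a
# logger that supports it, not on the Trainer. Assumes CSVLogger accepts
# `flush_logs_every_n_steps` in this release.
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import CSVLogger

# Before (removed in this commit): trainer = Trainer(flush_logs_every_n_steps=100)
logger = CSVLogger(save_dir="logs/", flush_logs_every_n_steps=100)
trainer = Trainer(logger=logger)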

docs/source/visualize/logging_advanced.rst

Lines changed: 8 additions & 9 deletions
@@ -49,20 +49,19 @@ To change this behaviour, set the *log_every_n_steps* :class:`~pytorch_lightning
 Modify flushing frequency
 =========================
 
-Metrics are kept in memory for N steps to improve training efficiency. Every N steps, metrics flush to disk. To change the frequency of this flushing, use the *flush_logs_every_n_steps* Trainer argument.
+Some loggers keep logged metrics in memory for N steps and only periodically flush them to disk to improve training efficiency.
+Every logger handles this a bit differently. For example, here is how to fine-tune flushing for the TensorBoard logger:
 
 .. code-block:: python
 
-    # faster training, high memory
-    Trainer(flush_logs_every_n_steps=500)
+    # Default used by TensorBoard: Write to disk after 10 logging events or every two minutes
+    logger = TensorBoardLogger(..., max_queue=10, flush_secs=120)
 
-    # slower training, low memory
-    Trainer(flush_logs_every_n_steps=500)
+    # Faster training, more memory used
+    logger = TensorBoardLogger(..., max_queue=100)
 
-The higher *flush_logs_every_n_steps* is, the faster the model will train but the memory will build up until the next flush.
-The smaller *flush_logs_every_n_steps* is, the slower the model will train but memory will be kept to a minimum.
-
-TODO: chart
+    # Slower training, less memory used
+    logger = TensorBoardLogger(..., max_queue=1)
 
 
 ----
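
The replacement documentation above moves the flushing policy onto the logger object, which is then handed to the Trainer. A minimal usage sketch of that workflow, assuming (as the new docs state) that TensorBoardLogger forwards max_queue and flush_secs to the underlying SummaryWriter:

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger

# Flush buffered events to disk after 100 queued logging events or every
# 120 seconds, whichever comes first; the keyword arguments go to SummaryWriter.
logger = TensorBoardLogger(save_dir="logs/", max_queue=100, flush_secs=120)

# The Trainer no longer owns a flushing schedule; it uses the logger as configured.
trainer = Trainer(logger=logger, log_every_n_steps=50)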

pytorch_lightning/loops/epoch/training_epoch_loop.py

Lines changed: 1 addition & 3 deletions
@@ -528,9 +528,7 @@ def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool:
 
     def _save_loggers_on_train_batch_end(self) -> None:
         """Flushes loggers to disk."""
-        # this assumes that `batches_that_stepped` was increased before
-        should_flush = self._batches_that_stepped % self.trainer.flush_logs_every_n_steps == 0
-        if should_flush or self.trainer.should_stop:
+        if self.trainer.should_stop:
             for logger in self.trainer.loggers:
                 logger.save()
 
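
With the step-based check gone from the loop, periodic flushing is now entirely the logger's concern; the loop only forces a save when training is stopping. The snippet below is a framework-free sketch of that buffer-and-flush policy (illustrative only, not PyTorch Lightning's logger API): buffer rows in memory, flush every N events internally, and force a final flush when asked to save.

import json
from typing import Any, Dict, List


class BufferedMetricsWriter:
    """Illustrative sketch: keep metric rows in memory and flush every N events."""

    def __init__(self, path: str, flush_every_n: int = 100) -> None:
        self.path = path
        self.flush_every_n = flush_every_n
        self._buffer: List[Dict[str, Any]] = []

    def log_metrics(self, metrics: Dict[str, Any], step: int) -> None:
        # Buffering and the flush schedule are internal policy of the writer,
        # not something the training loop coordinates anymore.
        self._buffer.append({"step": step, **metrics})
        if len(self._buffer) >= self.flush_every_n:
            self.save()

    def save(self) -> None:
        # Force-write everything buffered so far (e.g. when training stops).
        if not self._buffer:
            return
        with open(self.path, "a") as f:
            for row in self._buffer:
                f.write(json.dumps(row) + "\n")
        self._buffer.clear()


writer = BufferedMetricsWriter("metrics.jsonl", flush_every_n=10)
for step in range(25):
    writer.log_metrics({"loss": 1.0 / (step + 1)}, step=step)
writer.save()  # final flush, mirroring what the loop now does only on should_stop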

pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py

Lines changed: 0 additions & 9 deletions
@@ -43,19 +43,10 @@ def __init__(self, trainer: "pl.Trainer") -> None:
     def on_trainer_init(
         self,
         logger: Union[bool, Logger, Iterable[Logger]],
-        flush_logs_every_n_steps: Optional[int],
         log_every_n_steps: int,
         move_metrics_to_cpu: bool,
     ) -> None:
         self.configure_logger(logger)
-        if flush_logs_every_n_steps is not None:
-            rank_zero_deprecation(
-                f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps})` is deprecated in v1.5 "
-                "and will be removed in v1.7. Please configure flushing in the logger instead."
-            )
-        else:
-            flush_logs_every_n_steps = 100  # original default parameter
-        self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
         self.trainer.log_every_n_steps = log_every_n_steps
         self.trainer.move_metrics_to_cpu = move_metrics_to_cpu
         for logger in self.trainer.loggers:

pytorch_lightning/trainer/trainer.py

Lines changed: 1 addition & 8 deletions
@@ -159,7 +159,6 @@ def __init__(
         limit_test_batches: Optional[Union[int, float]] = None,
         limit_predict_batches: Optional[Union[int, float]] = None,
         val_check_interval: Optional[Union[int, float]] = None,
-        flush_logs_every_n_steps: Optional[int] = None,
         log_every_n_steps: int = 50,
         accelerator: Optional[Union[str, Accelerator]] = None,
         strategy: Optional[Union[str, Strategy]] = None,
@@ -260,12 +259,6 @@ def __init__(
                 of train, val and test to find any bugs (ie: a sort of unit test).
                 Default: ``False``.
 
-            flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
-
-                .. deprecated:: v1.5
-                    ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.
-                    Please configure flushing directly in the logger instead.
-
             gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node
                 Default: ``None``.
 
@@ -555,7 +548,7 @@ def __init__(
 
         # init logger flags
         self._loggers: List[Logger]
-        self._logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)
+        self._logger_connector.on_trainer_init(logger, log_every_n_steps, move_metrics_to_cpu)
 
         # init debugging flags
         self.val_check_interval: Union[int, float]

tests/deprecated_api/test_remove_1-7.py

Lines changed: 0 additions & 5 deletions
@@ -61,11 +61,6 @@ def on_keyboard_interrupt(self, trainer, pl_module):
     trainer.fit(model)
 
 
-def test_v1_7_0_flush_logs_every_n_steps_trainer_constructor(tmpdir):
-    with pytest.deprecated_call(match=r"Setting `Trainer\(flush_logs_every_n_steps=10\)` is deprecated in v1.5"):
-        _ = Trainer(flush_logs_every_n_steps=10)
-
-
 class BoringCallbackDDPSpawnModel(BoringModel):
     def add_to_queue(self, queue):
         ...
