From 12a7fc5f9915663db2a265b18c2d4a489f99d6a8 Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Tue, 7 Sep 2021 16:00:48 -0700
Subject: [PATCH 01/11] deprecate flush_logs_every_n_steps on Trainer

---
 CHANGELOG.md                                         |  2 ++
 pytorch_lightning/loggers/csv_logs.py                |  5 +++++
 .../connectors/logger_connector/logger_connector.py  |  8 +++++++-
 pytorch_lightning/trainer/trainer.py                 |  6 +++++-
 tests/deprecated_api/test_remove_1-7.py              |  5 +++++
 tests/loggers/test_csv.py                            | 12 ++++++++++++
 6 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4b8340849c9a4..e002b8aacd15e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1579,6 +1579,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Deprecated passing `ModelCheckpoint` instance to `checkpoint_callback` Trainer argument ([#4336](https://github.com/PyTorchLightning/pytorch-lightning/pull/4336))
 
+- Deprecated passing `flush_logs_every_n_steps` as a Trainer argument, instead pass it to the logger init ([#todo](todo))
+
 ### Fixed
 
 - Disable saving checkpoints if not trained ([#4372](https://github.com/PyTorchLightning/pytorch-lightning/pull/4372))
diff --git a/pytorch_lightning/loggers/csv_logs.py b/pytorch_lightning/loggers/csv_logs.py
index 2d0a2a3edb8ca..70721a67afb02 100644
--- a/pytorch_lightning/loggers/csv_logs.py
+++ b/pytorch_lightning/loggers/csv_logs.py
@@ -118,6 +118,7 @@ class CSVLogger(LightningLoggerBase):
         version: Experiment version. If version is not specified the logger inspects the save
             directory for existing versions, then automatically assigns the next available version.
         prefix: A string to put at the beginning of metric keys.
+        flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
""" LOGGER_JOIN_CHAR = "-" @@ -128,6 +129,7 @@ def __init__( name: Optional[str] = "default", version: Optional[Union[int, str]] = None, prefix: str = "", + flush_logs_every_n_steps: int = 100, ): super().__init__() self._save_dir = save_dir @@ -135,6 +137,7 @@ def __init__( self._version = version self._prefix = prefix self._experiment = None + self._flush_logs_every_n_steps = flush_logs_every_n_steps @property def root_dir(self) -> str: @@ -197,6 +200,8 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: metrics = self._add_prefix(metrics) self.experiment.log_metrics(metrics, step) + if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0: + self.save() @rank_zero_only def save(self) -> None: diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index e8f71dc80f5b3..20e1b5a98ff53 100644 --- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -23,6 +23,7 @@ from pytorch_lightning.utilities import DeviceType, memory from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device from pytorch_lightning.utilities.metrics import metrics_to_scalars +from pytorch_lightning.utilities.warnings import rank_zero_deprecation class LoggerConnector: @@ -44,11 +45,16 @@ def __init__(self, trainer: "pl.Trainer", log_gpu_memory: Optional[str] = None) def on_trainer_init( self, logger: Union[bool, LightningLoggerBase, Iterable[LightningLoggerBase]], - flush_logs_every_n_steps: int, + flush_logs_every_n_steps: Optional[int], log_every_n_steps: int, move_metrics_to_cpu: bool, ) -> None: self.configure_logger(logger) + if flush_logs_every_n_steps is not None: + rank_zero_deprecation( + f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps}) is deprecated in v1.5 and will be removed in " + "v1.7. Please pass `flush_logs_every_n_steps` to the logger instead." + ) self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps self.trainer.log_every_n_steps = log_every_n_steps self.trainer.move_metrics_to_cpu = move_metrics_to_cpu diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 34fbdba86cd81..9e16250622c3a 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -130,7 +130,7 @@ def __init__( limit_test_batches: Union[int, float] = 1.0, limit_predict_batches: Union[int, float] = 1.0, val_check_interval: Union[int, float] = 1.0, - flush_logs_every_n_steps: int = 100, + flush_logs_every_n_steps: Optional[int] = None, log_every_n_steps: int = 50, accelerator: Optional[Union[str, Accelerator]] = None, sync_batchnorm: bool = False, @@ -213,6 +213,10 @@ def __init__( flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps). + .. deprecated:: v1.5 + ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7. + Please pass ``flush_logs_every_n_steps`` directly to the Logger instead. + gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node gradient_clip_val: The value at which to clip gradients. 
            Passing ``gradient_clip_val=0`` disables gradient clipping.
diff --git a/tests/deprecated_api/test_remove_1-7.py b/tests/deprecated_api/test_remove_1-7.py
index 5dffd8501f241..5d7d7db761233 100644
--- a/tests/deprecated_api/test_remove_1-7.py
+++ b/tests/deprecated_api/test_remove_1-7.py
@@ -196,6 +196,11 @@ def test_v1_7_0_process_position_trainer_constructor(tmpdir):
         _ = Trainer(process_position=5)
 
 
+def test_v1_7_0_flush_logs_every_n_steps_trainer_constructor(tmpdir):
+    with pytest.deprecated_call(match=r"Setting `Trainer\(flush_logs_every_n_steps=10\)` is deprecated in v1.5"):
+        _ = Trainer(flush_logs_every_n_steps=10)
+
+
 class BoringCallbackDDPSpawnModel(BoringModel):
     def __init__(self):
         super().__init__()
diff --git a/tests/loggers/test_csv.py b/tests/loggers/test_csv.py
index 2640ede1bf39f..1ac647bc267f9 100644
--- a/tests/loggers/test_csv.py
+++ b/tests/loggers/test_csv.py
@@ -20,6 +20,7 @@
 from pytorch_lightning.core.saving import load_hparams_from_yaml
 from pytorch_lightning.loggers import CSVLogger
 from pytorch_lightning.loggers.csv_logs import ExperimentWriter
+from unittest.mock import MagicMock
 
 
 def test_file_logger_automatic_versioning(tmpdir):
@@ -103,3 +104,14 @@ def test_file_logger_log_hyperparams(tmpdir):
     path_yaml = os.path.join(logger.log_dir, ExperimentWriter.NAME_HPARAMS_FILE)
     params = load_hparams_from_yaml(path_yaml)
     assert all(n in params for n in hparams)
+
+
+def test_flush_n_steps(tmpdir):
+    logger = CSVLogger(tmpdir, flush_logs_every_n_steps=2)
+    metrics = {"float": 0.3, "int": 1, "FloatTensor": torch.tensor(0.1), "IntTensor": torch.tensor(1)}
+    logger.save = MagicMock()
+    logger.log_metrics(metrics, step=0)
+
+    logger.save.assert_not_called()
+    logger.log_metrics(metrics, step=1)
+    logger.save.assert_called_once()
From 44363ec0e819dfc4e32a209cb934f7204fe8785f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 7 Sep 2021 23:04:09 +0000
Subject: [PATCH 02/11] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 tests/loggers/test_csv.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/loggers/test_csv.py b/tests/loggers/test_csv.py
index 1ac647bc267f9..1985bbe7ef25d 100644
--- a/tests/loggers/test_csv.py
+++ b/tests/loggers/test_csv.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 import os
 from argparse import Namespace
+from unittest.mock import MagicMock
 
 import pytest
 import torch
@@ -20,7 +21,6 @@
 from pytorch_lightning.core.saving import load_hparams_from_yaml
 from pytorch_lightning.loggers import CSVLogger
 from pytorch_lightning.loggers.csv_logs import ExperimentWriter
-from unittest.mock import MagicMock
 
 
 def test_file_logger_automatic_versioning(tmpdir):
From 211369fc73238cd5be3eea07cbd69e755897da82 Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Tue, 7 Sep 2021 16:06:06 -0700
Subject: [PATCH 03/11] add back original parameter to flush_logs_every_n_steps

---
 .../trainer/connectors/logger_connector/logger_connector.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
index 20e1b5a98ff53..c73a3cda47409 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -55,6 +55,7 @@ def on_trainer_init(
                 f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps}) is deprecated in v1.5 and will be removed in "
                 "v1.7. Please pass `flush_logs_every_n_steps` to the logger instead."
             )
+            flush_logs_every_n_steps = 100  # original default parameter
         self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
         self.trainer.log_every_n_steps = log_every_n_steps
         self.trainer.move_metrics_to_cpu = move_metrics_to_cpu
From 1a17f3b32e5169d9a425346aa4f6539b9715fd42 Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Tue, 7 Sep 2021 16:10:29 -0700
Subject: [PATCH 04/11] make pep8 happy

---
 .../trainer/connectors/logger_connector/logger_connector.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
index c73a3cda47409..6cec7d38e9d03 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -52,8 +52,8 @@ def on_trainer_init(
         self.configure_logger(logger)
         if flush_logs_every_n_steps is not None:
             rank_zero_deprecation(
-                f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps}) is deprecated in v1.5 and will be removed in "
-                "v1.7. Please pass `flush_logs_every_n_steps` to the logger instead."
+                f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps}) is deprecated in v1.5 "
+                "and will be removed in v1.7. Please pass `flush_logs_every_n_steps` to the logger instead."
             )
             flush_logs_every_n_steps = 100  # original default parameter
         self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
From 811f396dd4a5200c3e8c91c5561adab7d20ac3c5 Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Tue, 7 Sep 2021 17:14:31 -0700
Subject: [PATCH 05/11] fix test

---
 .../trainer/connectors/logger_connector/logger_connector.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
index 6cec7d38e9d03..660fc722386ca 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -49,10 +49,12 @@ def on_trainer_init(
         log_every_n_steps: int,
         move_metrics_to_cpu: bool,
     ) -> None:
+        print("asdfasdf", flush_logs_every_n_steps)
         self.configure_logger(logger)
         if flush_logs_every_n_steps is not None:
+            print("is not none")
             rank_zero_deprecation(
-                f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps}) is deprecated in v1.5 "
+                f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps})` is deprecated in v1.5 "
                 "and will be removed in v1.7. Please pass `flush_logs_every_n_steps` to the logger instead."
             )
             flush_logs_every_n_steps = 100  # original default parameter
From 84affa7b50f3b4bbf2d82b220a95db3c42a4e385 Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Tue, 7 Sep 2021 19:50:06 -0700
Subject: [PATCH 06/11] remove print statements

---
 .../trainer/connectors/logger_connector/logger_connector.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
index 660fc722386ca..9f7ff58b2d80d 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -49,10 +49,8 @@ def on_trainer_init(
         log_every_n_steps: int,
         move_metrics_to_cpu: bool,
     ) -> None:
-        print("asdfasdf", flush_logs_every_n_steps)
         self.configure_logger(logger)
         if flush_logs_every_n_steps is not None:
-            print("is not none")
             rank_zero_deprecation(
                 f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps})` is deprecated in v1.5 "
                 "and will be removed in v1.7. Please pass `flush_logs_every_n_steps` to the logger instead."
             )
From 19d4533172b8310e710ac5d3a13356930543db66 Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Tue, 7 Sep 2021 20:26:04 -0700
Subject: [PATCH 07/11] update changelog

---
 CHANGELOG.md | 5 +++--
 _notebooks   | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e002b8aacd15e..e8b948b26d15b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -215,6 +215,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Deprecated passing `process_position` to the `Trainer` constructor in favor of adding the `ProgressBar` callback with `process_position` directly to the list of callbacks ([#9222](https://github.com/PyTorchLightning/pytorch-lightning/pull/9222))
 
+- Deprecated passing `flush_logs_every_n_steps` as a Trainer argument, instead pass it to the logger init if supported ([#9366](https://github.com/PyTorchLightning/pytorch-lightning/pull/9366))
+
+
 ### Removed
 
@@ -1579,8 +1582,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Deprecated passing `ModelCheckpoint` instance to `checkpoint_callback` Trainer argument ([#4336](https://github.com/PyTorchLightning/pytorch-lightning/pull/4336))
 
-- Deprecated passing `flush_logs_every_n_steps` as a Trainer argument, instead pass it to the logger init ([#todo](todo))
-
 ### Fixed
 
 - Disable saving checkpoints if not trained ([#4372](https://github.com/PyTorchLightning/pytorch-lightning/pull/4372))
diff --git a/_notebooks b/_notebooks
index 4fe3370eac9c4..8d77c79b66179 160000
--- a/_notebooks
+++ b/_notebooks
@@ -1 +1 @@
-Subproject commit 4fe3370eac9c448eceb36b835ff49ca30de7d404
+Subproject commit 8d77c79b661795537bc455344934fc291c538f8c
From 691b2dd7ce4a655225e6b03b3ee4eefe82aef77e Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Thu, 9 Sep 2021 15:23:26 -0700
Subject: [PATCH 08/11] add flush every n steps functionality to tb

---
 pytorch_lightning/loggers/csv_logs.py    | 2 +-
 pytorch_lightning/loggers/tensorboard.py | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/loggers/csv_logs.py b/pytorch_lightning/loggers/csv_logs.py
index 70721a67afb02..77adfe551f72d 100644
--- a/pytorch_lightning/loggers/csv_logs.py
+++ b/pytorch_lightning/loggers/csv_logs.py
@@ -157,7 +157,7 @@ def log_dir(self) -> str:
         By default, it is named ``'version_${self.version}'`` but it can be overridden by passing
         a string value for the constructor's version parameter instead of ``None`` or an int.
         """
-        # create a pseudo standard path ala test-tube
+        # create a pseudo standard path
         version = self.version if isinstance(self.version, str) else f"version_{self.version}"
         log_dir = os.path.join(self.root_dir, version)
         return log_dir
diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index 6abc809bc65d1..b688436ddab22 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -73,6 +73,8 @@ class TensorBoardLogger(LightningLoggerBase):
         default_hp_metric: Enables a placeholder metric with key `hp_metric` when `log_hyperparams` is
             called without a metric (otherwise calls to log_hyperparams without a metric are ignored).
         prefix: A string to put at the beginning of metric keys.
+        sub_dir: Optional subdirectory to store logs.
+        flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
         \**kwargs: Additional arguments like `comment`, `filename_suffix`, etc. used by
             :class:`SummaryWriter` can be passed as keyword arguments in this logger.
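
Both loggers now share the same flush heuristic: a modulo check against the zero-indexed global step, so with `flush_logs_every_n_steps=2` the first flush fires on step 1, which is exactly what `test_flush_n_steps` asserts above. A minimal standalone sketch of the pattern follows; `BufferedLogger` is a hypothetical stand-in, not the library's API:

    from typing import Any, Dict, List, Optional

    class BufferedLogger:
        """Hypothetical stand-in for a logger that buffers metrics in memory."""

        def __init__(self, flush_logs_every_n_steps: int = 100) -> None:
            self._flush_logs_every_n_steps = flush_logs_every_n_steps
            self._buffer: List[Dict[str, Any]] = []

        def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
            self._buffer.append({"step": step, **metrics})
            # Steps are zero-indexed, so `step + 1` makes the N-th call flush:
            # with flush_logs_every_n_steps=2, step 0 buffers and step 1 flushes.
            if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0:
                self.save()

        def save(self) -> None:
            self._buffer.clear()  # stands in for writing the buffered rows to disk

The `step is not None` guard matters because `log_metrics` may be called without a step, and `None + 1` would raise a `TypeError`.
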
@@ -89,6 +91,7 @@ def __init__(
         default_hp_metric: bool = True,
         prefix: str = "",
         sub_dir: Optional[str] = None,
+        flush_logs_every_n_steps: int = 100,
         **kwargs,
     ):
         super().__init__()
@@ -100,6 +103,7 @@
         self._default_hp_metric = default_hp_metric
         self._prefix = prefix
         self._fs = get_filesystem(save_dir)
+        self._flush_logs_every_n_steps = flush_logs_every_n_steps
         self._experiment = None
         self.hparams = {}
@@ -230,6 +234,9 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) ->
                 m = f"\n you tried to log {v} which is not currently supported. Try a dict or a scalar/tensor."
                 raise ValueError(m) from ex
 
+        if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0:
+            self.experiment.flush()
+
     @rank_zero_only
     def log_graph(self, model: "pl.LightningModule", input_array=None):
         if self._log_graph:
From 868494b797999c304c70ee595406c11df6f1a4f8 Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Thu, 9 Sep 2021 15:47:31 -0700
Subject: [PATCH 09/11] revert changes to _notebooks

---
 _notebooks | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_notebooks b/_notebooks
index 8d77c79b66179..4fe3370eac9c4 160000
--- a/_notebooks
+++ b/_notebooks
@@ -1 +1 @@
-Subproject commit 8d77c79b661795537bc455344934fc291c538f8c
+Subproject commit 4fe3370eac9c448eceb36b835ff49ca30de7d404
From 72de17f952b53c4bbe6da45e04652bda018a623e Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Thu, 9 Sep 2021 23:18:07 -0700
Subject: [PATCH 10/11] address comments

---
 pytorch_lightning/loggers/tensorboard.py            | 19 +++++++------------
 .../logger_connector/logger_connector.py            |  2 +-
 pytorch_lightning/trainer/trainer.py                |  2 +-
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index b688436ddab22..f26fc75ac58db 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -64,19 +64,19 @@ class TensorBoardLogger(LightningLoggerBase):
             directory for existing versions, then automatically assigns the next available version.
             If it is a string then it is used as the run-specific subdirectory name,
             otherwise ``'version_${version}'`` is used.
-        sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
-            then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None`` in which
-            logs are saved in ``/save_dir/version/``.
         log_graph: Adds the computational graph to tensorboard. This requires that
             the user has defined the `self.example_input_array` attribute in their model.
         default_hp_metric: Enables a placeholder metric with key `hp_metric` when `log_hyperparams` is
             called without a metric (otherwise calls to log_hyperparams without a metric are ignored).
         prefix: A string to put at the beginning of metric keys.
-        sub_dir: Optional subdirectory to store logs.
-        flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
-        \**kwargs: Additional arguments like `comment`, `filename_suffix`, etc. used by
-            :class:`SummaryWriter` can be passed as keyword arguments in this logger.
+        sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
+            then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None`` in which
+            logs are saved in ``/save_dir/version/``.
+        \**kwargs: Additional arguments used by :class:`SummaryWriter` can be passed as keyword
+            arguments in this logger. To automatically flush to disk, `max_queue` sets the size
+            of the queue for pending logs before flushing. `flush_secs` determines how many seconds
+            elapses before flushing.
     """

    NAME_HPARAMS_FILE = "hparams.yaml"
@@ -91,7 +91,6 @@ def __init__(
         default_hp_metric: bool = True,
         prefix: str = "",
         sub_dir: Optional[str] = None,
-        flush_logs_every_n_steps: int = 100,
         **kwargs,
     ):
         super().__init__()
@@ -103,7 +102,6 @@
         self._default_hp_metric = default_hp_metric
         self._prefix = prefix
         self._fs = get_filesystem(save_dir)
-        self._flush_logs_every_n_steps = flush_logs_every_n_steps
         self._experiment = None
         self.hparams = {}
@@ -234,9 +232,6 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) ->
                 m = f"\n you tried to log {v} which is not currently supported. Try a dict or a scalar/tensor."
                 raise ValueError(m) from ex
 
-        if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0:
-            self.experiment.flush()
-
     @rank_zero_only
     def log_graph(self, model: "pl.LightningModule", input_array=None):
         if self._log_graph:
diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
index 9f7ff58b2d80d..02a053e7d140e 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -53,7 +53,7 @@ def on_trainer_init(
         if flush_logs_every_n_steps is not None:
             rank_zero_deprecation(
                 f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps})` is deprecated in v1.5 "
-                "and will be removed in v1.7. Please pass `flush_logs_every_n_steps` to the logger instead."
+                "and will be removed in v1.7. Please configure flushing in the logger instead."
             )
             flush_logs_every_n_steps = 100  # original default parameter
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 9e16250622c3a..30bb39844ec18 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -215,7 +215,7 @@ def __init__(
             .. deprecated:: v1.5
                 ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.
-                Please pass ``flush_logs_every_n_steps`` directly to the Logger instead.
+                Please configure flushing directly in the logger instead.
 
         gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node
From e619ce191ee5b2e95f703310f83278e6876b7f7f Mon Sep 17 00:00:00 2001
From: edward-io <53842584+edward-io@users.noreply.github.com>
Date: Mon, 13 Sep 2021 20:16:45 -0700
Subject: [PATCH 11/11] address comments

---
 .../trainer/connectors/logger_connector/logger_connector.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
index 02a053e7d140e..c3356c1392b37 100644
--- a/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
+++ b/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -55,7 +55,8 @@ def on_trainer_init(
                 f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps})` is deprecated in v1.5 "
                 "and will be removed in v1.7. Please configure flushing in the logger instead."
             )
-            flush_logs_every_n_steps = 100  # original default parameter
+        else:
+            flush_logs_every_n_steps = 100  # original default parameter
         self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
         self.trainer.log_every_n_steps = log_every_n_steps
         self.trainer.move_metrics_to_cpu = move_metrics_to_cpu
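
Taken together, the series leaves users with the following migration path. Note that after PATCH 11, a value passed to `Trainer` is still honored (with a deprecation warning); only when nothing is passed does the connector fall back to the old default of 100. A minimal sketch; the save directory and flush values below are illustrative:

    from pytorch_lightning import Trainer
    from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger

    # Before (deprecated in v1.5, removed in v1.7):
    # trainer = Trainer(flush_logs_every_n_steps=100)

    # After: configure flushing on the logger itself.
    # CSVLogger accepts the argument directly (added in PATCH 01):
    csv_logger = CSVLogger("logs/", flush_logs_every_n_steps=100)

    # TensorBoardLogger has no such argument after PATCH 10; it forwards
    # `max_queue` and `flush_secs` to torch.utils.tensorboard.SummaryWriter,
    # which handles flushing on its own schedule:
    tb_logger = TensorBoardLogger("logs/", max_queue=10, flush_secs=120)

    trainer = Trainer(logger=[csv_logger, tb_logger])

Dropping the explicit flush from `TensorBoardLogger` (PATCH 10) also explains the asymmetry in the final docstrings: CSVLogger documents `flush_logs_every_n_steps`, while TensorBoardLogger points at `SummaryWriter`'s own queueing parameters instead.
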