Commit 5fa4296

address comments
1 parent 73a25b5 commit 5fa4296

File tree

3 files changed (+9, -14 lines changed)

pytorch_lightning/loggers/tensorboard.py
pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
pytorch_lightning/trainer/trainer.py

pytorch_lightning/loggers/tensorboard.py

Lines changed: 7 additions & 12 deletions
@@ -64,19 +64,19 @@ class TensorBoardLogger(LightningLoggerBase):
             directory for existing versions, then automatically assigns the next available version.
             If it is a string then it is used as the run-specific subdirectory name,
             otherwise ``'version_${version}'`` is used.
-        sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
-            then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None`` in which
-            logs are saved in ``/save_dir/version/``.
         log_graph: Adds the computational graph to tensorboard. This requires that
             the user has defined the `self.example_input_array` attribute in their
             model.
         default_hp_metric: Enables a placeholder metric with key `hp_metric` when `log_hyperparams` is
             called without a metric (otherwise calls to log_hyperparams without a metric are ignored).
         prefix: A string to put at the beginning of metric keys.
-        sub_dir: Optional subdirectory to store logs.
-        flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
-        \**kwargs: Additional arguments like `comment`, `filename_suffix`, etc. used by
-            :class:`SummaryWriter` can be passed as keyword arguments in this logger.
+        sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
+            then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None`` in which
+            logs are saved in ``/save_dir/version/``.
+        \**kwargs: Additional arguments used by :class:`SummaryWriter` can be passed as keyword
+            arguments in this logger. To automatically flush to disk, `max_queue` sets the size
+            of the queue for pending logs before flushing. `flush_secs` determines how many seconds
+            elapses before flushing.

     """
     NAME_HPARAMS_FILE = "hparams.yaml"
@@ -91,7 +91,6 @@ def __init__(
         default_hp_metric: bool = True,
         prefix: str = "",
         sub_dir: Optional[str] = None,
-        flush_logs_every_n_steps: int = 100,
         **kwargs,
     ):
         super().__init__()
@@ -103,7 +102,6 @@ def __init__(
         self._default_hp_metric = default_hp_metric
         self._prefix = prefix
         self._fs = get_filesystem(save_dir)
-        self._flush_logs_every_n_steps = flush_logs_every_n_steps

         self._experiment = None
         self.hparams = {}
@@ -234,9 +232,6 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) ->
                     m = f"\n you tried to log {v} which is not currently supported. Try a dict or a scalar/tensor."
                     raise ValueError(m) from ex

-        if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0:
-            self.experiment.flush()
-
     @rank_zero_only
     def log_graph(self, model: "pl.LightningModule", input_array=None):
         if self._log_graph:

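With `flush_logs_every_n_steps` removed from the logger, flushing is controlled by the `SummaryWriter` arguments that `TensorBoardLogger` forwards through `**kwargs`, as the updated docstring describes. A minimal sketch of the resulting usage, assuming the v1.5-era API shown in the hunks above (the directory names, metric name, and the chosen queue/interval values are illustrative):

from pytorch_lightning.loggers import TensorBoardLogger

# Flushing is tuned via SummaryWriter's own parameters, passed through the logger's **kwargs.
logger = TensorBoardLogger(
    save_dir="lightning_logs",  # illustrative save directory
    sub_dir="train",            # per the docstring, logs land in /save_dir/version/sub_dir/
    max_queue=10,               # pending events buffered before a flush is forced
    flush_secs=30,              # flush to disk at least every 30 seconds
)

# Scalars are written through the underlying SummaryWriter; no logger-level flush counter remains.
logger.log_metrics({"train_loss": 0.25}, step=0)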
pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ def on_trainer_init(
         if flush_logs_every_n_steps is not None:
             rank_zero_deprecation(
                 f"Setting `Trainer(flush_logs_every_n_steps={flush_logs_every_n_steps})` is deprecated in v1.5 "
-                "and will be removed in v1.7. Please pass `flush_logs_every_n_steps` to the logger instead."
+                "and will be removed in v1.7. Please configure flushing in the logger instead."
             )
         flush_logs_every_n_steps = 100  # original default parameter
         self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps

pytorch_lightning/trainer/trainer.py

Lines changed: 1 addition & 1 deletion
@@ -216,7 +216,7 @@ def __init__(

             .. deprecated:: v1.5
                 ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.
-                Please pass ``flush_logs_every_n_steps`` directly to the Logger instead.
+                Please configure flushing directly in the logger instead.

         gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node

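For migration, a short before/after sketch of the deprecation documented above, assuming the v1.5 `Trainer` and `TensorBoardLogger` signatures (the `flush_secs` and `flush_logs_every_n_steps` values are illustrative):

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger

# Deprecated in v1.5, removed in v1.7: emits the deprecation message shown in the hunks above.
trainer = Trainer(flush_logs_every_n_steps=100)

# Preferred: configure flushing on the logger itself, via the SummaryWriter
# keyword arguments it forwards (e.g. flush_secs, max_queue).
trainer = Trainer(logger=TensorBoardLogger("lightning_logs", flush_secs=30))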