@@ -64,19 +64,19 @@ class TensorBoardLogger(LightningLoggerBase):
6464 directory for existing versions, then automatically assigns the next available version.
6565 If it is a string then it is used as the run-specific subdirectory name,
6666 otherwise ``'version_${version}'`` is used.
67- sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
68- then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None`` in which
69- logs are saved in ``/save_dir/version/``.
7067 log_graph: Adds the computational graph to tensorboard. This requires that
7168 the user has defined the `self.example_input_array` attribute in their
7269 model.
7370 default_hp_metric: Enables a placeholder metric with key `hp_metric` when `log_hyperparams` is
7471 called without a metric (otherwise calls to log_hyperparams without a metric are ignored).
7572 prefix: A string to put at the beginning of metric keys.
76- sub_dir: Optional subdirectory to store logs.
77- flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
78- \**kwargs: Additional arguments like `comment`, `filename_suffix`, etc. used by
79- :class:`SummaryWriter` can be passed as keyword arguments in this logger.
73+ sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
74+ then logs are saved in ``/save_dir/version/sub_dir/``. Defaults to ``None``, in which
75+ case logs are saved in ``/save_dir/version/``.
76+ \**kwargs: Additional arguments used by :class:`SummaryWriter` can be passed as keyword
77+ arguments to this logger. To automatically flush to disk, `max_queue` sets the size
78+ of the queue for pending logs before flushing, and `flush_secs` determines how many
79+ seconds elapse between flushes.
8080
8181 """
8282 NAME_HPARAMS_FILE = "hparams.yaml"
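
As a quick illustration of the kwargs-based flushing control this change documents, the snippet below constructs the logger with `max_queue` and `flush_secs` forwarded to the underlying `SummaryWriter` (the values are illustrative, not defaults):

    from pytorch_lightning.loggers import TensorBoardLogger

    # Flush buffered events once 20 items are queued, or every 30 seconds,
    # whichever comes first; both kwargs are forwarded to SummaryWriter.
    logger = TensorBoardLogger(save_dir="logs/", max_queue=20, flush_secs=30)
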
@@ -91,7 +91,6 @@ def __init__(
9191 default_hp_metric: bool = True,
9292 prefix: str = "",
9393 sub_dir: Optional[str] = None,
94- flush_logs_every_n_steps: int = 100,
9594 **kwargs,
9695 ):
9796 super().__init__()
@@ -103,7 +102,6 @@ def __init__(
103102 self._default_hp_metric = default_hp_metric
104103 self._prefix = prefix
105104 self._fs = get_filesystem(save_dir)
106- self._flush_logs_every_n_steps = flush_logs_every_n_steps
107105
108106 self._experiment = None
109107 self.hparams = {}
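
For context, the retained `**kwargs` only take effect when the writer is actually created; a simplified sketch of that path (not the exact implementation; `self._kwargs` stands in for wherever the logger stores its constructor kwargs) looks like:

    from torch.utils.tensorboard import SummaryWriter

    @property
    def experiment(self) -> SummaryWriter:
        # Lazily create the writer; max_queue / flush_secs (and any other
        # SummaryWriter kwargs) are applied here.
        if self._experiment is None:
            self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs)
        return self._experiment
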
@@ -234,9 +232,6 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) ->
234232 m = f"\n you tried to log {v} which is not currently supported. Try a dict or a scalar/tensor."
235233 raise ValueError(m) from ex
236234
237- if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0:
238-     self.experiment.flush()
239-
240235 @rank_zero_only
241236 def log_graph(self, model: "pl.LightningModule", input_array=None):
242237     if self._log_graph:
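
With the step-counted flush removed from `log_metrics`, callers who still need a deterministic flush point can invoke the writer directly; for example (assuming a configured `Trainer` named `trainer`):

    # SummaryWriter.flush() writes any pending events to disk immediately.
    trainer.logger.experiment.flush()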