|
25 | 25 | from torch.nn.parallel.distributed import DistributedDataParallel |
26 | 26 |
|
27 | 27 | import pytorch_lightning as pl |
28 | | -from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger |
29 | 28 | from pytorch_lightning.overrides import LightningDistributedModule |
30 | 29 | from pytorch_lightning.overrides.distributed import prepare_for_backward |
31 | 30 | from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment |
@@ -147,17 +146,14 @@ def get_mp_spawn_kwargs(self, trainer: Optional["pl.Trainer"] = None) -> Dict[st |
147 | 146 | return {"nprocs": self.num_processes} |
148 | 147 |
|
149 | 148 | def start_training(self, trainer: "pl.Trainer") -> None: |
150 | | - self._clean_logger(trainer) |
151 | 149 | self.spawn(self.new_process, trainer, self.mp_queue, return_result=False) |
152 | 150 | # reset optimizers, since main process is never used for training and thus does not have a valid optim state |
153 | 151 | trainer.optimizers = [] |
154 | 152 |
|
155 | 153 | def start_evaluating(self, trainer: "pl.Trainer") -> None: |
156 | | - self._clean_logger(trainer) |
157 | 154 | self.spawn(self.new_process, trainer, self.mp_queue, return_result=False) |
158 | 155 |
|
159 | 156 | def start_predicting(self, trainer: "pl.Trainer") -> None: |
160 | | - self._clean_logger(trainer) |
161 | 157 | self.spawn(self.new_process, trainer, self.mp_queue, return_result=False) |
162 | 158 |
|
163 | 159 | def spawn(self, function: Callable, *args: Any, return_result: bool = True, **kwargs: Any) -> Optional[Any]: |
@@ -418,16 +414,3 @@ def teardown(self) -> None: |
418 | 414 | self.lightning_module.cpu() |
419 | 415 | # clean up memory |
420 | 416 | torch.cuda.empty_cache() |
421 | | - |
422 | | - @staticmethod |
423 | | - def _clean_logger(trainer: "pl.Trainer") -> None: |
424 | | - loggers = trainer.logger._logger_iterable if isinstance(trainer.logger, LoggerCollection) else [trainer.logger] |
425 | | - for logger in loggers: |
426 | | - if isinstance(logger, TensorBoardLogger) and logger._experiment is not None: |
427 | | - rank_zero_warn( |
428 | | - "When using `ddp_spawn`, the `TensorBoardLogger` experiment should be `None`. Setting it to `None`." |
429 | | - ) |
430 | | - # the experiment class of `TensorBoard` holds a multiprocessing queue which can make ours hang. |
431 | | - # we want to make sure these are closed before we spawn our own threads. |
432 | | - # assuming nothing else references the experiment object, python should instantly `__del__` it. |
433 | | - logger._experiment = None |
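
Context for the removed hook: TensorBoard's `SummaryWriter` keeps a background queue/thread, which is why `_clean_logger` closed any open experiment in the main process before the plugin spawned workers. The snippet below is only a minimal, standalone sketch of that general pattern (it is not the plugin's own code path; `_worker` and the `runs/demo` directory are illustrative names):

    import torch.multiprocessing as mp
    from torch.utils.tensorboard import SummaryWriter

    def _worker(rank: int, log_dir: str) -> None:
        # each spawned process opens its own writer instead of inheriting one
        writer = SummaryWriter(log_dir=log_dir)
        writer.add_scalar("rank", rank, 0)
        writer.close()

    if __name__ == "__main__":
        log_dir = "runs/demo"
        writer = SummaryWriter(log_dir=log_dir)  # experiment opened in the main process
        writer.add_text("note", "main-process experiment")
        # close the main-process writer so its background queue/thread is not
        # carried into the spawned children -- the situation the removed
        # `_clean_logger` guarded against
        writer.close()
        mp.spawn(_worker, args=(log_dir,), nprocs=2)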