Merged
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -112,6 +112,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

### Removed

- Removed the deprecated `process_position` argument from the `Trainer` constructor ([#13071](https://github.com/PyTorchLightning/pytorch-lightning/pull/13071))


- Removed the deprecated `checkpoint_callback` argument from the `Trainer` constructor ([#13027](https://github.com/PyTorchLightning/pytorch-lightning/pull/13027))
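For anyone upgrading past this release, the replacement path named in the old deprecation warning is to construct the progress bar callback directly. A minimal migration sketch (the position value `1` is only illustrative):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.progress import TQDMProgressBar

# Before (removed by this PR): Trainer(process_position=1)
# After: set the position on the callback and pass it via `callbacks`.
trainer = Trainer(callbacks=[TQDMProgressBar(process_position=1)])
```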


25 changes: 0 additions & 25 deletions docs/source/common/trainer.rst
@@ -1214,31 +1214,6 @@ Half precision, or mixed precision, is the combined use of 32 and 16 bit floating
# turn on 16-bit
trainer = Trainer(amp_backend="apex", amp_level="O2", precision=16, accelerator="gpu", devices=1)


process_position
^^^^^^^^^^^^^^^^

.. warning:: ``process_position`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``
directly to the Trainer's ``callbacks`` argument instead.

.. raw:: html

<video width="50%" max-width="400px" controls
poster="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/thumb/process_position.jpg"
src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/process_position.mp4"></video>

|

Orders the progress bar. Useful when running multiple trainers on the same node.

.. testcode::

# default used by the Trainer
trainer = Trainer(process_position=0)

.. note:: This argument is ignored if a custom callback is passed to :paramref:`~Trainer.callbacks`.
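Translating the removed example to the replacement API, one bar per concurrent trainer might look like this — a sketch only, assuming each process can derive its offset from a `LOCAL_RANK` environment variable (that variable name is an assumption, not part of the removed docs):

```python
import os

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.progress import TQDMProgressBar

# Assumption: the launcher exports LOCAL_RANK for each concurrent trainer.
position = int(os.environ.get("LOCAL_RANK", "0"))

# Each trainer draws its bar `position` rows below the first one,
# replacing the removed Trainer(process_position=...) flag.
trainer = Trainer(callbacks=[TQDMProgressBar(process_position=position)])
```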

profiler
^^^^^^^^

14 changes: 3 additions & 11 deletions pytorch_lightning/trainer/connectors/callback_connector.py
@@ -45,7 +45,6 @@ def on_trainer_init(
callbacks: Optional[Union[List[Callback], Callback]],
enable_checkpointing: bool,
enable_progress_bar: bool,
process_position: int,
default_root_dir: Optional[str],
weights_save_path: Optional[str],
enable_model_summary: bool,
@@ -77,14 +76,7 @@ def on_trainer_init(
self._configure_timer_callback(max_time)

# init progress bar
if process_position != 0:
rank_zero_deprecation(
f"Setting `Trainer(process_position={process_position})` is deprecated in v1.5 and will be removed"
" in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with"
" `process_position` directly to the Trainer's `callbacks` argument instead."
)

self._configure_progress_bar(process_position, enable_progress_bar)
self._configure_progress_bar(enable_progress_bar)

# configure the ModelSummary callback
self._configure_model_summary_callback(enable_model_summary, weights_summary)
@@ -188,7 +180,7 @@ def _configure_model_summary_callback(
self.trainer.callbacks.append(model_summary)
self.trainer._weights_summary = weights_summary

def _configure_progress_bar(self, process_position: int = 0, enable_progress_bar: bool = True) -> None:
def _configure_progress_bar(self, enable_progress_bar: bool = True) -> None:
progress_bars = [c for c in self.trainer.callbacks if isinstance(c, ProgressBarBase)]
if len(progress_bars) > 1:
raise MisconfigurationException(
@@ -210,7 +202,7 @@ def _configure_progress_bar(self, process_position: int = 0, enable_progress_bar: bool = True) -> None:
)

if enable_progress_bar:
progress_bar_callback = TQDMProgressBar(process_position=process_position)
progress_bar_callback = TQDMProgressBar()
self.trainer.callbacks.append(progress_bar_callback)

def _configure_timer_callback(self, max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None) -> None:
9 changes: 0 additions & 9 deletions pytorch_lightning/trainer/trainer.py
@@ -137,7 +137,6 @@ def __init__(
default_root_dir: Optional[str] = None,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
process_position: int = 0,
num_nodes: int = 1,
num_processes: Optional[int] = None, # TODO: Remove in 2.0
devices: Optional[Union[List[int], str, int]] = None,
@@ -305,13 +304,6 @@ def __init__(
log_every_n_steps: How often to log within steps.
Default: ``50``.

process_position: Orders the progress bar when running multiple models on same machine.

.. deprecated:: v1.5
``process_position`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``
directly to the Trainer's ``callbacks`` argument instead.

enable_progress_bar: Whether to enable the progress bar by default.
Default: ``True``.

@@ -508,7 +500,6 @@ def __init__(
callbacks,
enable_checkpointing,
enable_progress_bar,
process_position,
default_root_dir,
weights_save_path,
enable_model_summary,
5 changes: 0 additions & 5 deletions tests/deprecated_api/test_remove_1-7.py
@@ -61,11 +61,6 @@ def on_keyboard_interrupt(self, trainer, pl_module):
trainer.fit(model)


def test_v1_7_0_process_position_trainer_constructor(tmpdir):
with pytest.deprecated_call(match=r"Setting `Trainer\(process_position=5\)` is deprecated in v1.5"):
_ = Trainer(process_position=5)


def test_v1_7_0_flush_logs_every_n_steps_trainer_constructor(tmpdir):
with pytest.deprecated_call(match=r"Setting `Trainer\(flush_logs_every_n_steps=10\)` is deprecated in v1.5"):
_ = Trainer(flush_logs_every_n_steps=10)