
Commit 8e76000

Merge branch 'master' into refactor/setup-one-model
2 parents: b4e5ab4 + e94dcf6

22 files changed: 86 additions, 55 deletions


.github/workflows/ci_test-conda.yml

Lines changed: 2 additions & 1 deletion
@@ -31,6 +31,7 @@ jobs:
         python ./requirements/adjust_versions.py requirements/extra.txt
         python ./requirements/adjust_versions.py requirements/examples.txt
         pip install --requirement requirements/devel.txt --find-links https://download.pytorch.org/whl/nightly/torch_nightly.html
+        pip install pytest-random-order
         pip list

     - name: Pull checkpoints from S3
@@ -44,7 +45,7 @@ jobs:
     - name: Tests
       run: |
         # NOTE: run coverage on tests does not propagate failure status for Win, https://github.com/nedbat/coveragepy/issues/1003
-        coverage run --source pytorch_lightning -m pytest pytorch_lightning tests -v --durations=50 --junitxml=junit/test-results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml
+        coverage run --source pytorch_lightning -m pytest --random-order-seed=1 pytorch_lightning tests -v --durations=50 --junitxml=junit/test-results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml
       shell: bash -l {0}

     - name: Upload pytest results
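
For anyone reproducing the randomized ordering locally, a minimal sketch via pytest's Python entry point; it assumes pytest-random-order is installed and that `tests` is the repository's test directory:

    # sketch: rerun the suite with the same deterministic shuffle as CI
    # (requires: pip install pytest pytest-random-order)
    import pytest

    # --random-order-seed=1 pins the shuffle, matching the workflow above,
    # so ordering-dependent failures reproduce across runs
    pytest.main(["tests", "-v", "--random-order-seed=1"])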

docs/source/common/trainer.rst

Lines changed: 3 additions & 1 deletion
@@ -516,7 +516,9 @@ Example::
 checkpoint_callback
 ^^^^^^^^^^^^^^^^^^^

-Deprecated: This has been deprecated in v1.5 and will be removed in v1.7. Please use ``enable_checkpointing`` instead.
+.. warning:: `checkpoint_callback` has been deprecated in v1.5 and will be removed in v1.7.
+    To disable checkpointing, pass ``enable_checkpointing = False`` to the Trainer instead.
+

 default_root_dir
 ^^^^^^^^^^^^^^^^
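
In user code, the migration this warning describes is a one-flag swap; a short sketch against pytorch_lightning >= 1.5:

    from pytorch_lightning import Trainer

    # deprecated in v1.5, removed in v1.7:
    #   trainer = Trainer(checkpoint_callback=False)

    # replacement:
    trainer = Trainer(enable_checkpointing=False)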

docs/source/extensions/callbacks.rst

Lines changed: 4 additions & 4 deletions
@@ -72,10 +72,10 @@ Examples
 --------
 You can do pretty much anything with callbacks.

-- `Add a MLP to fine-tune self-supervised networks <https://lightning-bolts.readthedocs.io/en/latest/self_supervised_callbacks.html#sslonlineevaluator>`_.
-- `Find how to modify an image input to trick the classification result <https://lightning-bolts.readthedocs.io/en/latest/vision_callbacks.html#confused-logit>`_.
-- `Interpolate the latent space of any variational model <https://lightning-bolts.readthedocs.io/en/latest/variational_callbacks.html#latent-dim-interpolator>`_.
-- `Log images to Tensorboard for any model <https://lightning-bolts.readthedocs.io/en/latest/vision_callbacks.html#tensorboard-image-generator>`_.
+- `Add a MLP to fine-tune self-supervised networks <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/self_supervised.html#sslonlineevaluator>`_.
+- `Find how to modify an image input to trick the classification result <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/vision.html#confused-logit>`_.
+- `Interpolate the latent space of any variational model <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/variational.html#latent-dim-interpolator>`_.
+- `Log images to Tensorboard for any model <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/vision.html#tensorboard-image-generator>`_.


 --------------
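
The linked Bolts examples all build on the same hook API; a toy sketch of the pattern (class name and body are illustrative only, not from the docs):

    from pytorch_lightning import Callback

    class EpochEndPrinter(Callback):
        """Toy callback: run arbitrary code at the end of every training epoch."""

        def on_train_epoch_end(self, trainer, pl_module):
            # any side effect fits here: logging images, probing latents, ...
            print(f"finished epoch {trainer.current_epoch}")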

pytorch_lightning/__about__.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 import time

 _this_year = time.strftime("%Y")
-__version__ = "1.5.0rc0"
+__version__ = "1.5.0rc1"
 __author__ = "William Falcon et al."
 __author_email__ = "[email protected]"
 __license__ = "Apache-2.0"

pytorch_lightning/loops/dataloader/evaluation_loop.py

Lines changed: 1 addition & 1 deletion
@@ -101,7 +101,7 @@ def advance(self, *args: Any, **kwargs: Any) -> None:

         dataloader_idx: int = self.current_dataloader_idx
         dataloader = self.trainer.training_type_plugin.process_dataloader(self.current_dataloader)
-        dataloader = self.trainer.data_connector.get_profiled_dataloader(dataloader, dataloader_idx=dataloader_idx)
+        dataloader = self.trainer._data_connector.get_profiled_dataloader(dataloader, dataloader_idx=dataloader_idx)
         dl_max_batches = self._max_batches[dataloader_idx]

         dl_outputs = self.epoch_loop.run(dataloader, dataloader_idx, dl_max_batches, self.num_dataloaders)
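
This hunk and the remaining ones below are a single mechanical rename: the public `trainer.data_connector` attribute becomes the protected `trainer._data_connector`. External code that reached into the old name needs the same one-character change; a hypothetical before/after:

    # before the rename (public attribute):
    #   fetcher = trainer.data_connector.get_profiled_dataloader(dataloader)

    # after (the leading underscore marks the connector as internal API):
    fetcher = trainer._data_connector.get_profiled_dataloader(dataloader)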

pytorch_lightning/loops/epoch/evaluation_epoch_loop.py

Lines changed: 1 addition & 1 deletion
@@ -107,7 +107,7 @@ def advance(
         if batch is None:
             raise StopIteration

-        if not self.trainer.data_connector.evaluation_data_fetcher.store_on_device:
+        if not self.trainer._data_connector.evaluation_data_fetcher.store_on_device:
             with self.trainer.profiler.profile("evaluation_batch_to_device"):
                 batch = self.trainer.accelerator.batch_to_device(batch, dataloader_idx=dataloader_idx)
pytorch_lightning/loops/epoch/training_epoch_loop.py

Lines changed: 1 addition & 1 deletion
@@ -147,7 +147,7 @@ def advance(self, *args: Any, **kwargs: Any) -> None:

         batch_idx, (batch, self.batch_progress.is_last_batch) = next(self._dataloader_iter)

-        if not self.trainer.data_connector.train_data_fetcher.store_on_device:
+        if not self.trainer._data_connector.train_data_fetcher.store_on_device:
             with self.trainer.profiler.profile("training_batch_to_device"):
                 batch = self.trainer.accelerator.batch_to_device(batch)

pytorch_lightning/loops/fit_loop.py

Lines changed: 1 addition & 1 deletion
@@ -212,7 +212,7 @@ def on_advance_start(self) -> None:
     def advance(self) -> None:
         """Runs one whole epoch."""
         dataloader = self.trainer.training_type_plugin.process_dataloader(self.trainer.train_dataloader)
-        data_fetcher = self.trainer.data_connector.get_profiled_dataloader(dataloader)
+        data_fetcher = self.trainer._data_connector.get_profiled_dataloader(dataloader)

         with self.trainer.profiler.profile("run_training_epoch"):
             self.epoch_loop.run(data_fetcher)

pytorch_lightning/plugins/training_type/deepspeed.py

Lines changed: 1 addition & 1 deletion
@@ -621,7 +621,7 @@ def _auto_select_batch_size(self):
         # train_micro_batch_size_per_gpu is used for throughput logging purposes
         # by default we try to use the batch size of the loader
         batch_size = 1
-        train_dl_source = self.lightning_module.trainer.data_connector._train_dataloader_source
+        train_dl_source = self.lightning_module.trainer._data_connector._train_dataloader_source
         if train_dl_source.is_defined():
             train_dataloader = train_dl_source.dataloader()
             if hasattr(train_dataloader, "batch_sampler"):
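
The hunk is cut off after the `hasattr` check. For orientation only, a simplified sketch of how such auto-selection plausibly completes; the final assignment is an assumption, not shown in this diff:

    def _auto_select_batch_size(trainer):
        # default when no train dataloader source is defined
        batch_size = 1
        source = trainer._data_connector._train_dataloader_source
        if source.is_defined():
            train_dataloader = source.dataloader()
            if hasattr(train_dataloader, "batch_sampler"):
                # assumption: reuse the batch size configured on the sampler
                batch_size = train_dataloader.batch_sampler.batch_size
        return batch_size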

pytorch_lightning/plugins/training_type/tpu_spawn.py

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ def _validate_dataloader(dataloaders: Union[List[DataLoader], DataLoader]) -> None:
     @staticmethod
     def _validate_patched_dataloaders(model: "pl.LightningModule") -> None:
         """Validate and fail fast if the dataloaders were passed directly to fit."""
-        connector: DataConnector = model.trainer.data_connector
+        connector: DataConnector = model.trainer._data_connector
         sources = (
             connector._train_dataloader_source,
             connector._val_dataloader_source,
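
Per the docstring, this validator fails fast when loaders were handed straight to `fit`; a hypothetical call of the kind it inspects (loader construction elided):

    # dataloaders passed directly to fit() are the case this TPU-spawn
    # validator guards against; defining them on the LightningModule or a
    # DataModule is the alternative
    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)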
