
Commit 94e2bf5

Revert "Add support for init_meta_context, materialize_module (#9920)"
This reverts commit 454e93b.
1 parent d30e456 commit 94e2bf5
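
The reverted utilities were built around PyTorch's "meta" device: a module is instantiated with shape and dtype information only, so no parameter memory is allocated until it is explicitly materialized. A minimal plain-PyTorch sketch of that idea (illustrative only, not the removed Lightning API; assumes a torch build with the `device` factory kwarg and `Module.to_empty`, which the removed availability flag gated on a 1.10 nightly):

import torch
from torch import nn

# Construct without allocating storage: parameters live on the meta device.
layer = nn.Linear(4096, 4096, device="meta")
assert layer.weight.is_meta  # shape/dtype are known, no memory backs them

# Materialize later: allocate real (uninitialized) storage, then re-run the
# module's own initialization to get usable weights.
layer = layer.to_empty(device="cpu")
layer.reset_parameters()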

7 files changed: +2 -412 lines

CHANGELOG.md

Lines changed: 0 additions & 4 deletions
@@ -195,9 +195,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added `strategy` argument to Trainer ([#8597](https://github.com/PyTorchLightning/pytorch-lightning/pull/8597))


-- Added `init_meta_context`, `materialize_module` utilities ([#9920](https://github.com/PyTorchLightning/pytorch-lightning/pull/9920))
-
-
 - Added `TPUPrecisionPlugin` ([#10020](https://github.com/PyTorchLightning/pytorch-lightning/pull/#10020))


@@ -224,7 +221,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added `XLACheckpointIO` plugin ([#9972](https://github.com/PyTorchLightning/pytorch-lightning/pull/9972))


-
 ### Changed

 - Setting `Trainer(accelerator="ddp_cpu")` now does not spawn a subprocess if `num_processes` is kept `1` along with `num_nodes > 1` ([#9603](https://github.com/PyTorchLightning/pytorch-lightning/pull/9603)).

pytorch_lightning/plugins/training_type/deepspeed.py

Lines changed: 1 addition & 1 deletion
@@ -426,7 +426,7 @@ def _setup_model_and_optimizer(
     def init_deepspeed(self):
         # check that `configure_gradient_clipping` hook isn't overriden since deepspeed handles
         # gradient clipping internally
-        if is_overridden("configure_gradient_clipping", self.lightning_module, pl.LightningModule):
+        if is_overridden("configure_gradient_clipping", self.lightning_module):
             rank_zero_warn(
                 "Since deepspeed handles gradient clipping internally, this hook will"
                 " be ignored. Consider setting `gradient_clip_val` and `gradient_clip_algorithm`"

pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 2 deletions
@@ -89,7 +89,6 @@
 from pytorch_lightning.utilities.distributed import distributed_available
 from pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException
 from pytorch_lightning.utilities.imports import _fault_tolerant_training
-from pytorch_lightning.utilities.meta import materialize_module
 from pytorch_lightning.utilities.model_helpers import is_overridden
 from pytorch_lightning.utilities.seed import reset_seed
 from pytorch_lightning.utilities.types import (

@@ -1350,7 +1349,6 @@ def _call_setup_hook(self) -> None:

     def _call_configure_sharded_model(self) -> None:
         with self.accelerator.model_sharded_context():
-            materialize_module(self.lightning_module)
             self.call_hook("configure_sharded_model")
             self.call_hook("on_configure_sharded_model")

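
With the `materialize_module` call removed, `_call_configure_sharded_model` only runs the user-facing hooks inside the strategy's sharded context. A typical, illustrative use of that hook is to defer building large layers until it fires, so a sharding plugin such as DeepSpeed or FSDP can intercept the allocation (sketch only; the class, layer, and sizes are made up):

import torch
import pytorch_lightning as pl


class BigModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.head = None  # construction deferred to configure_sharded_model

    def configure_sharded_model(self):
        # Invoked by the Trainer inside accelerator.model_sharded_context(),
        # so the active training-type plugin can shard the parameters as they
        # are created.
        if self.head is None:
            self.head = torch.nn.Linear(4096, 4096)

    def forward(self, x):
        return self.head(x)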

pytorch_lightning/utilities/imports.py

Lines changed: 0 additions & 1 deletion
@@ -93,7 +93,6 @@ def _compare_version(package: str, op: Callable, version: str, use_base_version:
 _OMEGACONF_AVAILABLE = _module_available("omegaconf")
 _POPTORCH_AVAILABLE = _module_available("poptorch")
 _RICH_AVAILABLE = _module_available("rich") and _compare_version("rich", operator.ge, "10.2.2")
-_TORCH_META_AVAILABLE = _compare_version("torch", operator.ge, "1.10.0.dev20210922")
 _TORCH_QUANTIZE_AVAILABLE = bool([eg for eg in torch.backends.quantized.supported_engines if eg != "none"])
 _TORCHTEXT_AVAILABLE = _module_available("torchtext")
 _TORCHVISION_AVAILABLE = _module_available("torchvision")
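
`_TORCH_META_AVAILABLE` gated the meta-tensor code path on a specific torch nightly. A rough approximation of how such module/version flags can be computed (illustrative only, assuming the `packaging` library is installed; not the exact `_module_available` / `_compare_version` helpers used in this file):

import operator
from importlib import import_module
from importlib.util import find_spec

from packaging.version import Version


def module_available(name: str) -> bool:
    # True when the module can be imported from the current environment.
    return find_spec(name) is not None


def compare_version(package: str, op, version: str) -> bool:
    # False when the package is missing or its version fails the comparison.
    if not module_available(package):
        return False
    pkg = import_module(package)
    return op(Version(pkg.__version__), Version(version))


# The flag removed by this commit required a torch 1.10 nightly:
TORCH_META_AVAILABLE = compare_version("torch", operator.ge, "1.10.0.dev20210922")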

pytorch_lightning/utilities/meta.py

Lines changed: 0 additions & 323 deletions
This file was deleted.
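
The deleted `pytorch_lightning/utilities/meta.py` held the `init_meta_context` and `materialize_module` implementations. For orientation only, a simplified sketch of what a recursive materialization pass over a meta-constructed model can look like (hypothetical helper, not the deleted code; assumes the whole module tree was created on the meta device):

from torch import nn


def materialize_meta_module(module: nn.Module, device: str = "cpu") -> nn.Module:
    # Give every meta parameter real, uninitialized storage on the target
    # device, then re-run each submodule's own reset_parameters() so the
    # weights end up with sensible initial values.
    if any(p.is_meta for p in module.parameters()):
        module.to_empty(device=device)
    for child in module.modules():
        if hasattr(child, "reset_parameters"):
            child.reset_parameters()
    return module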
