
Commit 28d5d80

revert rebase errors

1 parent: 8be8909

6 files changed (+2 / -23 lines)

azure-pipelines.yml
Lines changed: 1 addition & 6 deletions

@@ -72,12 +72,7 @@ jobs:
     displayName: 'Get legacy checkpoints'
 
   - script: |
-      # python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests -v --durations=50
-      python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests --ignore tests/plugins/test_sharded_plugin.py --ignore tests/trainer/test_dataloaders.py --ignore tests/metrics -v --durations=50
-      # Todo: Find why those tests are failing when run in the main pytest.
-      python -m coverage run -a --source pytorch_lightning -m pytest tests/metrics -v --durations=50
-      python -m coverage run -a --source pytorch_lightning -m pytest tests/plugins/test_sharded_plugin.py tests/trainer/test_dataloaders.py -v --durations=50
-
+      python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests -v --durations=50
     displayName: 'Testing: standard'
 
   - script: |
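
The deleted steps split the suite into three pytest invocations stitched together with coverage.py's append mode; the surviving step runs everything in a single pass. For reference (a fact about coverage.py itself, not about this pipeline):

# `coverage run -a` (long form --append) adds results to the existing
# .coverage data file instead of overwriting it, which is why the three
# deleted invocations still produced one combined coverage report.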

pytorch_lightning/accelerators/accelerator.py
Lines changed: 0 additions & 3 deletions

@@ -30,9 +30,6 @@
 from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available
 from pytorch_lightning.utilities.enums import AMPType, LightningEnum
 
-if TYPE_CHECKING:
-    from pytorch_lightning.trainer.trainer import Trainer
-
 
 class Accelerator(object):
     """

pytorch_lightning/plugins/precision/apex_amp.py
Lines changed: 0 additions & 9 deletions

@@ -92,15 +92,6 @@ def backward(
         closure_loss = closure_loss.detach()
         return closure_loss
 
-    def pre_optimizer_step(
-        self, pl_module: LightningModule, optimizer: Optimizer, optimizer_idx: int, closure: Callable, **kwargs
-    ) -> bool:
-        """Hook to do something before each optimizer step."""
-        # Apex: Amp does not support closure use with optimizers
-        closure()
-        optimizer.step()
-        return False
-
     def configure_apex(
         self,
         amp: object,
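
The deleted hook worked around the fact that Apex amp-wrapped optimizers cannot take a closure argument the way plain PyTorch optimizers can. A hedged sketch of the two call styles (the helper function here is illustrative):

import torch

def step_with_or_without_closure(optimizer: torch.optim.Optimizer, closure, supports_closure: bool) -> None:
    if supports_closure:
        # Native PyTorch optimizers accept a closure that re-evaluates the
        # loss; some, such as LBFGS, require one.
        optimizer.step(closure)
    else:
        # The Apex path in the removed hook: evaluate the closure first to
        # populate gradients, then step without a closure argument.
        closure()
        optimizer.step()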

pytorch_lightning/plugins/training_type/ddp.py
Lines changed: 1 addition & 1 deletion

@@ -29,7 +29,7 @@
 from pytorch_lightning.overrides.distributed import prepare_for_backward
 from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
 from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin
-from pytorch_lightning.utilities import _HYDRA_AVAILABLE, _PYTORCH_GREATER_EQUAL_1_7_0, rank_zero_warn
+from pytorch_lightning.utilities import _HYDRA_AVAILABLE, _TORCH_GREATER_EQUAL_1_7, rank_zero_warn
 from pytorch_lightning.utilities.distributed import (
     find_free_network_port,
     rank_zero_only,
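
This revert swaps the short-lived _PYTORCH_GREATER_EQUAL_1_7_0 name back to the existing _TORCH_GREATER_EQUAL_1_7 flag. Version gates like this are typically computed once at import time; a minimal sketch of the pattern (the real definition inside pytorch_lightning.utilities may differ in detail):

import torch
from packaging.version import Version

# Evaluated once at import time; packaging.Version also parses local
# version suffixes such as "1.7.0+cu101".
_TORCH_GREATER_EQUAL_1_7 = Version(torch.__version__) >= Version("1.7.0")

if _TORCH_GREATER_EQUAL_1_7:
    ...  # gate code paths that require PyTorch >= 1.7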

pytorch_lightning/utilities/__init__.py
Lines changed: 0 additions & 2 deletions

@@ -35,8 +35,6 @@
     _module_available,
     _NATIVE_AMP_AVAILABLE,
     _OMEGACONF_AVAILABLE,
-    _PYTORCH_GREATER_EQUAL_1_7_0,
-    _PYTORCH_PRUNE_AVAILABLE,
     _RPC_AVAILABLE,
     _TORCH_GREATER_EQUAL_1_6,
     _TORCH_GREATER_EQUAL_1_7,

tests/trainer/test_dataloaders.py
Lines changed: 0 additions & 2 deletions

@@ -735,8 +735,6 @@ def __len__(self):
 @pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
 def test_dataloader_reinit_for_subclass(tmpdir):
 
-    del os.environ["PL_TRAINER_GPUS"]
-
     class CustomDataLoader(torch.utils.data.DataLoader):
 
         def __init__(
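
For context on the deleted line: del os.environ[...] raises KeyError whenever the variable is unset, which makes the test depend on what ran before it. Two tolerant alternatives, as a sketch rather than what the repository actually uses:

import os

# Option 1: pop() with a default succeeds whether or not the variable exists.
os.environ.pop("PL_TRAINER_GPUS", None)

# Option 2: pytest's monkeypatch fixture removes the variable only for this
# test and restores the previous value afterwards.
def test_without_trainer_gpus_env(monkeypatch):
    monkeypatch.delenv("PL_TRAINER_GPUS", raising=False)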
