Commit 3431c62

williamFalcon and Borda authored
Remove error when test dataloader used in test (#1495)
* remove error when test dataloader used in test
* fix lost model reference
* moved optimizer types
* added tests for warning
* refactoring
* fix imports
* fix tests
* fix mnist
* flake8
* review

Co-authored-by: J. Borovec <[email protected]>
1 parent 8322f1b commit 3431c62

23 files changed: +587 −58 lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -111,6 +111,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Removed
 
+- Removed test for no test dataloader in .fit ([#1495](https://github.com/PyTorchLightning/pytorch-lightning/pull/1495))
 - Removed duplicated module `pytorch_lightning.utilities.arg_parse` for loading CLI arguments ([#1167](https://github.com/PyTorchLightning/pytorch-lightning/issues/1167))
 - Removed wandb logger's `finalize` method ([#1193](https://github.com/PyTorchLightning/pytorch-lightning/pull/1193))
 - Dropped `torchvision` dependency in tests and added own MNIST dataset class instead ([#986](https://github.com/PyTorchLightning/pytorch-lightning/issues/986))

benchmarks/test_trainer_parity.py

Lines changed: 10 additions & 9 deletions
@@ -11,7 +11,7 @@
 import tests.base.utils as tutils
 
 from pytorch_lightning import Trainer, LightningModule
-from tests.base.datasets import TestingMNIST
+from tests.base.datasets import TrialMNIST
 
 
 class ParityMNIST(LightningModule):
@@ -42,10 +42,10 @@ def configure_optimizers(self):
         return torch.optim.Adam(self.parameters(), lr=0.02)
 
     def train_dataloader(self):
-        return DataLoader(TestingMNIST(train=True,
-                                       download=True,
-                                       num_samples=500,
-                                       digits=list(range(5))),
+        return DataLoader(TrialMNIST(train=True,
+                                     download=True,
+                                     num_samples=500,
+                                     digits=list(range(5))),
                           batch_size=128)
 
 
@@ -65,10 +65,11 @@ def test_pytorch_parity(tmpdir):
     for pl_out, pt_out in zip(lightning_outs, manual_outs):
         np.testing.assert_almost_equal(pl_out, pt_out, 5)
 
-    tutils.assert_speed_parity(pl_times, pt_times, num_epochs)
+    # the first run initializes the dataset (download & filter)
+    tutils.assert_speed_parity(pl_times[1:], pt_times[1:], num_epochs)
 
 
-def set_seed(seed):
+def _set_seed(seed):
     np.random.seed(seed)
     torch.manual_seed(seed)
     if torch.cuda.is_available():
@@ -88,7 +89,7 @@ def vanilla_loop(MODEL, num_runs=10, num_epochs=10):
 
         # set seed
         seed = i
-        set_seed(seed)
+        _set_seed(seed)
 
         # init model parts
         model = MODEL()
@@ -134,7 +135,7 @@ def lightning_loop(MODEL, num_runs=10, num_epochs=10):
 
         # set seed
         seed = i
-        set_seed(seed)
+        _set_seed(seed)
 
         # init model parts
         model = MODEL()

pytorch_lightning/trainer/evaluation_loop.py

Lines changed: 2 additions & 4 deletions
@@ -418,10 +418,8 @@ def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode:
         # make dataloader_idx arg in validation_step optional
         args = [batch, batch_idx]
 
-        if test_mode and len(self.test_dataloaders) > 1:
-            args.append(dataloader_idx)
-
-        elif not test_mode and len(self.val_dataloaders) > 1:
+        if (test_mode and len(self.test_dataloaders) > 1) \
+                or (not test_mode and len(self.val_dataloaders) > 1):
             args.append(dataloader_idx)
 
         # handle DP, DDP forward

pytorch_lightning/trainer/model_hooks.py

Lines changed: 2 additions & 0 deletions
@@ -21,6 +21,8 @@ def is_overriden(self, method_name: str, model: LightningModule = None) -> bool:
             return False
 
         instance_attr = getattr(model, method_name)
+        if not instance_attr:
+            return False
         super_attr = getattr(super_object, method_name)
 
         # when code pointers are different, it was implemented
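With the new early return, a hook attribute that exists but is falsy (for example, set to None on the instance) now counts as not overridden instead of being compared against the base class. A minimal standalone sketch of that behavior, using simplified stand-in classes rather than Lightning's actual `Trainer`/`LightningModule`:

```python
# Standalone sketch (not Lightning's real classes) of the override check
# after this change: falsy attributes short-circuit to "not overridden".
class Base:
    def test_step(self, *args):
        pass


class Child(Base):
    pass


def is_overriden(method_name: str, model, super_object=Base) -> bool:
    instance_attr = getattr(model, method_name, None)
    if not instance_attr:
        return False
    super_attr = getattr(super_object, method_name)
    # when code pointers are different, it was implemented
    return instance_attr.__code__ is not super_attr.__code__


child = Child()
print(is_overriden('test_step', child))  # False: inherited, not overridden
child.test_step = None
print(is_overriden('test_step', child))  # False: falsy attribute hits the new early return
```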

pytorch_lightning/trainer/trainer.py

Lines changed: 23 additions & 5 deletions
@@ -939,11 +939,14 @@ def test(self, model: Optional[LightningModule] = None, test_dataloaders: Option
         self.testing = True
 
         if test_dataloaders is not None:
-            if model is not None:
+            if model:
                 self.__attach_dataloaders(model, test_dataloaders=test_dataloaders)
             else:
                 self.__attach_dataloaders(self.model, test_dataloaders=test_dataloaders)
 
+        # give proper warnings if user only passed in loader without hooks
+        self.check_testing_model_configuration(model if model else self.model)
+
         if model is not None:
             self.model = model
             self.fit(model)
@@ -1012,10 +1015,25 @@ def check_model_configuration(self, model: LightningModule):
                     'You have defined a `test_dataloader()` and have defined a `test_step()`, you may also want to'
                     ' define `test_epoch_end()` for accumulating stats.', RuntimeWarning
                 )
-            else:
-                if self.is_overriden('test_step', model):
-                    raise MisconfigurationException('You have defined `test_step()`,'
-                                                    ' but have not passed in a `test_dataloader()`.')
+
+    def check_testing_model_configuration(self, model: LightningModule):
+
+        has_test_step = self.is_overriden('test_step', model)
+        has_test_epoch_end = self.is_overriden('test_epoch_end', model)
+        gave_test_loader = hasattr(model, 'test_dataloader') and model.test_dataloader()
+
+        if gave_test_loader and not has_test_step:
+            raise MisconfigurationException('You passed in a `test_dataloader` but did not implement `test_step()`')
+
+        if has_test_step and not gave_test_loader:
+            raise MisconfigurationException('You defined `test_step()` but did not implement'
+                                            ' `test_dataloader` nor passed in `.fit(test_dataloaders`.')
+
+        if has_test_step and gave_test_loader and not has_test_epoch_end:
+            rank_zero_warn(
+                'You passed in a `test_dataloader` and have defined a `test_step()`, you may also want to'
+                ' define `test_epoch_end()` for accumulating stats.', RuntimeWarning
+            )
 
 
 class _PatchDataLoader(object):
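In effect, the test-related checks now run from `Trainer.test()` rather than raising inside `.fit()`. A self-contained sketch of the decision table the new method applies; the exception class and function below are local stand-ins for illustration, not Lightning's actual objects:

```python
import warnings


class MisconfigurationException(Exception):
    """Local stand-in for Lightning's MisconfigurationException."""


def check_testing_configuration(has_test_step, has_test_epoch_end, gave_test_loader):
    # a loader without a test_step: nothing could consume the batches
    if gave_test_loader and not has_test_step:
        raise MisconfigurationException('You passed in a `test_dataloader` but did not implement `test_step()`')
    # a test_step without any loader: nothing to feed it
    if has_test_step and not gave_test_loader:
        raise MisconfigurationException('You defined `test_step()` but provided no `test_dataloader`')
    # both present but no epoch-end hook: only a warning, testing proceeds
    if has_test_step and gave_test_loader and not has_test_epoch_end:
        warnings.warn('Consider defining `test_epoch_end()` for accumulating stats.', RuntimeWarning)


# warns but does not raise
check_testing_configuration(has_test_step=True, has_test_epoch_end=False, gave_test_loader=True)
```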

tests/base/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@
 import torch
 
 from tests.base.models import TestModelBase, DictHparamsModel
+from tests.base.eval_model_template import EvalModelTemplate
 from tests.base.mixins import (
     LightEmptyTestStep,
     LightValidationStepMixin,

tests/base/datasets.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ class MNIST(Dataset):
4949
cache_folder_name = 'complete'
5050

5151
def __init__(self, root: str = PATH_DATASETS, train: bool = True,
52-
normalize: tuple = (0.5, 1.0), download: bool = False):
52+
normalize: tuple = (0.5, 1.0), download: bool = True):
5353
super().__init__()
5454
self.root = root
5555
self.train = train # training set or test set
@@ -111,7 +111,7 @@ def normalize_tensor(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> Ten
111111
return tensor
112112

113113

114-
class TestingMNIST(MNIST):
114+
class TrialMNIST(MNIST):
115115
"""Constrain image dataset
116116
117117
Args:
@@ -127,7 +127,7 @@ class TestingMNIST(MNIST):
127127
digits: list selected MNIST digits/classes
128128
129129
Examples:
130-
>>> dataset = TestingMNIST(download=True)
130+
>>> dataset = TrialMNIST(download=True)
131131
>>> len(dataset)
132132
300
133133
>>> sorted(set([d.item() for d in dataset.targets]))
@@ -179,6 +179,8 @@ def prepare_data(self, download: bool) -> None:
179179
self._download(super().cached_folder_path)
180180

181181
for fname in (self.TRAIN_FILE_NAME, self.TEST_FILE_NAME):
182-
data, targets = torch.load(os.path.join(super().cached_folder_path, fname))
182+
path_fname = os.path.join(super().cached_folder_path, fname)
183+
assert os.path.isfile(path_fname), 'Missing cached file: %s' % path_fname
184+
data, targets = torch.load(path_fname)
183185
data, targets = self._prepare_subset(data, targets, self.num_samples, self.digits)
184186
torch.save((data, targets), os.path.join(self.cached_folder_path, fname))

tests/base/debug.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
from torch.utils.data import DataLoader
44

55
import pytorch_lightning as pl
6-
from tests.base.datasets import TestingMNIST
6+
from tests.base.datasets import TrialMNIST
77

88

99
# from test_models import assert_ok_test_acc, load_model, \
@@ -42,10 +42,10 @@ def configure_optimizers(self):
4242
return [torch.optim.Adam(self.parameters(), lr=0.02)]
4343

4444
def train_dataloader(self):
45-
return DataLoader(TestingMNIST(train=True, num_samples=100), batch_size=16)
45+
return DataLoader(TrialMNIST(train=True, num_samples=100), batch_size=16)
4646

4747
def val_dataloader(self):
48-
return DataLoader(TestingMNIST(train=False, num_samples=50), batch_size=16)
48+
return DataLoader(TrialMNIST(train=False, num_samples=50), batch_size=16)
4949

5050
def test_dataloader(self):
51-
return DataLoader(TestingMNIST(train=False, num_samples=50), batch_size=16)
51+
return DataLoader(TrialMNIST(train=False, num_samples=50), batch_size=16)
tests/base/eval_model_optimizers.py

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
+from abc import ABC
+
+from torch import optim
+
+
+class ConfigureOptimizersPool(ABC):
+    def configure_optimizers(self):
+        """
+        return whatever optimizers we want here.
+        :return: list of optimizers
+        """
+        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        return optimizer
+
+    def configure_optimizers_empty(self):
+        return None
+
+    def configure_optimizers_lbfgs(self):
+        """
+        return whatever optimizers we want here.
+        :return: list of optimizers
+        """
+        optimizer = optim.LBFGS(self.parameters(), lr=self.hparams.learning_rate)
+        return optimizer
+
+    def configure_optimizers_multiple_optimizers(self):
+        """
+        return whatever optimizers we want here.
+        :return: list of optimizers
+        """
+        # try no scheduler for this model (testing purposes)
+        optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        return optimizer1, optimizer2
+
+    def configure_optimizers_single_scheduler(self):
+        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
+        return [optimizer], [lr_scheduler]
+
+    def configure_optimizers_multiple_schedulers(self):
+        optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
+        lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
+
+        return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
+
+    def configure_optimizers_mixed_scheduling(self):
+        optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 4, gamma=0.1)
+        lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
+
+        return [optimizer1, optimizer2], \
+            [{'scheduler': lr_scheduler1, 'interval': 'step'}, lr_scheduler2]
+
+    def configure_optimizers_reduce_lr_on_plateau(self):
+        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
+        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
+        return [optimizer], [lr_scheduler]
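These variations exercise the different return shapes `configure_optimizers()` can hand back: a single optimizer, a tuple of optimizers, paired optimizer/scheduler lists, and a scheduler dict with an `interval` key. A standalone illustration of those shapes on a toy module, not taken from the commit itself:

```python
import torch.nn as nn
from torch import optim

toy = nn.Linear(4, 2)  # stand-in for a LightningModule's parameters

single = optim.Adam(toy.parameters(), lr=1e-3)                 # one optimizer
opt_a = optim.Adam(toy.parameters(), lr=1e-3)
opt_b = optim.Adam(toy.parameters(), lr=1e-3)
pair = (opt_a, opt_b)                                          # two optimizers, no schedulers
with_scheduler = ([single],
                  [optim.lr_scheduler.StepLR(single, 1, gamma=0.1)])
mixed_scheduling = ([opt_a, opt_b],
                    [{'scheduler': optim.lr_scheduler.StepLR(opt_a, 4, gamma=0.1), 'interval': 'step'},
                     optim.lr_scheduler.StepLR(opt_b, 1, gamma=0.1)])
```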

tests/base/eval_model_template.py

Lines changed: 80 additions & 0 deletions
@@ -0,0 +1,80 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from tests.base.datasets import TrialMNIST
+from pytorch_lightning.core.lightning import LightningModule
+from tests.base.eval_model_optimizers import ConfigureOptimizersPool
+from tests.base.eval_model_test_dataloaders import TestDataloaderVariations
+from tests.base.eval_model_test_epoch_ends import TestEpochEndVariations
+from tests.base.eval_model_test_steps import TestStepVariations
+from tests.base.eval_model_train_dataloaders import TrainDataloaderVariations
+from tests.base.eval_model_train_steps import TrainingStepVariations
+from tests.base.eval_model_valid_dataloaders import ValDataloaderVariations
+from tests.base.eval_model_valid_epoch_ends import ValidationEpochEndVariations
+from tests.base.eval_model_valid_steps import ValidationStepVariations
+from tests.base.eval_model_utils import ModelTemplateUtils
+
+
+class EvalModelTemplate(
+    ModelTemplateUtils,
+    TrainingStepVariations,
+    ValidationStepVariations,
+    ValidationEpochEndVariations,
+    TestStepVariations,
+    TestEpochEndVariations,
+    TrainDataloaderVariations,
+    ValDataloaderVariations,
+    TestDataloaderVariations,
+    ConfigureOptimizersPool,
+    LightningModule
+):
+    """
+    This template houses all combinations of model configurations we want to test
+    """
+    def __init__(self, hparams):
+        """Pass in parsed HyperOptArgumentParser to the model."""
+        # init superclass
+        super().__init__()
+        self.hparams = hparams
+
+        # if you specify an example input, the summary will show input/output for each layer
+        self.example_input_array = torch.rand(5, 28 * 28)
+
+        # build model
+        self.__build_model()
+
+    def __build_model(self):
+        """
+        Simple model for testing
+        :return:
+        """
+        self.c_d1 = nn.Linear(
+            in_features=self.hparams.in_features,
+            out_features=self.hparams.hidden_dim
+        )
+        self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)
+        self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)
+
+        self.c_d2 = nn.Linear(
+            in_features=self.hparams.hidden_dim,
+            out_features=self.hparams.out_features
+        )
+
+    def forward(self, x):
+        x = self.c_d1(x)
+        x = torch.tanh(x)
+        x = self.c_d1_bn(x)
+        x = self.c_d1_drop(x)
+
+        x = self.c_d2(x)
+        logits = F.log_softmax(x, dim=1)
+
+        return logits
+
+    def loss(self, labels, logits):
+        nll = F.nll_loss(logits, labels)
+        return nll
+
+    def prepare_data(self):
+        _ = TrialMNIST(root=self.hparams.data_root, train=True, download=True)
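A hedged sketch of how the template might be driven in a test: the hyperparameter names are the ones read in `__build_model`, `configure_optimizers`, and `prepare_data` above (values here are illustrative only), and it assumes the dataloader mixins, which are not shown in this commit, supply a default `train_dataloader`.

```python
from argparse import Namespace

from pytorch_lightning import Trainer
from tests.base import EvalModelTemplate

# illustrative values; the real tests build hparams via their own helpers
hparams = Namespace(
    in_features=28 * 28,
    hidden_dim=64,
    out_features=10,
    drop_prob=0.2,
    learning_rate=0.001,
    data_root='./datasets',
)

model = EvalModelTemplate(hparams)
# swap in one of the ConfigureOptimizersPool variations for this particular run
model.configure_optimizers = model.configure_optimizers_multiple_schedulers

trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
```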

0 commit comments