11 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
| 14 | +import logging |
14 | 15 | import os |
15 | 16 | from copy import deepcopy |
16 | 17 |
17 | 18 | import pytest
18 | 19 | import torch
19 | 20 |
20 | 21 | from pytorch_lightning import seed_everything, Trainer |
21 | 22 | from pytorch_lightning.demos.boring_classes import BoringModel |
| 23 | +from pytorch_lightning.tuner.lr_finder import _LRFinder |
22 | 24 | from pytorch_lightning.utilities.exceptions import MisconfigurationException |
23 | 25 | from tests_pytorch.helpers.datamodules import ClassifDataModule |
24 | 26 | from tests_pytorch.helpers.simple_models import ClassificationModel |
@@ -359,3 +361,55 @@ def test_multiple_lr_find_calls_gives_same_results(tmpdir): |
359 | 361 | for curr_lr_finder in all_res[1:] |
360 | 362 | for k in all_res[0].keys() |
361 | 363 | ) |
| 364 | + |
| 365 | + |
| 366 | +@pytest.mark.parametrize( |
| 367 | + "skip_begin,skip_end,losses,expected_error", |
| 368 | + [ |
| 369 | + (0, 0, [], True), |
| 370 | + (10, 1, [], True), |
| 371 | + (0, 2, [0, 1, 2], True), |
| 372 | + (0, 1, [0, 1, 2], False), |
| 373 | + (1, 1, [0, 1, 2], True), |
| 374 | + (1, 1, [0, 1, 2, 3], False), |
| 375 | + (0, 1, [float("nan"), float("nan"), 0, float("inf"), 1, 2, 3, float("inf"), 2, float("nan"), 1], False), |
| 376 | + (4, 1, [float("nan"), float("nan"), 0, float("inf"), 1, 2, 3, float("inf"), 2, float("nan"), 1], False), |
| 377 | + ], |
| 378 | +) |
| 379 | +def test_suggestion_not_enough_finite_points(losses, skip_begin, skip_end, expected_error, caplog): |
| 380 | + """Tests the error handling when not enough finite points are available to make a suggestion.""" |
| 381 | + caplog.clear() |
| 382 | + lr_finder = _LRFinder( |
| 383 | + mode="exponential", |
| 384 | + lr_min=1e-8, |
| 385 | + lr_max=1, |
| 386 | + num_training=100, |
| 387 | + ) |
| 388 | + lrs = list(torch.arange(len(losses))) |
| 389 | + lr_finder.results = { |
| 390 | + "lr": lrs, |
| 391 | + "loss": losses, |
| 392 | + } |
| 393 | + with caplog.at_level(logging.ERROR, logger="root.tuner.lr_finder"): |
| 394 | + lr = lr_finder.suggestion(skip_begin=skip_begin, skip_end=skip_end) |
| 395 | + |
| 396 | + if expected_error: |
| 397 | + assert lr is None |
| 398 | + assert "Failed to compute suggestion for learning rate" in caplog.text |
| 399 | + else: |
| 400 | + assert lr is not None |
| 401 | + |
| 402 | + |
| 403 | +def test_lr_attribute_when_suggestion_invalid(tmpdir): |
| 404 | +    """Tests that the learning rate attribute is not updated when the suggestion is invalid."""
| 405 | + |
| 406 | + class TestModel(BoringModel): |
| 407 | + def __init__(self): |
| 408 | + super().__init__() |
| 409 | + self.learning_rate = 0.123 |
| 410 | + |
| 411 | + model = TestModel() |
| 412 | + trainer = Trainer(default_root_dir=tmpdir) |
| 413 | + lr_finder = trainer.tuner.lr_find(model=model, update_attr=True, num_training=1) # force insufficient data points |
| 414 | + assert lr_finder.suggestion() is None |
| 415 | + assert model.learning_rate == 0.123 # must remain unchanged because suggestion is not possible |
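For context, the sketch below illustrates the behaviour that `test_suggestion_not_enough_finite_points` exercises: non-finite losses are ignored and, when fewer than two finite points remain after the `skip_begin`/`skip_end` trimming, an error is logged and `None` is returned. The helper name `suggest_lr` and the gradient-based selection are assumptions for illustration only, not the library's `_LRFinder.suggestion` implementation; only the logged prefix asserted in the test is taken from the source.

```python
import logging

import torch

log = logging.getLogger(__name__)


def suggest_lr(lrs, losses, skip_begin=10, skip_end=1):
    """Return the lr at the steepest loss drop, or None if too few finite points remain.

    Illustrative sketch only; not the _LRFinder.suggestion implementation.
    """
    losses = torch.tensor(losses[skip_begin:-skip_end], dtype=torch.float)
    lrs = torch.tensor(lrs[skip_begin:-skip_end], dtype=torch.float)
    is_finite = torch.isfinite(losses)  # drop NaN/inf losses before suggesting
    if is_finite.sum() < 2:  # need at least two finite points to take a gradient
        # The test asserts only this prefix; the rest of the wording is illustrative.
        log.error("Failed to compute suggestion for learning rate: not enough finite points.")
        return None
    # pick the lr at the steepest (most negative) slope among the finite points
    gradients = torch.gradient(losses[is_finite])[0]
    return lrs[is_finite][gradients.argmin()].item()
```

With this shape, the parametrized cases map directly onto the two branches: inputs that leave fewer than two finite points (`expected_error=True`) hit the logging path and yield `None`, while the rest return a concrete learning rate.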