From cc0c7957a8aee694e5f5553c62661d18d6701708 Mon Sep 17 00:00:00 2001
From: ananthsub
Date: Mon, 7 Jun 2021 22:30:25 -0700
Subject: [PATCH] Convert tmpdir test fixture to str after fsspec changes

---
 requirements.txt                              |  2 +-
 tests/callbacks/test_early_stopping.py        |  1 +
 .../test_checkpoint_callback_frequency.py     |  1 +
 tests/checkpointing/test_model_checkpoint.py  | 11 ++++++++++-
 tests/models/test_cpu.py                      |  4 ++++
 tests/trainer/test_trainer.py                 |  2 ++
 6 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 4ad6d15f158df..be7367c5e30fb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,7 +5,7 @@ torch>=1.4
 future>=0.17.1 # required for builtins in setup.py
 tqdm>=4.41.0
 PyYAML>=5.1,<=5.4.1
-fsspec[http]>=2021.05.0, !=2021.06.0
+fsspec[http]>=2021.05.0
 tensorboard>=2.2.0, !=2.5.0 # 2.5.0 GPU CI error: 'Couldn't build proto file into descriptor pool!'
 torchmetrics>=0.2.0
 pyDeprecate==0.3.0
diff --git a/tests/callbacks/test_early_stopping.py b/tests/callbacks/test_early_stopping.py
index 7d303e6ed00d6..3860d2623f11d 100644
--- a/tests/callbacks/test_early_stopping.py
+++ b/tests/callbacks/test_early_stopping.py
@@ -56,6 +56,7 @@ def test_resume_early_stopping_from_checkpoint(tmpdir):
     https://github.com/PyTorchLightning/pytorch-lightning/issues/1464
     https://github.com/PyTorchLightning/pytorch-lightning/issues/1463
     """
+    tmpdir = str(tmpdir)
     seed_everything(42)
     model = ClassificationModel()
     dm = ClassifDataModule()
diff --git a/tests/checkpointing/test_checkpoint_callback_frequency.py b/tests/checkpointing/test_checkpoint_callback_frequency.py
index 9fdd69dba7a9a..d361f1ad24427 100644
--- a/tests/checkpointing/test_checkpoint_callback_frequency.py
+++ b/tests/checkpointing/test_checkpoint_callback_frequency.py
@@ -91,6 +91,7 @@ def training_step(self, batch, batch_idx):
             self.last_coeff *= 0.999
             return loss

+    tmpdir = str(tmpdir)
     model = TestModel()
     trainer = Trainer(
         callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor='my_loss', save_top_k=k)],
diff --git a/tests/checkpointing/test_model_checkpoint.py b/tests/checkpointing/test_model_checkpoint.py
index 62b9d8364b01c..e4ce0a20687ed 100644
--- a/tests/checkpointing/test_model_checkpoint.py
+++ b/tests/checkpointing/test_model_checkpoint.py
@@ -325,6 +325,7 @@ def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int):
 @pytest.mark.parametrize('save_top_k', [-1, 0, 1, 2])
 def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int):
     """ Test that None in checkpoint callback is valid and that chkp_path is set correctly """
+    tmpdir = str(tmpdir)
     tutils.reset_seed()
     model = LogInTwoMethods()

@@ -480,7 +481,7 @@ def test_model_checkpoint_file_extension(tmpdir):
     """
     Test ModelCheckpoint with different file extension.
     """
-
+    tmpdir = str(tmpdir)
     model = LogInTwoMethods()
     model_checkpoint = ModelCheckpointExtensionTest(
         monitor='early_stop_on',
@@ -535,6 +536,7 @@ def test_invalid_top_k(tmpdir):

 def test_none_monitor_top_k(tmpdir):
     """ Test that a warning appears for positive top_k with monitor=None. """
""" + tmpdir = str(tmpdir) with pytest.raises( MisconfigurationException, match=r'ModelCheckpoint\(save_top_k=3, monitor=None\) is not a valid*' ): @@ -657,6 +659,7 @@ def test_model_checkpoint_period(tmpdir, period: int): @pytest.mark.parametrize("every_n_val_epochs", list(range(4))) def test_model_checkpoint_every_n_val_epochs(tmpdir, every_n_val_epochs): + tmpdir = str(tmpdir) model = LogInTwoMethods() epochs = 5 checkpoint_callback = ModelCheckpoint( @@ -681,6 +684,7 @@ def test_model_checkpoint_every_n_val_epochs(tmpdir, every_n_val_epochs): @pytest.mark.parametrize("every_n_val_epochs", list(range(4))) def test_model_checkpoint_every_n_val_epochs_and_period(tmpdir, every_n_val_epochs): """ Tests that if period is set, it takes precedence over every_n_val_epochs for backwards compatibility. """ + tmpdir = str(tmpdir) model = LogInTwoMethods() epochs = 5 checkpoint_callback = ModelCheckpoint( @@ -896,6 +900,7 @@ def test_model_checkpoint_save_last_warning( def test_model_checkpoint_save_last_checkpoint_contents(tmpdir): """ Tests that the save_last checkpoint contains the latest information. """ + tmpdir = str(tmpdir) seed_everything(100) model = LogInTwoMethods() num_epochs = 3 @@ -931,6 +936,7 @@ def test_model_checkpoint_save_last_checkpoint_contents(tmpdir): @mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"}) @pytest.mark.parametrize('mode', ['min', 'max']) def test_checkpointing_with_nan_as_first(tmpdir, mode: int): + tmpdir = str(tmpdir) monitor = [float('nan')] monitor += [5, 7, 8] if mode == 'max' else [8, 7, 5] @@ -1150,6 +1156,7 @@ def test_val_check_interval_checkpoint_files(tmpdir): def test_current_score(tmpdir): """ Check that the current_score value is correct and was saved """ + tmpdir = str(tmpdir) class TestModel(BoringModel): @@ -1183,6 +1190,7 @@ def training_step(self, *args): @pytest.mark.parametrize("mode", ["min", "max"]) def test_current_score_when_nan(tmpdir, mode: str): """ Check that ModelCheckpoint handles NaN values correctly """ + tmpdir = str(tmpdir) class TestModel(BoringModel): @@ -1213,6 +1221,7 @@ def training_step(self, *args): @pytest.mark.parametrize("hparams_type", [dict, Container]) def test_hparams_type(tmpdir, hparams_type): + tmpdir = str(tmpdir) class TestModel(BoringModel): diff --git a/tests/models/test_cpu.py b/tests/models/test_cpu.py index b54e0d091bd16..682a87b348375 100644 --- a/tests/models/test_cpu.py +++ b/tests/models/test_cpu.py @@ -94,6 +94,7 @@ def on_train_epoch_start(self, trainer, model): def test_early_stopping_cpu_model(tmpdir): + tmpdir = str(tmpdir) class ModelTrainVal(BoringModel): @@ -128,6 +129,7 @@ def validation_step(self, *args, **kwargs): def test_multi_cpu_model_ddp(tmpdir): """Make sure DDP works.""" tutils.set_random_master_port() + tmpdir = str(tmpdir) trainer_options = dict( default_root_dir=tmpdir, @@ -295,6 +297,7 @@ def test_simple_cpu(tmpdir): def test_cpu_model(tmpdir): """Make sure model trains on CPU.""" + tmpdir = str(tmpdir) trainer_options = dict( default_root_dir=tmpdir, progress_bar_refresh_rate=0, max_epochs=1, limit_train_batches=4, limit_val_batches=4 ) @@ -305,6 +308,7 @@ def test_cpu_model(tmpdir): def test_all_features_cpu_model(tmpdir): """Test each of the trainer options.""" + tmpdir = str(tmpdir) trainer_options = dict( default_root_dir=tmpdir, gradient_clip_val=1.0, diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index a8567db70d0a6..340e7259f4702 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -318,6 +318,7 @@ def 
 )
 def test_model_checkpoint_options(tmpdir, save_top_k, save_last, expected_files):
     """Test ModelCheckpoint options."""
+    tmpdir = str(tmpdir)

     def mock_save_function(filepath, *args):
         open(filepath, "a").close()
@@ -1836,6 +1837,7 @@ def validation_epoch_end(self, outputs) -> None:
 @RunIf(skip_windows=True)
 def test_fit_test_synchronization(tmpdir):
     """Test that the trainer synchronizes processes before returning control back to the caller. """
+    tmpdir = str(tmpdir)
     tutils.set_random_master_port()
     model = TestDummyModelForCheckpoint()
     checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor='x', mode='min', save_top_k=1)
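
Reviewer context on the repeated one-liner: pytest's `tmpdir` fixture yields
a `py.path.local` object, not a plain `str`. With the `!=2021.06.0` exclusion
dropped from requirements.txt, the fixture value now reaches newer fsspec
releases (through arguments like `ModelCheckpoint(dirpath=...)` and
`Trainer(default_root_dir=...)`), so each affected test coerces it up front.
Below is a minimal sketch of the pattern, assuming fsspec is installed;
`write_marker` and `marker.txt` are illustrative stand-ins, not code from
this patch:

    # Illustrative only: mimics how a fixture path eventually reaches fsspec.
    import fsspec

    def write_marker(dirpath) -> None:
        # Coerce early: downstream fsspec-based path handling is only
        # guaranteed to accept plain strings, not py.path.local objects.
        dirpath = str(dirpath)
        with fsspec.open(f"{dirpath}/marker.txt", "w") as f:
            f.write("ok")

    def test_write_marker(tmpdir):
        tmpdir = str(tmpdir)  # the same coercion this patch adds to each test
        write_marker(tmpdir)

Coercing once at the top of the test keeps the rest of the test body
unchanged, which is why the patch repeats the identical line rather than
threading `str(...)` through every call site.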