A warning that may come from legacy code #2024

@DKandrew

Description

🐛 Bug

I receive this warning when running a simple Lightning module. Is it related to the recent update? Maybe the warning is related to the old hyperparameters design?

[path to]/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:23: UserWarning: Did not find hyperparameters at model hparams. Saving checkpoint without hyperparameters.
  warnings.warn(*args, **kwargs)
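
For context, the warning fires because the checkpoint-saving code looks for an hparams attribute on the model, and MyNet below never sets one. Here is a minimal sketch of how that attribute is typically exposed under Lightning 0.7.x conventions; the MyNetWithHparams name, the num_classes field, and the values are illustrative assumptions, not part of the original report:

from argparse import Namespace

import torch
from pytorch_lightning import LightningModule

class MyNetWithHparams(LightningModule):
    def __init__(self, hparams):
        super().__init__()
        # Assumption: exposing the constructor arguments on self.hparams
        # gives the checkpoint code the attribute the warning says is missing.
        self.hparams = hparams
        self.l1 = torch.nn.Linear(28 * 28, hparams.num_classes)

    def forward(self, x):
        return torch.relu(self.l1(x.view(x.size(0), -1)))

model = MyNetWithHparams(Namespace(lr=0.001, num_classes=10))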

To Reproduce

Use the following code sample:

import os
import torch
from torch.nn import functional as F
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
from torchvision import transforms
from pytorch_lightning import LightningModule
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.tensorboard import SummaryWriter

class MyNet(LightningModule):
    def __init__(self):
        super().__init__()
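        # note: no self.hparams is set here, which is what the warning points at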
        self.l1 = torch.nn.Linear(28 * 28, 10)

    def forward(self, x):
        return torch.relu(self.l1(x.view(x.size(0), -1)))

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.001)

    def train_dataloader(self):
        dataset = MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor())
        loader = DataLoader(dataset, batch_size=32, num_workers=4, shuffle=True)
        return loader

    def test_dataloader(self):
        # use the held-out test split for evaluation
        dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
        loader = DataLoader(dataset, batch_size=32, num_workers=4, shuffle=False)
        return loader

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        tensorboard_logs = {'train_loss': loss}
        return {'loss': loss, 'log': tensorboard_logs}

    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        tensorboard_logs = {'test_loss': loss}
        return {'loss': loss, 'log': tensorboard_logs}

    def test_epoch_end(self, output):
        with SummaryWriter(self.logger.log_dir) as w:
            for i in range(5):
                w.add_hparams({'lr': 0.1 * i, 'bsize': i}, {'hparam/accuracy': 10 * i, 'hparam/loss': 10 * i})
        return {}


dir_path = "."
tb_logger = TensorBoardLogger(dir_path, name='run2')
model = MyNet()
trainer = Trainer(gpus=1, max_epochs=1, logger=tb_logger)
trainer.fit(model)
trainer.test()

Environment

  • CUDA:
    • GPU:
      • Fastest GeForce In The Moon
    • available: True
    • version: 10.2
  • Packages:
    • numpy: 1.18.1
    • pyTorch_debug: False
    • pyTorch_version: 1.5.0
    • pytorch-lightning: 0.7.6
    • tensorboard: 2.2.1
    • tqdm: 4.46.0
  • System:

Labels

  • bug: Something isn't working
  • help wanted: Open to be worked on
  • question: Further information is requested
