
AttributeError: 'NoneType' object has no attribute 'param_groups' #7698

@yllgl

Description

import torch
import torch.nn as nn
import pytorch_lightning as pl
from torch.utils.data import DataLoader, TensorDataset

class myModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # use manual optimization
        self.automatic_optimization = False
        self.model = nn.Linear(1, 1)

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        opt.zero_grad()
        x, y = batch
        x = x.float()
        y = y.float()
        x = self.model(x)
        loss_fn = nn.MSELoss()
        loss = loss_fn(x, y)
        self.manual_backward(loss)  # the AttributeError below is raised here
        opt.step()  # required with manual optimization; never reached because of the error
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.model.parameters())

    def train_dataloader(self):
        num_sample = 3
        x = torch.randn(num_sample, 1)
        y = torch.randn(num_sample, 1)
        return DataLoader(TensorDataset(x, y), batch_size=1)

mymodel = myModel()
trainer = pl.Trainer(gradient_clip_val=10)
trainer.fit(mymodel)

The error is the following:
AttributeError                            Traceback (most recent call last)

<ipython-input-11-7e84802c7444> in <module>
     24 mymodel = myModel()
     25 trainer = pl.Trainer(gradient_clip_val=10)
---> 26 trainer.fit(mymodel)

D:\Python37\lib\site-packages\pytorch_lightning\trainer\trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
    456         )
    457 
--> 458         self._run(model)
    459 
    460         assert self.state.stopped

D:\Python37\lib\site-packages\pytorch_lightning\trainer\trainer.py in _run(self, model)
    754 
    755         # dispatch `start_training` or `start_evaluating` or `start_predicting`
--> 756         self.dispatch()
    757 
    758         # plugin will finalized fitting (e.g. ddp_spawn will load trained model)

D:\Python37\lib\site-packages\pytorch_lightning\trainer\trainer.py in dispatch(self)
    795             self.accelerator.start_predicting(self)
    796         else:
--> 797             self.accelerator.start_training(self)
    798 
    799     def run_stage(self):

D:\Python37\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in start_training(self, trainer)
     94 
     95     def start_training(self, trainer: 'pl.Trainer') -> None:
---> 96         self.training_type_plugin.start_training(trainer)
     97 
     98     def start_evaluating(self, trainer: 'pl.Trainer') -> None:

D:\Python37\lib\site-packages\pytorch_lightning\plugins\training_type\training_type_plugin.py in start_training(self, trainer)
    142     def start_training(self, trainer: 'pl.Trainer') -> None:
    143         # double dispatch to initiate the training loop
--> 144         self._results = trainer.run_stage()
    145 
    146     def start_evaluating(self, trainer: 'pl.Trainer') -> None:

D:\Python37\lib\site-packages\pytorch_lightning\trainer\trainer.py in run_stage(self)
    805         if self.predicting:
    806             return self.run_predict()
--> 807         return self.run_train()
    808 
    809     def _pre_training_routine(self):

D:\Python37\lib\site-packages\pytorch_lightning\trainer\trainer.py in run_train(self)
    867                 with self.profiler.profile("run_training_epoch"):
    868                     # run train epoch
--> 869                     self.train_loop.run_training_epoch()
    870 
    871                 if self.max_steps and self.max_steps <= self.global_step:

D:\Python37\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_epoch(self)
    487             # ------------------------------------
    488             with self.trainer.profiler.profile("run_training_batch"):
--> 489                 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
    490 
    491             # when returning -1 from train_step, we end epoch early

D:\Python37\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
    731                     else:
    732                         self._curr_step_result = self.training_step(
--> 733                             split_batch, batch_idx, opt_idx, self.trainer.hiddens
    734                         )
    735 

D:\Python37\lib\site-packages\pytorch_lightning\trainer\training_loop.py in training_step(self, split_batch, batch_idx, opt_idx, hiddens)
    278             model_ref._results = Result()
    279             with self.trainer.profiler.profile("training_step"):
--> 280                 training_step_output = self.trainer.accelerator.training_step(args)
    281                 self.trainer.accelerator.post_training_step()
    282 

D:\Python37\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in training_step(self, args)
    202 
    203         with self.precision_plugin.train_step_context(), self.training_type_plugin.train_step_context():
--> 204             return self.training_type_plugin.training_step(*args)
    205 
    206     def post_training_step(self) -> None:

D:\Python37\lib\site-packages\pytorch_lightning\plugins\training_type\training_type_plugin.py in training_step(self, *args, **kwargs)
    153 
    154     def training_step(self, *args, **kwargs):
--> 155         return self.lightning_module.training_step(*args, **kwargs)
    156 
    157     def post_training_step(self):

<ipython-input-11-7e84802c7444> in training_step(self, batch, batch_idx)
     13         loss_fn=nn.MSELoss()
     14         loss = loss_fn(x,y)
---> 15         self.manual_backward(loss)
     16         return loss
     17     def configure_optimizers(self):

D:\Python37\lib\site-packages\pytorch_lightning\core\lightning.py in manual_backward(self, loss, optimizer, *args, **kwargs)
   1250         # backward
   1251         self._running_manual_backward = True
-> 1252         self.trainer.train_loop.backward(loss, optimizer=None, opt_idx=None, *args, **kwargs)
   1253         self._running_manual_backward = False
   1254 

D:\Python37\lib\site-packages\pytorch_lightning\trainer\training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
    866         if not self.should_accumulate():
    867             # track gradients
--> 868             self.track_and_norm_grad(optimizer=optimizer)
    869 
    870     def update_train_loop_lr_schedulers(self, monitor_metrics=None):

D:\Python37\lib\site-packages\pytorch_lightning\trainer\training_loop.py in track_and_norm_grad(self, optimizer)
    445         # clip gradients
    446         self.trainer.accelerator.clip_gradients(
--> 447             optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm
    448         )
    449         self._cur_grad_norm_dict = grad_norm_dic

D:\Python37\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in clip_gradients(self, optimizer, clip_val, gradient_clip_algorithm)
    352             clip_val,
    353             gradient_clip_algorithm=gradient_clip_algorithm,
--> 354             model=self.model,
    355         )
    356 

D:\Python37\lib\site-packages\pytorch_lightning\plugins\precision\precision_plugin.py in clip_gradients(self, optimizer, clip_val, gradient_clip_algorithm, model)
    119         elif gradient_clip_algorithm == GradClipAlgorithmType.NORM:
    120             # TODO: there should be a mechanism to set `norm_type`
--> 121             self.clip_grad_by_norm(optimizer, clip_val, eps=self.EPSILON)
    122 
    123     def clip_grad_by_value(self, optimizer: Optimizer, clip_val: Union[int, float]) -> None:

D:\Python37\lib\site-packages\pytorch_lightning\plugins\precision\precision_plugin.py in clip_grad_by_norm(self, optimizer, clip_val, norm_type, eps)
    133 
    134         # TODO: replace this with torch.nn.clip_grad_norm_
--> 135         parameters = list(filter(lambda p: p.grad is not None, parameters))
    136         device = parameters[0].device
    137 

D:\Python37\lib\site-packages\pytorch_lightning\plugins\precision\precision_plugin.py in master_params(self, optimizer)
     40         Maybe different in other precision plugins.
     41         """
---> 42         for group in optimizer.param_groups:
     43             for p in group["params"]:
     44                 yield p

AttributeError: 'NoneType' object has no attribute 'param_groups'

The error occurs when gradient_clip_val is set on the Trainer and self.automatic_optimization = False.
Is this expected?
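
A possible workaround sketch until this is resolved (not an official API, just manual optimization combined with the standard torch.nn.utils.clip_grad_norm_ utility): leave gradient_clip_val unset on the Trainer and clip the gradients yourself inside training_step, between manual_backward and opt.step(). The clip value of 10 below only mirrors the example above.

import torch
import torch.nn as nn

# drop-in replacement for myModel.training_step from the snippet above
def training_step(self, batch, batch_idx):
    opt = self.optimizers()
    opt.zero_grad()
    x, y = batch
    loss = nn.MSELoss()(self.model(x.float()), y.float())
    self.manual_backward(loss)
    # clip manually instead of relying on Trainer(gradient_clip_val=...)
    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10)
    opt.step()
    return loss

With the Trainer constructed without gradient_clip_val, the clipping path shown in the traceback is not taken, which is consistent with the observation that the error only appears when it is set.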

Labels: bug (Something isn't working), help wanted (Open to be worked on), waiting on author (Waiting on user action, correction, or update)
