Commit d83ed9d

update
1 parent 2cec4b0 commit d83ed9d

2 files changed: 20 additions, 15 deletions


docs/source/common/debugging.rst

1 addition, 3 deletions

@@ -98,9 +98,7 @@ By default it only prints the top-level modules. If you want to show all submodu
 `'max_depth'` option:

 .. testcode::
-    from pytorch_lightning.callbacks import ModelSummary
-
-    trainer = Trainer(callbacks=[ModelSummary(max_depth=-1)])
+    trainer = Trainer(weights_summary="full")

 You can also display the intermediate input- and output sizes of all your layers by setting the
 ``example_input_array`` attribute in your LightningModule. It will print a table like this
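
For reference, both spellings in this hunk request a full-depth summary, and per the deprecation messages in the connector change below, the `weights_summary` argument is slated for removal in favor of the `ModelSummary` callback. A minimal sketch of the two equivalent configurations, assuming the v1.5-era API shown in this commit:

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import ModelSummary

    # Deprecated spelling, scheduled for removal in v1.7 per this commit:
    trainer = Trainer(weights_summary="full")

    # Callback spelling that replaces it; max_depth=-1 means no depth limit:
    trainer = Trainer(callbacks=[ModelSummary(max_depth=-1)])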

pytorch_lightning/trainer/connectors/callback_connector.py

19 additions, 12 deletions

@@ -118,9 +118,9 @@ def _configure_accumulated_gradients(
         if grad_accum_callback:
             if accumulate_grad_batches is not None:
                 raise MisconfigurationException(
-                    "You have set both `accumulate_grad_batches` and passed an instance of "
-                    "`GradientAccumulationScheduler` inside callbacks. Either remove `accumulate_grad_batches` "
-                    "from trainer or remove `GradientAccumulationScheduler` from callbacks list."
+                    "You have set both `accumulate_grad_batches` and passed an instance of"
+                    " `GradientAccumulationScheduler` inside callbacks. Either remove `accumulate_grad_batches`"
+                    " from trainer or remove `GradientAccumulationScheduler` from callbacks list."
                 )
             grad_accum_callback = grad_accum_callback[0]
         else:
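
This hunk only moves the separator spaces from the end of each string fragment to the start of the next one. Python joins adjacent string literals at compile time, so the rendered message is unchanged; a minimal sketch of why the two spellings are equivalent:

    # Adjacent literals are concatenated as-is, so where the space lives
    # does not affect the final message; leading spaces just make a missing
    # separator easier to spot at the start of each continuation line.
    trailing = "You have set both " "options."
    leading = "You have set both" " options."
    assert trailing == leading == "You have set both options."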
@@ -162,19 +162,27 @@ def _configure_checkpoint_callbacks(self, checkpoint_callback: bool) -> None:
     def _configure_model_summary_callback(
         self, enable_model_summary: bool, weights_summary: Optional[str] = None
     ) -> None:
-        if not enable_model_summary:
-            return
-        if any(isinstance(cb, ModelSummary) for cb in self.trainer.callbacks):
-            return
         if weights_summary is None:
             rank_zero_deprecation(
                 "Setting `Trainer(weights_summary=None)` is deprecated in v1.5 and will be removed"
                 " in v1.7. Please set `Trainer(enable_model_summary=False) instead."
             )
             return
-        # Prior default in the Trainer for `weights_summary` which we explicitly check here
-        # to preserve backwards compatibility
-        if weights_summary != "top":
+        if not enable_model_summary:
+            return
+
+        model_summary_cbs = [type(cb) for cb in self.trainer.callbacks if isinstance(cb, ModelSummary)]
+        if model_summary_cbs:
+            rank_zero_info(
+                f"Trainer already configured with model summary callbacks: {model_summary_cbs}."
+                " Skipping setting a default `ModelSummary` callback."
+            )
+            return
+
+        if weights_summary == "top":
+            # special case the default value for weights_summary to preserve backward compatibility
+            max_depth = 1
+        else:
             rank_zero_deprecation(
                 f"Setting `Trainer(weights_summary={weights_summary})` is deprecated in v1.5 and will be removed"
                 " in v1.7. Please pass `pytorch_lightning.callbacks.model_summary.ModelSummary` with"
@@ -186,8 +194,7 @@ def _configure_model_summary_callback(
                 f" but got {weights_summary}",
             )
             max_depth = ModelSummaryMode.get_max_depth(weights_summary)
-        else:
-            max_depth = 1
+
         if self.trainer._progress_bar_callback is not None and isinstance(
             self.trainer._progress_bar_callback, RichProgressBar
         ):
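
Taken together, the last two hunks reorder `_configure_model_summary_callback`: the `weights_summary` deprecation check now runs before the `enable_model_summary` early return, a user-supplied `ModelSummary` is reported via `rank_zero_info` instead of being skipped silently, and the `max_depth` selection collapses into a single if/else. A minimal usage sketch of the resulting behavior, assuming the v1.5-era API shown in this commit (log text abbreviated):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import ModelSummary

    # Default path: weights_summary == "top" is special-cased to max_depth = 1,
    # so a depth-1 summary callback is attached with no deprecation warning.
    trainer = Trainer()

    # A user-supplied ModelSummary now logs "Trainer already configured with
    # model summary callbacks: ..." and skips the default callback, instead
    # of returning silently as before.
    trainer = Trainer(callbacks=[ModelSummary(max_depth=2)])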
