Skip to content

Commit a787df5

Browse files
committed
Revert "debug"
This reverts commit a6e6101. Revert "debug" This reverts commit 5ddeaec. debug debug Revert "debug" This reverts commit 605be74. Revert "Revert "debug"" This reverts commit a7612d5. debug x x
1 parent 88ca10d commit a787df5

File tree

2 files changed

+3
-6
lines changed

2 files changed

+3
-6
lines changed

pytorch_lightning/loops/epoch/evaluation_epoch_loop.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -100,9 +100,6 @@ def advance(
100100
if batch is None:
101101
raise StopIteration
102102

103-
assert self.num_dataloaders is not None
104-
self.trainer.logger_connector.on_evaluation_batch_start(batch, batch_idx, dataloader_idx, self.num_dataloaders)
105-
106103
with self.trainer.profiler.profile("evaluation_batch_to_device"):
107104
batch = self.trainer.accelerator.batch_to_device(batch, dataloader_idx=dataloader_idx)
108105

@@ -175,6 +172,9 @@ def on_evaluation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx:
175172
"""
176173
self.trainer.logger_connector.on_batch_start()
177174

175+
assert self.num_dataloaders is not None
176+
self.trainer.logger_connector.on_evaluation_batch_start(batch, batch_idx, dataloader_idx, self.num_dataloaders)
177+
178178
if self.trainer.testing:
179179
self.trainer.call_hook("on_test_batch_start", batch, batch_idx, dataloader_idx)
180180
else:

pytorch_lightning/loops/epoch/training_epoch_loop.py

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -115,13 +115,11 @@ def advance(self, dataloader_iter: Iterator, **kwargs: Any) -> None:
115115
# TRAINING_STEP + TRAINING_STEP_END
116116
# ------------------------------------
117117
with self.trainer.profiler.profile("training_batch_to_device"):
118-
print("before run", self.iteration_count, torch.cuda.memory_allocated())
119118
batch = self.trainer.accelerator.batch_to_device(batch, dataloader_idx=self._dataloader_idx)
120119

121120
with self.trainer.profiler.profile("run_training_batch"):
122121
batch_output = self.batch_loop.run(batch, self.iteration_count, self._dataloader_idx)
123122
self.batches_seen += 1
124-
print("after run", self.iteration_count, torch.cuda.memory_allocated())
125123

126124
# when returning -1 from train_step, we end epoch early
127125
if batch_output.signal == -1:
@@ -157,7 +155,6 @@ def on_advance_end(self):
157155
Raises:
158156
StopIteration: if :attr:`done` evaluates to ``True`` to finish this epoch
159157
"""
160-
print("advance end", self.iteration_count, torch.cuda.memory_allocated())
161158
# -----------------------------------------
162159
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
163160
# -----------------------------------------

0 commit comments

Comments (0)