|
153 | 153 | # - ``DDP_SHARDED`` |
154 | 154 | # - ``DDP_SHARDED_SPAWN`` |
155 | 155 | # |
| 156 | +# Custom or officially unsupported strategies can be used by setting [FinetuningScheduler.allow_untested](https://finetuning-scheduler.readthedocs.io/en/stable/api/finetuning_scheduler.fts.html?highlight=allow_untested#finetuning_scheduler.fts.FinetuningScheduler.params.allow_untested) to ``True``. |
| 157 | +# Note that most currently unsupported strategies remain unsupported because they require varying degrees of modification to be compatible (e.g., ``deepspeed`` requires an ``add_param_group`` method, and ``tpu_spawn`` requires an override of the current broadcast method to include python objects). |
156 | 158 | # </div> |
157 | 159 |
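As a quick illustration of the ``allow_untested`` flag referenced above (not part of the tutorial's own code), opting into an unsupported strategy only requires constructing the callback with that flag; the strategy name and modules below are placeholders:

```python
# Minimal sketch: opting into an officially unsupported/untested strategy.
# The strategy string and LightningModule/DataModule are placeholders, not tutorial code.
import pytorch_lightning as pl
from finetuning_scheduler import FinetuningScheduler

trainer = pl.Trainer(
    strategy="my_custom_strategy",  # hypothetical custom/unsupported strategy
    callbacks=[FinetuningScheduler(allow_untested=True)],  # bypass the supported-strategy check
)
# trainer.fit(MyLightningModule(), datamodule=MyDataModule())  # placeholders
```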
|
158 | 160 | # %% [markdown] |
@@ -387,9 +389,12 @@ def training_step(self, batch, batch_idx): |
387 | 389 | self.log("train_loss", loss) |
388 | 390 | return loss |
389 | 391 |
|
390 | | - def training_epoch_end(self, outputs: List[Any]) -> None: |
| 392 | + def on_train_epoch_start(self) -> None: |
391 | 393 | if self.finetuningscheduler_callback: |
392 | | - self.log("finetuning_schedule_depth", float(self.finetuningscheduler_callback.curr_depth)) |
| 394 | + self.logger.log_metrics( |
| 395 | + metrics={"finetuning_schedule_depth": float(self.finetuningscheduler_callback.curr_depth)}, |
| 396 | + step=self.global_step, |
| 397 | + ) |
393 | 398 |
|
394 | 399 | def validation_step(self, batch, batch_idx, dataloader_idx=0): |
395 | 400 | outputs = self(**batch) |
@@ -524,6 +529,8 @@ def configure_optimizers(self): |
524 | 529 | # used in other pytorch-lightning tutorials) also work with FinetuningScheduler. Though the LR scheduler is theoretically |
525 | 530 | # justified [(Loshchilov & Hutter, 2016)](#f4), the particular values provided here are primarily empirically driven. |
526 | 531 | # |
| 532 | +# [FinetuningScheduler](https://finetuning-scheduler.readthedocs.io/en/stable/api/finetuning_scheduler.fts.html#finetuning_scheduler.fts.FinetuningScheduler) also supports LR scheduler |
| 533 | +# reinitialization in both explicit and implicit finetuning schedule modes. See the [advanced usage documentation](https://finetuning-scheduler.readthedocs.io/en/stable/advanced/lr_scheduler_reinitialization.html) for an explanation and demonstration of the extension's support for more complex requirements. |
527 | 534 | # </div> |
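To make the reinitialization note above more concrete, below is a rough sketch of a phase-level LR scheduler reinitialization directive in an explicit schedule. The key names (``new_lr_scheduler``, ``lr_scheduler_init``, ``pl_lrs_cfg``) reflect the linked advanced usage documentation as best understood, and the parameter names and values are placeholders, so treat this as illustrative rather than authoritative:

```python
# Illustrative only: an explicit finetuning schedule in which phase 1 also
# reinitializes the LR scheduler. Verify key names against the linked advanced
# usage docs; the parameter names and values below are placeholders.
ft_schedule_yaml = """\
0:
  params:
    - model.classifier.bias
    - model.classifier.weight
1:
  params:
    - model.pooler.dense.bias
    - model.pooler.dense.weight
  new_lr_scheduler:
    lr_scheduler_init:
      class_path: torch.optim.lr_scheduler.StepLR
      init_args:
        step_size: 1
        gamma: 0.7
    pl_lrs_cfg:
      interval: epoch
      name: Explicit_Reinit_LR_Scheduler
"""
with open("explicit_reinit_schedule.yaml", "w") as f:
    f.write(ft_schedule_yaml)

# The schedule file can then be supplied to the callback, e.g.:
# FinetuningScheduler(ft_schedule="explicit_reinit_schedule.yaml")
```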
528 | 535 |
|
529 | 536 |
|
|