@@ -78,7 +78,7 @@ def progress_bar_metrics(self, progress_bar_metrics: Dict) -> None:
 
     @property
     def cached_results(self) -> Union[EpochResultStore, None]:
-        return self._cached_results.get(self.trainer._running_stage)  # type: ignore
+        return self._cached_results.get(self.trainer._running_stage)
 
     def get_metrics(self, key: str) -> Dict:
         metrics_holder = getattr(self, f"_{key}", None)
@@ -125,8 +125,6 @@ def cache_logged_metrics(self):
     def on_trainer_init(self, logger, flush_logs_every_n_steps: int, log_every_n_steps: int, move_metrics_to_cpu: bool):
         # logging
         self.configure_logger(logger)
-        # todo: IDE is complaining, these shall be initialized in the Trainer init at leas as placeholders
-        # and assign here the desired value
         self.trainer.flush_logs_every_n_steps = flush_logs_every_n_steps
         self.trainer.log_every_n_steps = log_every_n_steps
         self.trainer.move_metrics_to_cpu = move_metrics_to_cpu
@@ -189,9 +187,6 @@ def cache_training_step_metrics(self, opt_closure_result):
             batch_log_metrics = opt_closure_result.training_step_output.log_metrics
             logged_metrics_tmp.update(batch_log_metrics)
 
-            callback_metrics = opt_closure_result.training_step_output.callback_metrics
-            callback_metrics_tmp.update(callback_metrics)
-
             batch_pbar_metrics = opt_closure_result.training_step_output.pbar_on_batch_end
             pbar_metrics_tmp.update(batch_pbar_metrics)
 
@@ -214,9 +209,6 @@ def log_metrics(self, metrics, grad_norm_dic, step=None):
             metrics (dict): Metric values
             grad_norm_dic (dict): Gradient norms
             step (int): Step for which metrics should be logged. Default value corresponds to `self.global_step`
-            log_train_step_metrics (bool): Used to track if `log_metrics` function is being called in during training
-                steps. In training steps, we will log metrics on step: `total_nb_idx` (for accumulated gradients)
-                and global_step for the rest.
         """
         # add gpu memory
         if self.trainer._device_type == DeviceType.GPU and self.log_gpu_memory:
@@ -350,27 +342,6 @@ def _track_callback_metrics(self, eval_results):
             if self.trainer.testing:
                 self.trainer.logger_connector.evaluation_callback_metrics.update(flat)
 
-    def __process_eval_epoch_end_results_and_log_legacy_update(self, prog_bar_metrics, log_metrics, callback_metrics):
-        # eval loop returns all metrics
-        dataloader_result_metrics = {**prog_bar_metrics, **log_metrics, **callback_metrics}
-
-        # add metrics to prog bar
-        self.trainer.logger_connector.add_progress_bar_metrics(prog_bar_metrics)
-
-        # log metrics
-        if len(log_metrics) > 0:
-            self.trainer.logger_connector.log_metrics(log_metrics, {})
-
-        # track metrics for callbacks (all prog bar, logged and callback metrics)
-        callback_metrics.update(log_metrics)
-        callback_metrics.update(prog_bar_metrics)
-        self.trainer.logger_connector.callback_metrics.update(callback_metrics)
-        if self.trainer.testing:
-            self.trainer.logger_connector.evaluation_callback_metrics.update(callback_metrics)
-
-        if len(dataloader_result_metrics) > 0:
-            self.eval_loop_results.append(dataloader_result_metrics)
-
     def __process_eval_epoch_end_results_and_log_legacy(self, eval_results):
         if self.trainer.running_sanity_check:
             return
@@ -381,21 +352,21 @@ def __process_eval_epoch_end_results_and_log_legacy(self, eval_results):
         if not isinstance(eval_results, list):
             eval_results = [eval_results]
 
-        num_loaders: int = self.trainer.evaluation_loop.num_dataloaders
-        prog_bar_metrics, log_metrics, callback_metrics = {}, {}, {}
-
         for result_idx, result in enumerate(eval_results):
-            _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.trainer.process_dict_result(result)
+            _, prog_bar_metrics, log_metrics, _ = self.trainer.process_dict_result(result)
+
+            # eval loop returns all metrics
+            dataloader_result_metrics = {**prog_bar_metrics, **log_metrics}
+
+            # add metrics to prog bar
+            self.trainer.logger_connector.add_progress_bar_metrics(prog_bar_metrics)
 
-            if num_loaders > 1:
-                self.__process_eval_epoch_end_results_and_log_legacy_update(
-                    prog_bar_metrics, log_metrics, callback_metrics
-                )
+            # log metrics
+            if len(log_metrics) > 0:
+                self.trainer.logger_connector.log_metrics(log_metrics, {})
 
-            if num_loaders == 1:
-                self.__process_eval_epoch_end_results_and_log_legacy_update(
-                    prog_bar_metrics, log_metrics, callback_metrics
-                )
+            if len(dataloader_result_metrics) > 0:
+                self.eval_loop_results.append(dataloader_result_metrics)
 
     def on_train_epoch_end(self):
         # inform cached logger connector epoch finished
@@ -448,10 +419,9 @@ def log_train_epoch_end_metrics(
 
         # TODO: deprecate 1.0
         else:
-            out = self.__run_legacy_training_epoch_end(
-                num_optimizers, epoch_output, model, is_result_obj, epoch_callback_metrics
+            epoch_log_metrics, epoch_progress_bar_metrics = self.__run_legacy_training_epoch_end(
+                num_optimizers, epoch_output, model, is_result_obj
             )
-            epoch_log_metrics, epoch_progress_bar_metrics, epoch_callback_metrics = out
 
         # it will perform reduction over epoch and return log metrics
         cached_epoch_log_metrics = self.cached_results.get_epoch_log_metrics()
@@ -503,9 +473,7 @@ def training_epoch_end(self, model, epoch_output, num_optimizers):
         # capture logging
         self.trainer.logger_connector.cache_logged_metrics()
 
-    def __run_legacy_training_epoch_end(
-        self, num_optimizers, epoch_output, model, is_result_obj, epoch_callback_metrics
-    ):
+    def __run_legacy_training_epoch_end(self, num_optimizers, epoch_output, model, is_result_obj):
 
         epoch_log_metrics = {}
         epoch_progress_bar_metrics = {}
@@ -536,15 +504,14 @@ def __run_legacy_training_epoch_end(
                 _processed_outputs = self.trainer.process_dict_result(epoch_output)
                 epoch_progress_bar_metrics = _processed_outputs[1]
                 epoch_log_metrics = _processed_outputs[2]
-                epoch_callback_metrics = _processed_outputs[3]
 
         # --------------------------
         # Structured Result (auto epoch end)
         # --------------------------
         elif is_result_obj:
             epoch_log_metrics, epoch_progress_bar_metrics = self.__auto_reduce_results_on_epoch_end(epoch_output)
 
-        return epoch_log_metrics, epoch_progress_bar_metrics, epoch_callback_metrics
+        return epoch_log_metrics, epoch_progress_bar_metrics
 
     def __auto_reduce_results_on_epoch_end(self, epoch_output):
         epoch_log_metrics = {}