@@ -700,232 +700,6 @@ def collate_tensors(items: Union[List, Tuple]) -> Union[Tensor, List, Tuple]:
     return items


-class EvalResult(Result):
-    def __init__(
-        self,
-        early_stop_on: Optional[Tensor] = None,
-        checkpoint_on: Optional[Tensor] = None,
-        hiddens: Optional[Tensor] = None,
-    ):
-        """
-        Used in the val/test loop to auto-log to a logger or progress bar without needing to define
-        a _step_end or _epoch_end method.
-
-        Example::
-
-            def validation_step(self, batch, batch_idx):
-                loss = ...
-                result = EvalResult()
-                result.log('val_loss', loss)
-                return result
-
-            def test_step(self, batch, batch_idx):
-                loss = ...
-                result = EvalResult()
-                result.log('test_loss', loss)
-                return result
-
-        Args:
-            early_stop_on: Metric to early stop on.
-                Should be a one element tensor if combined with default
-                :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping`.
-                If this result is returned by
-                :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_step`,
-                the specified value will be averaged across all steps.
-            checkpoint_on: Metric to checkpoint on.
-                Should be a one element tensor if combined with default checkpoint callback.
-                If this result is returned by
-                :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_step`,
-                the specified value will be averaged across all steps.
-            hiddens: Hidden states to pass between truncated back-propagation steps.
-        """
-
-        super().__init__(None, early_stop_on, checkpoint_on, hiddens)
-
-    def log(
-        self,
-        name,
-        value,
-        prog_bar: bool = False,
-        logger: bool = True,
-        on_step: bool = False,
-        on_epoch: bool = True,
-        reduce_fx: Callable = torch.mean,
-        tbptt_reduce_fx: Callable = torch.mean,
-        tbptt_pad_token: int = 0,
-        enable_graph: bool = False,
-        sync_dist: bool = False,
-        sync_dist_op: Union[Any, str] = 'mean',
-        sync_dist_group: Optional[Any] = None,
-    ):
-        """
-        Log a key, value pair
-
-        Example::
-
-            result.log('val_loss', loss)
-
-            # defaults used
-            result.log(
-                name,
-                value,
-                on_step=False,
-                on_epoch=True,
-                logger=True,
-                prog_bar=False,
-                reduce_fx=torch.mean
-            )
-
-
-        Args:
-            name: key name
-            value: value to log
-            prog_bar: if True, logs to the progress bar
-            logger: if True, logs to the logger
-            on_step: if True, logs the output of validation_step or test_step
-            on_epoch: if True, logs the output aggregated over the epoch
-            reduce_fx: reduction function, torch.mean by default
-            tbptt_reduce_fx: function to reduce with on truncated back-propagation
-            tbptt_pad_token: token to use for padding
-            enable_graph: if True, will not auto-detach the graph
-            sync_dist: if True, reduces the metric across GPUs/TPUs
-            sync_dist_op: the op to use when syncing across devices
-            sync_dist_group: the DDP group to sync across
-        """
-        super().log(
-            name=name,
-            value=value,
-            prog_bar=prog_bar,
-            logger=logger,
-            on_step=on_step,
-            on_epoch=on_epoch,
-            reduce_fx=reduce_fx,
-            enable_graph=enable_graph,
-            sync_dist=sync_dist,
-            sync_dist_group=sync_dist_group,
-            sync_dist_op=sync_dist_op,
-            tbptt_pad_token=tbptt_pad_token,
-            tbptt_reduce_fx=tbptt_reduce_fx,
-        )
-
-    def log_dict(
-        self,
-        dictionary: dict,
-        prog_bar: bool = False,
-        logger: bool = True,
-        on_step: bool = False,
-        on_epoch: bool = True,
-        reduce_fx: Callable = torch.mean,
-        tbptt_reduce_fx: Callable = torch.mean,
-        tbptt_pad_token: int = 0,
-        enable_graph: bool = False,
-        sync_dist: bool = False,
-        sync_dist_op: Union[Any, str] = 'mean',
-        sync_dist_group: Optional[Any] = None,
-    ):
-        """
-        Log a dictionary of values at once
-
-        Example::
-
-            values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
-            result.log_dict(values)
-
-        Args:
-            dictionary: key-value pairs (str, tensors)
-            prog_bar: if True, logs to the progress bar
-            logger: if True, logs to the logger
-            on_step: if True, logs the output of validation_step or test_step
-            on_epoch: if True, logs the output aggregated over the epoch
-            reduce_fx: reduction function, torch.mean by default
-            tbptt_reduce_fx: function to reduce with on truncated back-propagation
-            tbptt_pad_token: token to use for padding
-            enable_graph: if True, will not auto-detach the graph
-            sync_dist: if True, reduces the metric across GPUs/TPUs
-            sync_dist_op: the op to use when syncing across devices
-            sync_dist_group: the DDP group to sync across
-        """
-        for k, v in dictionary.items():
-            self.log(
-                name=k,
-                value=v,
-                prog_bar=prog_bar,
-                logger=logger,
-                on_step=on_step,
-                on_epoch=on_epoch,
-                reduce_fx=reduce_fx,
-                enable_graph=enable_graph,
-                sync_dist=sync_dist,
-                sync_dist_group=sync_dist_group,
-                sync_dist_op=sync_dist_op,
-                tbptt_pad_token=tbptt_pad_token,
-                tbptt_reduce_fx=tbptt_reduce_fx,
-            )
-
-    def get_callback_metrics(self) -> dict:
-        result = {}
-        if self.early_stop_on:
-            result['early_stop_on'] = self.early_stop_on
-        if self.checkpoint_on:
-            result['checkpoint_on'] = self.checkpoint_on
-        return result
-
-    def write(self, name: str, values: Union[Tensor, list], filename: str = 'predictions.pt'):
-        """Add a feature name and value pair to the collection of predictions that will be written to disk
-        on `validation_end` or `test_end`. If running on multiple GPUs, you will get `n_gpu` separate
-        prediction files with the rank prepended onto the filename.
-
-        Example::
-
-            result = pl.EvalResult()
-            result.write('ids', [0, 1, 2])
-            result.write('preds', ['cat', 'dog', 'dog'])
-
-        Args:
-            name: Feature name that will turn into the column header of the predictions file
-            values: Flat tensor or list of row values for the given feature column 'name'.
-            filename: Filepath where your predictions will be saved. Defaults to 'predictions.pt'.
-        """
-        # Type check the incoming arguments
-        if not isinstance(name, str):
-            raise ValueError(f"Expected str for 'name' but got {type(name)}")
-        if not isinstance(filename, str):
-            raise ValueError(f"Expected str for 'filename' but got {type(filename)}")
-
-        if isinstance(values, Tensor):
-            values = values.detach()
-
-        preds = getattr(self, 'predictions', None)
-        if preds is None:
-            self.predictions = {filename: {name: values}}
-        elif filename not in preds:
-            preds[filename] = {name: values}
-        elif name not in preds[filename]:
-            preds[filename][name] = values
-        elif isinstance(values, Tensor):
-            preds[filename][name] = torch.cat((preds[filename][name], values))
-        elif isinstance(values, list):
-            preds[filename][name].extend(values)
-
-    def write_dict(self, predictions_dict, filename='predictions.pt'):
-        """Calls EvalResult.write() for each key-value pair in predictions_dict.
-
-        It is recommended that you use this function instead of .write() if you need to
-        store more than one column of predictions in your output file.
-
-        Example::
-
-            predictions_to_write = {'preds': ['cat', 'dog'], 'ids': tensor([0, 1])}
-            result.write_dict(predictions_to_write)
-
-        Args:
-            predictions_dict (dict): Dict of predictions to store and then write to filename at eval end.
-            filename (str, optional): File where your predictions will be stored. Defaults to 'predictions.pt'.
-        """
-        for k, v in predictions_dict.items():
-            self.write(k, v, filename)
-
-
 def weighted_mean(result, weights):

     if isinstance(result, dict):
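
For context, a minimal sketch of the pattern that replaces `EvalResult` logging after this removal, assuming a PyTorch Lightning version (>= 1.0) in which `LightningModule.log` and `log_dict` are available; the module name, the `compute_loss` helper, and the metric keys are illustrative, not part of this commit:

    import pytorch_lightning as pl


    class LitModel(pl.LightningModule):
        def validation_step(self, batch, batch_idx):
            loss = self.compute_loss(batch)  # hypothetical helper, not a Lightning API
            # self.log covers what EvalResult.log did: prog_bar/logger routing,
            # on_step/on_epoch reduction, and optional sync_dist across devices
            self.log('val_loss', loss, on_step=False, on_epoch=True, prog_bar=True)

        def test_step(self, batch, batch_idx):
            loss = self.compute_loss(batch)
            # self.log_dict mirrors EvalResult.log_dict for several metrics at once
            self.log_dict({'test_loss': loss})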
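The removed `write()` accumulated predictions as `{filename: {column_name: values}}`, concatenating tensors and extending lists on repeated calls. A self-contained sketch of that accumulation scheme, using a helper name of our own choosing (`accumulate_prediction` is not a Lightning API):

    import torch
    from torch import Tensor


    def accumulate_prediction(store: dict, name: str, values, filename: str = 'predictions.pt'):
        # store maps filename -> {column name -> flat tensor or list of rows}
        columns = store.setdefault(filename, {})
        if isinstance(values, Tensor):
            values = values.detach()
        if name not in columns:
            columns[name] = values
        elif isinstance(values, Tensor):
            columns[name] = torch.cat((columns[name], values))
        elif isinstance(values, list):
            columns[name].extend(values)


    store = {}
    accumulate_prediction(store, 'ids', [0, 1, 2])
    accumulate_prediction(store, 'ids', [3, 4])
    assert store['predictions.pt']['ids'] == [0, 1, 2, 3, 4]
    # at eval end, each per-filename dict would be persisted with torch.save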