Skip to content

Commit df2b46d

Browse files
committed
forgot to push these files...
1 parent fd3332a commit df2b46d

File tree

2 files changed

+12
-18
lines changed

2 files changed

+12
-18
lines changed

pytorch_lightning/metrics/metric.py

Lines changed: 8 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import torch
55
import torch.distributed
66

7-
from pytorch_lightning.metrics.convertors import tensor_metric, numpy_metric
7+
from pytorch_lightning.metrics.converters import tensor_metric, numpy_metric
88
from pytorch_lightning.utilities.apply_func import apply_to_collection
99

1010
__all__ = ['Metric', 'TensorMetric', 'NumpyMetric']
@@ -35,7 +35,7 @@ def dtype(self) -> Union[str, torch.dtype]:
3535

3636
@dtype.setter
3737
def dtype(self, new_dtype: Union[str, torch.dtype]):
38-
# Necessary to avoid infinite recursion
38+
# necessary to avoid infinite recursion
3939
raise RuntimeError('Cannot set the dtype explicitly. Please use metric.to(new_dtype).')
4040

4141
@property
@@ -107,25 +107,19 @@ def to(self, *args, **kwargs) -> torch.nn.Module:
107107
tensor([[0.4963, 0.7682, 0.0885, 0.1320],
108108
[0.3074, 0.6341, 0.4901, 0.8964],
109109
[0.4556, 0.6323, 0.3489, 0.4017]])
110-
>>> metric.to(torch.double)
110+
>>> metric.to(torch.double) #doctest: +ELLIPSIS
111111
ExampleMetric()
112112
>>> metric.weight
113-
tensor([[0.4963, 0.7682, 0.0885, 0.1320],
114-
[0.3074, 0.6341, 0.4901, 0.8964],
115-
[0.4556, 0.6323, 0.3489, 0.4017]], dtype=torch.float64)
113+
tensor([[...]], dtype=torch.float64)
116114
>>> cpu = torch.device('cpu')
117115
>>> metric.to(cpu, dtype=torch.half, non_blocking=True)
118116
ExampleMetric()
119-
>>> metric.weight
120-
tensor([[0.4963, 0.7682, 0.0885, 0.1320],
121-
[0.3074, 0.6341, 0.4901, 0.8964],
122-
[0.4556, 0.6323, 0.3489, 0.4017]], dtype=torch.float16)
117+
>>> metric.weight #doctest: +ELLIPSIS
118+
tensor([[...]], dtype=torch.float16)
123119
>>> metric.to(cpu)
124120
ExampleMetric()
125-
>>> metric.weight
126-
tensor([[0.4963, 0.7682, 0.0885, 0.1320],
127-
[0.3074, 0.6341, 0.4901, 0.8964],
128-
[0.4556, 0.6323, 0.3489, 0.4017]], dtype=torch.float16)
121+
>>> metric.weight #doctest: +ELLIPSIS
122+
tensor([[...]], dtype=torch.float16)
129123
130124
131125
"""

tests/metrics/test_convertors.py renamed to tests/metrics/test_converters.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,9 @@
44
import torch.distributed as dist
55

66
import tests.base.utils as tutils
7-
from pytorch_lightning.metrics.convertors import (
7+
from pytorch_lightning.metrics.converters import (
88
_apply_to_inputs, _apply_to_outputs, _convert_to_tensor, _convert_to_numpy,
9-
_numpy_metric_conversion, _tensor_metric_conversion, _sync_ddp, tensor_metric, numpy_metric)
9+
_numpy_metric_conversion, _tensor_metric_conversion, _sync_ddp_if_available, tensor_metric, numpy_metric)
1010

1111

1212
@pytest.mark.parametrize(['args', 'kwargs'],
@@ -109,7 +109,7 @@ def test_sync_reduce_ddp():
109109

110110
tensor = torch.tensor([1.], device='cuda:0')
111111

112-
reduced_tensor = _sync_ddp(tensor)
112+
reduced_tensor = _sync_ddp_if_available(tensor)
113113

114114
assert reduced_tensor.item() == dist.get_world_size(), \
115115
'Sync-Reduce does not work properly with DDP and Tensors'
@@ -121,7 +121,7 @@ def test_sync_reduce_simple():
121121
"""Make sure sync-reduce works without DDP"""
122122
tensor = torch.tensor([1.], device='cpu')
123123

124-
reduced_tensor = _sync_ddp(tensor)
124+
reduced_tensor = _sync_ddp_if_available(tensor)
125125

126126
assert torch.allclose(tensor, reduced_tensor), \
127127
'Sync-Reduce does not work properly without DDP and Tensors'

0 commit comments

Comments (0)