 from typing import Any, Callable, Optional

 import torch
+from torchmetrics import Accuracy as __Accuracy

-from pytorch_lightning.metrics.functional.accuracy import _accuracy_compute, _accuracy_update
-from pytorch_lightning.metrics.metric import Metric
+from pytorch_lightning.utilities import rank_zero_warn


-class Accuracy(Metric):
+class Accuracy(__Accuracy):
     r"""
-    Computes `Accuracy <https://en.wikipedia.org/wiki/Accuracy_and_precision>`__:
-
-    .. math::
-        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
-
-    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a
-    tensor of predictions.
-
-    For multi-class and multi-dimensional multi-class data with probability predictions, the
-    parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the
-    top-K highest probability items are considered to find the correct label.
-
-    For multi-label and multi-dimensional multi-class inputs, this metric computes the "global"
-    accuracy by default, which counts all labels or sub-samples separately. This can be
-    changed to subset accuracy (which requires all labels or sub-samples in the sample to
-    be correctly predicted) by setting ``subset_accuracy=True``.
-
-    Args:
-        threshold:
-            Threshold probability value for transforming probability predictions to binary
-            (0, 1) predictions, in the case of binary or multi-label inputs.
-        top_k:
-            Number of highest probability predictions considered to find the correct label,
-            relevant only for (multi-dimensional) multi-class inputs with probability
-            predictions. The default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-        subset_accuracy:
-            Whether to compute subset accuracy for multi-label and multi-dimensional
-            multi-class inputs (has no effect for other input types).
-
-            - For multi-label inputs, if the parameter is set to ``True``, then all labels for
-              each sample must be correctly predicted for the sample to count as correct. If it
-              is set to ``False``, then all labels are counted separately - this is equivalent to
-              flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``).
-
-            - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then
-              all sub-samples (on the extra axis) must be correct for the sample to be counted as
-              correct. If it is set to ``False``, then all sub-samples are counted separately -
-              this is equivalent, in the case of label predictions, to flattening the inputs
-              beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``). Note that
-              the ``top_k`` parameter still applies in both cases, if set.
-
-        compute_on_step:
-            Forward only calls ``update()`` and returns ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step.
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``,
-            DDP will be used to perform the allgather.
-
-    Raises:
-        ValueError:
-            If ``threshold`` is not between ``0`` and ``1``.
-        ValueError:
-            If ``top_k`` is not an ``integer`` larger than ``0``.
-
-    Example:
-
-        >>> from pytorch_lightning.metrics import Accuracy
-        >>> target = torch.tensor([0, 1, 2, 3])
-        >>> preds = torch.tensor([0, 2, 1, 3])
-        >>> accuracy = Accuracy()
-        >>> accuracy(preds, target)
-        tensor(0.5000)
-
-        >>> target = torch.tensor([0, 1, 2])
-        >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
-        >>> accuracy = Accuracy(top_k=2)
-        >>> accuracy(preds, target)
-        tensor(0.6667)
+    This implementation refers to :class:`~torchmetrics.Accuracy`.

+    .. warning:: This metric is deprecated, use ``torchmetrics.Accuracy``. Will be removed in v1.5.0.
     """

     def __init__(
@@ -109,44 +36,16 @@ def __init__(
         process_group: Optional[Any] = None,
         dist_sync_fn: Callable = None,
     ):
+        rank_zero_warn(
+            "This `Accuracy` was deprecated in v1.3.0 in favor of `torchmetrics.Accuracy`."
+            " It will be removed in v1.5.0", DeprecationWarning
+        )
         super().__init__(
+            threshold=threshold,
+            top_k=top_k,
+            subset_accuracy=subset_accuracy,
             compute_on_step=compute_on_step,
             dist_sync_on_step=dist_sync_on_step,
             process_group=process_group,
             dist_sync_fn=dist_sync_fn,
         )
-
-        self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
-        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
-
-        if not 0 < threshold < 1:
-            raise ValueError(f"The `threshold` should be a float in the (0,1) interval, got {threshold}")
-
-        if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):
-            raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}")
-
-        self.threshold = threshold
-        self.top_k = top_k
-        self.subset_accuracy = subset_accuracy
-
-    def update(self, preds: torch.Tensor, target: torch.Tensor):
-        """
-        Update state with predictions and targets.
-
-        Args:
-            preds: Predictions from model (probabilities, or labels)
-            target: Ground truth labels
-        """
-
-        correct, total = _accuracy_update(
-            preds, target, threshold=self.threshold, top_k=self.top_k, subset_accuracy=self.subset_accuracy
-        )
-
-        self.correct += correct
-        self.total += total
-
-    def compute(self) -> torch.Tensor:
-        """
-        Computes accuracy based on inputs passed in to ``update`` previously.
-        """
-        return _accuracy_compute(self.correct, self.total)
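After this change, `pytorch_lightning.metrics.Accuracy` is a thin shim that emits a `DeprecationWarning` and forwards every constructor argument to `torchmetrics.Accuracy`. As a migration sketch (assuming torchmetrics ~0.2, the release this diff targets; later torchmetrics versions changed the `Accuracy` constructor, which now requires a `task` argument), the top-k example from the removed docstring ports over unchanged:

    import torch
    from torchmetrics import Accuracy  # replaces pytorch_lightning.metrics.Accuracy

    # Top-2 accuracy over probability predictions; the values are taken from
    # the example in the removed docstring.
    target = torch.tensor([0, 1, 2])
    preds = torch.tensor([[0.1, 0.9, 0.0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])

    accuracy = Accuracy(top_k=2)
    print(accuracy(preds, target))  # tensor(0.6667)

Forwarding `threshold`, `top_k`, and `subset_accuracy` explicitly in `super().__init__` keeps the deprecated class behaviorally identical to the old implementation, so existing code gains only a warning, not a behavior change.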