15 | 15 | """Implements Cohen's Kappa.""" |
16 | 16 |
17 | 17 | import tensorflow as tf |
| 18 | + |
18 | 19 | import numpy as np |
19 | 20 | import tensorflow.keras.backend as K |
20 | 21 | from tensorflow.keras.metrics import Metric |
27 | 28 | @tf.keras.utils.register_keras_serializable(package="Addons") |
28 | 29 | class CohenKappa(Metric): |
29 | 30 | """Computes Kappa score between two raters. |
30 | | -
31 | 31 | The score lies in the range [-1, 1]. A score of -1 represents |
32 | 32 | complete disagreement between two raters whereas a score of 1 |
33 | 33 | represents complete agreement between the two raters. |
34 | 34 | A score of 0 means agreement by chance. |
35 | | -
36 | 35 | Note: As of now, this implementation considers all labels |
37 | 36 | while calculating the Cohen's Kappa score. |
38 | 37 |
39 | 38 | Usage: |
40 | 39 |
41 | | - ```python |
42 | | - actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32) |
43 | | - preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32) |
44 | | - weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32) |
45 | | -
46 | | - m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True) |
47 | | - m.update_state(actuals, preds) |
48 | | - print('Final result: ', m.result().numpy()) # Result: 0.61904764 |
49 | | -
50 | | - # To use this with weights, sample_weight argument can be used. |
51 | | - m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True) |
52 | | - m.update_state(actuals, preds, sample_weight=weights) |
53 | | - print('Final result: ', m.result().numpy()) # Result: 0.37209308 |
54 | | - ``` |
| 40 | + >>> actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32) |
| 41 | + >>> preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32) |
| 42 | + >>> weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32) |
| 43 | + >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True) |
| 44 | + >>> m.update_state(actuals, preds) |
| 45 | + <tf.Tensor: shape=(5, 5), dtype=float32, numpy= |
| 46 | + array([[0., 0., 0., 0., 0.], |
| 47 | + [0., 2., 0., 0., 0.], |
| 48 | + [0., 0., 0., 0., 1.], |
| 49 | + [0., 0., 0., 1., 0.], |
| 50 | + [0., 0., 1., 0., 3.]], dtype=float32)> |
| 51 | + >>> m.result().numpy() |
| 52 | + 0.61904764 |
| 53 | + >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True) |
| 54 | + >>> m.update_state(actuals, preds, sample_weight=weights) |
| 55 | + <tf.Tensor: shape=(5, 5), dtype=float32, numpy= |
| 56 | + array([[ 0., 0., 0., 0., 0.], |
| 57 | + [ 0., 6., 0., 0., 0.], |
| 58 | + [ 0., 0., 0., 0., 10.], |
| 59 | + [ 0., 0., 0., 2., 0.], |
| 60 | + [ 0., 0., 2., 0., 7.]], dtype=float32)> |
| 61 | + >>> m.result().numpy() |
| 62 | + 0.37209308 |
55 | 63 |
56 | 64 | Usage with tf.keras API: |
57 | | -
58 | | - ```python |
59 | 65 | model = tf.keras.models.Model(inputs, outputs) |
60 | 66 | model.add_metric(tfa.metrics.CohenKappa(num_classes=5)(outputs)) |
61 | 67 | model.compile('sgd', loss='mse') |
62 | | - ``` |
63 | 68 | """ |
64 | 69 |
65 | 70 | @typechecked |
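
For readers checking the doctest output above: the unweighted result (`0.61904764`) is what the textbook Cohen's kappa formula, kappa = (p_o - p_e) / (1 - p_e) with p_o the observed agreement and p_e the agreement expected by chance, gives for the confusion matrix printed by `update_state`. A minimal NumPy sketch of that formula (the helper name `kappa_from_confusion` is illustrative only and not part of `tfa.metrics`):

```python
import numpy as np

def kappa_from_confusion(conf):
    """Unweighted Cohen's kappa from a square confusion matrix.

    conf[i, j] holds the (possibly sample-weighted) count of items with
    true label i and predicted label j, i.e. the matrix that
    CohenKappa.update_state accumulates and prints above.
    """
    conf = np.asarray(conf, dtype=np.float64)
    n = conf.sum()
    p_observed = np.trace(conf) / n                               # raters agree
    p_expected = (conf.sum(axis=1) @ conf.sum(axis=0)) / n ** 2   # agreement by chance
    return (p_observed - p_expected) / (1.0 - p_expected)

# Confusion matrix from the unweighted doctest above.
conf = np.array([[0., 0., 0., 0., 0.],
                 [0., 2., 0., 0., 0.],
                 [0., 0., 0., 0., 1.],
                 [0., 0., 0., 1., 0.],
                 [0., 0., 1., 0., 3.]])
print(kappa_from_confusion(conf))  # 0.6190476..., matching m.result()
```

Feeding it the sample-weighted matrix from the second doctest likewise reproduces `0.37209308`; `sample_weight` only changes how the counts are accumulated, not the formula applied to them.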
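Since `CohenKappa` is a streaming `tf.keras.metrics.Metric`, its confusion-matrix state keeps accumulating across `update_state` calls until it is reset, which is what makes it usable batch by batch in the Keras training loop. A short standalone sketch of that pattern, reusing the constructor arguments from the doctest above (the two-batch split is arbitrary and only for illustration):

```python
import numpy as np
import tensorflow_addons as tfa

metric = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)

# The same eight labels as in the doctest, fed in two batches. The internal
# confusion matrix accumulates, so the final score equals the single-call one.
batches = [
    (np.array([4, 4, 3, 4], dtype=np.int32), np.array([4, 4, 3, 4], dtype=np.int32)),
    (np.array([2, 4, 1, 1], dtype=np.int32), np.array([4, 2, 1, 1], dtype=np.int32)),
]
for actuals_batch, preds_batch in batches:
    metric.update_state(actuals_batch, preds_batch)
print(metric.result().numpy())  # ~0.6190476, same as the unweighted doctest

# Clear the accumulated confusion matrix before scoring a different dataset.
metric.reset_states()
```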