@@ -34,25 +34,37 @@ class CohenKappa(Metric):
3434 A score of 0 means agreement by chance.
3535 Note: Currently, this implementation considers all labels
3636 when computing the Cohen's kappa score.
37+
3738 Usage:
38- ```python
39- actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
40- preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
41- weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
42- m = tfa.metrics.CohenKappa(num_classes=5)
43- m.update_state(actuals, preds)
44- print('Final result: ', m.result().numpy()) # Result: 0.61904764
45- # To use this with weights, sample_weight argument can be used.
46- m = tfa.metrics.CohenKappa(num_classes=5)
47- m.update_state(actuals, preds, sample_weight=weights)
48- print('Final result: ', m.result().numpy()) # Result: 0.37209308
49- ```
39+
40+ >>> actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
41+ >>> preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
42+ >>> weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
43+ >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
44+ >>> m.update_state(actuals, preds)
45+ <tf.Tensor: shape=(5, 5), dtype=float32, numpy=
46+ array([[0., 0., 0., 0., 0.],
47+ [0., 2., 0., 0., 0.],
48+ [0., 0., 0., 0., 1.],
49+ [0., 0., 0., 1., 0.],
50+ [0., 0., 1., 0., 3.]], dtype=float32)>
51+ >>> m.result().numpy()
52+ 0.61904764
53+ >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
54+ >>> m.update_state(actuals, preds, sample_weight=weights)
55+ <tf.Tensor: shape=(5, 5), dtype=float32, numpy=
56+ array([[ 0., 0., 0., 0., 0.],
57+ [ 0., 6., 0., 0., 0.],
58+ [ 0., 0., 0., 0., 10.],
59+ [ 0., 0., 0., 2., 0.],
60+ [ 0., 0., 2., 0., 7.]], dtype=float32)>
61+ >>> m.result().numpy()
62+ 0.37209308
63+
5064 Usage with tf.keras API:
51- ```python
5265 model = tf.keras.models.Model(inputs, outputs)
5366 model.add_metric(tfa.metrics.CohenKappa(num_classes=5)(outputs))
5467 model.compile('sgd', loss='mse')
55- ```
5668 """
5769
5870 @typechecked
0 commit comments