
Commit 706e22f

autoih and WindQAQ authored
doctest addons (#677)
* doctest for addons
* minor sanity
* add tfa & cleanup
* doctest example
* sanity check
* files for doctest
* remove tensor id
* flake8
* use pytest
* remove tfa_doctest
* pytest for doctest
* remove section in build
* remove unsused files
* update conftest; with no option named skip_custom_ops issue
* flake8
* move np, tfa, tf to confest
* concise command
* fix docstr issue and add example
* sanity check and missing doctest modules
* reomve FloatTensorLike

Co-authored-by: Tzu-Wei Sung <[email protected]>
1 parent 151e2f7 commit 706e22f

File tree

13 files changed: +158 -155 lines changed


tensorflow_addons/conftest.py

Lines changed: 14 additions & 0 deletions

@@ -14,6 +14,20 @@
     pytest_collection_modifyitems,
 )

+import numpy as np
+import pytest
+
+import tensorflow as tf
+import tensorflow_addons as tfa
+
+
 # fixtures present in this file will be available
 # when running tests and can be referenced with strings
 # https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+    doctest_namespace["np"] = np
+    doctest_namespace["tf"] = tf
+    doctest_namespace["tfa"] = tfa
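The `add_np` fixture above injects `np`, `tf`, and `tfa` into pytest's `doctest_namespace`, so the `>>>` examples added in the files below can use those names without importing them. A minimal sketch of collecting those doctests locally; the exact command wired into the Addons build is not shown in this hunk, so the target path and flags here are assumptions:

```python
# Hypothetical local runner (not part of this commit). pytest's
# --doctest-modules flag collects the `>>>` examples from module
# docstrings; the conftest.py fixture above makes np/tf/tfa available.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["--doctest-modules", "tensorflow_addons/losses/giou_loss.py"]))
```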

tensorflow_addons/layers/wrappers.py

Lines changed: 20 additions & 15 deletions

@@ -16,6 +16,7 @@
 import logging

 import tensorflow as tf
+
 from typeguard import typechecked


@@ -30,21 +31,25 @@ class WeightNormalization(tf.keras.layers.Wrapper):
     Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
     Tim Salimans, Diederik P. Kingma (2016)
     WeightNormalization wrapper works for keras and tf layers.
-    ```python
-    net = WeightNormalization(
-        tf.keras.layers.Conv2D(2, 2, activation='relu'),
-        input_shape=(32, 32, 3),
-        data_init=True)(x)
-    net = WeightNormalization(
-        tf.keras.layers.Conv2D(16, 5, activation='relu'),
-        data_init=True)(net)
-    net = WeightNormalization(
-        tf.keras.layers.Dense(120, activation='relu'),
-        data_init=True)(net)
-    net = WeightNormalization(
-        tf.keras.layers.Dense(n_classes),
-        data_init=True)(net)
-    ```
+
+    Usage:
+
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Conv2D(2, 2, activation='relu'),
+    ...     input_shape=(32, 32, 3),
+    ...     data_init=True)(np.random.rand(32, 32, 3, 1).astype('f'))
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Conv2D(16, 2, activation='relu'),
+    ...     data_init=True)(net)
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Dense(120, activation='relu'),
+    ...     data_init=True)(net)
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Dense(2),
+    ...     data_init=True)(net)
+    >>> net.shape
+    TensorShape([32, 30, 1, 2])
+
     Arguments:
       layer: a layer instance.
       data_init: If `True` use data dependent variable initialization

tensorflow_addons/losses/focal_loss.py

Lines changed: 6 additions & 9 deletions

@@ -15,6 +15,7 @@
 """Implements Focal loss."""

 import tensorflow as tf
+
 import tensorflow.keras.backend as K

 from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
@@ -37,15 +38,11 @@ class SigmoidFocalCrossEntropy(LossFunctionWrapper):

     Usage:

-    ```python
-    fl = tfa.losses.SigmoidFocalCrossEntropy()
-    loss = fl(
-      y_true = [[1.0], [1.0], [0.0]],
-      y_pred = [[0.97], [0.91], [0.03]])
-    print('Loss: ', loss.numpy())  # Loss: [6.8532745e-06,
-                                             1.9097870e-04,
-                                             2.0559824e-05]
-    ```
+    >>> fl = tfa.losses.SigmoidFocalCrossEntropy()
+    >>> loss = fl([[0.97], [0.91], [0.03]], [[1.0], [1.0], [0.0]])
+    >>> loss
+    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.00010971, 0.00329749, 0.00030611], dtype=float32)>

     Usage with tf.keras API:

     ```python

tensorflow_addons/losses/giou_loss.py

Lines changed: 7 additions & 7 deletions

@@ -33,13 +33,13 @@ class GIoULoss(LossFunctionWrapper):

     Usage:

-    ```python
-    gl = tfa.losses.GIoULoss()
-    boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
-    boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
-    loss = gl(boxes1, boxes2)
-    print('Loss: ', loss.numpy())  # Loss: [1.07500000298023224, 1.9333333373069763]
-    ```
+    >>> gl = tfa.losses.GIoULoss()
+    >>> boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
+    >>> boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
+    >>> loss = gl(boxes1, boxes2)
+    >>> loss
+    <tf.Tensor: shape=(), dtype=float32, numpy=1.5041667>

     Usage with tf.keras API:

     ```python
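Worth noting: the old example's comment listed the two per-pair losses, while the new doctest prints the reduced value, since the loss averages over the batch with the default reduction. A quick check, not part of the commit:

```python
# Mean of the per-pair GIoU losses quoted in the removed comment.
per_pair = [1.07500000298023224, 1.9333333373069763]
print(sum(per_pair) / len(per_pair))  # ~1.5041667, matching the new doctest
```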

tensorflow_addons/losses/quantiles.py

Lines changed: 4 additions & 8 deletions

@@ -84,15 +84,11 @@ class PinballLoss(LossFunctionWrapper):
     See: https://en.wikipedia.org/wiki/Quantile_regression

     Usage:
-    ```python
-    pinball = tfa.losses.PinballLoss(tau=.1)
-    loss = pinball([0., 0., 1., 1.], [1., 1., 1., 0.])
-
-    # loss = max(0.1 * (y_true - y_pred), (0.1 - 1) * (y_true - y_pred))
-    #      = (0.9 + 0.9 + 0 + 0.1) / 4

-    print('Loss: ', loss.numpy())  # Loss: 0.475
-    ```
+    >>> pinball = tfa.losses.PinballLoss(tau=.1)
+    >>> loss = pinball([0., 0., 1., 1.], [1., 1., 1., 0.])
+    >>> loss
+    <tf.Tensor: shape=(), dtype=float32, numpy=0.475>

     Usage with the `compile` API:

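The removed comment in the PinballLoss diff above explained where 0.475 comes from; since the new doctest no longer shows that derivation, here is a small NumPy check of the arithmetic (not part of the commit):

```python
# Pinball loss by hand for the example above: tau = 0.1,
# per-sample loss = max(tau * error, (tau - 1) * error), then the mean.
import numpy as np

tau = 0.1
y_true = np.array([0.0, 0.0, 1.0, 1.0])
y_pred = np.array([1.0, 1.0, 1.0, 0.0])
error = y_true - y_pred
per_sample = np.maximum(tau * error, (tau - 1) * error)
print(per_sample.mean())  # (0.9 + 0.9 + 0 + 0.1) / 4 = 0.475
```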
tensorflow_addons/metrics/cohens_kappa.py

Lines changed: 24 additions & 19 deletions

@@ -15,6 +15,7 @@
 """Implements Cohen's Kappa."""

 import tensorflow as tf
+
 import numpy as np
 import tensorflow.keras.backend as K
 from tensorflow.keras.metrics import Metric
@@ -27,39 +28,43 @@
 @tf.keras.utils.register_keras_serializable(package="Addons")
 class CohenKappa(Metric):
     """Computes Kappa score between two raters.
-
     The score lies in the range [-1, 1]. A score of -1 represents
     complete disagreement between two raters whereas a score of 1
     represents complete agreement between the two raters.
     A score of 0 means agreement by chance.
-
     Note: As of now, this implementation considers all labels
     while calculating the Cohen's Kappa score.

     Usage:

-    ```python
-    actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
-    preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
-    weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
-
-    m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
-    m.update_state(actuals, preds)
-    print('Final result: ', m.result().numpy())  # Result: 0.61904764
-
-    # To use this with weights, sample_weight argument can be used.
-    m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
-    m.update_state(actuals, preds, sample_weight=weights)
-    print('Final result: ', m.result().numpy())  # Result: 0.37209308
-    ```
+    >>> actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
+    >>> preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
+    >>> weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
+    >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
+    >>> m.update_state(actuals, preds)
+    <tf.Tensor: shape=(5, 5), dtype=float32, numpy=
+    array([[0., 0., 0., 0., 0.],
+           [0., 2., 0., 0., 0.],
+           [0., 0., 0., 0., 1.],
+           [0., 0., 0., 1., 0.],
+           [0., 0., 1., 0., 3.]], dtype=float32)>
+    >>> m.result().numpy()
+    0.61904764
+    >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
+    >>> m.update_state(actuals, preds, sample_weight=weights)
+    <tf.Tensor: shape=(5, 5), dtype=float32, numpy=
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  6.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0., 10.],
+           [ 0.,  0.,  0.,  2.,  0.],
+           [ 0.,  0.,  2.,  0.,  7.]], dtype=float32)>
+    >>> m.result().numpy()
+    0.37209308

     Usage with tf.keras API:
-
-    ```python
     model = tf.keras.models.Model(inputs, outputs)
     model.add_metric(tfa.metrics.CohenKappa(num_classes=5)(outputs))
     model.compile('sgd', loss='mse')
-    ```
     """

     @typechecked
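For reference, the 0.61904764 in the unweighted case can be rederived from the confusion matrix that `update_state` returns in the doctest above. A hedged NumPy sketch, not part of the commit, assuming the default (unweighted) kappa:

```python
# Unweighted Cohen's kappa from the 5x5 confusion matrix printed above.
import numpy as np

conf = np.array([[0, 0, 0, 0, 0],
                 [0, 2, 0, 0, 0],
                 [0, 0, 0, 0, 1],
                 [0, 0, 0, 1, 0],
                 [0, 0, 1, 0, 3]], dtype=float)
n = conf.sum()
po = np.trace(conf) / n                                     # observed agreement
pe = (conf.sum(axis=0) * conf.sum(axis=1)).sum() / n ** 2   # agreement expected by chance
print((po - pe) / (1 - pe))  # ~0.6190476
```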

tensorflow_addons/metrics/matthews_correlation_coefficient.py

Lines changed: 7 additions & 12 deletions

@@ -42,18 +42,13 @@ class MatthewsCorrelationCoefficient(tf.keras.metrics.Metric):
     ((TP + FP) * (TP + FN) * (TN + FP ) * (TN + FN))^(1/2)

     Usage:
-    ```python
-    actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]],
-        dtype=tf.float32)
-    preds = tf.constant([[1.0], [0.0], [1.0], [1.0]],
-        dtype=tf.float32)
-    # Matthews correlation coefficient
-    mcc = MatthewsCorrelationCoefficient(num_classes=1)
-    mcc.update_state(actuals, preds)
-    print('Matthews correlation coefficient is:',
-          mcc.result().numpy())
-    # Matthews correlation coefficient is : -0.33333334
-    ```
+
+    >>> actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]], dtype=tf.float32)
+    >>> preds = tf.constant([[1.0], [0.0], [1.0], [1.0]], dtype=tf.float32)
+    >>> mcc = tfa.metrics.MatthewsCorrelationCoefficient(num_classes=1)
+    >>> mcc.update_state(actuals, preds)
+    >>> mcc.result()
+    <tf.Tensor: shape=(1,), dtype=float32, numpy=array([-0.33333334], dtype=float32)>
     """

     @typechecked
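The -0.33333334 in the new doctest follows directly from the MCC formula quoted a few lines up in the same docstring; a quick check with the counts from `actuals`/`preds`, not part of the commit:

```python
# actuals = [1, 1, 1, 0], preds = [1, 0, 1, 1]  ->  TP=2, TN=0, FP=1, FN=1
tp, tn, fp, fn = 2, 0, 1, 1
mcc = (tp * tn - fp * fn) / ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
print(mcc)  # -0.333...
```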

tensorflow_addons/metrics/multilabel_confusion_matrix.py

Lines changed: 32 additions & 25 deletions

@@ -16,10 +16,11 @@

 import warnings

+import numpy as np
 import tensorflow as tf
+
 from tensorflow.keras import backend as K
 from tensorflow.keras.metrics import Metric
-import numpy as np

 from typeguard import typechecked
 from tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike
@@ -46,30 +47,36 @@ class MultiLabelConfusionMatrix(Metric):
     - false negatives for class i in M(1,0)
     - true positives for class i in M(1,1)

-    ```python
-    # multilabel confusion matrix
-    y_true = tf.constant([[1, 0, 1], [0, 1, 0]],
-        dtype=tf.int32)
-    y_pred = tf.constant([[1, 0, 0],[0, 1, 1]],
-        dtype=tf.int32)
-    output = MultiLabelConfusionMatrix(num_classes=3)
-    output.update_state(y_true, y_pred)
-    print('Confusion matrix:', output.result().numpy())
-
-    # Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]]
-                         [[0 1] [1 0]]]
-
-    # if multiclass input is provided
-    y_true = tf.constant([[1, 0, 0], [0, 1, 0]],
-        dtype=tf.int32)
-    y_pred = tf.constant([[1, 0, 0],[0, 0, 1]],
-        dtype=tf.int32)
-    output = MultiLabelConfusionMatrix(num_classes=3)
-    output.update_state(y_true, y_pred)
-    print('Confusion matrix:', output.result().numpy())
-
-    # Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]]
-    ```
+    Usage:
+
+    >>> y_true = tf.constant([[1, 0, 1], [0, 1, 0]], dtype=tf.int32)
+    >>> y_pred = tf.constant([[1, 0, 0],[0, 1, 1]], dtype=tf.int32)
+    >>> output1 = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
+    >>> output1.update_state(y_true, y_pred)
+    >>> output1.result()
+    <tf.Tensor: shape=(3, 2, 2), dtype=float32, numpy=
+    array([[[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[0., 1.],
+            [1., 0.]]], dtype=float32)>
+    >>> y_true = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.int32)
+    >>> y_pred = tf.constant([[1, 0, 0],[0, 0, 1]], dtype=tf.int32)
+    >>> output2 = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
+    >>> output2.update_state(y_true, y_pred)
+    >>> output2.result()
+    <tf.Tensor: shape=(3, 2, 2), dtype=float32, numpy=
+    array([[[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[1., 0.],
+            [1., 0.]],
+    <BLANKLINE>
+           [[1., 1.],
+            [0., 0.]]], dtype=float32)>
     """

     @typechecked
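To make the 3x2x2 doctest output above easier to read, here is a small NumPy sketch (not part of the commit) that rebuilds the per-class matrices for the first example, with rows indexed by the true label and columns by the prediction:

```python
import numpy as np

y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])

for i in range(3):  # one 2x2 matrix per class
    m = np.zeros((2, 2), dtype=int)
    for t, p in zip(y_true[:, i], y_pred[:, i]):
        m[t, p] += 1
    print(i, m.tolist())
# 0 [[1, 0], [0, 1]]
# 1 [[1, 0], [0, 1]]
# 2 [[0, 1], [1, 0]]
```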

tensorflow_addons/metrics/r_square.py

Lines changed: 8 additions & 7 deletions

@@ -16,6 +16,7 @@
 from typing import Tuple

 import tensorflow as tf
+
 from tensorflow.keras import backend as K
 from tensorflow.keras.metrics import Metric
 from tensorflow.python.ops import weights_broadcast_ops
@@ -61,13 +62,13 @@ class RSquare(Metric):
     of the same metric.

     Usage:
-    ```python
-    actuals = tf.constant([1, 4, 3], dtype=tf.float32)
-    preds = tf.constant([2, 4, 4], dtype=tf.float32)
-    result = tf.keras.metrics.RSquare()
-    result.update_state(actuals, preds)
-    print('R^2 score is: ', r1.result().numpy())  # 0.57142866
-    ```
+
+    >>> actuals = tf.constant([1, 4, 3], dtype=tf.float32)
+    >>> preds = tf.constant([2, 4, 4], dtype=tf.float32)
+    >>> ans = tfa.metrics.RSquare()
+    >>> ans.update_state(actuals, preds)
+    >>> ans.result()
+    <tf.Tensor: shape=(), dtype=float32, numpy=0.57142854>

     """

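The 0.57142854 in the RSquare doctest above is the ordinary coefficient of determination; a NumPy sanity check, not part of the commit:

```python
# R^2 = 1 - SS_res / SS_tot for actuals = [1, 4, 3], preds = [2, 4, 4].
import numpy as np

actuals = np.array([1.0, 4.0, 3.0])
preds = np.array([2.0, 4.0, 4.0])
ss_res = np.sum((actuals - preds) ** 2)
ss_tot = np.sum((actuals - actuals.mean()) ** 2)
print(1 - ss_res / ss_tot)  # ~0.5714286
```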
tensorflow_addons/optimizers/lookahead.py

Lines changed: 2 additions & 4 deletions

@@ -34,10 +34,8 @@ class Lookahead(tf.keras.optimizers.Optimizer):

     Example of usage:

-    ```python
-    opt = tf.keras.optimizers.SGD(learning_rate)
-    opt = tfa.optimizers.Lookahead(opt)
-    ```
+    >>> opt = tf.keras.optimizers.SGD(learning_rate=0.01)
+    >>> opt = tfa.optimizers.Lookahead(opt)
     """

     @typechecked
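The doctest stops at wrapping the optimizer; as a follow-up, the wrapped optimizer drops into the usual Keras workflow. A minimal sketch, not part of the commit, with an arbitrarily chosen toy model:

```python
import tensorflow as tf
import tensorflow_addons as tfa

opt = tfa.optimizers.Lookahead(tf.keras.optimizers.SGD(learning_rate=0.01))
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=opt, loss="mse")  # used like any other Keras optimizer
```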
