
Commit a232c04

autoih committed: files for doctest
1 parent c152593

File tree: 9 files changed (+111 additions, -128 deletions)
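This commit rewrites the fenced `python` examples in docstrings as `>>>` doctest blocks, so the examples can be executed and verified automatically. As a minimal sketch of how such embedded doctests are typically run with the standard-library `doctest` module (the CI harness the project actually wires up is not part of this commit, so the module chosen and the injected globals below are illustrative assumptions; note the new examples reference `tf`, `tfa`, and `np` without importing them):

```python
# Illustrative only: execute the doctests embedded in one module's docstrings.
import doctest

import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_addons.losses.giou_loss as giou_loss

# The doctests use `tf`, `tfa`, and `np` as free names, so inject them.
results = doctest.testmod(
    giou_loss,
    extraglobs={'tf': tf, 'tfa': tfa, 'np': np},
    verbose=False)
print(f"attempted={results.attempted}, failed={results.failed}")
```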

tensorflow_addons/layers/wrappers.py

Lines changed: 19 additions & 15 deletions

```diff
@@ -30,21 +30,25 @@ class WeightNormalization(tf.keras.layers.Wrapper):
     Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
     Tim Salimans, Diederik P. Kingma (2016)
     WeightNormalization wrapper works for keras and tf layers.
-    ```python
-      net = WeightNormalization(
-          tf.keras.layers.Conv2D(2, 2, activation='relu'),
-          input_shape=(32, 32, 3),
-          data_init=True)(x)
-      net = WeightNormalization(
-          tf.keras.layers.Conv2D(16, 5, activation='relu'),
-          data_init=True)(net)
-      net = WeightNormalization(
-          tf.keras.layers.Dense(120, activation='relu'),
-          data_init=True)(net)
-      net = WeightNormalization(
-          tf.keras.layers.Dense(n_classes),
-          data_init=True)(net)
-    ```
+
+    Usage:
+
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Conv2D(2, 2, activation='relu'),
+    ...     input_shape=(32, 32, 3),
+    ...     data_init=True)(np.random.rand(32, 32, 3, 1).astype('f'))
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Conv2D(16, 2, activation='relu'),
+    ...     data_init=True)(net)
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Dense(120, activation='relu'),
+    ...     data_init=True)(net)
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Dense(2),
+    ...     data_init=True)(net)
+    >>> net.shape
+    TensorShape([32, 30, 1, 2])
+
     Arguments:
       layer: a layer instance.
       data_init: If `True` use data dependent variable initialization
```
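The `TensorShape([32, 30, 1, 2])` expected by the new doctest follows from ordinary shape propagation through the wrapped layers. A quick check of the arithmetic (illustrative, not part of the commit):

```python
import numpy as np
import tensorflow as tf

x = np.random.rand(32, 32, 3, 1).astype('f')  # batch=32, H=32, W=3, C=1
y = tf.keras.layers.Conv2D(2, 2, activation='relu')(x)   # valid padding: H, W shrink by 1
print(y.shape)  # (32, 31, 2, 2)
y = tf.keras.layers.Conv2D(16, 2, activation='relu')(y)
print(y.shape)  # (32, 30, 1, 16)
y = tf.keras.layers.Dense(120, activation='relu')(y)     # Dense maps only the last axis
print(y.shape)  # (32, 30, 1, 120)
y = tf.keras.layers.Dense(2)(y)
print(y.shape)  # (32, 30, 1, 2) -- matches TensorShape([32, 30, 1, 2])
```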

tensorflow_addons/losses/focal_loss.py

Lines changed: 9 additions & 10 deletions

```diff
@@ -37,15 +37,14 @@ class SigmoidFocalCrossEntropy(tf.keras.losses.Loss):

     Usage:

-    ```python
-    fl = tfa.losses.SigmoidFocalCrossEntropy()
-    loss = fl(
-      [[0.97], [0.91], [0.03]],
-      [[1.0], [1.0], [0.0]])
-    print('Loss: ', loss.numpy())  # Loss: [[0.00010971]
-                                   #        [0.0032975]
-                                   #        [0.00030611]]
-    ```
+    >>> fl = tfa.losses.SigmoidFocalCrossEntropy()
+    >>> loss = fl([[0.97], [0.91], [0.03]], [[1.0], [1.0], [0.0]])
+    >>> loss
+    <tf.Tensor: shape=(3, 1), dtype=float32, numpy=
+    array([[0.00010971],
+           [0.0032975 ],
+           [0.00030611]], dtype=float32)>

     Usage with tf.keras API:

     ```python
@@ -65,7 +64,7 @@ class SigmoidFocalCrossEntropy(tf.keras.losses.Loss):
     ValueError: If the shape of `sample_weight` is invalid or value of
     `gamma` is less than zero
     """
-
+
     def __init__(self,
                  from_logits=False,
                  alpha=0.25,
```
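The docstring's second example ("Usage with tf.keras API") is left untouched by this hunk. For context, a loss instance like this plugs into `model.compile` the standard Keras way; a sketch with a hypothetical model, not text from the commit:

```python
import tensorflow as tf
import tensorflow_addons as tfa

# Hypothetical single-output model; any Keras model works the same way.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(
    optimizer='adam',
    loss=tfa.losses.SigmoidFocalCrossEntropy())
```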

tensorflow_addons/losses/giou_loss.py

Lines changed: 7 additions & 7 deletions

```diff
@@ -33,13 +33,13 @@ class GIoULoss(tf.keras.losses.Loss):

     Usage:

-    ```python
-    gl = tfa.losses.GIoULoss()
-    boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
-    boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
-    loss = gl(boxes1, boxes2)
-    print('Loss: ', loss.numpy())  # Loss: [1.07500000298023224, 1.9333333373069763]
-    ```
+    >>> gl = tfa.losses.GIoULoss()
+    >>> boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
+    >>> boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
+    >>> loss = gl(boxes1, boxes2)
+    >>> loss
+    <tf.Tensor: shape=(), dtype=float32, numpy=1.5041667>

     Usage with tf.keras API:

     ```python
```
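The old comment showed two per-box losses (about 1.075 and 1.9333) while the new doctest prints a single scalar; that is just the default `SUM_OVER_BATCH_SIZE` reduction of `tf.keras.losses.Loss` averaging the two values, (1.075 + 1.9333) / 2 ≈ 1.5041667. A sketch recovering the per-sample values, assuming the standard Keras `reduction` argument:

```python
import tensorflow as tf
import tensorflow_addons as tfa

boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])

# Disable reduction to see the loss for each box pair individually.
gl = tfa.losses.GIoULoss(reduction=tf.keras.losses.Reduction.NONE)
print(gl(boxes1, boxes2).numpy())  # ~[1.075, 1.9333], as in the old comment
```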

tensorflow_addons/metrics/matthews_correlation_coefficient.py

Lines changed: 9 additions & 13 deletions

```diff
@@ -42,20 +42,16 @@ class MatthewsCorrelationCoefficient(tf.keras.metrics.Metric):
     ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))^(1/2)

     Usage:
-    ```python
-    actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]],
-                          dtype=tf.float32)
-    preds = tf.constant([[1.0], [0.0], [1.0], [1.0]],
-                        dtype=tf.float32)
-    # Matthews correlation coefficient
-    mcc = MatthewsCorrelationCoefficient(num_classes=1)
-    mcc.update_state(actuals, preds)
-    print('Matthews correlation coefficient is:',
-          mcc.result().numpy())
-    # Matthews correlation coefficient is : -0.33333334
-    ```
-    """

+    >>> actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]], dtype=tf.float32)
+    >>> preds = tf.constant([[1.0], [0.0], [1.0], [1.0]], dtype=tf.float32)
+    >>> mcc = tfa.metrics.MatthewsCorrelationCoefficient(num_classes=1)
+    >>> mcc.update_state(actuals, preds)
+    >>> mcc.result()
+    <tf.Tensor: shape=(1,), dtype=float32, numpy=array([-0.33333334], dtype=float32)>
+
+    """
+
     def __init__(self,
                  num_classes=None,
                  name='MatthewsCorrelationCoefficient',
```
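The -0.33333334 in the doctest checks out by hand: with actuals [1, 1, 1, 0] and preds [1, 0, 1, 1] the counts are TP=2, TN=0, FP=1, FN=1, so MCC = (2·0 - 1·1) / sqrt(3·3·1·1) = -1/3. A quick sketch, not from the commit:

```python
import math

# Confusion counts for actuals=[1, 1, 1, 0], preds=[1, 0, 1, 1]
tp, tn, fp, fn = 2, 0, 1, 1
mcc = (tp * tn - fp * fn) / math.sqrt(
    (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(mcc)  # -0.3333..., matching numpy=array([-0.33333334])
```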

tensorflow_addons/metrics/multilabel_confusion_matrix.py

Lines changed: 20 additions & 22 deletions

```diff
@@ -44,32 +44,30 @@ class MultiLabelConfusionMatrix(Metric):
     - false negatives for class i in M(1,0)
     - true positives for class i in M(1,1)

-    ```python
+    Usage:
+
     # multilabel confusion matrix
-    y_true = tf.constant([[1, 0, 1], [0, 1, 0]],
-                         dtype=tf.int32)
-    y_pred = tf.constant([[1, 0, 0],[0, 1, 1]],
-                         dtype=tf.int32)
-    output = MultiLabelConfusionMatrix(num_classes=3)
-    output.update_state(y_true, y_pred)
-    print('Confusion matrix:', output.result().numpy())
-
-    # Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]]
-                         [[0 1] [1 0]]]
+    >>> y_true = tf.constant([[1, 0, 1], [0, 1, 0]], dtype=tf.int32)
+    >>> y_pred = tf.constant([[1, 0, 0],[0, 1, 1]], dtype=tf.int32)
+    >>> output = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
+    >>> output.update_state(y_true, y_pred)
+    >>> output.result()
+    <tf.Tensor: shape=(3, 2, 2), dtype=int32, numpy=
+    array([[[1, 0], [0, 1]], [[1, 0], [0, 1]], [[0, 1],
+    [1, 0]]], dtype=int32)>

     # if multiclass input is provided
-    y_true = tf.constant([[1, 0, 0], [0, 1, 0]],
-                         dtype=tf.int32)
-    y_pred = tf.constant([[1, 0, 0],[0, 0, 1]],
-                         dtype=tf.int32)
-    output = MultiLabelConfusionMatrix(num_classes=3)
-    output.update_state(y_true, y_pred)
-    print('Confusion matrix:', output.result().numpy())
-
-    # Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]]
-    ```
+    >>> y_true = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.int32)
+    >>> y_pred = tf.constant([[1, 0, 0],[0, 0, 1]], dtype=tf.int32)
+    >>> output = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
+    >>> output.update_state(y_true, y_pred)
+    >>> output.result()
+    <tf.Tensor: shape=(3, 2, 2), dtype=int32, numpy=
+    array([[[1, 0], [0, 1]], [[1, 0], [1, 0]], [[1, 1], [0, 0]]],
+    dtype=int32)>
+
     """
-
+
     def __init__(self,
                  num_classes,
                  name='Multilabel_confusion_matrix',
```
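The per-class 2×2 matrices are easy to verify: for class 2 in the first example, y_true is [1, 0] and y_pred is [0, 1], giving one false negative and one false positive, i.e. [[0, 1], [1, 0]]. scikit-learn ships an equivalent function that can serve as an independent cross-check; a sketch, assuming scikit-learn is available:

```python
import numpy as np
from sklearn.metrics import multilabel_confusion_matrix

y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])

# One 2x2 matrix per class: rows are true 0/1, columns are predicted 0/1.
print(multilabel_confusion_matrix(y_true, y_pred))
# -> three 2x2 matrices, matching the doctest:
#    class 0: [[1 0] [0 1]], class 1: [[1 0] [0 1]], class 2: [[0 1] [1 0]]
```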

tensorflow_addons/metrics/r_square.py

Lines changed: 9 additions & 8 deletions

```diff
@@ -35,15 +35,16 @@ class RSquare(Metric):
     - It can also be negative if the model is worse.

     Usage:
-    ```python
-    actuals = tf.constant([1, 4, 3], dtype=tf.float32)
-    preds = tf.constant([2, 4, 4], dtype=tf.float32)
-    result = tf.keras.metrics.RSquare()
-    result.update_state(actuals, preds)
-    print('R^2 score is: ', r1.result().numpy())  # 0.57142866
-    ```
-    """

+    >>> actuals = tf.constant([1, 4, 3], dtype=tf.float32)
+    >>> preds = tf.constant([2, 4, 4], dtype=tf.float32)
+    >>> ans = tfa.metrics.RSquare()
+    >>> ans.update_state(actuals, preds)
+    >>> ans.result()
+    <tf.Tensor: shape=(), dtype=float32, numpy=0.57142866>
+
+    """
+
     def __init__(self, name='r_square', dtype=tf.float32):
         super(RSquare, self).__init__(name=name, dtype=dtype)
         self.squared_sum = self.add_weight("squared_sum", initializer="zeros")
```
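The removed example was also buggy (it constructed `result` but printed `r1`, and referenced `tf.keras.metrics.RSquare` instead of `tfa.metrics.RSquare`); the doctest fixes both. The value 0.57142866 is 4/7, which follows directly from R² = 1 - SS_res/SS_tot; a quick check, not from the commit:

```python
# Hand computation of R^2 for actuals=[1, 4, 3], preds=[2, 4, 4].
actuals = [1.0, 4.0, 3.0]
preds = [2.0, 4.0, 4.0]

mean = sum(actuals) / len(actuals)                          # 8/3
ss_res = sum((a - p) ** 2 for a, p in zip(actuals, preds))  # 1 + 0 + 1 = 2
ss_tot = sum((a - mean) ** 2 for a in actuals)              # 42/9 = 14/3
print(1 - ss_res / ss_tot)  # 0.571428... = 4/7, matching numpy=0.57142866
```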

tensorflow_addons/optimizers/lookahead.py

Lines changed: 3 additions & 4 deletions

```diff
@@ -35,10 +35,9 @@ class Lookahead(tf.keras.optimizers.Optimizer):

     Example of usage:

-    ```python
-    opt = tf.keras.optimizers.SGD(learning_rate)
-    opt = tfa.optimizers.Lookahead(opt)
-    ```
+    >>> opt = tf.keras.optimizers.SGD(learning_rate=0.01)
+    >>> opt = tfa.optimizers.Lookahead(opt)
+
     """

     def __init__(self,
```
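The wrapped optimizer behaves like any other Keras optimizer, so it drops straight into `model.compile`. A brief sketch (illustrative, not part of the commit; the `sync_period` and `slow_step_size` argument names and values below are assumptions about the wrapper's signature, not taken from this diff):

```python
import tensorflow as tf
import tensorflow_addons as tfa

# Lookahead keeps "slow" copies of the weights and periodically steps
# toward them; sync_period and slow_step_size control that schedule.
opt = tfa.optimizers.Lookahead(
    tf.keras.optimizers.SGD(learning_rate=0.01),
    sync_period=6,
    slow_step_size=0.5)

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer=opt, loss='mse')
```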

tensorflow_addons/optimizers/moving_average.py

Lines changed: 2 additions & 4 deletions

```diff
@@ -32,11 +32,9 @@ class MovingAverage(tf.keras.optimizers.Optimizer):

     Example of usage:

-    ```python
-    opt = tf.keras.optimizers.SGD(learning_rate)
-    opt = tfa.optimizers.MovingAverage(opt)
+    >>> opt = tf.keras.optimizers.SGD(learning_rate=0.01)
+    >>> opt = tfa.optimizers.MovingAverage(opt)

-    ```
     """

     def __init__(self,
```
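MovingAverage maintains an exponential moving average of the trained weights alongside the raw ones, and the averaged values are typically copied into the model before export. A sketch using the wrapper's `assign_average_vars` helper (the helper name is tfa's, but treat the exact call as an assumption if your version differs):

```python
import tensorflow as tf
import tensorflow_addons as tfa

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
opt = tfa.optimizers.MovingAverage(tf.keras.optimizers.SGD(learning_rate=0.01))
model.compile(optimizer=opt, loss='mse')

# ... train ...

# Swap the averaged weights into the model before saving or evaluating.
opt.assign_average_vars(model.variables)
model.save('averaged_model.h5')
```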

tensorflow_addons/optimizers/weight_decay_optimizers.py

Lines changed: 33 additions & 45 deletions

```diff
@@ -54,18 +54,15 @@ def __init__(self, weight_decay, *args, **kwargs):
     Note: when applying a decay to the learning rate, be sure to manually apply
     the decay to the `weight_decay` as well. For example:

-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
-
-    # ...
-
-    optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
-    ```
+    Usage:
+
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
+
     """

     def __init__(self, weight_decay, **kwargs):
@@ -213,18 +210,15 @@ def extend_with_decoupled_weight_decay(base_optimizer):
     Note: when applying a decay to the learning rate, be sure to manually apply
     the decay to the `weight_decay` as well. For example:

-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
+    Usage:

-    # ...
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)

-    optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
-    ```

     Note: you might want to register your own custom optimizer using
     `tf.keras.utils.get_custom_objects()`.
@@ -287,19 +281,16 @@ class SGDW(DecoupledWeightDecayExtension, tf.keras.optimizers.SGD):
     Note: when applying a decay to the learning rate, be sure to manually apply
     the decay to the `weight_decay` as well. For example:

-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
-
-    # ...
+    Usage:

-    optimizer = tfa.optimizers.SGDW(
-        learning_rate=lr, weight_decay=wd, momentum=0.9)
-    ```
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.SGDW(
+    ...     learning_rate=lr, weight_decay=wd, momentum=0.9)
+
     """

     def __init__(self,
@@ -362,18 +353,15 @@ class AdamW(DecoupledWeightDecayExtension, tf.keras.optimizers.Adam):
     Note: when applying a decay to the learning rate, be sure to manually apply
     the decay to the `weight_decay` as well. For example:

-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
-
-    # ...
+    Usage:

-    optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
-    ```
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
+
     """

     def __init__(self,
```
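All four hunks repeat the same pattern: the weight-decay schedule is tied to the learning-rate schedule by hand, since decoupled weight decay does not follow the learning rate automatically. The `extend_with_decoupled_weight_decay(base_optimizer)` factory in the second hunk is what produces classes like `SGDW` and `AdamW`; a sketch of rolling your own variant (the base-optimizer choice here is illustrative):

```python
import tensorflow as tf
import tensorflow_addons as tfa

# Build a decoupled-weight-decay version of RMSprop, analogous to SGDW/AdamW.
RMSpropW = tfa.optimizers.extend_with_decoupled_weight_decay(
    tf.keras.optimizers.RMSprop)

optimizer = RMSpropW(weight_decay=1e-4, learning_rate=1e-3)

# As the docstring notes, register the class if the model will be serialized.
tf.keras.utils.get_custom_objects()['RMSpropW'] = RMSpropW
```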
