diff --git a/tensorflow_addons/optimizers/adabelief.py b/tensorflow_addons/optimizers/adabelief.py
index 1ecabb4448..5ee8f923b0 100644
--- a/tensorflow_addons/optimizers/adabelief.py
+++ b/tensorflow_addons/optimizers/adabelief.py
@@ -113,7 +113,7 @@ def __init__(
             rectify: boolean. Whether to apply learning rate rectification as
                 from RAdam.
             total_steps: An integer. Total number of training steps. Enable
-                warmup by setting a positive value.
+                warmup by setting a value greater than zero.
             warmup_proportion: A floating point value. The proportion of
                 increasing steps.
             min_lr: A floating point value. Minimum learning rate after warmup.