4 changes: 2 additions & 2 deletions tensorflow_addons/examples/tfa_optimizer.py
@@ -65,11 +65,11 @@ def generate_data(num_validation):


def train_and_eval():
"""Train and evalute simple MNIST model using LazyAdamOptimizer."""
"""Train and evalute simple MNIST model using LazyAdam."""
data = generate_data(num_validation=VALIDATION_SAMPLES)
dense_net = build_mnist_model()
dense_net.compile(
-optimizer=tfa.optimizers.LazyAdamOptimizer(0.001),
+optimizer=tfa.optimizers.LazyAdam(0.001),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])

8 changes: 4 additions & 4 deletions tensorflow_addons/optimizers/BUILD
@@ -6,7 +6,7 @@ py_library(
name = "optimizers",
srcs = [
"__init__.py",
"lazy_adam_optimizer.py",
"lazy_adam.py",
],
srcs_version = "PY2AND3",
deps = [
@@ -15,12 +15,12 @@ py_library(
)

py_test(
name = "lazy_adam_optimizer_test",
name = "lazy_adam_test",
size = "small",
srcs = [
"lazy_adam_optimizer_test.py",
"lazy_adam_test.py",
],
main = "lazy_adam_optimizer_test.py",
main = "lazy_adam_test.py",
srcs_version = "PY2AND3",
deps = [
":optimizers",
4 changes: 2 additions & 2 deletions tensorflow_addons/optimizers/README.md
@@ -3,12 +3,12 @@
## Maintainers
| Submodule | Maintainers | Contact Info |
|:---------- |:------------- |:--------------|
-| lazy_adam_optimizer | SIG-Addons | addons@tensorflow.org |
+| lazy_adam | SIG-Addons | addons@tensorflow.org |

## Components
| Submodule | Optimizer | Reference |
|:----------------------- |:---------------------- |:---------|
-| lazy_adam_optimizer | LazyAdamOptimizer | https://arxiv.org/abs/1412.6980 |
+| lazy_adam | LazyAdam | https://arxiv.org/abs/1412.6980 |
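For orientation (editorial addition, not part of this diff), a minimal sketch of the renamed optimizer in use, assuming TensorFlow 2.x with the `tensorflow-addons` package installed and imported as `tfa`:

```python
import tensorflow as tf
import tensorflow_addons as tfa

# Compile a tiny classifier with the renamed optimizer;
# tfa.optimizers.LazyAdam replaces the former tfa.optimizers.LazyAdamOptimizer.
model = tf.keras.Sequential([tf.keras.layers.Dense(10, activation="softmax")])
model.compile(
    optimizer=tfa.optimizers.LazyAdam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=["accuracy"])
```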


## Contribution Guidelines
2 changes: 1 addition & 1 deletion tensorflow_addons/optimizers/__init__.py
@@ -18,4 +18,4 @@
from __future__ import division
from __future__ import print_function

-from tensorflow_addons.optimizers.lazy_adam_optimizer import LazyAdamOptimizer
+from tensorflow_addons.optimizers.lazy_adam import LazyAdam
tensorflow_addons/optimizers/lazy_adam.py (renamed from lazy_adam_optimizer.py)
@@ -29,7 +29,7 @@


@keras_utils.register_keras_custom_object
-class LazyAdamOptimizer(tf.keras.optimizers.Adam):
+class LazyAdam(tf.keras.optimizers.Adam):
"""Variant of the Adam optimizer that handles sparse updates more
efficiently.

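To illustrate the sparse-update behaviour described in the docstring above, a rough sketch (editorial addition, not part of the PR; assumes eager TF 2.x with `tensorflow-addons` installed): gathering a few rows from a large variable yields `IndexedSlices` gradients, which is the case LazyAdam is designed to handle.

```python
import tensorflow as tf
import tensorflow_addons as tfa

# Large embedding-style variable; only a handful of rows are used per step.
embeddings = tf.Variable(tf.random.normal([10000, 64]))
ids = tf.constant([3, 17, 42])

opt = tfa.optimizers.LazyAdam(learning_rate=0.001)

def loss_fn():
    # Gradient w.r.t. `embeddings` is an IndexedSlices covering three rows;
    # LazyAdam updates its moment slots only for those rows.
    return tf.reduce_sum(tf.square(tf.gather(embeddings, ids)))

opt.minimize(loss_fn, var_list=[embeddings])
```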
tensorflow_addons/optimizers/lazy_adam_test.py (renamed from lazy_adam_optimizer_test.py)
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LazyAdamOptimizer."""
"""Tests for LazyAdam."""

from __future__ import absolute_import
from __future__ import division
@@ -23,7 +23,7 @@

from tensorflow.python.eager import context
from tensorflow.python.ops import variables
-from tensorflow_addons.optimizers import lazy_adam_optimizer
+from tensorflow_addons.optimizers import lazy_adam
from tensorflow_addons.utils import test_utils


@@ -54,7 +54,7 @@ def get_beta_accumulators(opt, dtype):
return (beta_1_power, beta_2_power)


-class LazyAdamOptimizerTest(tf.test.TestCase):
+class LazyAdamTest(tf.test.TestCase):

# TODO: remove v1 tests (keep pace with adam_test.py in keras).
@test_utils.run_deprecated_v1
@@ -80,7 +80,7 @@ def testSparse(self):
grads1 = tf.IndexedSlices(
tf.constant(grads1_np[grads1_np_indices]),
tf.constant(grads1_np_indices), tf.constant([3]))
-opt = lazy_adam_optimizer.LazyAdamOptimizer()
+opt = lazy_adam.LazyAdam()
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
@@ -118,7 +118,7 @@ def testSparseDevicePlacement(self):
var = tf.Variable([[1.0], [2.0]])
indices = tf.constant([0, 1], dtype=index_dtype)
g_sum = lambda: tf.math.reduce_sum(tf.gather(var, indices)) # pylint: disable=cell-var-from-loop
-optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
+optimizer = lazy_adam.LazyAdam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(minimize_op)
@@ -137,10 +137,10 @@ def testSparseRepeatedIndices(self):
grad_aggregated = tf.IndexedSlices(
tf.constant([0.2], shape=[1, 1], dtype=dtype),
tf.constant([1]), tf.constant([2, 1]))
-repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
+repeated_update_opt = lazy_adam.LazyAdam()
repeated_update = repeated_update_opt.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
-aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
+aggregated_update_opt = lazy_adam.LazyAdam()
aggregated_update = aggregated_update_opt.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
@@ -181,8 +181,7 @@ def doTestBasic(self, use_callable_params=False):
beta2 = beta2()
epsilon = epsilon()

-opt = lazy_adam_optimizer.LazyAdamOptimizer(
-    learning_rate=learning_rate)
+opt = lazy_adam.LazyAdam(learning_rate=learning_rate)
if not context.executing_eagerly():
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
@@ -241,7 +240,7 @@ def testTensorLearningRate(self):
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
-opt = lazy_adam_optimizer.LazyAdamOptimizer(tf.constant(0.001))
+opt = lazy_adam.LazyAdam(tf.constant(0.001))
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
@@ -285,7 +284,7 @@ def testSharing(self):
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
-opt = lazy_adam_optimizer.LazyAdamOptimizer()
+opt = lazy_adam.LazyAdam()
update1 = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(
@@ -324,7 +323,7 @@ def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = tf.Variable(1.)
v2 = tf.Variable(1.)
-opt = lazy_adam_optimizer.LazyAdamOptimizer(1.)
+opt = lazy_adam.LazyAdam(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and v2.
self.assertEqual(5, len(set(opt.variables())))