Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Still missing: testing, sparse support, and GPU support.
- Loading branch information
Showing
8 changed files
with
362 additions
and
1 deletion.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,84 @@ | ||
# Copyright 2015 Google Inc. All Rights Reserved. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
# ============================================================================== | ||
|
||
"""Adadelta for TensorFlow.""" | ||
from __future__ import absolute_import | ||
from __future__ import division | ||
from __future__ import print_function | ||
|
||
from tensorflow.python.framework import ops | ||
from tensorflow.python.ops import constant_op | ||
from tensorflow.python.training import optimizer | ||
from tensorflow.python.training import training_ops | ||
|
||
|
||
class AdadeltaOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Adadelta algorithm.

  Implementation is based on M. D. Zeiler (2012),
  "ADADELTA: An Adaptive Learning Rate Method",
  http://arxiv.org/abs/1212.5701
  """

  def __init__(self, decay_rate=0.001, epsilon=1e-8,
               use_locking=False, name="Adadelta"):
    """Construct a new Adadelta optimizer.

    Implementation is based on http://arxiv.org/abs/1212.5701

    Args:
      decay_rate: A `Tensor` or a floating point value.  The decay rate
        used to average past squared gradients and updates.
      epsilon: A `Tensor` or a floating point value.  A constant epsilon
        used to better condition the grad update.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Adadelta".
    """
    super(AdadeltaOptimizer, self).__init__(use_locking, name)
    self._decay_rate = decay_rate
    self._epsilon = epsilon

    # Tensor versions of the constructor arguments, created in _prepare().
    # Bug fix: this was previously `self.epsilon_t`, which left
    # `self._epsilon_t` (the name actually read in _apply_dense) without a
    # declared default until _prepare() ran.
    self._decay_rate_t = None
    self._epsilon_t = None

  def _create_slots(self, var_list):
    # Adadelta needs two per-variable accumulators: one for squared
    # gradients ("accum") and one for squared updates ("update_accum").
    # NOTE(review): the "decay_rate" and "epsilon" slots are never read —
    # those scalars flow through _decay_rate_t/_epsilon_t instead.  They are
    # kept here only so get_slot_names() stays backward-compatible; confirm
    # whether they can be dropped.
    for v in var_list:
      self._zeros_slot(v, "decay_rate", self._name)
      self._zeros_slot(v, "epsilon", self._name)
      self._zeros_slot(v, "accum", self._name)
      self._zeros_slot(v, "update_accum", self._name)

  def _prepare(self):
    # Convert the python scalars to tensors once per apply_gradients() call.
    self._decay_rate_t = ops.convert_to_tensor(self._decay_rate,
                                               name="decay_rate")
    self._epsilon_t = ops.convert_to_tensor(self._epsilon,
                                            name="epsilon")

  def _apply_dense(self, grad, var):
    # Removed unused fetches of the "decay_rate"/"epsilon" slots; the op
    # consumes the scalar tensors prepared in _prepare().
    accum = self.get_slot(var, "accum")
    update_accum = self.get_slot(var, "update_accum")

    return training_ops.apply_adadelta(
        var, accum, update_accum,
        self._decay_rate_t, self._epsilon_t, grad,
        use_locking=self._use_locking).op

  # TODO(review): sparse support is still missing.  The commented-out code
  # below was copied from the Momentum optimizer and references attributes
  # (_learning_rate_tensor, _adadelta_tensor) that do not exist on this
  # class — it must be rewritten, not just uncommented.
  # def _apply_sparse(self, grad, var):
  #   mom = self.get_slot(var, "adadelta")
  #   return training_ops.sparse_apply_adadelta(
  #       var, mom,
  #       self._learning_rate_tensor, grad.values, grad.indices,
  #       self._adadelta_tensor, use_locking=self._use_locking).op
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,83 @@ | ||
# Copyright 2015 Google Inc. All Rights Reserved. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
# ============================================================================== | ||
|
||
"""Tests for Momentum.""" | ||
from __future__ import absolute_import | ||
from __future__ import division | ||
from __future__ import print_function | ||
|
||
import tensorflow.python.platform | ||
|
||
import numpy as np | ||
from six.moves import xrange # pylint: disable=redefined-builtin | ||
import tensorflow as tf | ||
|
||
|
||
class AdadeltaOptimizerTest(tf.test.TestCase):
  # NOTE(review): this test appears to have been copied from the Momentum
  # optimizer test (the docstring at the top of the file still says
  # "Tests for Momentum", and the comments below refer to "momentum
  # accumulators").  The expected values encode Momentum math with
  # learning_rate=2.0 and momentum=0.9 — e.g. `1.0 - (0.1 * 2.0)` — not
  # Adadelta math, and AdadeltaOptimizer() takes no learning rate at all.
  # These expectations almost certainly fail against apply_adadelta;
  # they should be recomputed from the Adadelta update rule — TODO confirm
  # against the C++ op's semantics.

  def testBasic(self):
    with self.test_session():
      # Two variables with distinct gradients so per-variable slots can be
      # told apart.
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([3.0, 4.0])
      grads0 = tf.constant([0.1, 0.1])
      grads1 = tf.constant([0.01, 0.01])
      adadelta_opt = tf.train.AdadeltaOptimizer()
      adadelta_update = adadelta_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()

      # Check we have slots.  The "decay_rate"/"epsilon" entries mirror what
      # the optimizer's _create_slots currently registers.
      self.assertEqual(["accum", "decay_rate", "epsilon", "update_accum"], adadelta_opt.get_slot_names())
      slot0 = adadelta_opt.get_slot(var0, "accum")
      # NOTE(review): assertEquals is a deprecated alias of assertEqual.
      self.assertEquals(slot0.get_shape(), var0.get_shape())
      # Slot variables must not be trainable themselves.
      self.assertFalse(slot0 in tf.trainable_variables())
      slot1 = adadelta_opt.get_slot(var1, "accum")
      self.assertEquals(slot1.get_shape(), var1.get_shape())
      self.assertFalse(slot1 in tf.trainable_variables())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0.eval())
      self.assertAllClose([3.0, 4.0], var1.eval())

      # Step 1: run one update op.
      adadelta_update.run()

      # Check that the accumulators have been updated.
      # NOTE(review): Adadelta's accumulator is a decayed average of squared
      # gradients, so expecting the raw gradient here looks wrong — verify.
      self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())
      self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())

      # Check that the parameters have been updated.
      # NOTE(review): `var - 2.0 * grad` is the Momentum/SGD expectation
      # with lr=2.0, not an Adadelta update — verify.
      self.assertAllClose(np.array([1.0 - (0.1 * 2.0),
                                    2.0 - (0.1 * 2.0)]),
                          var0.eval())
      self.assertAllClose(np.array([3.0 - (0.01 * 2.0),
                                    4.0 - (0.01 * 2.0)]),
                          var1.eval())
      # Step 2: the momentum accumulators contain the previous update.
      adadelta_update.run()
      # Check that the momentum accumulators have been updated.
      # NOTE(review): 0.9 is the Momentum default, not an Adadelta
      # quantity — these values are copied boilerplate.
      self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
                          slot0.eval())
      self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
                          slot1.eval())
      # Check that the parameters have been updated.
      self.assertAllClose(
          np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                    2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
          var0.eval())
      self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                                    3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
                          var1.eval())
|
||
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.