Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add piecewise constant op #2442

Merged
merged 1 commit into from
May 24, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
60 changes: 60 additions & 0 deletions tensorflow/python/training/learning_rate_decay.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import control_flow_ops


def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
Expand Down Expand Up @@ -84,3 +85,62 @@ def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
if staircase:
p = math_ops.floor(p)
return math_ops.mul(learning_rate, math_ops.pow(decay_rate, p), name=name)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Minor style nit: can you add another blank line here (our linter will complain otherwise...)?


def piecewise_constant(x, boundaries, values, name=None):
  """Piecewise constant from boundaries and interval values.

  Example: use a learning rate that's 1.0 for the first 100000 steps, 0.5
  for steps 100001 to 110000, and 0.1 for any additional steps.

  ```python
  global_step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)

  # Later, whenever we perform an optimization step, we increment global_step.
  ```

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s or `float`s or `int`s that specifies the
      values for the intervals defined by `boundaries`. It should have one
      more element than `boundaries`, and all elements should have the same
      type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
    `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and `values[-1]` when `x > boundaries[-1]`.
  """

  with ops.op_scope([x, boundaries, values, name],
                    name, 'PiecewiseConstant') as name:
    x = ops.convert_to_tensor(x)
    # Deliberately do NOT convert boundaries to x's dtype: an implicit cast
    # (e.g. float boundaries truncated to ints) could silently change the
    # outcome of the comparisons below.
    boundaries = ops.convert_n_to_tensor(boundaries)
    if any(b.dtype != x.dtype for b in boundaries):
      raise ValueError('boundaries must have the same dtype as x.')
    # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
    values = ops.convert_n_to_tensor(values)
    if any(v.dtype != values[0].dtype for v in values):
      raise ValueError('values must have elements all with the same dtype.')

    # Map each interval predicate to a thunk producing that interval's value.
    # The two open-ended intervals are handled first.
    pred_fn_pairs = {
        x <= boundaries[0]: lambda: values[0],
        x > boundaries[-1]: lambda: values[-1],
    }
    for lower, upper, value in zip(boundaries[:-1], boundaries[1:],
                                   values[1:-1]):
      interval_pred = (x > lower) & (x <= upper)
      # Bind `value` as a default argument so each lambda captures its own
      # interval's value rather than the loop variable's final value.
      pred_fn_pairs[interval_pred] = lambda value=value: value

    # The predicates above are mutually exclusive and exhaustive, so the
    # default branch can never be taken, but tf.case requires one.
    return control_flow_ops.case(pred_fn_pairs, lambda: values[0],
                                 exclusive=True)
35 changes: 35 additions & 0 deletions tensorflow/python/training/learning_rate_decay_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,41 @@ def testVariables(self):
expected = .1 * 0.96**(100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)

def testPiecewiseConstant(self):
  """Checks the decayed value in every interval, including boundary points."""
  with self.test_session():
    step = variables.Variable(-999)
    pc = learning_rate_decay.piecewise_constant(
        step, [100, 110, 120], [1.0, 0.1, 0.01, 0.001])

    variables.initialize_all_variables().run()
    # Initial value is far left of the first boundary.
    self.assertAllClose(pc.eval(), 1.0, 1e-6)
    # Each pair drives the step to a new region (or onto a boundary, which
    # belongs to the interval on its left) and checks the resulting value.
    for new_step, expected in [(100, 1.0), (105, 0.1), (110, 0.1),
                               (120, 0.01), (999, 0.001)]:
      step.assign(new_step).op.run()
      self.assertAllClose(pc.eval(), expected, 1e-6)

def testPiecewiseConstantEdgeCases(self):
  """Checks that dtype mismatches are rejected with ValueError."""
  with self.test_session():
    # An int32 step with float boundaries: boundaries must match x's dtype.
    with self.assertRaises(ValueError):
      x_int = variables.Variable(0, dtype=variables.dtypes.int32)
      learning_rate_decay.piecewise_constant(
          x_int, [-1.0, 1.0], [1, 2, 3])
    # Mixed dtypes within the values list are also rejected.
    with self.assertRaises(ValueError):
      x = variables.Variable(0.0)
      learning_rate_decay.piecewise_constant(
          x, [-1.0, 1.0], [1.0, 2, 3])

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you add some tests for error conditions (e.g. mismatched types in the list of boundaries, values)?


if __name__ == "__main__":
googletest.main()