
Commit b626f99

Merge a0d5ce3 into 002b800
haowen-xu committed Jan 14, 2019
2 parents 002b800 + a0d5ce3 commit b626f99
Showing 52 changed files with 1,477 additions and 1,102 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -8,7 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
This version introduces breaking changes. Existing code may prefer to stick to [v0.1.2](https://github.com/haowen-xu/tfsnippet/tree/v0.1.2)

### Added
-- Utilities have been exported to the root package, and now it's recommended to use TFSnippet by ``import tfsnippet as sn``.
+- Utilities have been exported to the root package, and now it's recommended to use TFSnippet by ``import tfsnippet as spt``.
- `tfsnippet.layers` package, including dense layer, convolutional layers, normalization layers, and flow layers.
- `tfsnippet.utils.debugging` module, including several utilities to write debugging code with a global switch to enable/disable.
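To illustrate the new convention, here is a minimal sketch (not part of the diff itself) using only names that appear in the README changes below — the ``spt`` alias, ``spt.Normal``, and the ``log_prob()`` method of the sampled ``StochasticTensor``:

.. code-block:: python

    import tfsnippet as spt  # the newly recommended alias (previously `sn`)

    # Sample from a unit Gaussian; `samples` is a StochasticTensor.
    normal = spt.Normal(0., 1.)
    samples = normal.sample(n_samples=100)
    # Log-likelihood of `samples` under `normal`.
    log_prob = samples.log_prob()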

22 changes: 11 additions & 11 deletions README.rst
@@ -65,7 +65,7 @@ From the very beginning, you might import TFSnippet as:

.. code-block:: python
-import tfsnippet as sn
+import tfsnippet as spt
Distributions
~~~~~~~~~~~~~
@@ -76,7 +76,7 @@ log-likelihood by simply calling ``log_prob()``.

.. code-block:: python
-normal = sn.Normal(0., 1.)
+normal = spt.Normal(0., 1.)
# The type of `samples` is :class:`tfsnippet.stochastic.StochasticTensor`.
samples = normal.sample(n_samples=100)
# You may obtain the log-likelihood of `samples` under `normal` by:
@@ -93,7 +93,7 @@ haven't provided a wrapper for a certain ZhuSuan distribution:
import zhusuan as zs
-uniform = sn.as_distribution(zs.distributions.Uniform())
+uniform = spt.as_distribution(zs.distributions.Uniform())
# The type of `samples` is :class:`tfsnippet.stochastic.StochasticTensor`.
samples = uniform.sample(n_samples=100)
@@ -108,7 +108,7 @@ the mini-batch iterators.
# Obtain a shuffled, two-array data flow, with batch-size 64.
# Any batch with fewer than 64 samples will be discarded.
-flow = sn.DataFlow.arrays(
+flow = spt.DataFlow.arrays(
[x, y], batch_size=64, shuffle=True, skip_incomplete=True)
for batch_x, batch_y in flow:
... # Do something with batch_x and batch_y
@@ -150,26 +150,26 @@ quickly run a training-loop by using utilities from TFSnippet:
# We shall adopt learning-rate annealing: the initial learning rate is
# 0.001, annealed by a factor of 0.99995 after every step.
learning_rate = tf.placeholder(shape=(), dtype=tf.float32)
-learning_rate_var = sn.AnnealingDynamicValue(0.001, 0.99995)
+learning_rate_var = spt.AnnealingDynamicValue(0.001, 0.99995)
# Build the training operation by AdamOptimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, var_list=params)
# Build the training data-flow
-train_flow = sn.DataFlow.arrays(
+train_flow = spt.DataFlow.arrays(
[train_x, train_y], batch_size=64, shuffle=True, skip_incomplete=True)
# Build the validation data-flow
-valid_flow = sn.DataFlow.arrays([valid_x, valid_y], batch_size=256)
+valid_flow = spt.DataFlow.arrays([valid_x, valid_y], batch_size=256)
-with sn.TrainLoop(params, max_epoch=max_epoch, early_stopping=True) as loop:
-trainer = sn.Trainer(loop, train_op, [input_x, input_y], train_flow,
-metrics={'loss': loss})
+with spt.TrainLoop(params, max_epoch=max_epoch, early_stopping=True) as loop:
+trainer = spt.Trainer(loop, train_op, [input_x, input_y], train_flow,
+metrics={'loss': loss})
# Anneal the learning-rate after every step by 0.99995.
trainer.anneal_after_steps(learning_rate_var, freq=1)
# Do validation and apply early-stopping after every epoch.
trainer.evaluate_after_epochs(
-sn.Evaluator(loop, loss, [input_x, input_y], valid_flow),
+spt.Evaluator(loop, loss, [input_x, input_y], valid_flow),
freq=1
)
# You may log the learning-rate after every epoch by adding a callback
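For readability, the post-merge training-loop snippet, assembled from the added lines above, reads as the following sketch. It assumes `loss`, `params`, `input_x`/`input_y`, the numpy arrays, and `max_epoch` are defined earlier in the README; the `feed_dict` argument and the final `trainer.run()` call are assumptions on my part, since the excerpt does not show how the `learning_rate` placeholder is fed or how the loop is started:

.. code-block:: python

    import tensorflow as tf
    import tfsnippet as spt

    # Learning-rate annealing: start at 0.001, multiply by 0.99995 per step.
    learning_rate = tf.placeholder(shape=(), dtype=tf.float32)
    learning_rate_var = spt.AnnealingDynamicValue(0.001, 0.99995)

    # Build the training operation by AdamOptimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.minimize(loss, var_list=params)

    # Training and validation data-flows.
    train_flow = spt.DataFlow.arrays(
        [train_x, train_y], batch_size=64, shuffle=True, skip_incomplete=True)
    valid_flow = spt.DataFlow.arrays([valid_x, valid_y], batch_size=256)

    with spt.TrainLoop(params, max_epoch=max_epoch, early_stopping=True) as loop:
        trainer = spt.Trainer(loop, train_op, [input_x, input_y], train_flow,
                              # assumed: feed the annealed value into the placeholder
                              feed_dict={learning_rate: learning_rate_var},
                              metrics={'loss': loss})
        # Anneal the learning-rate after every step by 0.99995.
        trainer.anneal_after_steps(learning_rate_var, freq=1)
        # Do validation and apply early-stopping after every epoch.
        trainer.evaluate_after_epochs(
            spt.Evaluator(loop, loss, [input_x, input_y], valid_flow),
            freq=1)
        trainer.run()  # assumed entry point to start the loop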
1 change: 1 addition & 0 deletions docs/api/.gitignore
@@ -3,6 +3,7 @@
!index.rst

!tfsnippet.rst
+!tfsnippet.dataflows.rst
!tfsnippet.datasets.rst
!tfsnippet.layers.rst
!tfsnippet.ops.rst
1 change: 1 addition & 0 deletions docs/api/index.rst
@@ -5,6 +5,7 @@ API Docs
:maxdepth: 1

tfsnippet
+tfsnippet.dataflows
tfsnippet.datasets
tfsnippet.layers
tfsnippet.ops
4 changes: 4 additions & 0 deletions docs/api/tfsnippet.dataflows.rst
@@ -0,0 +1,4 @@
+tfsnippet.dataflows
+===================
+
+.. automodapi:: tfsnippet.dataflows
6 changes: 3 additions & 3 deletions tests/distributions/test_flow.py
@@ -12,8 +12,8 @@ class FlowDistributionTestCase(tf.test.TestCase):

def test_errors(self):
normal = Normal(mean=0., std=1.)
-with pytest.raises(TypeError,
-match='`flow` is not an instance of `Flow`: 123'):
+with pytest.raises(TypeError, match='`flow` is not an instance of '
+'`BaseFlow`: 123'):
_ = FlowDistribution(normal, 123)

flow = QuadraticFlow(2., 5.)
@@ -89,7 +89,7 @@ def test_log_prob(self):
mean = tf.constant([0., 1., 2.], dtype=tf.float64)
normal = Normal(mean=mean, std=tf.constant(1., dtype=tf.float64))
flow = QuadraticFlow(2., 5.)
-flow.build(tf.constant(0.))
+flow.build(tf.constant(0., dtype=tf.float64))
distrib = FlowDistribution(normal, flow)

y = tf.constant([1., -1., 2.], dtype=tf.float64)
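The second hunk is a dtype fix: the flow is now built with a float64 input so that its build-time specs match the float64 `Normal`. A sketch of the resulting usage, under the assumption that `Normal` and `FlowDistribution` come from `tfsnippet.distributions` and `QuadraticFlow` from the test helper module shown next:

.. code-block:: python

    import tensorflow as tf
    from tfsnippet.distributions import FlowDistribution, Normal
    from tests.layers.flows.helper import QuadraticFlow  # assumed import path

    mean = tf.constant([0., 1., 2.], dtype=tf.float64)
    normal = Normal(mean=mean, std=tf.constant(1., dtype=tf.float64))
    flow = QuadraticFlow(2., 5.)
    # Build with a float64 input so the flow's dtype matches `normal`.
    flow.build(tf.constant(0., dtype=tf.float64))
    distrib = FlowDistribution(normal, flow)

    y = distrib.sample(n_samples=4)  # a StochasticTensor
    log_prob = y.log_prob()          # log-likelihood under the flow distribution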
53 changes: 14 additions & 39 deletions tests/layers/flows/helper.py
@@ -35,7 +35,7 @@ def quadratic_inverse_transform(ops, y, a, b):
class QuadraticFlow(BaseFlow):

def __init__(self, a, b):
-super(QuadraticFlow, self).__init__()
+super(QuadraticFlow, self).__init__(x_value_ndims=0)
self.a = a
self.b = b

@@ -46,25 +46,20 @@ def _build(self, input=None):
def explicitly_invertible(self):
return True

-def _transform(self, x, compute_y, compute_log_det, previous_log_det):
+def _transform(self, x, compute_y, compute_log_det):
y, log_det = quadratic_transform(tfops, x, self.a, self.b)
if not compute_y:
y = None
if not compute_log_det:
log_det = None
-elif previous_log_det is not None:
-log_det = previous_log_det + log_det
return y, log_det

-def _inverse_transform(self, y, compute_x, compute_log_det,
-previous_log_det):
+def _inverse_transform(self, y, compute_x, compute_log_det):
x, log_det = quadratic_inverse_transform(tfops, y, self.a, self.b)
if not compute_x:
x = None
if not compute_log_det:
log_det = None
-elif previous_log_det is not None:
-log_det = previous_log_det + log_det
return x, log_det


@@ -82,34 +82,14 @@ def invertible_flow_standard_check(self, flow, session, x, feed_dict=None,
np.testing.assert_allclose(x2_out, x_out, atol=atol, rtol=rtol)
np.testing.assert_allclose(
-log_det_x_out, log_det_y_out, atol=atol, rtol=rtol)

-if flow.value_ndims > 0:
-log_det_shape = x_out.shape[:-flow.value_ndims]
-else:
-log_det_shape = x_out.shape
-self.assertTupleEqual(log_det_y_out.shape, log_det_shape)
-self.assertTupleEqual(log_det_x_out.shape, log_det_shape)

-# test with previous_log_det
-previous_log_det_y = 10. * np.random.normal(
-size=log_det_y_out.shape).astype(log_det_y_out.dtype)
-previous_log_det_x = 10. * np.random.normal(
-size=log_det_x_out.shape).astype(log_det_x_out.dtype)

-np.testing.assert_allclose(
-session.run(
-flow.transform(x, previous_log_det=previous_log_det_y)[1],
-feed_dict=feed_dict
-),
-log_det_y_out + previous_log_det_y,
-atol=atol, rtol=rtol
-)

-np.testing.assert_allclose(
-session.run(
-flow.inverse_transform(y, previous_log_det=previous_log_det_x)[1],
-feed_dict=feed_dict
-),
-log_det_x_out + previous_log_det_x,
-atol=atol, rtol=rtol
-)
+self.assertEqual(np.size(x_out), np.size(y_out))

+x_batch_shape = x_out.shape
+y_batch_shape = y_out.shape
+if flow.x_value_ndims > 0:
+x_batch_shape = x_batch_shape[:-flow.x_value_ndims]
+if flow.y_value_ndims > 0:
+y_batch_shape = y_batch_shape[:-flow.y_value_ndims]
+self.assertTupleEqual(log_det_y_out.shape, x_batch_shape)
+self.assertTupleEqual(log_det_x_out.shape, y_batch_shape)
+self.assertTupleEqual(log_det_y_out.shape, log_det_x_out.shape)
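The rewritten helper now checks only the per-flow contract: `transform` and `inverse_transform` each return a `(tensor, log_det)` pair, the round trip reproduces the input, the two log-determinants are mutual negatives, and each log-det keeps its input's batch shape (trailing `x_value_ndims` / `y_value_ndims` dims stripped). A minimal sketch of that contract, assuming the `QuadraticFlow` defined above and a TF1-style session:

.. code-block:: python

    import numpy as np
    import tensorflow as tf

    flow = QuadraticFlow(2., 5.)  # x_value_ndims=0: an element-wise flow
    x = tf.constant(np.random.normal(size=[4, 5]).astype(np.float32))
    flow.build(x)

    y, log_det_y = flow.transform(x)           # x -> y, log|det dy/dx|
    x2, log_det_x = flow.inverse_transform(y)  # y -> x, log|det dx/dy|

    with tf.Session() as sess:
        x_out, x2_out, ld_y, ld_x = sess.run([x, x2, log_det_y, log_det_x])
        np.testing.assert_allclose(x2_out, x_out, rtol=1e-5, atol=1e-6)
        np.testing.assert_allclose(-ld_x, ld_y, rtol=1e-5, atol=1e-6)
        # With x_value_ndims=0 no trailing dims are stripped, so the
        # log-det keeps the full input shape.
        assert ld_y.shape == (4, 5)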
116 changes: 53 additions & 63 deletions tests/layers/flows/test_base.py
@@ -41,12 +41,12 @@ def explicitly_invertible(self):
return False
with pytest.raises(RuntimeError,
match='The flow is not explicitly invertible'):
-_ = _Flow().inverse_transform(tf.constant(0.))
+_ = _Flow(x_value_ndims=0).inverse_transform(tf.constant(0.))

# test build without input will cause error
with pytest.raises(ValueError,
match='`input` is required to build _Flow'):
-_ = _Flow().build(None)
+_ = _Flow(x_value_ndims=0).build(None)

# specify neither `compute_y` nor `compute_log_det` will cause error
flow = QuadraticFlow(2., 5.)
@@ -61,18 +61,6 @@ _ = flow.inverse_transform(
_ = flow.inverse_transform(
tf.constant(0.), compute_x=False, compute_log_det=False)

-# specify `previous_log_det` without `compute_log_det` will cause error
-with pytest.raises(
-ValueError, match='`previous_log_det` is specified but '
-'`compute_log_det` is False'):
-_ = flow.transform(tf.constant(0.), compute_log_det=False,
-previous_log_det=tf.constant(1.))
-with pytest.raises(
-ValueError, match='`previous_log_det` is specified but '
-'`compute_log_det` is False'):
-_ = flow.inverse_transform(tf.constant(0.), compute_log_det=False,
-previous_log_det=tf.constant(1.))

# test `inverse_transform` should only be called after built
flow = QuadraticFlow(2., 5.)
self.assertFalse(flow._has_built)
@@ -97,54 +85,49 @@ def _inverse_transform(self, y, compute_x, compute_log_det):
return y, y - 1.

with self.test_session() as sess:
-# shape assertions in transform
-flow = _Flow(value_ndims=1)
-with pytest.raises(Exception,
-match='`x.ndims` must be known and >= '
-'`value_ndims`'):
-sess.run(flow.transform(tf.constant(0.)))
+# shape assertions
+flow = _Flow(x_value_ndims=1, y_value_ndims=2)

-# shape assertions in transform, require_batch_ndims is True
-flow = _Flow(value_ndims=1, require_batch_dims=True)
with pytest.raises(Exception,
-match=r'`x.ndims` must be known and >= '
-r'`value_ndims \+ 1`'):
-sess.run(flow.transform(tf.constant([0.])))
+match='`x.ndims` must be known and >= '
+'`x_value_ndims`'):
+flow.build(tf.zeros([]))

-# shape assertions in inverse_transform
-flow = _Flow(value_ndims=1)
+flow.build(tf.zeros([2, 3]))
+self.assertEqual(flow._x_input_spec.shape, ('...', '?'))
+self.assertEqual(flow._y_input_spec.shape, ('...', '?', '?'))

+with pytest.raises(Exception, match='The shape of `x` is invalid'):
+sess.run(flow.transform(tf.zeros([])))

+with pytest.raises(Exception, match='The shape of `y` is invalid'):
+sess.run(flow.inverse_transform(tf.zeros([3])))

+# shape assertions, require_batch_ndims is True
+flow = _Flow(x_value_ndims=1, y_value_ndims=2,
+require_batch_dims=True)

with pytest.raises(Exception,
-match='`y.ndims` must be known and >= '
-'`value_ndims`'):
-sess.run(flow.inverse_transform(tf.constant(0.)))
+match=r'`x.ndims` must be known and >= '
+r'`x_value_ndims \+ 1`'):
+flow.build(tf.zeros([3]))

-# shape assertions in transform, require_batch_ndims is True
-flow = _Flow(value_ndims=1, require_batch_dims=True)
+flow.build(tf.zeros([2, 3]))
-with pytest.raises(Exception,
-match=r'`y.ndims` must be known and >= '
-r'`value_ndims \+ 1`'):
-sess.run(flow.inverse_transform(tf.constant([0.])))
+self.assertEqual(flow._x_input_spec.shape, ('...', '?', '?'))
+self.assertEqual(flow._y_input_spec.shape, ('...', '?', '?', '?'))

-# shape assertions in build
-flow = _Flow(value_ndims=1)
-with pytest.raises(Exception,
-match='`input.ndims` must be known and >= '
-'`value_ndims`'):
-sess.run(flow.build(tf.constant(0.)))
+with pytest.raises(Exception, match='The shape of `x` is invalid'):
+sess.run(flow.transform(tf.zeros([3])))

-flow = _Flow(value_ndims=1, require_batch_dims=True)
-with pytest.raises(Exception,
-match=r'`input.ndims` must be known and >= '
-r'`value_ndims \+ 1`'):
-sess.run(flow.build(tf.constant([0.])))
+with pytest.raises(Exception, match='The shape of `y` is invalid'):
+sess.run(flow.inverse_transform(tf.zeros([2, 3])))


class MultiLayerQuadraticFlow(MultiLayerFlow):

def __init__(self, n_layers):
-super(MultiLayerQuadraticFlow, self).__init__(n_layers=n_layers)
+super(MultiLayerQuadraticFlow, self).__init__(x_value_ndims=0,
+n_layers=n_layers)
self._flows = []

with tf.variable_scope(None, default_name='MultiLayerQuadraticFlow'):
@@ -159,16 +142,13 @@ def _build(self, input=None):
Expand All @@ -159,16 +142,13 @@ def _build(self, input=None):
def explicitly_invertible(self):
return True

-def _transform_layer(self, layer_id, x, compute_y, compute_log_det,
-previous_log_det):
+def _transform_layer(self, layer_id, x, compute_y, compute_log_det):
flow = self._flows[layer_id]
-return flow.transform(x, compute_y, compute_log_det, previous_log_det)
+return flow.transform(x, compute_y, compute_log_det)

-def _inverse_transform_layer(self, layer_id, y, compute_x, compute_log_det,
-previous_log_det):
+def _inverse_transform_layer(self, layer_id, y, compute_x, compute_log_det):
flow = self._flows[layer_id]
-return flow.inverse_transform(
-y, compute_x, compute_log_det, previous_log_det)
+return flow.inverse_transform(y, compute_x, compute_log_det)


class MultiLayerFlowTestCase(tf.test.TestCase):
@@ -194,18 +174,12 @@ def test_with_multi_layer_quadratic_flow(self):
test_y, tmp = quadratic_transform(
npyops, test_y, i + 1., i * 2 + 1.)
test_log_det += tmp
-previous_log_det = \
-10. * np.random.normal(size=test_log_det.shape).astype(np.float32)

y, log_det_y = flow.transform(tf.constant(test_x))
-_, log_det_y2 = flow.transform(
-tf.constant(test_x), previous_log_det=previous_log_det)

with self.test_session() as sess:
np.testing.assert_allclose(sess.run(y), test_y)
np.testing.assert_allclose(sess.run(log_det_y), test_log_det)
-np.testing.assert_allclose(sess.run(log_det_y2),
-test_log_det + previous_log_det)
invertible_flow_standard_check(self, flow, sess, test_x)

def test_errors(self):
@@ -217,20 +191,36 @@ def test_errors(self):
class FeatureMappingFlowTestCase(tf.test.TestCase):

def test_property(self):
+class _MyFlow(FeatureMappingFlow):
+def _build(self, input=None):
+pass

# test axis is integer
-flow = FeatureMappingFlow(axis=1, value_ndims=2)
+flow = _MyFlow(axis=1, value_ndims=2)
+flow.build(tf.zeros([2, 3, 4]))
self.assertEqual(flow.axis, -2)

# test axis is tuple
-flow = FeatureMappingFlow(axis=[-1, 1], value_ndims=2)
+flow = _MyFlow(axis=[-1, 1], value_ndims=2)
+flow.build(tf.zeros([2, 3, 4]))
self.assertEqual(flow.axis, (-2, -1))

def test_errors(self):
with pytest.raises(ValueError, match='`axis` must not be empty'):
_ = FeatureMappingFlow(axis=(), value_ndims=1)

+with pytest.raises(ValueError, match='Specifying `x_value_ndims` or '
+'`y_value_ndims` for a '
+'`FeatureMappingFlow` is not '
+'allowed'):
+_ = FeatureMappingFlow(axis=-1, value_ndims=1, x_value_ndims=2)

+with pytest.raises(ValueError, match='Specifying `x_value_ndims` or '
+'`y_value_ndims` for a '
+'`FeatureMappingFlow` is not '
+'allowed'):
+_ = FeatureMappingFlow(axis=-1, value_ndims=1, y_value_ndims=2)

with pytest.raises(ValueError, match='`axis` out of range, or not '
'covered by `value_ndims`'):
layer = FeatureMappingFlow(axis=-2, value_ndims=1)
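The `test_property` expectations above (axis=1 becoming -2 once built on a rank-3 input, and [-1, 1] becoming (-2, -1)) amount to normalizing every axis to a negative index relative to the built input's rank. A hypothetical sketch of that normalization (not tfsnippet code):

.. code-block:: python

    def normalize_axis(axis, input_ndims):
        """Convert `axis` (int or list) to sorted negative index/indices."""
        axes = [axis] if isinstance(axis, int) else list(axis)
        neg = sorted(a - input_ndims if a >= 0 else a for a in axes)
        return neg[0] if isinstance(axis, int) else tuple(neg)

    assert normalize_axis(1, 3) == -2              # as in the integer-axis test
    assert normalize_axis([-1, 1], 3) == (-2, -1)  # as in the tuple-axis test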
