
Commit

Merge branch 'master' into master
mkunesch committed Apr 21, 2023
2 parents 93a7774 + e5f910e commit e25c56c
Showing 13 changed files with 49 additions and 38 deletions.
examples/lookahead_mnist.py (2 changes: 1 addition & 1 deletion)
@@ -80,7 +80,7 @@ def train_step(params, optimizer_state, batch):
test_dataset.as_numpy_iterator())
print(f'Epoch {epoch+1}: test acc: {test_acc:.2f}')

-return test_acc
+return test_acc # pytype: disable=bad-return-type # numpy-scalars


if __name__ == '__main__':
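
Note: the pytype suppression above is needed because the test accuracy comes back as a NumPy scalar rather than the annotated Python float. A hypothetical alternative (not what this commit does) is an explicit cast; a minimal sketch, with a made-up helper name:

    import numpy as np

    def accuracy_as_float(correct: int, total: int) -> float:
      # np.float64 division returns a NumPy scalar; the explicit float() cast
      # makes the annotated return type hold for type checkers as well.
      return float(np.float64(correct) / np.float64(total))
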
examples/mnist.py (4 changes: 2 additions & 2 deletions)
@@ -54,7 +54,7 @@ def model_accuracy(model: Callable[[chex.Array], chex.Array],
accuracy_sum += _single_batch_accuracy(logits, batch['label']) * batch_size
dataset_size += batch_size

-return accuracy_sum / dataset_size
+return accuracy_sum / dataset_size # pytype: disable=bad-return-type # numpy-scalars # pylint: disable=line-too-long


# Optax is agnostic to which (if any) neural network library is used. Below we
@@ -111,7 +111,7 @@ def train_step(params, optimizer_state, batch):
test_acc = model_accuracy(eval_model, test_dataset.as_numpy_iterator())
print(f'Epoch {epoch+1}: test acc: {test_acc:.2f}')

-return test_acc
+return test_acc # pytype: disable=bad-return-type # numpy-scalars


def main(unused_argv):
optax/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -187,7 +187,7 @@
from optax._src.wrappers import skip_large_updates
from optax._src.wrappers import skip_not_finite

__version__ = "0.1.5.dev"
__version__ = "0.1.5"

__all__ = (
"adabelief",
optax/_src/alias_test.py (6 changes: 3 additions & 3 deletions)
@@ -188,13 +188,13 @@ def test_explicit_dtype(self, dtype):
expected_dtype = jax.dtypes.canonicalize_dtype(dtype) # None -> float32
tx = alias.sgd(0.1, momentum=0.9, accumulator_dtype=dtype)
trace_state, _ = tx.init(jnp.array([0.0, 0.0]))
-self.assertEqual(expected_dtype, trace_state.trace.dtype)
+self.assertEqual(expected_dtype, getattr(trace_state, 'trace').dtype)
tx = alias.adam(0.1, mu_dtype=dtype)
adam_state, _ = tx.init(jnp.array([0.0, 0.0]))
-self.assertEqual(expected_dtype, adam_state.mu.dtype)
+self.assertEqual(expected_dtype, getattr(adam_state, 'mu').dtype)
tx = alias.adamw(0.1, mu_dtype=dtype)
adam_state, _, _ = tx.init(jnp.array([0.0, 0.0]))
-self.assertEqual(expected_dtype, adam_state.mu.dtype)
+self.assertEqual(expected_dtype, getattr(adam_state, 'mu').dtype)


if __name__ == '__main__':
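
Note: the dtype arguments exercised here are public optax options. A minimal usage sketch, with illustrative values:

    import jax.numpy as jnp
    import optax

    # Keep SGD's momentum trace and Adam's first moment in bfloat16.
    tx_sgd = optax.sgd(0.1, momentum=0.9, accumulator_dtype=jnp.bfloat16)
    tx_adam = optax.adam(0.1, mu_dtype=jnp.bfloat16)

    params = jnp.zeros((2, 2))
    trace_state, _ = tx_sgd.init(params)   # (TraceState, ScaleState)
    adam_state, _ = tx_adam.init(params)   # (ScaleByAdamState, ScaleState)
    print(trace_state.trace.dtype, adam_state.mu.dtype)  # both bfloat16
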
optax/_src/base_test.py (8 changes: 4 additions & 4 deletions)
@@ -148,7 +148,7 @@ def test_stateless_no_params(self):
def opt(g, _):
return jax.tree_util.tree_map(lambda g_: g_ * 2, g)

-state = opt.init(None)
+state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
update_fn = self.variant(opt.update)
new_updates, _ = update_fn(updates, state)
expected_updates = {'linear': jnp.full((5, 3), 6.0)}
@@ -159,7 +159,7 @@ def weight_decay(g, p):
return jax.tree_util.tree_map(lambda g_, p_: g_ + 0.1 * p_, g, p)

opt = base.stateless(weight_decay)
-state = opt.init(None)
+state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
self.assertIsInstance(state, base.EmptyState)


@@ -183,15 +183,15 @@ def test_stateless_with_tree_map_no_params(self):
updates = {'linear': jnp.full((5, 3), 3.0)}

opt = base.stateless_with_tree_map(lambda g, _: g * 2.0)
-state = opt.init(None)
+state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
update_fn = self.variant(opt.update)
new_updates, _ = update_fn(updates, state)
expected_updates = {'linear': jnp.full((5, 3), 6.0)}
chex.assert_trees_all_close(new_updates, expected_updates)

def test_init_returns_emptystate(self):
opt = base.stateless_with_tree_map(lambda g, p: g + 0.1 * p)
-state = opt.init(None)
+state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
self.assertIsInstance(state, base.EmptyState)
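
Note: `base.stateless` (tested above) wraps a pure `(updates, params) -> updates` function into a GradientTransformation whose state is just `base.EmptyState()`. A minimal sketch, with made-up parameter values:

    import jax
    import jax.numpy as jnp
    from optax._src import base

    # Wrap a simple weight-decay rule; the transformation keeps no state.
    opt = base.stateless(
        lambda g, p: jax.tree_util.tree_map(lambda g_, p_: g_ + 0.1 * p_, g, p))

    params = {'w': jnp.ones((3,))}
    grads = {'w': jnp.full((3,), 2.0)}
    state = opt.init(params)          # EmptyState()
    new_updates, _ = opt.update(grads, state, params)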


optax/_src/combine.py (2 changes: 1 addition & 1 deletion)
@@ -69,7 +69,7 @@ def update_fn(updates, state, params=None, **extra_args):


class MultiTransformState(NamedTuple):
-inner_states: Mapping[Hashable, NamedTuple]
+inner_states: Mapping[Hashable, base.OptState]


def multi_transform(
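
Note: with this change `inner_states` is typed as mapping labels to arbitrary `base.OptState` values, matching how `multi_transform` builds its state. A hedged usage sketch, with illustrative optimizers and labels:

    import jax.numpy as jnp
    import optax

    params = {'w': jnp.zeros((3,)), 'b': jnp.zeros((3,))}

    # Route 'w' through adam and 'b' through sgd. The resulting state's
    # `inner_states` maps each label to the state of the corresponding
    # (wrapped) transform, which need not be a single NamedTuple.
    tx = optax.multi_transform(
        {'adam': optax.adam(1e-2), 'sgd': optax.sgd(1e-3)},
        param_labels={'w': 'adam', 'b': 'sgd'})
    state = tx.init(params)
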
optax/_src/control_variates.py (4 changes: 2 additions & 2 deletions)
@@ -271,7 +271,7 @@ def control_variates_jacobians(
"""
control_variate = control_variate_from_function(function)
stochastic_cv, expected_value_cv, update_state_cv = control_variate
-data_dim = params[0].shape[0]
+data_dim = jax.tree_util.tree_leaves(params)[0].shape[0]
if estimate_cv_coeffs:
cv_coeffs = estimate_control_variate_coefficients(
function, control_variate_from_function, grad_estimator, params,
@@ -315,7 +315,7 @@ def param_fn(x):
lambda x: expected_value_cv(x, control_variate_state))(params)

jacobians = []
-for param_index, param in enumerate(params):
+for param_index, param in enumerate(jax.tree_util.tree_leaves(params)):
chex.assert_shape(function_jacobians[param_index], (num_samples, data_dim))
chex.assert_shape(cv_jacobians[param_index], (num_samples, data_dim))
chex.assert_shape(cv_param_grads[param_index], (data_dim,))
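
Note: switching to `jax.tree_util.tree_leaves` lets `params` be any pytree rather than requiring an indexable sequence. A small illustration with made-up shapes:

    import jax
    import jax.numpy as jnp

    params_list = [jnp.zeros((4,)), jnp.zeros((4,))]
    params_dict = {'mean': jnp.zeros((4,)), 'log_scale': jnp.zeros((4,))}

    # `params[0]` only works for indexable containers; `tree_leaves` flattens
    # any pytree, so both layouts expose the leading dimension the same way.
    for p in (params_list, params_dict):
      data_dim = jax.tree_util.tree_leaves(p)[0].shape[0]
      assert data_dim == 4
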
optax/_src/lookahead.py (5 changes: 2 additions & 3 deletions)
@@ -96,9 +96,8 @@ def lookahead(
raise ValueError('Synchronization period must be >= 1.')

def init_fn(params: base.Params) -> LookaheadState:
-try:
-fast_params = params.fast
-except AttributeError:
+fast_params = getattr(params, 'fast', None)
+if fast_params is None:
# Allowing init_fn to be called with fast parameters reduces the
# modifications necessary to adapt code to use lookahead in some cases.
logging.warning(
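
Note: the rewritten branch swaps try/except for a `getattr` default, which pytype can follow more easily. A standalone sketch of the same pattern; `resolve_fast_params` is a hypothetical helper, not optax API:

    from typing import Any

    def resolve_fast_params(params: Any):
      # Same intent as the try/except version: prefer `params.fast` if the
      # container provides it, otherwise treat `params` itself as the fast set.
      fast_params = getattr(params, 'fast', None)
      if fast_params is None:
        return params
      return fast_params
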
optax/_src/state_utils.py (10 changes: 7 additions & 3 deletions)
@@ -15,7 +15,7 @@
"""Tools for mapping over optimizer states."""

import typing
-from typing import Any, Callable, Optional, Protocol, Union
+from typing import Any, Callable, Optional, Protocol, Union, cast

import jax
from optax._src import base
@@ -78,10 +78,14 @@ def tree_map_params(
optional extra arguments.
"""

+# Cast for pytype checks (no-op for other usages).
+placeholder = cast(base.chex.ArrayTree, _ParamsPlaceholder())
+
if isinstance(initable, Initable):
-state_with_placeholders = initable.init(_ParamsPlaceholder())
+initable = cast(Initable, initable) # for pytype checks
+state_with_placeholders = initable.init(placeholder)
else:
-state_with_placeholders = initable(_ParamsPlaceholder())
+state_with_placeholders = initable(placeholder)

def map_params(maybe_placeholder_value, value):
if isinstance(maybe_placeholder_value, _ParamsPlaceholder):
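
Note: `typing.cast` only informs the type checker and is a no-op at runtime, so these insertions do not change behavior. A minimal sketch:

    from typing import cast

    class _Placeholder:
      pass

    value: object = _Placeholder()
    # cast() does no conversion or checking at runtime; it simply returns
    # `value` while telling the checker to treat it as a _Placeholder.
    typed_value = cast(_Placeholder, value)
    assert typed_value is value
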
optax/_src/state_utils_test.py (15 changes: 10 additions & 5 deletions)
@@ -15,7 +15,7 @@
"""Tests for state_utils."""

import dataclasses
-from typing import Optional, TypedDict
+from typing import Optional, TypedDict, cast

from absl.testing import absltest
import chex
@@ -37,7 +37,7 @@ class FakeShardSpec:
class ScaleByAdamStateDict(TypedDict):
"""An opt state that uses dictionaries instead of classes."""

-count: int
+count: chex.Array
params: TypedDict('Params', {'mu': chex.ArrayTree, 'nu': chex.ArrayTree})


@@ -48,6 +48,7 @@ def _scale_by_adam_with_dicts():

def init(params):
state = t.init(params)
+state = cast(transform.ScaleByAdamState, state)

return ScaleByAdamStateDict(
count=state.count,
@@ -62,6 +63,7 @@ def update(updates, state, params=None):
)

updates, state = t.update(updates, state, params)
+state = cast(transform.ScaleByAdamState, state)
return ScaleByAdamStateDict(
count=state.count,
params={'mu': state.mu, 'nu': state.nu},
@@ -138,9 +140,10 @@ def init(params):

state = init(params)
state = state_utils.tree_map_params(init, lambda v: v+1, state)
+state = cast(Foo, state)

-self.assertEqual(state.count, 0)
-self.assertEqual(state.v, {'w': 1})
+self.assertEqual(int(state.count), 0)
+self.assertEqual(state.v, {'w': jnp.array(1)})

def test_adam(self):
params = _fake_params()
@@ -192,10 +195,12 @@ def test_inject_hparams(self):
params = _fake_params()
state = opt.init(params)
state = state_utils.tree_map_params(opt, lambda v: v+1, state)
+state = cast(schedule.InjectHyperparamsState, state)

self.assertEqual(1e-3, state.hyperparams['learning_rate'])
params_plus_one = jax.tree_map(lambda v: v+1, params)
-chex.assert_trees_all_close(state.inner_state[0].mu, params_plus_one)
+mu = getattr(state.inner_state[0], 'mu')
+chex.assert_trees_all_close(mu, params_plus_one)

def test_map_params_to_none(self):
opt = alias.adagrad(1e-4)
optax/_src/transform_test.py (8 changes: 4 additions & 4 deletions)
@@ -221,7 +221,7 @@ def test_scale(self):
factor = 0.1 ** i
rescaler = transform.scale(factor)
# Apply rescaling.
-scaled_updates, _ = rescaler.update(updates, None)
+scaled_updates, _ = rescaler.update(updates, {})
# Manually scale updates.
def rescale(t):
return t * factor # pylint:disable=cell-var-from-loop
@@ -240,7 +240,7 @@ def test_centralize(self, inputs, outputs):
inputs = jnp.asarray(inputs)
outputs = jnp.asarray(outputs)
centralizer = transform.centralize()
-centralized_inputs, _ = centralizer.update(inputs, None)
+centralized_inputs, _ = centralizer.update(inputs, {})
chex.assert_trees_all_close(centralized_inputs, outputs)

@chex.all_variants
@@ -282,10 +282,10 @@ def f(params: jnp.ndarray) -> jnp.ndarray:
og = transform.scale_by_optimistic_gradient()
og_state = og.init(initial_params)
# Provide some arbitrary previous gradient.
-og_state.trace['x'] = 1.5
+getattr(og_state, 'trace')['x'] = 1.5

g = jax.grad(f)(initial_params)
-og_true = 2 * g['x'] - og_state.trace['x']
+og_true = 2 * g['x'] - getattr(og_state, 'trace')['x']
og, og_state = og.update(g, og_state)

# Compare transformation output with manually computed optimistic gradient.
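
Note: an empty dict is a valid ArrayTree for pytype, and `scale`/`centralize` ignore the incoming state, so `{}` behaves like `None` here at runtime. A small sketch using the state returned by `init` instead; values are illustrative:

    import jax.numpy as jnp
    import optax

    updates = {'w': jnp.ones((2,))}
    rescaler = optax.scale(0.1)
    state = rescaler.init(updates)        # ScaleState() holds nothing.
    scaled, _ = rescaler.update(updates, state)
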
optax/_src/wrappers.py (7 changes: 5 additions & 2 deletions)
@@ -427,8 +427,11 @@ def mid_step(args):

return new_updates, new_state

-def has_updated(self, state: MultiStepsState) -> Array:
-return jnp.logical_and(state.mini_step == 0, state.gradient_step > 0)
+def has_updated(self, state: Union[MultiStepsState, chex.ArrayTree]) -> Array:
+# Use `getattr` to bypass pytype checks.
+return jnp.logical_and(
+getattr(state, 'mini_step') == 0, getattr(state, 'gradient_step') > 0
+)

def gradient_transformation(self) -> base.GradientTransformation:
return base.GradientTransformation(init=self.init, update=self.update)
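
Note: `has_updated` reports whether the wrapped optimizer applied an accumulated update on the most recent step. A hedged usage sketch of `optax.MultiSteps`, with illustrative values:

    import jax.numpy as jnp
    import optax

    params = {'w': jnp.zeros((2,))}
    grads = {'w': jnp.ones((2,))}

    # Accumulate gradients over 2 mini-steps before each real update.
    ms = optax.MultiSteps(optax.sgd(0.1), every_k_schedule=2)
    state = ms.init(params)
    _, state = ms.update(grads, state, params)
    print(bool(ms.has_updated(state)))  # False: still accumulating.
    _, state = ms.update(grads, state, params)
    print(bool(ms.has_updated(state)))  # True: an update was just applied.
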
optax/_src/wrappers_test.py (14 changes: 7 additions & 7 deletions)
@@ -111,35 +111,35 @@ def fn(x):
# We know exactly what should be the value of params since we are
# effectively using sgd in all cases.
self.assertEqual(-1., float(jax.tree_util.tree_flatten(params)[0][0]))
-self.assertTrue(bool(state.last_finite))
+self.assertTrue(bool(getattr(state, 'last_finite')))
# Check 2 rejected param updates
for step in range(2):
grads = grads_fn(params, nan)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertEqual(-1., float(jax.tree_util.tree_flatten(params)[0][0]))
-self.assertFalse(bool(state.last_finite))
-self.assertEqual(step + 1, int(state.notfinite_count))
+self.assertFalse(bool(getattr(state, 'last_finite')))
+self.assertEqual(step + 1, int(getattr(state, 'notfinite_count')))
# Next successful param update
grads = grads_fn(params, one)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertEqual(-2., float(jax.tree_util.tree_flatten(params)[0][0]))
-self.assertTrue(bool(state.last_finite))
+self.assertTrue(bool(getattr(state, 'last_finite')))
# Again 2 rejected param updates
for step in range(2):
grads = grads_fn(params, nan)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertEqual(-2., float(jax.tree_util.tree_flatten(params)[0][0]))
-self.assertFalse(bool(state.last_finite))
-self.assertEqual(step + 1, int(state.notfinite_count))
+self.assertFalse(bool(getattr(state, 'last_finite')))
+self.assertEqual(step + 1, int(getattr(state, 'notfinite_count')))
# Next param update with NaN is accepted since we reached maximum
grads = grads_fn(params, nan)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertTrue(bool(jnp.isnan(jax.tree_util.tree_flatten(params)[0][0])))
-self.assertEqual(5, int(state.total_notfinite))
+self.assertEqual(5, int(getattr(state, 'total_notfinite')))

def test_apply_if_finite_pmap(self):
# Unlike in `test_apply_if_finite`:
