Commit e188350
Resolve pytype errors.
PiperOrigin-RevId: 525737739
hbq1 authored and OptaxDev committed Apr 20, 2023
1 parent ceebb9c commit e188350
Showing 9 changed files with 56 additions and 45 deletions.
6 changes: 3 additions & 3 deletions optax/_src/alias_test.py
@@ -188,13 +188,13 @@ def test_explicit_dtype(self, dtype):
expected_dtype = jax.dtypes.canonicalize_dtype(dtype) # None -> float32
tx = alias.sgd(0.1, momentum=0.9, accumulator_dtype=dtype)
trace_state, _ = tx.init(jnp.array([0.0, 0.0]))
- self.assertEqual(expected_dtype, trace_state.trace.dtype) # pytype: disable=attribute-error # numpy-scalars
+ self.assertEqual(expected_dtype, getattr(trace_state, 'trace').dtype)
tx = alias.adam(0.1, mu_dtype=dtype)
adam_state, _ = tx.init(jnp.array([0.0, 0.0]))
- self.assertEqual(expected_dtype, adam_state.mu.dtype) # pytype: disable=attribute-error # numpy-scalars
+ self.assertEqual(expected_dtype, getattr(adam_state, 'mu').dtype)
tx = alias.adamw(0.1, mu_dtype=dtype)
adam_state, _, _ = tx.init(jnp.array([0.0, 0.0]))
- self.assertEqual(expected_dtype, adam_state.mu.dtype) # pytype: disable=attribute-error # numpy-scalars
+ self.assertEqual(expected_dtype, getattr(adam_state, 'mu').dtype)


if __name__ == '__main__':
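
Note on the pattern used in this file: `tx.init` is annotated as returning the opaque `base.OptState`, so pytype cannot see the concrete NamedTuple fields and `trace_state.trace` needed a suppression comment. Reading the field through `getattr` type-checks without one. A minimal self-contained sketch of the idea (the `TraceState` class here is a hypothetical stand-in, not optax's):

from typing import NamedTuple

import jax.numpy as jnp


class TraceState(NamedTuple):  # hypothetical stand-in for an optax state
  trace: jnp.ndarray


def init(params: jnp.ndarray) -> NamedTuple:  # opaque, like base.OptState
  return TraceState(trace=jnp.zeros_like(params))


state = init(jnp.array([0.0, 0.0]))
# pytype flags `state.trace` (the declared type has no `trace` attribute),
# while `getattr(state, 'trace')` is accepted and identical at runtime.
assert getattr(state, 'trace').dtype == jnp.float32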
6 changes: 3 additions & 3 deletions optax/_src/combine.py
@@ -69,7 +69,7 @@ def update_fn(updates, state, params=None, **extra_args):


class MultiTransformState(NamedTuple):
- inner_states: Mapping[Hashable, NamedTuple]
+ inner_states: Mapping[Hashable, base.OptState]


def multi_transform(
@@ -146,7 +146,7 @@ def init_fn(params):
group: wrappers.masked(tx, make_mask(labels, group)).init(params)
for group, tx in transforms.items()
}
- return MultiTransformState(inner_states) # pytype: disable=wrong-arg-types # numpy-scalars # pylint: disable=line-too-long
+ return MultiTransformState(inner_states)

def update_fn(updates, state, params=None):
labels = param_labels(updates) if callable(param_labels) else param_labels
@@ -155,6 +155,6 @@ def update_fn(updates, state, params=None):
masked_tx = wrappers.masked(tx, make_mask(labels, group))
updates, new_inner_state[group] = masked_tx.update(
updates, state.inner_states[group], params)
- return updates, MultiTransformState(new_inner_state) # pytype: disable=wrong-arg-types # numpy-scalars # pylint: disable=line-too-long
+ return updates, MultiTransformState(new_inner_state)

return base.GradientTransformation(init_fn, update_fn)
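
The annotation change is the whole fix here: once `inner_states` is typed as `Mapping[Hashable, base.OptState]` rather than `Mapping[Hashable, NamedTuple]`, the dictionaries built in `init_fn` and `update_fn` are valid arguments and the two suppression comments can go. For context, a typical `multi_transform` usage looks like this (labels and hyperparameters are illustrative):

import jax.numpy as jnp
import optax

params = {'w': jnp.zeros((3, 2)), 'b': jnp.zeros((2,))}
labels = {'w': 'adam', 'b': 'sgd'}  # route each leaf to one transform
tx = optax.multi_transform(
    {'adam': optax.adam(1e-3), 'sgd': optax.sgd(1e-2)}, labels)
state = tx.init(params)  # MultiTransformState, one inner state per label
grads = {'w': jnp.ones((3, 2)), 'b': jnp.ones((2,))}
updates, state = tx.update(grads, state)
params = optax.apply_updates(params, updates)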
6 changes: 3 additions & 3 deletions optax/_src/control_variates.py
@@ -271,7 +271,7 @@ def control_variates_jacobians(
"""
control_variate = control_variate_from_function(function)
stochastic_cv, expected_value_cv, update_state_cv = control_variate
- data_dim = params[0].shape[0] # pytype: disable=attribute-error # numpy-scalars # pylint: disable=line-too-long
+ data_dim = jax.tree_util.tree_leaves(params)[0].shape[0]
if estimate_cv_coeffs:
cv_coeffs = estimate_control_variate_coefficients(
function, control_variate_from_function, grad_estimator, params,
@@ -315,7 +315,7 @@ def param_fn(x):
lambda x: expected_value_cv(x, control_variate_state))(params)

jacobians = []
- for param_index, param in enumerate(params):
+ for param_index, param in enumerate(jax.tree_util.tree_leaves(params)):
chex.assert_shape(function_jacobians[param_index], (num_samples, data_dim))
chex.assert_shape(cv_jacobians[param_index], (num_samples, data_dim))
chex.assert_shape(cv_param_grads[param_index], (data_dim,))
@@ -330,7 +330,7 @@ def param_fn(x):
# \nabla_{\theta} E_{p(x; \theta)}]
param_jacobians += cv_coeff * expected_value_grads[param_index]

- chex.assert_shape(param_jacobians, (num_samples,) + param.shape) # pytype: disable=attribute-error # numpy-scalars # pylint: disable=line-too-long
+ chex.assert_shape(param_jacobians, (num_samples,) + param.shape)
jacobians.append(param_jacobians)

return jacobians, control_variate_state
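
Switching from `params[0]` to `jax.tree_util.tree_leaves(params)[0]` both silences pytype and makes the function indifferent to the container type, since `tree_leaves` flattens any pytree to a list of arrays. A quick illustration (shapes are illustrative):

import jax
import jax.numpy as jnp

params_tuple = (jnp.ones((4,)), jnp.ones((4,)))
params_dict = {'log_scale': jnp.ones((4,)), 'mean': jnp.ones((4,))}

# Both flatten to a list of arrays, so leaves[0].shape[0] works either way.
print(jax.tree_util.tree_leaves(params_tuple)[0].shape[0])  # 4
print(jax.tree_util.tree_leaves(params_dict)[0].shape[0])   # 4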
5 changes: 2 additions & 3 deletions optax/_src/lookahead.py
@@ -96,9 +96,8 @@ def lookahead(
raise ValueError('Synchronization period must be >= 1.')

def init_fn(params: base.Params) -> LookaheadState:
- try:
-   fast_params = params.fast # pytype: disable=attribute-error # numpy-scalars # pylint: disable=line-too-long
- except AttributeError:
+ fast_params = getattr(params, 'fast', None)
+ if fast_params is None:
# Allowing init_fn to be called with fast parameters reduces the
# modifications necessary to adapt code to use lookahead in some cases.
logging.warning(
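
The `try`/`except AttributeError` block needed a suppression on the `params.fast` access; `getattr` with a default expresses the same fallback in one pytype-clean expression. The equivalence, sketched with a hypothetical stand-in for `optax.LookaheadParams`:

from typing import NamedTuple


class FastSlow(NamedTuple):  # hypothetical stand-in for LookaheadParams
  fast: int
  slow: int


def fast_of(params):
  # Before: try/except AttributeError around `params.fast`.
  # After: same fallback, no exception handling, no pytype comment.
  return getattr(params, 'fast', None)


assert fast_of(FastSlow(fast=1, slow=2)) == 1
assert fast_of([1, 2]) is None  # plain pytrees have no `.fast`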
10 changes: 7 additions & 3 deletions optax/_src/state_utils.py
@@ -15,7 +15,7 @@
"""Tools for mapping over optimizer states."""

import typing
- from typing import Any, Callable, Optional, Protocol, Union
+ from typing import Any, Callable, Optional, Protocol, Union, cast

import jax
from optax._src import base
@@ -78,10 +78,14 @@ def tree_map_params(
optional extra arguments.
"""

+ # Cast for pytype checks (no-op for other usages).
+ placeholder = cast(base.chex.ArrayTree, _ParamsPlaceholder())
+
if isinstance(initable, Initable):
- state_with_placeholders = initable.init(_ParamsPlaceholder()) # type: ignore # numpy-scalars # pylint: disable=line-too-long
+ initable = cast(Initable, initable) # for pytype checks
+ state_with_placeholders = initable.init(placeholder)
else:
- state_with_placeholders = initable(_ParamsPlaceholder()) # pytype: disable=wrong-arg-types # numpy-scalars # pylint: disable=line-too-long
+ state_with_placeholders = initable(placeholder)

def map_params(maybe_placeholder_value, value):
if isinstance(maybe_placeholder_value, _ParamsPlaceholder):
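
`typing.cast` is purely a static annotation: at runtime it returns its second argument unchanged, so casting the placeholder (and `initable`) costs nothing and exists only to satisfy pytype. A short demonstration (the `_Placeholder` class is a hypothetical stand-in for `_ParamsPlaceholder`):

from typing import Any, cast


class _Placeholder:  # hypothetical stand-in for _ParamsPlaceholder
  pass


p = _Placeholder()
q = cast(Any, p)  # runtime no-op: same object, only the static type changes
assert q is p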
25 changes: 15 additions & 10 deletions optax/_src/state_utils_test.py
@@ -15,7 +15,7 @@
"""Tests for state_utils."""

import dataclasses
- from typing import Optional, TypedDict
+ from typing import Optional, TypedDict, cast

from absl.testing import absltest
import chex
@@ -37,7 +37,7 @@ class FakeShardSpec:
class ScaleByAdamStateDict(TypedDict):
"""An opt state that uses dictionaries instead of classes."""

- count: int
+ count: chex.Array
params: TypedDict('Params', {'mu': chex.ArrayTree, 'nu': chex.ArrayTree})


@@ -48,10 +48,11 @@ def _scale_by_adam_with_dicts():

def init(params):
state = t.init(params)
+ state = cast(transform.ScaleByAdamState, state)

return ScaleByAdamStateDict(
- count=state.count, # pytype: disable=attribute-error # numpy-scalars
- params={'mu': state.mu, 'nu': state.nu}, # pytype: disable=attribute-error # numpy-scalars
+ count=state.count,
+ params={'mu': state.mu, 'nu': state.nu},
)

def update(updates, state, params=None):
@@ -62,9 +63,10 @@ def update(updates, state, params=None):
)

updates, state = t.update(updates, state, params)
+ state = cast(transform.ScaleByAdamState, state)
return ScaleByAdamStateDict(
- count=state.count, # pytype: disable=attribute-error # numpy-scalars
- params={'mu': state.mu, 'nu': state.nu}, # pytype: disable=attribute-error # numpy-scalars
+ count=state.count,
+ params={'mu': state.mu, 'nu': state.nu},
)

return base.GradientTransformation(init, update)
@@ -138,9 +140,10 @@ def init(params):

state = init(params)
state = state_utils.tree_map_params(init, lambda v: v+1, state)
+ state = cast(Foo, state)

- self.assertEqual(state.count, 0) # pytype: disable=attribute-error # numpy-scalars
- self.assertEqual(state.v, {'w': 1}) # pytype: disable=attribute-error # numpy-scalars
+ self.assertEqual(int(state.count), 0)
+ self.assertEqual(state.v, {'w': jnp.array(1)})

def test_adam(self):
params = _fake_params()
@@ -192,10 +195,12 @@ def test_inject_hparams(self):
params = _fake_params()
state = opt.init(params)
state = state_utils.tree_map_params(opt, lambda v: v+1, state)
+ state = cast(schedule.InjectHyperparamsState, state)

- self.assertEqual(1e-3, state.hyperparams['learning_rate']) # pytype: disable=attribute-error # numpy-scalars
+ self.assertEqual(1e-3, state.hyperparams['learning_rate'])
params_plus_one = jax.tree_map(lambda v: v+1, params)
- chex.assert_trees_all_close(state.inner_state[0].mu, params_plus_one) # pytype: disable=attribute-error # numpy-scalars
+ mu = getattr(state.inner_state[0], 'mu')
+ chex.assert_trees_all_close(mu, params_plus_one)

def test_map_params_to_none(self):
opt = alias.adagrad(1e-4)
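
The test edits follow one recipe: where a generic `OptState` flows back from `init`, `update`, or `tree_map_params`, `cast` it to the concrete state class (a runtime no-op) instead of sprinkling `attribute-error` suppressions. Outside the test file, the same pattern reads, for example (a minimal sketch using the public API):

from typing import cast

import jax.numpy as jnp
import optax

opt = optax.scale_by_adam()
state = opt.init({'w': jnp.zeros((2,))})
# pytype sees the generic OptState; the cast recovers the concrete class so
# that `state.count` and `state.mu` type-check.
state = cast(optax.ScaleByAdamState, state)
print(state.count, state.mu)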
8 changes: 4 additions & 4 deletions optax/_src/transform_test.py
@@ -221,7 +221,7 @@ def test_scale(self):
factor = 0.1 ** i
rescaler = transform.scale(factor)
# Apply rescaling.
- scaled_updates, _ = rescaler.update(updates, None) # pytype: disable=wrong-arg-types # numpy-scalars
+ scaled_updates, _ = rescaler.update(updates, {})
# Manually scale updates.
def rescale(t):
return t * factor # pylint:disable=cell-var-from-loop
@@ -240,7 +240,7 @@ def test_centralize(self, inputs, outputs):
inputs = jnp.asarray(inputs)
outputs = jnp.asarray(outputs)
centralizer = transform.centralize()
- centralized_inputs, _ = centralizer.update(inputs, None) # pytype: disable=wrong-arg-types # numpy-scalars
+ centralized_inputs, _ = centralizer.update(inputs, {})
chex.assert_trees_all_close(centralized_inputs, outputs)

@chex.all_variants
@@ -282,10 +282,10 @@ def f(params: jnp.ndarray) -> jnp.ndarray:
og = transform.scale_by_optimistic_gradient()
og_state = og.init(initial_params)
# Provide some arbitrary previous gradient.
- og_state.trace['x'] = 1.5 # type: ignore # numpy-scalars
+ getattr(og_state, 'trace')['x'] = 1.5

g = jax.grad(f)(initial_params)
- og_true = 2 * g['x'] - og_state.trace['x'] # pytype: disable=attribute-error # numpy-scalars
+ og_true = 2 * g['x'] - getattr(og_state, 'trace')['x']
og, og_state = og.update(g, og_state)

# Compare transformation output with manually computed optimistic gradient.
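
`scale` and `centralize` ignore their state argument, which is why the tests can pass a dummy. `{}` is preferred over `None` because an empty dict is a valid `OptState` pytree while `None` is not, so the `wrong-arg-types` suppression becomes unnecessary. A condensed sketch of the first test's pattern:

import jax.numpy as jnp
import optax

updates = {'w': jnp.array([1.0, 2.0])}
rescaler = optax.scale(0.1)
# `scale` keeps no state, so any placeholder works; an empty dict is a valid
# OptState pytree, whereas `None` trips pytype's wrong-arg-types check.
scaled, _ = rescaler.update(updates, {})
print(scaled['w'])  # [0.1 0.2]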
7 changes: 5 additions & 2 deletions optax/_src/wrappers.py
@@ -427,8 +427,11 @@ def mid_step(args):

return new_updates, new_state

- def has_updated(self, state: MultiStepsState) -> Array:
-   return jnp.logical_and(state.mini_step == 0, state.gradient_step > 0)
+ def has_updated(self, state: Union[MultiStepsState, chex.ArrayTree]) -> Array:
+   # Use `getattr` to bypass pytype checks.
+   return jnp.logical_and(
+       getattr(state, 'mini_step') == 0, getattr(state, 'gradient_step') > 0
+   )

def gradient_transformation(self) -> base.GradientTransformation:
return base.GradientTransformation(init=self.init, update=self.update)
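
Widening `has_updated` to accept `Union[MultiStepsState, chex.ArrayTree]` lets callers pass states that pytype only knows as generic pytrees (for example, states that went through `tree_map_params`), with `getattr` avoiding the attribute error inside. Typical usage is unchanged (values illustrative):

import jax.numpy as jnp
import optax

ms = optax.MultiSteps(optax.sgd(1e-2), every_k_schedule=2)
params = {'a': jnp.zeros([])}
state = ms.init(params)
grad = {'a': jnp.ones([])}

_, state = ms.update(grad, state, params)
print(bool(ms.has_updated(state)))  # False: first of two mini-steps
_, state = ms.update(grad, state, params)
print(bool(ms.has_updated(state)))  # True: accumulated update was applied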
28 changes: 14 additions & 14 deletions optax/_src/wrappers_test.py
@@ -111,35 +111,35 @@ def fn(x):
# We know exactly what should be the value of params since we are
# effectively using sgd in all cases.
self.assertEqual(-1., float(jax.tree_util.tree_flatten(params)[0][0]))
- self.assertTrue(bool(state.last_finite)) # pytype: disable=attribute-error # numpy-scalars
+ self.assertTrue(bool(getattr(state, 'last_finite')))
# Check 2 rejected param updates
for step in range(2):
grads = grads_fn(params, nan)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertEqual(-1., float(jax.tree_util.tree_flatten(params)[0][0]))
- self.assertFalse(bool(state.last_finite)) # pytype: disable=attribute-error # numpy-scalars
- self.assertEqual(step + 1, int(state.notfinite_count)) # pytype: disable=attribute-error # numpy-scalars
+ self.assertFalse(bool(getattr(state, 'last_finite')))
+ self.assertEqual(step + 1, int(getattr(state, 'notfinite_count')))
# Next successful param update
grads = grads_fn(params, one)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertEqual(-2., float(jax.tree_util.tree_flatten(params)[0][0]))
- self.assertTrue(bool(state.last_finite)) # pytype: disable=attribute-error # numpy-scalars
+ self.assertTrue(bool(getattr(state, 'last_finite')))
# Again 2 rejected param updates
for step in range(2):
grads = grads_fn(params, nan)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertEqual(-2., float(jax.tree_util.tree_flatten(params)[0][0]))
- self.assertFalse(bool(state.last_finite)) # pytype: disable=attribute-error # numpy-scalars
- self.assertEqual(step + 1, int(state.notfinite_count)) # pytype: disable=attribute-error # numpy-scalars
+ self.assertFalse(bool(getattr(state, 'last_finite')))
+ self.assertEqual(step + 1, int(getattr(state, 'notfinite_count')))
# Next param update with NaN is accepted since we reached maximum
grads = grads_fn(params, nan)
updates, state = opt.update(grads, state, params)
params = update.apply_updates(params, updates)
self.assertTrue(bool(jnp.isnan(jax.tree_util.tree_flatten(params)[0][0])))
- self.assertEqual(5, int(state.total_notfinite)) # pytype: disable=attribute-error # numpy-scalars
+ self.assertEqual(5, int(getattr(state, 'total_notfinite')))

def test_apply_if_finite_pmap(self):
# Unlike in `test_apply_if_finite`:
@@ -247,18 +247,18 @@ def test_multi_steps_every_k_schedule(self):
params = dict(a=jnp.zeros([]))
opt_state = opt_init(params)
grad = dict(a=jnp.zeros([]))
- self.assertFalse(ms_opt.has_updated(opt_state)) # pytype: disable=wrong-arg-types # numpy-scalars
+ self.assertFalse(ms_opt.has_updated(opt_state))
# First two steps have 1 mini-step per update.
for _ in range(2):
_, opt_state = opt_update(grad, opt_state, params)
- self.assertTrue(ms_opt.has_updated(opt_state)) # pytype: disable=wrong-arg-types # numpy-scalars
+ self.assertTrue(ms_opt.has_updated(opt_state))
# Subsequently, mini-steps should have 3 mini-steps per update.
for _ in range(5):
for _ in range(2):
_, opt_state = opt_update(grad, opt_state, params)
- self.assertFalse(ms_opt.has_updated(opt_state)) # pytype: disable=wrong-arg-types # numpy-scalars
+ self.assertFalse(ms_opt.has_updated(opt_state))
_, opt_state = opt_update(grad, opt_state, params)
- self.assertTrue(ms_opt.has_updated(opt_state)) # pytype: disable=wrong-arg-types # numpy-scalars
+ self.assertTrue(ms_opt.has_updated(opt_state))

def test_multi_steps_computes_mean(self):
k_steps = 4
Expand All @@ -268,16 +268,16 @@ def test_multi_steps_computes_mean(self):
params = dict(a=jnp.zeros([]))
opt_state = opt_init(params)
grads = [dict(a=jnp.ones([]) * i) for i in [1, 2, 3, 4]]
- self.assertFalse(ms_opt.has_updated(opt_state)) # pytype: disable=wrong-arg-types # numpy-scalars
+ self.assertFalse(ms_opt.has_updated(opt_state))

# First 3 steps don't update.
for grad in grads[:-1]:
_, opt_state = opt_update(grad, opt_state, params)
- self.assertFalse(ms_opt.has_updated(opt_state)) # pytype: disable=wrong-arg-types # numpy-scalars
+ self.assertFalse(ms_opt.has_updated(opt_state))

# Actual update.
new_params, opt_state = opt_update(grads[-1], opt_state, params)
- self.assertTrue(ms_opt.has_updated(opt_state)) # pytype: disable=wrong-arg-types # numpy-scalars
+ self.assertTrue(ms_opt.has_updated(opt_state))
np.testing.assert_array_equal(new_params['a'], 2.5)

def test_skip_not_finite(self):
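
As in the other test files, the `apply_if_finite` and `MultiSteps` assertions now read state fields through `getattr` rather than suppressing pytype per line. For reference, the behaviour the first test exercises, condensed to a sketch (hyperparameters illustrative):

import jax.numpy as jnp
import optax

opt = optax.apply_if_finite(optax.sgd(1.0), max_consecutive_errors=5)
params = {'w': jnp.zeros([])}
state = opt.init(params)

# A non-finite gradient is rejected: params stay put, the counters advance.
updates, state = opt.update({'w': jnp.array(jnp.nan)}, state, params)
params = optax.apply_updates(params, updates)
print(float(params['w']))                      # 0.0 (update rejected)
print(bool(getattr(state, 'last_finite')))     # False
print(int(getattr(state, 'notfinite_count')))  # 1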
