ENH add scaled regularization in learn_d_z
TomDLT committed Nov 5, 2018
1 parent b7fe2b2 commit dfe1b7b
Showing 3 changed files with 54 additions and 22 deletions.
41 changes: 32 additions & 9 deletions alphacsc/learn_d_z.py
@@ -12,6 +12,7 @@
 from joblib import Parallel

 from .utils import construct_X, check_random_state
+from .utils.dictionary import get_lambda_max
 from .update_z import update_z
 from .update_d import update_d_block

@@ -42,10 +43,10 @@ def compute_X_and_objective(X, z_hat, d_hat, reg, sample_weights=None,


 def learn_d_z(X, n_atoms, n_times_atom, func_d=update_d_block, reg=0.1,
-              n_iter=60, random_state=None, n_jobs=1, solver_z='l-bfgs',
-              solver_d_kwargs=dict(), solver_z_kwargs=dict(), ds_init=None,
-              sample_weights=None, verbose=10, callback=None,
-              stopping_pobj=None):
+              lmbd_max='fixed', n_iter=60, random_state=None, n_jobs=1,
+              solver_z='l-bfgs', solver_d_kwargs=dict(),
+              solver_z_kwargs=dict(), ds_init=None, sample_weights=None,
+              verbose=10, callback=None, stopping_pobj=None):
     """Univariate Convolutional Sparse Coding.

     Parameters
@@ -60,6 +61,17 @@ def learn_d_z(X, n_atoms, n_times_atom, func_d=update_d_block, reg=0.1,
         The function to update the atoms.
     reg : float
         The regularization parameter
+    lmbd_max : 'fixed' | 'scaled' | 'per_atom' | 'shared'
+        If not fixed, adapt the regularization rate as a ratio of lambda_max:
+
+        - 'scaled': the regularization parameter is fixed as a ratio of its
+          maximal value at init, i.e. reg_ = reg * lmbd_max(ds_init)
+        - 'shared': the regularization parameter is set at each iteration as
+          a ratio of its maximal value for the current dictionary estimate,
+          i.e. reg_ = reg * lmbd_max(d_hat)
+        - 'per_atom': the regularization parameter is set per atom and at
+          each iteration as a ratio of its maximal value for this atom,
+          i.e. reg_[k] = reg * lmbd_max(d_hat[k])
     n_iter : int
         The number of coordinate-descent iterations.
     random_state : int | None
@@ -106,6 +118,11 @@ def learn_d_z(X, n_atoms, n_times_atom, func_d=update_d_block, reg=0.1,
     d_norm = np.linalg.norm(d_hat, axis=1)
     d_hat /= d_norm[:, None]

+    reg0 = reg
+    lambda_max = get_lambda_max(X[:, None, :], d_hat[:, None, :]).max()
+    if lmbd_max == "scaled":
+        reg = reg0 * lambda_max
+
     pobj = list()
     times = list()

@@ -129,19 +146,25 @@ def learn_d_z(X, n_atoms, n_times_atom, func_d=update_d_block, reg=0.1,
             print('Coordinate descent loop %d / %d [n_jobs=%d]' %
                   (ii, n_iter, n_jobs))

+        if lmbd_max not in ['fixed', 'scaled']:
+            lambda_max = get_lambda_max(X[:, None, :], d_hat[:, None, :])
+            reg = reg0 * lambda_max
+            if lmbd_max == 'shared':
+                reg = reg.max()
+
         start = time.time()
-        z_hat = update_z(X, d_hat, reg, z0=z_hat,
-                         parallel=parallel, solver=solver_z,
-                         b_hat_0=b_hat_0, solver_kwargs=solver_z_kwargs,
+        z_hat = update_z(X, d_hat, reg, z0=z_hat, parallel=parallel,
+                         solver=solver_z, b_hat_0=b_hat_0,
+                         solver_kwargs=solver_z_kwargs,
                          sample_weights=sample_weights)
         times.append(time.time() - start)

         # monitor cost function
         pobj.append(
             compute_X_and_objective(X, z_hat, d_hat, reg, sample_weights))
         if verbose > 1:
-            print('[seed %s] Objective (z_hat) : %0.8f' %
-                  (random_state, pobj[-1]))
+            print('[seed %s] Objective (z_hat) : %0.8f' % (random_state,
+                                                           pobj[-1]))

         if len(z_hat.nonzero()[0]) == 0:
             import warnings
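To make the new option concrete, here is a minimal sketch (not part of the commit) of how the three non-'fixed' modes map the user-facing reg ratio onto the value passed to update_z. It assumes, as the 'per_atom' branch above implies, that get_lambda_max returns one value per atom; the helper name effective_reg is hypothetical.

import numpy as np

def effective_reg(reg, lmbd_max, lambda_max_per_atom):
    # lambda_max_per_atom: array of shape (n_atoms,), the smallest
    # regularization that zeroes out each atom's activations.
    if lmbd_max == 'fixed':
        return reg  # reg is used as an absolute penalty
    if lmbd_max in ('scaled', 'shared'):
        # one shared value; 'scaled' freezes it at init, while
        # 'shared' recomputes it at every iteration
        return reg * lambda_max_per_atom.max()
    if lmbd_max == 'per_atom':
        return reg * lambda_max_per_atom  # one value per atom, shape (n_atoms,)
    raise ValueError("unknown lmbd_max: %s" % lmbd_max)

lam = np.array([2.0, 3.5, 1.2])
print(effective_reg(0.3, 'shared', lam))    # 0.3 * 3.5
print(effective_reg(0.3, 'per_atom', lam))  # 0.3 * each atom's lambda_max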
33 changes: 22 additions & 11 deletions alphacsc/learn_d_z_mcem.py
@@ -11,12 +11,12 @@
 from .update_w import estimate_phi_mh


-def learn_d_z_weighted(X, n_atoms, n_times_atom, func_d=update_d_block,
-                       reg=0.1, alpha=1.9, n_iter_global=10, init_tau=False,
-                       n_iter_optim=10, n_iter_mcmc=10, n_burnin_mcmc=0,
-                       random_state=None, n_jobs=1, solver_z='l-bfgs',
-                       solver_d_kwargs=dict(), solver_z_kwargs=dict(),
-                       ds_init=None, verbose=0, callback=None):
+def learn_d_z_weighted(
+        X, n_atoms, n_times_atom, func_d=update_d_block, reg=0.1, alpha=1.9,
+        lmbd_max='fixed', n_iter_global=10, init_tau=False, n_iter_optim=10,
+        n_iter_mcmc=10, n_burnin_mcmc=0, random_state=None, n_jobs=1,
+        solver_z='l-bfgs', solver_d_kwargs=dict(), solver_z_kwargs=dict(),
+        ds_init=None, verbose=0, callback=None):
     """Univariate Convolutional Sparse Coding with an alpha-stable distribution

     Parameters
@@ -33,6 +33,17 @@ def learn_d_z_weighted(X, n_atoms, n_times_atom, func_d=update_d_block,
         The regularization parameter
     alpha : float in [0, 2[
         Parameter of the alpha-stable noise distribution.
+    lmbd_max : 'fixed' | 'scaled' | 'per_atom' | 'shared'
+        If not fixed, adapt the regularization rate as a ratio of lambda_max:
+
+        - 'scaled': the regularization parameter is fixed as a ratio of its
+          maximal value at init, i.e. reg_ = reg * lmbd_max(ds_init)
+        - 'shared': the regularization parameter is set at each iteration as
+          a ratio of its maximal value for the current dictionary estimate,
+          i.e. reg_ = reg * lmbd_max(d_hat)
+        - 'per_atom': the regularization parameter is set per atom and at
+          each iteration as a ratio of its maximal value for this atom,
+          i.e. reg_[k] = reg * lmbd_max(d_hat[k])
     n_iter_global : int
         The number of iterations of the Expectation-Maximisation outer loop.
     init_tau : boolean
@@ -89,11 +100,11 @@ def learn_d_z_weighted(X, n_atoms, n_times_atom, func_d=update_d_block,

         # Optimize d and z wrt the new weights
         pobj, times, d_hat, z_hat = learn_d_z(
-            X, n_atoms, n_times_atom, func_d, reg=reg, n_iter=n_iter_optim,
-            random_state=rng, sample_weights=2 * tau, ds_init=d_hat,
-            solver_d_kwargs=solver_d_kwargs, solver_z_kwargs=solver_z_kwargs,
-            verbose=verbose, solver_z=solver_z, n_jobs=n_jobs,
-            callback=callback)
+            X, n_atoms, n_times_atom, func_d, reg=reg, lmbd_max=lmbd_max,
+            n_iter=n_iter_optim, random_state=rng, sample_weights=2 * tau,
+            ds_init=d_hat, solver_d_kwargs=solver_d_kwargs,
+            solver_z_kwargs=solver_z_kwargs, verbose=verbose,
+            solver_z=solver_z, n_jobs=n_jobs, callback=callback)

         # Estimate the expectation via MCMC
         X_hat = construct_X(z_hat, d_hat)
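And a hypothetical usage sketch of the alpha-stable variant with the new option. The keyword arguments follow the signature in the diff above; the toy data, the (d_hat, z_hat, tau) unpacking, and the chosen values are assumptions for illustration, not taken from this commit.

import numpy as np
from alphacsc.learn_d_z_mcem import learn_d_z_weighted

rng = np.random.RandomState(42)
X = rng.randn(10, 500)  # (n_trials, n_times), toy data

# reg is now a ratio of lambda_max rather than an absolute penalty,
# so the same value stays meaningful across differently scaled datasets.
d_hat, z_hat, tau = learn_d_z_weighted(
    X, n_atoms=3, n_times_atom=32, reg=0.3, alpha=1.9,
    lmbd_max='scaled',  # forwarded to the inner learn_d_z call
    n_iter_global=5, n_iter_optim=10, random_state=42)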
2 changes: 0 additions & 2 deletions alphacsc/update_z.py
@@ -41,8 +41,6 @@ def update_z(X, ds, reg, z0=None, debug=False, parallel=None,
         Parameters for the solver
     sample_weights: array, shape (n_trials, n_times)
         Weights applied on the cost function.
-    verbose : int
-        Verbosity level.

     Returns
     -------
