Skip to content

Commit

Permalink
improve and homogenize docstrings
Browse files Browse the repository at this point in the history
  • Loading branch information
fabianp committed Mar 19, 2021
1 parent f21b689 commit e5cb995
Show file tree
Hide file tree
Showing 2 changed files with 138 additions and 76 deletions.
154 changes: 103 additions & 51 deletions copt/constraint.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,22 +4,53 @@
from scipy.sparse import linalg as splinalg

class LinfBall:
    """Indicator function over the L-infinity ball.

    Args:
      alpha: float
        radius of the ball.
    """

    p = np.inf

    def __init__(self, alpha):
        self.alpha = alpha

    def prox(self, x, step_size=None):
        """Projection onto the L-infinity ball.

        Args:
          x: array-like

        Returns:
          p : array-like, same shape as x
            projection of x onto the L-infinity ball.
        """
        # np.clip accepts any array-like (the ndarray method x.clip would
        # fail for plain sequences) and the componentwise clamp to
        # [-alpha, alpha] is exactly the Euclidean projection onto the ball.
        return np.clip(x, -self.alpha, self.alpha)


class L2Ball:
"""L2 ball.
Args:
alpha: float
radius of the ball.
"""
p = 2

def __init__(self, alpha):
self.alpha = alpha

def prox(self, x, step_size=None):
"""Projection onto the L-2 ball.
Args:
x: array-like
Returns:
p : array-like, same shape as x
projection of x onto the L-2 ball.
"""

norm = np.sqrt((x ** 2).sum())
if norm <= self.alpha:
return x
Expand All @@ -29,9 +60,13 @@ def prox(self, x, step_size=None):
class L1Ball:
"""Indicator function over the L1 ball
This function is 0 if the sum of absolute values is less than or equal to
alpha, and infinity otherwise.
"""
This function is 0 if the sum of absolute values is less than or equal to
alpha, and infinity otherwise.
Args:
alpha: float
radius of the ball.
"""
p = 1

def __init__(self, alpha):
Expand All @@ -44,6 +79,17 @@ def __call__(self, x):
return np.infty

def prox(self, x, step_size=None):
    """Projection onto the L1 ball of radius alpha.

    Delegates to euclidean_proj_l1ball, which reduces the problem to
    a projection onto the positive simplex.

    Args:
      x: array-like

      step_size: not used here (kept for a uniform prox signature).

    Returns:
      p : array-like, same shape as x
        projection of x onto the L1 ball.
    """
    return euclidean_proj_l1ball(x, self.alpha)

def lmo(self, u, x, active_set=None):
Expand All @@ -53,9 +99,9 @@ def lmo(self, u, x, active_set=None):
max_{||s||_1 <= alpha} <u, s>
Args:
u: array
u: array-like
usually -gradient
x: array
x: array-like
usually the iterate of the considered algorithm
active_set: no effect here.
Expand All @@ -68,7 +114,7 @@ def lmo(self, u, x, active_set=None):
None: not used here
max_step_size: float
1. for a Frank-Wolfe step.
"""
"""
abs_u = np.abs(u)
largest_coordinate = np.argmax(abs_u)
sign = np.sign(u[largest_coordinate])
Expand Down Expand Up @@ -145,30 +191,31 @@ def lmo(self, u, x):

def euclidean_proj_simplex(v, s=1.0):
r""" Compute the Euclidean projection on a positive simplex
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: float, optional, default: 1,
radius of the simplex
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References
----------
[1] Efficient Projections onto the .1-Ball for Learning in High Dimensions
John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
"""
Solves the optimization problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Args:
v: (n,) numpy array,
n-dimensional vector to project
s: float, optional, default: 1,
radius of the simplex
Returns:
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes:
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References:
[1] Efficient Projections onto the .1-Ball for Learning in High Dimensions
John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
(n,) = v.shape # will raise ValueError if v is not 1-D
# check if we are already on the simplex
Expand All @@ -189,25 +236,24 @@ def euclidean_proj_simplex(v, s=1.0):

def euclidean_proj_l1ball(v, s=1):
""" Compute the Euclidean projection on a L1-ball
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. || w ||_1 <= s
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: float, optional, default: 1,
radius of the L1-ball
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the L1-ball of radius s
Notes
-----
Solves the problem by a reduction to the positive simplex case
See also
--------
euclidean_proj_simplex
"""
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. || w ||_1 <= s
Args:
v: (n,) numpy array,
n-dimensional vector to project
s: float, optional, default: 1,
radius of the L1-ball
Returns:
w: (n,) numpy array,
Euclidean projection of v on the L1-ball of radius s
Notes:
Solves the problem by a reduction to the positive simplex case
See also :ref:`euclidean_proj_simplex`
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
if len(v.shape) > 1:
raise ValueError
Expand All @@ -226,7 +272,13 @@ def euclidean_proj_l1ball(v, s=1):


class TraceBall:
"""Projection onto the trace (aka nuclear) norm, sum of singular values"""
"""Projection onto the trace (aka nuclear) norm, sum of singular values
Args:
alpha: float
radius of the ball.
"""

is_separable = False

Expand Down Expand Up @@ -267,7 +319,7 @@ def lmo(self, u, x, active_set=None):
None: not used here
None: not used here
max_step_size: 1. for a Frank-Wolfe step.
"""
"""
u_mat = u.reshape(self.shape)
ut, _, vt = splinalg.svds(u_mat, k=1)
vertex = self.alpha * np.outer(ut, vt).ravel()
Expand Down
60 changes: 35 additions & 25 deletions copt/penalty.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,14 @@
class L1Norm:
"""L1 norm, that is, the sum of absolute values:
.. math::
\\alpha\\sum_i^d |x_i|
.. math::
\\alpha\\sum_i^d |x_i|
Args:
alpha: float
constant multiplying the L1 norm
Args:
alpha: float
constant multiplying the L1 norm
"""
"""

def __init__(self, alpha):
self.alpha = alpha
Expand Down Expand Up @@ -55,17 +55,15 @@ def _prox_L1(x, i, indices, indptr, d, step_size):

class GroupL1:
"""
Group Lasso penalty
Parameters
----------
Group Lasso penalty
alpha: float
Constant multiplying this loss
Args:
alpha: float
Constant multiplying this loss
blocks: list of lists
blocks: list of lists
"""
"""

def __init__(self, alpha, groups):
self.alpha = alpha
Expand Down Expand Up @@ -147,16 +145,12 @@ def _prox_gl(x, i, indices, indptr, d, step_size):

class FusedLasso:
"""
Fused Lasso penalty
Parameters
----------
Fused Lasso penalty
alpha: scalar
Examples
--------
"""
Args:
alpha: float
Constant multiplying this function.
"""

def __init__(self, alpha):
self.alpha = alpha
Expand Down Expand Up @@ -242,7 +236,15 @@ def _prox_2_fl(x, i, indices, indptr, d, step_size):


class TraceNorm:
"""Trace (aka nuclear) norm, sum of singular values"""
"""Trace (aka nuclear) norm, sum of singular values.
Args:
alpha: float
Constant multiplying this function.
shape: float
Shape of original matrix, since input is given as
a raveled vector.
"""

is_separable = False

Expand All @@ -268,7 +270,15 @@ def prox_factory(self):


class TotalVariation2D:
"""2-dimensional Total Variation pseudo-norm"""
"""2-dimensional Total Variation pseudo-norm.
Args:
alpha: float
Constant multiplying this function.
shape: float
Shape of original matrix, since input is given as
a raveled vector.
"""

def __init__(self, alpha, shape, max_iter=100, tol=1e-6):
self.alpha = alpha
Expand Down

0 comments on commit e5cb995

Please sign in to comment.