Skip to content

Commit

Permalink
removed __all__.
Browse files Browse the repository at this point in the history
  • Loading branch information
frankong committed Jul 10, 2018
1 parent d3d13c4 commit 1aaf500
Show file tree
Hide file tree
Showing 6 changed files with 19 additions and 37 deletions.
15 changes: 7 additions & 8 deletions sigpy/alg.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,6 @@
if config.cupy_enabled:
import cupy as cp

__all__ = ['PowerMethod', 'ConjugateGradient', 'GradientMethod',
'PrimalDualHybridGradient', 'AltMin', 'Alg']


class Alg(object):
"""Abstraction for iterative algorithm.
Expand Down Expand Up @@ -64,7 +61,7 @@ class PowerMethod(Alg):
max_iter (int): Maximum number of iterations.
Attributes:
max_eig (float): Maximum eigenvalue of A.
float: Maximum eigenvalue of `A`.
"""

def __init__(self, A, x, max_iter=30):
Expand Down Expand Up @@ -250,6 +247,7 @@ class NewtonsMethod(Alg):
hessf (function): Function to compute Hessian of f at x,
proxHg (function): Function to compute proximal operator of g.
x (array): Optimization variable.
"""

def __init__(self, gradf, hessf, proxHg, x,
Expand Down Expand Up @@ -299,16 +297,17 @@ class PrimalDualHybridGradient(Alg):
Args:
proxfc (function): Function to compute proximal operator of f^*.
proxg (function): Function to compute proximal operator of g.
A (function): Function to compute linear mapping A.
AH (function): Function to compute adjoint linear mapping of A.
A (function): Function to compute a linear mapping.
AH (function): Function to compute the adjoint linear mapping of `A`.
x (array): Primal solution.
u (array): Dual solution.
tau (float): Primal step-size.
sigma (float): Dual step-size.
theta (float): Primal extrapolation parameter.
P (function): Function to compute precondition x.
D (function): Function to compute precondition u.
P (function): Function to precondition the primal variable.
D (function): Function to precondition the dual variable.
max_iter (int): Maximum number of iterations.
"""

def __init__(
Expand Down
27 changes: 12 additions & 15 deletions sigpy/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,6 @@
import cupy as cp


__all__ = ["MaxEig", "LinearLeastSquares", "SecondOrderConeConstraint"]


class App(object):
"""Iterative algorithm application. Each App has its own Alg.
Expand Down Expand Up @@ -94,10 +91,10 @@ class LinearLeastSquares(App):
.. math::
\min_x \frac{1}{2} \| W^{0.5} (A x - y) \|_2^2 + g(G x) + \frac{\lambda}{2} \| R x \|_2^2
Three algorithms can be used: ConjugateGradient, GradientMethod,
and PrimalDualHybridGradient. If alg_name is None, ConjugateGradient is used
when proxg is not specified. If proximal operator is specified,
then GradientMethod is used when G is specified, and PrimalDualHybridGradient is
Three algorithms can be used: `ConjugateGradient`, `GradientMethod`,
and `PrimalDualHybridGradient`. If `alg_name` is None, `ConjugateGradient` is used
when `proxg` is not specified. If `proxg` is specified,
then `GradientMethod` is used when `G` is specified, and `PrimalDualHybridGradient` is
used otherwise.
Args:
Expand All @@ -106,18 +103,18 @@ class LinearLeastSquares(App):
x (array): Solution.
proxg (Prox): Proximal operator of g.
lamda (float): l2 regularization parameter.
g (None or function): Regularization function. Only used for when save_objs is true.
g (None or function): Regularization function. Only used when `save_objs` is true.
G (None or Linop): Regularization linear operator.
R (None or Linop): l2 regularization linear operator.
weights (float or array): Weights for least squares.
alg_name (str): {'ConjugateGradient', 'GradientMethod', 'PrimalDualHybridGradient'}.
alpha (None or float): Step size for GradientMethod.
accelerate (bool): Toggle Nesterov acceleration for GradientMethod.
alg_name (str): {`'ConjugateGradient'`, `'GradientMethod'`, `'PrimalDualHybridGradient'`}.
alpha (None or float): Step size for `GradientMethod`.
accelerate (bool): Toggle Nesterov acceleration for `GradientMethod`.
max_power_iter (int): Maximum number of iterations for power method.
Used for GradientMethod and PrimalDualHybridGradient when alpha is not specified.
tau (float): Primal step-size for PrimalDualHybridGradient.
sigma (float): Dual step-size for PrimalDualHybridGradient.
theta (float): Primal extrapolation parameter for PrimalDualHybridGradient.
Used for `GradientMethod` and `PrimalDualHybridGradient` when `alpha` is not specified.
tau (float): Primal step-size for `PrimalDualHybridGradient`.
sigma (float): Dual step-size for `PrimalDualHybridGradient`.
theta (float): Primal extrapolation parameter for `PrimalDualHybridGradient`.
"""
def __init__(self, A, y, x, proxg=None,
Expand Down
5 changes: 0 additions & 5 deletions sigpy/conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,6 @@
from sigpy import fft, util, config


__all__ = ['convolve', 'correlate',
'cudnn_convolve', 'cudnn_convolve_backward_filter',
'cudnn_convolve_backward_data']


def convolve(input1, input2, axes=None, mode='full'):
"""Multi-dimensional convolution.
Expand Down
3 changes: 0 additions & 3 deletions sigpy/fft.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,6 @@
import cupy as cp


__all__ = ['fft', 'ifft']


def fft(input, oshape=None, axes=None, center=True, norm='ortho'):
"""FFT function that supports centering.
Expand Down
3 changes: 0 additions & 3 deletions sigpy/interp.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,6 @@
import cupy as cp


__all__ = ['interp', 'gridding']


def interp(input, width, table, coord):
"""Interpolation from array to points specified by coordinates.
Expand Down
3 changes: 0 additions & 3 deletions sigpy/nufft.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,6 @@
from sigpy import fft, util, interp


__all__ = ['nufft', 'nufft_adjoint', 'estimate_shape']


def nufft(input, coord, oversamp=1.25, width=4.0, n=128):
"""Non-uniform Fast Fourier Transform.
Expand Down

0 comments on commit 1aaf500

Please sign in to comment.