From f11ba554eb37f4514e66e6c3a35561a4e7ebeb19 Mon Sep 17 00:00:00 2001 From: Thilo Balke Date: Wed, 19 Jan 2022 16:15:50 -0700 Subject: [PATCH 1/5] remove is_smooth attribute (#89) --- docs/source/functional.rst | 8 ++-- examples/scripts/denoise_tv_iso_pgm.py | 4 -- .../scripts/sparsecode_poisson_blkarr_pgm.py | 1 - examples/scripts/sparsecode_poisson_pgm.py | 1 - scico/_generic_operators.py | 12 ----- scico/functional/_denoiser.py | 1 - scico/functional/_flax.py | 1 - scico/functional/_functional.py | 13 ------ scico/functional/_indicator.py | 2 - scico/functional/_norm.py | 5 -- scico/loss.py | 8 ---- scico/optimize/pgm.py | 11 ++--- scico/test/test_functional.py | 12 +---- scico/test/test_operator.py | 46 ------------------- 14 files changed, 7 insertions(+), 118 deletions(-) diff --git a/docs/source/functional.rst b/docs/source/functional.rst index 24cc0eac9..499aea9be 100644 --- a/docs/source/functional.rst +++ b/docs/source/functional.rst @@ -39,8 +39,7 @@ An instance of :class:`.Functional`, ``f``, may provide three core operations. - ``f.grad(x)`` returns the gradient of the functional evaluated at ``x``. - Gradients are calculated using JAX reverse-mode automatic differentiation, exposed through :func:`scico.grad`. - - A functional that is smooth has the attribute ``f.is_smooth == True``. - - NOTE: The gradient of a functional ``f`` can be evaluated even if ``f.is_smooth == False``. + - NOTE: The gradient of a functional ``f`` can be evaluated even if that functional is not smooth. All that is required is that the functional can be evaluated, ``f.has_eval == True``. However, the result may not be a valid gradient (or subgradient) for all inputs. * Proximal operator @@ -92,7 +91,7 @@ in the parameterized form :math:`\mathrm{prox}_{c f}`. In SCICO, multiplying a :class:`.Functional` by a scalar will return a :class:`.ScaledFunctional`. -This :class:`.ScaledFunctional` retains the ``has_eval``, ``is_smooth``, and ``has_prox`` attributes +This :class:`.ScaledFunctional` retains the ``has_eval`` and ``has_prox`` attributes from the original :class:`.Functional`, but the proximal method is modified to accomodate the additional scalar. @@ -129,7 +128,7 @@ To add a new functional, create a class which 1. inherits from base :class:`.Functional`; -2. has ``has_eval``, ``is_smooth``, and ``has_prox`` flags; +2. has ``has_eval`` and ``has_prox`` flags; 3. has ``_eval`` and ``prox`` methods, as necessary. 
For example, @@ -139,7 +138,6 @@ For example, class MyFunctional(scico.functional.Functional): has_eval = True - is_smooth = False has_prox = True def _eval(self, x: JaxArray) -> float: diff --git a/examples/scripts/denoise_tv_iso_pgm.py b/examples/scripts/denoise_tv_iso_pgm.py index ab94dd43e..28455c44f 100644 --- a/examples/scripts/denoise_tv_iso_pgm.py +++ b/examples/scripts/denoise_tv_iso_pgm.py @@ -113,7 +113,6 @@ class IsoProjector(functional.Functional): has_eval = True has_prox = True - is_smooth = False def __call__(self, x: Union[JaxArray, BlockArray]) -> float: return 0.0 @@ -136,7 +135,6 @@ def prox(self, v: JaxArray, lam: float, **kwargs) -> JaxArray: """ reg_weight_iso = 1.4e0 f_iso = DualTVLoss(y=y, A=A, lmbda=reg_weight_iso) -f_iso.is_smooth = True g_iso = IsoProjector() solver_iso = AcceleratedPGM( @@ -168,7 +166,6 @@ class AnisoProjector(functional.Functional): has_eval = True has_prox = True - is_smooth = False def __call__(self, x: Union[JaxArray, BlockArray]) -> float: return 0.0 @@ -186,7 +183,6 @@ def prox(self, v: JaxArray, lam: float, **kwargs) -> JaxArray: reg_weight_aniso = 1.2e0 f = DualTVLoss(y=y, A=A, lmbda=reg_weight_aniso) -f.is_smooth = True g = AnisoProjector() solver = AcceleratedPGM( diff --git a/examples/scripts/sparsecode_poisson_blkarr_pgm.py b/examples/scripts/sparsecode_poisson_blkarr_pgm.py index 9ac4f7cec..01efaeff8 100644 --- a/examples/scripts/sparsecode_poisson_blkarr_pgm.py +++ b/examples/scripts/sparsecode_poisson_blkarr_pgm.py @@ -95,7 +95,6 @@ def _eval(self, x: BlockArray) -> BlockArray: Set up the loss function and the regularization. """ f = loss.PoissonLoss(y=y, A=A) -f.is_smooth = True g = functional.NonNegativeIndicator() diff --git a/examples/scripts/sparsecode_poisson_pgm.py b/examples/scripts/sparsecode_poisson_pgm.py index febbb7adf..05be9b170 100644 --- a/examples/scripts/sparsecode_poisson_pgm.py +++ b/examples/scripts/sparsecode_poisson_pgm.py @@ -65,7 +65,6 @@ """ A = linop.MatrixOperator(D) f = loss.PoissonLoss(y=y, A=A) -f.is_smooth = True g = functional.NonNegativeIndicator() diff --git a/scico/_generic_operators.py b/scico/_generic_operators.py index 7cb059137..bbb222dab 100644 --- a/scico/_generic_operators.py +++ b/scico/_generic_operators.py @@ -79,7 +79,6 @@ def __init__( input_dtype: DType = np.float32, output_dtype: Optional[DType] = None, jit: bool = False, - is_smooth: bool = None, ): r"""Operator init method. 
@@ -160,9 +159,6 @@ def __init__( self.shape = (self.output_shape, self.input_shape) self.matrix_shape = (self.output_size, self.input_size) - #: True if this is a smooth mapping; false otherwise - self.is_smooth = is_smooth - if jit: self.jit() @@ -192,7 +188,6 @@ def __call__( eval_fn=lambda z: self(x(z)), input_dtype=self.input_dtype, output_dtype=x.output_dtype, - is_smooth=(self.is_smooth and x.is_smooth), ) raise ValueError(f"""Incompatible shapes {self.shape}, {x.shape} """) @@ -216,7 +211,6 @@ def __add__(self, other): eval_fn=lambda x: self(x) + other(x), input_dtype=self.input_dtype, output_dtype=result_type(self.output_dtype, other.output_dtype), - is_smooth=(self.is_smooth and other.is_smooth), ) raise ValueError(f"shapes {self.shape} and {other.shape} do not match") raise TypeError(f"Operation __add__ not defined between {type(self)} and {type(other)}") @@ -230,7 +224,6 @@ def __sub__(self, other): eval_fn=lambda x: self(x) - other(x), input_dtype=self.input_dtype, output_dtype=result_type(self.output_dtype, other.output_dtype), - is_smooth=(self.is_smooth and other.is_smooth), ) raise ValueError(f"shapes {self.shape} and {other.shape} do not match") raise TypeError(f"Operation __sub__ not defined between {type(self)} and {type(other)}") @@ -243,7 +236,6 @@ def __mul__(self, other): eval_fn=lambda x: other * self(x), input_dtype=self.input_dtype, output_dtype=result_type(self.output_dtype, other), - is_smooth=self.is_smooth, ) def __neg__(self): @@ -258,7 +250,6 @@ def __rmul__(self, other): eval_fn=lambda x: other * self(x), input_dtype=self.input_dtype, output_dtype=result_type(self.output_dtype, other), - is_smooth=self.is_smooth, ) @_wrap_mul_div_scalar @@ -269,7 +260,6 @@ def __truediv__(self, other): eval_fn=lambda x: self(x) / other, input_dtype=self.input_dtype, output_dtype=result_type(self.output_dtype, other), - is_smooth=self.is_smooth, ) def jvp(self, primals, tangents): @@ -354,7 +344,6 @@ def concat_args(args): input_shape=input_shape, output_shape=self.output_shape, eval_fn=lambda x: self(concat_args(x)), - is_smooth=self.is_smooth, ) @@ -467,7 +456,6 @@ def __init__( input_dtype=input_dtype, output_dtype=output_dtype, jit=False, - is_smooth=True, ) if not hasattr(self, "_adj"): diff --git a/scico/functional/_denoiser.py b/scico/functional/_denoiser.py index 1947301d4..9962b663a 100644 --- a/scico/functional/_denoiser.py +++ b/scico/functional/_denoiser.py @@ -38,7 +38,6 @@ class BM3D(Functional): has_eval = False has_prox = True - is_smooth = False def __init__(self, is_rgb: Optional[bool] = False): r"""Initialize a :class:`BM3D` object. diff --git a/scico/functional/_flax.py b/scico/functional/_flax.py index 94b61ced6..2a8adbd63 100644 --- a/scico/functional/_flax.py +++ b/scico/functional/_flax.py @@ -25,7 +25,6 @@ class FlaxMap(Functional): has_eval = False has_prox = True - is_smooth = False def __init__(self, model: Callable[..., nn.Module], variables: PyTree): r"""Initialize a :class:`FlaxMap` object. diff --git a/scico/functional/_functional.py b/scico/functional/_functional.py index fda71628e..cfddcb49b 100644 --- a/scico/functional/_functional.py +++ b/scico/functional/_functional.py @@ -7,7 +7,6 @@ """Functional base class.""" -import warnings from typing import List, Optional, Union import jax @@ -38,11 +37,6 @@ class Functional: #: This attribute must be overridden and set to True or False in any derived classes. has_prox: Optional[bool] = None - #: True if this functional is differentiable, False otherwise. 
- #: Note that ``is_smooth = False`` does not preclude the use of the :func:`.grad` method. - #: This attribute must be overridden and set to True or False in any derived classes. - is_smooth: Optional[bool] = None - def __init__(self): self._grad = scico.grad(self.__call__) @@ -50,7 +44,6 @@ def __repr__(self): return f"""{type(self)} has_eval = {self.has_eval} has_prox = {self.has_prox} -is_smooth = {self.is_smooth} """ def __mul__(self, other): @@ -136,9 +129,6 @@ def grad(self, x: Union[JaxArray, BlockArray]): Args: x: Point at which to evaluate gradient. """ - if not self.is_smooth: # could be True, False, or None - warnings.warn("This functional isn't smooth!", stacklevel=2) - return self._grad(x) @@ -151,7 +141,6 @@ def __repr__(self): def __init__(self, functional: Functional, scale: float): self.functional = functional self.scale = scale - self.is_smooth = functional.is_smooth self.has_eval = functional.has_eval self.has_prox = functional.has_prox super().__init__() @@ -209,7 +198,6 @@ def __init__(self, functional_list: List[Functional]): self.has_eval: bool = all(fi.has_eval for fi in functional_list) self.has_prox: bool = all(fi.has_prox for fi in functional_list) - self.is_smooth: bool = all(fi.is_smooth for fi in functional_list) super().__init__() @@ -256,7 +244,6 @@ class ZeroFunctional(Functional): has_eval = True has_prox = True - is_smooth = True def __call__(self, x: Union[JaxArray, BlockArray]) -> float: return 0.0 diff --git a/scico/functional/_indicator.py b/scico/functional/_indicator.py index f183e15af..def48cca5 100644 --- a/scico/functional/_indicator.py +++ b/scico/functional/_indicator.py @@ -37,7 +37,6 @@ class NonNegativeIndicator(Functional): has_eval = True has_prox = True - is_smooth = False def __call__(self, x: Union[JaxArray, BlockArray]) -> float: if snp.iscomplexobj(x): @@ -87,7 +86,6 @@ class L2BallIndicator(Functional): has_eval = True has_prox = True - is_smooth = False def __init__(self, radius: float = 1): r"""Initialize a :class:`L2BallIndicator` object. 
diff --git a/scico/functional/_norm.py b/scico/functional/_norm.py index 6c626b107..0a9dacab1 100644 --- a/scico/functional/_norm.py +++ b/scico/functional/_norm.py @@ -31,7 +31,6 @@ class L0Norm(Functional): has_eval = True has_prox = True - is_smooth = False def __call__(self, x: Union[JaxArray, BlockArray]) -> float: return count_nonzero(x) @@ -71,7 +70,6 @@ class L1Norm(Functional): has_eval = True has_prox = True - is_smooth = False def __call__(self, x: Union[JaxArray, BlockArray]) -> float: return snp.abs(x).sum() @@ -118,7 +116,6 @@ class SquaredL2Norm(Functional): has_eval = True has_prox = True - is_smooth = True def __call__(self, x: Union[JaxArray, BlockArray]) -> float: # Directly implement the squared l2 norm to avoid nondifferentiable @@ -152,7 +149,6 @@ class L2Norm(Functional): has_eval = True has_prox = True - is_smooth = False def __call__(self, x: Union[JaxArray, BlockArray]) -> float: return norm(x) @@ -210,7 +206,6 @@ class L21Norm(Functional): has_eval = True has_prox = True - is_smooth = False def __init__(self, l2_axis: int = 0): r""" diff --git a/scico/loss.py b/scico/loss.py index 83edd97b8..8482ae149 100644 --- a/scico/loss.py +++ b/scico/loss.py @@ -160,11 +160,6 @@ def __init__( prox_kwargs = dict self.prox_kwargs = prox_kwargs - if isinstance(A, operator.Operator): - self.is_smooth = A.is_smooth - else: - self.is_smooth = None - if isinstance(self.A, linop.Diagonal) and isinstance(self.W, linop.Diagonal): self.has_prox = True @@ -289,9 +284,6 @@ def __init__( #: Constant term in Poisson log likehood; equal to ln(y!) self.const: float = gammaln(self.y + 1) # ln(y!) - # The Poisson Loss is only smooth in the positive quadrant. - self.is_smooth = None - def __call__(self, x: Union[JaxArray, BlockArray]) -> float: Ax = self.A(x) return self.scale * snp.sum(Ax - self.y * snp.log(Ax) + self.const) diff --git a/scico/optimize/pgm.py b/scico/optimize/pgm.py index de0f553c2..ae14e4146 100644 --- a/scico/optimize/pgm.py +++ b/scico/optimize/pgm.py @@ -387,8 +387,7 @@ class PGM: Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`. - The function :math:`f` must be smooth and :math:`g` must have a - defined prox. + The function :math:`g` must have a defined prox. Uses helper :class:`StepSize` to provide an estimate of the Lipschitz constant :math:`L` of :math:`f`. The step size :math:`\alpha` is the @@ -428,9 +427,6 @@ def __init__( this parameter. """ - if f.is_smooth is not True: - raise Exception(f"The functional f ({type(f)}) must be smooth.") - #: Functional or Loss to minimize; must have grad method defined. self.f: Union[Loss, Functional] = f @@ -556,9 +552,8 @@ class AcceleratedPGM(PGM): Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`. - The function :math:`f` must be smooth and :math:`g` must have a - defined prox. The accelerated form of PGM is also known as FISTA - :cite:`beck-2009-fast`. + The function :math:`g` must have a defined prox. The accelerated + form of PGM is also known as FISTA :cite:`beck-2009-fast`. For documentation on inherited attributes, see :class:`.PGM`. 
""" diff --git a/scico/test/test_functional.py b/scico/test/test_functional.py index cd90a873f..c53a23f83 100644 --- a/scico/test/test_functional.py +++ b/scico/test/test_functional.py @@ -187,7 +187,6 @@ def test_scaled_attrs(self, norm, test_prox_obj): unscaled = norm() scaled = test_prox_obj.scalar * norm() - assert scaled.is_smooth == unscaled.is_smooth assert scaled.has_eval == unscaled.has_eval assert scaled.has_prox == unscaled.has_prox assert scaled.scale == test_prox_obj.scalar @@ -278,7 +277,7 @@ def test_prox_scale_invariance(self, cnstr, test_proj_obj): class TestCheckAttrs: - # Ensure that the has_eval, has_prox, is_smooth attrs are overridden + # Ensure that the has_eval, has_prox attrs are overridden # and set to True/False in the Functional subclasses. # Generate a list of all functionals in scico.functionals that we will check @@ -298,10 +297,6 @@ def test_has_eval(self, cls): def test_has_prox(self, cls): assert isinstance(cls.has_prox, bool) - @pytest.mark.parametrize("cls", to_check) - def test_is_smooth(self, cls): - assert isinstance(cls.is_smooth, bool) - def test_scalar_vmap(): x = np.random.randn(4, 4) @@ -349,7 +344,6 @@ def setup_method(self): def test_squared_l2(self): L = loss.SquaredL2Loss(y=self.y, A=self.Ao) - assert L.is_smooth == True assert L.has_eval == True assert L.has_prox == False # not diagonal @@ -367,7 +361,6 @@ def test_squared_l2(self): # test eval np.testing.assert_allclose(L_d(self.v), 0.5 * ((self.Do @ self.v - self.y) ** 2).sum()) - assert L_d.is_smooth == True assert L_d.has_eval == True assert L_d.has_prox == True @@ -382,7 +375,6 @@ def test_squared_l2(self): def test_weighted_squared_l2(self): L = loss.WeightedSquaredL2Loss(y=self.y, A=self.Ao, W=self.W) - assert L.is_smooth == True assert L.has_eval == True assert L.has_prox == False # not diagonal @@ -399,7 +391,6 @@ def test_weighted_squared_l2(self): # SquaredL2 with Diagonal linop has a prox L_d = loss.WeightedSquaredL2Loss(y=self.y, A=self.Do, W=self.W) - assert L_d.is_smooth == True assert L_d.has_eval == True assert L_d.has_prox == True @@ -419,7 +410,6 @@ def test_weighted_squared_l2(self): def test_poisson(self): L = loss.PoissonLoss(y=self.y, A=self.Ao_abs) - assert L.is_smooth == None assert L.has_eval == True assert L.has_prox == False diff --git a/scico/test/test_operator.py b/scico/test/test_operator.py index 36ac30d69..ae17e29d6 100644 --- a/scico/test/test_operator.py +++ b/scico/test/test_operator.py @@ -213,49 +213,3 @@ def test_freeze_2arg(): np.testing.assert_allclose(A(x), Ab(b), rtol=5e-4) np.testing.assert_allclose(A(x), Aa(a), rtol=5e-4) - - -class SmoothTestObj: - def __init__(self): - n = 32 - self.smooth = Operator(input_shape=(n,), eval_fn=lambda x: x, is_smooth=True) - self.not_smooth = Operator(input_shape=(n,), eval_fn=lambda x: x, is_smooth=False) - self.unknown = Operator(input_shape=(n,), eval_fn=lambda x: x) - - -@pytest.fixture -def testsmooth(): - yield SmoothTestObj() - - -def test_smoothness_compose(testsmooth): - smooth = testsmooth.smooth - not_smooth = testsmooth.not_smooth - unknown = testsmooth.unknown - - assert smooth(smooth).is_smooth - assert smooth(not_smooth).is_smooth is False - assert smooth(unknown).is_smooth is None - - -@pytest.mark.parametrize("operator", [op.add, op.sub]) -def test_smoothness_add_sub(testsmooth, operator): - smooth = testsmooth.smooth - not_smooth = testsmooth.not_smooth - unknown = testsmooth.unknown - - assert operator(smooth, smooth).is_smooth - assert operator(smooth, not_smooth).is_smooth is False - assert 
operator(smooth, unknown).is_smooth is None - - -@pytest.mark.parametrize("operator", [op.mul, op.truediv]) -def test_smoothness_mul_div(testsmooth, operator): - smooth = testsmooth.smooth - not_smooth = testsmooth.not_smooth - unknown = testsmooth.unknown - - scalar = 3.14 - assert operator(smooth, scalar).is_smooth - assert operator(not_smooth, scalar).is_smooth is False - assert operator(unknown, scalar).is_smooth is None From 0fdfcf6806058312a186307c2e71b7cb83011d14 Mon Sep 17 00:00:00 2001 From: Thilo Balke Date: Thu, 20 Jan 2022 09:23:36 -0700 Subject: [PATCH 2/5] markup --- docs/source/functional.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/functional.rst b/docs/source/functional.rst index 499aea9be..b24efb0bc 100644 --- a/docs/source/functional.rst +++ b/docs/source/functional.rst @@ -39,7 +39,7 @@ An instance of :class:`.Functional`, ``f``, may provide three core operations. - ``f.grad(x)`` returns the gradient of the functional evaluated at ``x``. - Gradients are calculated using JAX reverse-mode automatic differentiation, exposed through :func:`scico.grad`. - - NOTE: The gradient of a functional ``f`` can be evaluated even if that functional is not smooth. + - *Note:* The gradient of a functional ``f`` can be evaluated even if that functional is not smooth. All that is required is that the functional can be evaluated, ``f.has_eval == True``. However, the result may not be a valid gradient (or subgradient) for all inputs. * Proximal operator From 02fe9bbfd71e88af7792dd1a8f8741c1a8eef9d4 Mon Sep 17 00:00:00 2001 From: Thilo Balke Date: Tue, 25 Jan 2022 08:45:56 -0700 Subject: [PATCH 3/5] rephrasing of f conditions --- scico/optimize/pgm.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scico/optimize/pgm.py b/scico/optimize/pgm.py index ae14e4146..511d890f8 100644 --- a/scico/optimize/pgm.py +++ b/scico/optimize/pgm.py @@ -387,7 +387,8 @@ class PGM: Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`. - The function :math:`g` must have a defined prox. + The function :math:`g` must have a defined prox and convergence is + guaranteed if :math:`f` is smooth. Uses helper :class:`StepSize` to provide an estimate of the Lipschitz constant :math:`L` of :math:`f`. The step size :math:`\alpha` is the @@ -552,7 +553,8 @@ class AcceleratedPGM(PGM): Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`. - The function :math:`g` must have a defined prox. The accelerated + The function :math:`g` must have a defined prox and convergence is + guaranteed if :math:`f` is smooth. The accelerated form of PGM is also known as FISTA :cite:`beck-2009-fast`. For documentation on inherited attributes, see :class:`.PGM`. From 5af596b6fee787f2cf046194960633c3f7e69d96 Mon Sep 17 00:00:00 2001 From: Thilo Balke Date: Thu, 27 Jan 2022 15:58:22 -0700 Subject: [PATCH 4/5] simplify specs of f and g --- scico/optimize/pgm.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/scico/optimize/pgm.py b/scico/optimize/pgm.py index 511d890f8..f8a6d4dbb 100644 --- a/scico/optimize/pgm.py +++ b/scico/optimize/pgm.py @@ -385,10 +385,9 @@ def update(self, v: Union[JaxArray, BlockArray]) -> float: class PGM: r"""Proximal Gradient Method (PGM) base class. - Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`. - - The function :math:`g` must have a defined prox and convergence is - guaranteed if :math:`f` is smooth. 
+    Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`, where
+    :math:`f` and :math:`g` are instances of
+    :class:`.Functional`.
 
     Uses helper :class:`StepSize` to provide an estimate of the Lipschitz
     constant :math:`L` of :math:`f`. The step size :math:`\alpha` is the
@@ -553,9 +552,10 @@ class AcceleratedPGM(PGM):
 
     Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`.
 
-    The function :math:`g` must have a defined prox and convergence is
-    guaranteed if :math:`f` is smooth. The accelerated
-    form of PGM is also known as FISTA :cite:`beck-2009-fast`.
+    Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`, where
+    :math:`f` and :math:`g` are instances of
+    :class:`.Functional`. The accelerated form of PGM is also known as
+    FISTA :cite:`beck-2009-fast`.
 
     For documentation on inherited attributes, see :class:`.PGM`.

From 547352102fbac93b63fba78757cfb92e2786f1f3 Mon Sep 17 00:00:00 2001
From: Thilo Balke
Date: Thu, 27 Jan 2022 16:05:13 -0700
Subject: [PATCH 5/5] simplify specs of f and g

---
 scico/optimize/pgm.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/scico/optimize/pgm.py b/scico/optimize/pgm.py
index f8a6d4dbb..e639f4bff 100644
--- a/scico/optimize/pgm.py
+++ b/scico/optimize/pgm.py
@@ -386,8 +386,7 @@ class PGM:
     r"""Proximal Gradient Method (PGM) base class.
 
     Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`, where
-    :math:`f` and :math:`g` are instances of
-    :class:`.Functional`.
+    :math:`f` and :math:`g` are instances of :class:`.Functional`.
 
     Uses helper :class:`StepSize` to provide an estimate of the Lipschitz
@@ -553,9 +552,7 @@ class AcceleratedPGM(PGM):
 
-    Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`.
-
     Minimize a function of the form :math:`f(\mb{x}) + g(\mb{x})`, where
-    :math:`f` and :math:`g` are instances of
-    :class:`.Functional`. The accelerated form of PGM is also known as
-    FISTA :cite:`beck-2009-fast`.
+    :math:`f` and :math:`g` are instances of :class:`.Functional`.
+    The accelerated form of PGM is also known as FISTA
+    :cite:`beck-2009-fast`.
 
     For documentation on inherited attributes, see :class:`.PGM`.
     """
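
Taken together, the series changes the contract of the PGM solvers: nothing
checks smoothness at construction time anymore, the solvers simply evaluate
``f.grad`` wherever they need it, and it is the caller's responsibility to
put the differentiable term in ``f`` and the term with a prox in ``g``. A
minimal usage sketch of the resulting pattern, modeled on the
``sparsecode_poisson_pgm.py`` example above (the array shapes and the
``L0``/``maxiter`` values are illustrative stand-ins, not values taken from
the patch)::

    import numpy as np

    from scico import functional, linop, loss
    from scico.optimize.pgm import AcceleratedPGM

    D = np.abs(np.random.randn(64, 128))  # stand-in nonnegative dictionary
    y = np.random.poisson(D @ np.abs(np.random.randn(128)))  # Poisson counts

    A = linop.MatrixOperator(D)
    f = loss.PoissonLoss(y=y, A=A)         # smooth term: no `f.is_smooth = True` needed
    g = functional.NonNegativeIndicator()  # nonsmooth term: prox projects onto x >= 0

    # A strictly positive starting point keeps log(Ax) in the Poisson loss finite.
    solver = AcceleratedPGM(f=f, g=g, L0=1e2, x0=np.ones(128), maxiter=50)
    x_hat = solver.solve()

Likewise, per the updated ``functional.rst``, a new functional now declares
only the ``has_eval`` and ``has_prox`` flags. A self-contained sketch of that
convention (the class name is hypothetical, and the soft-thresholding prox is
the standard one for the scaled l1 norm, written out for illustration)::

    import scico.numpy as snp
    from scico import functional


    class MyL1Norm(functional.Functional):
        """l1 norm with an explicit prox; note the absence of an `is_smooth` flag."""

        has_eval = True
        has_prox = True

        def _eval(self, x) -> float:
            return snp.abs(x).sum()

        def prox(self, v, lam, **kwargs):
            # Soft thresholding: the prox of lam * ||.||_1 evaluated at v.
            return snp.sign(v) * snp.maximum(snp.abs(v) - lam, 0)

The tradeoff made by the series is visible above: dropping the attribute
removes the ``is_smooth`` bookkeeping from every ``Operator`` arithmetic rule
(``__add__``, ``__sub__``, ``__mul__``, composition) and its tests, at the
price of leaving the smoothness requirement implicit. As the updated
``functional.rst`` note explains, ``f.grad`` can be evaluated for any
functional with ``has_eval == True``, but the result is only a valid gradient
where ``f`` is in fact smooth.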