From 33284e455a328dc76e0d82bb9c1304a40e6f38ef Mon Sep 17 00:00:00 2001
From: Jeffrey Hokanson
Date: Thu, 12 Nov 2020 11:22:53 -0700
Subject: [PATCH] Refactor the stabilized SK iteration

---
 polyrat/__init__.py          |   1 +
 polyrat/rational.py          |  58 --------------
 polyrat/skiter.py            | 116 ++++++++--------------
 polyrat/skiter_stabilized.py | 142 +++++++++++++++++++++++++++++++++++
 tests/test_rational.py       |  53 -------------
 tests/test_rational_ratio.py |   6 +-
 tests/test_skiter.py         |  67 +++++++++++++++--
 7 files changed, 239 insertions(+), 204 deletions(-)
 create mode 100644 polyrat/skiter_stabilized.py

diff --git a/polyrat/__init__.py b/polyrat/__init__.py
index ff88051..9728ff7 100644
--- a/polyrat/__init__.py
+++ b/polyrat/__init__.py
@@ -17,4 +17,5 @@
 from .aaa import *
 from .paaa import *
 from .skiter import *
+from .skiter_stabilized import *
 
diff --git a/polyrat/rational.py b/polyrat/rational.py
index 2117a00..f806c4f 100644
--- a/polyrat/rational.py
+++ b/polyrat/rational.py
@@ -4,7 +4,6 @@
 import scipy.optimize
 from .basis import *
 from .polynomial import *
-from .skiter import *
 from .rational_ratio import *
 from copy import deepcopy
 
@@ -89,63 +88,6 @@ def refine(self, X, y, norm = 2, verbose = False, **kwargs):
 		# print(f"final residual norm {res_norm:21.15e}")
 
 
-class SKRationalApproximation(RationalApproximation, RationalRatio):
-    r"""
-
-    Parameters
-    ----------
-
-    """
-
-    def __init__(self, num_degree, denom_degree, refine = False, norm = 2,
-        Basis = None, rebase = True, maxiter = 20, verbose = True, xtol = 1e-7):
-
-        RationalApproximation.__init__(self, num_degree, denom_degree)
-        self._refine = refine
-        self.norm = norm
-        self.xtol = float(xtol)
-        #if self.norm != 2:
-        #    raise NotImplementedError
-
-        self.maxiter = int(maxiter)
-        self.verbose = verbose
-        self.rebase = rebase
-        if Basis is None:
-            Basis = LegendrePolynomialBasis
-
-        self.Basis = Basis
-
-        self.numerator = None
-        self.denominator = None
-
-    def fit(self, X, y, denom0 = None):
-        X = np.array(X)
-        y = np.array(y)
-        assert X.shape[0] == y.shape[0], "X and y do not have the same number of rows"
-
-        if self.rebase:
-            self.numerator, self.denominator, self.hist = skfit_rebase(
-                X, y, self.num_degree, self.denom_degree,
-                maxiter = self.maxiter, verbose = self.verbose, norm = self.norm,
-                history = True, xtol = self.xtol, denom0 = denom0,
-                )
-        else:
-            num_basis = self.Basis(X, self.num_degree)
-            denom_basis = self.Basis(X, self.denom_degree)
-            P = num_basis.vandermonde_X
-            Q = denom_basis.vandermonde_X
-
-            a, b, self.hist = skfit(y, P, Q, maxiter = self.maxiter, verbose = self.verbose, norm = self.norm, history = True,
-                xtol = self.xtol, denom0 = denom0)
-
-        self.numerator = Polynomial(num_basis, a)
-        self.denominator = Polynomial(denom_basis, b)
-
-        if self._refine:
-            self.refine(X, y, norm = self.norm)
-
-
 class RationalBarycentric(RationalFunction):
     r"""
 
diff --git a/polyrat/skiter.py b/polyrat/skiter.py
index 62cb545..5ca33be 100644
--- a/polyrat/skiter.py
+++ b/polyrat/skiter.py
@@ -7,10 +7,10 @@
 from .basis import *
 from .arnoldi import *
 from .polynomial import *
+from .rational import *
 from iterprinter import IterationPrinter
 
 
-
 def _minimize_2_norm(A):
     r""" Solve the optimization problem
 
@@ -240,101 +240,47 @@ def skfit(y, P, Q, maxiter = 20, verbose = True, history = False, denom0 = None,
 
     return best_sol
 
-
-def skfit_rebase(X, y, num_degree, denom_degree, maxiter = 20, verbose = True,
-    xtol = 1e-7, history = False, denom0 = None, norm = 2):
-    r""" The SK-iteration, but at each step use Vandermonde with Arnoldi to construct a new basis
+class SKRationalApproximation(RationalApproximation, RationalRatio):
+    r"""
 
     Parameters
     ----------
-    X: np.array (M,dim)
-
-    y: np.array (M,)
-
-
-    Returns
-    -------
-
     """
-
-    if history:
-        hist = []
+    def __init__(self, num_degree, denom_degree, norm = 2,
+        Basis = None, maxiter = 20, verbose = True, xtol = 1e-7):
+        RationalApproximation.__init__(self, num_degree, denom_degree)
+        self.norm = norm
+        self.xtol = float(xtol)
+        #if self.norm != 2:
+        #    raise NotImplementedError
-
-    if denom0 is None:
-        denom = np.ones(X.shape[0], dtype = X.dtype)
-    else:
-        assert denom0.shape[0] == X.shape[0]
-        denom = denom0
+        self.maxiter = int(maxiter)
+        self.verbose = verbose
+        if Basis is None:
+            Basis = LegendrePolynomialBasis
+
+        self.Basis = Basis
+        self.numerator = None
+        self.denominator = None
 
-    if np.isclose(norm, 2):
-        linearized_solution = _minimize_2_norm
-    elif ~np.isfinite(norm):
-        linearized_solution = _minimize_inf_norm
-    else:
-        raise NotImplementedError
-
-    if verbose:
-        printer = IterationPrinter(it = '4d', res_norm = '21.15e', delta_fit = '8.2e', cond = '8.2e')
-        printer.print_header(it = 'iter', res_norm = 'residual norm', delta_fit = 'delta fit', cond = 'cond')
+    def fit(self, X, y, denom0 = None):
+        X = np.array(X)
+        y = np.array(y)
+        assert X.shape[0] == y.shape[0], "X and y do not have the same number of rows"
 
-    # As there are no guarntees about convergence,
-    # we record the best iteration
-    best_res_norm = np.inf
-    best_sol = None
+        num_basis = self.Basis(X, self.num_degree)
+        denom_basis = self.Basis(X, self.denom_degree)
+        P = num_basis.vandermonde_X
+        Q = denom_basis.vandermonde_X
 
-    # For comparison with current iterate to determine termination
-    fit_old = np.zeros(y.shape[0], dtype = X.dtype)
+        a, b, self.hist = skfit(y, P, Q, maxiter = self.maxiter, verbose = self.verbose, norm = self.norm, history = True,
+            xtol = self.xtol, denom0 = denom0)
 
-    for it in range(maxiter):
-        try:
-            num_basis = ArnoldiPolynomialBasis(X, num_degree, weight = 1./denom)
-            denom_basis = ArnoldiPolynomialBasis(X, denom_degree, weight = 1./denom)
-            P = num_basis.vandermonde_X
-            Q = denom_basis.vandermonde_X
-            #P, RP, _ = vandermonde_arnoldi_CGS(X, num_degree, weight = 1./denom)
-            #Q, RQ, _ = vandermonde_arnoldi_CGS(X, denom_degree, weight = 1./denom)
-
-            A = np.hstack([P, np.multiply(-y[:,None], Q) ])
-            x, cond = linearized_solution(A)
-            a = x[:P.shape[1]]
-            b = x[-Q.shape[1]:]
-
-            Pa = P @ a
-            Qb = Q @ b
-
-            fit = Pa/Qb
-
-            delta_fit = np.linalg.norm(fit - fit_old, norm)
-            res_norm = np.linalg.norm(fit - y, norm)
+        self.numerator = Polynomial(num_basis, a)
+        self.denominator = Polynomial(denom_basis, b)
 
-        except (LinAlgError, ValueError) as e:
-            if verbose: print(e)
-            break
-
-
-        # If we have improved the fit, append this
-        if res_norm < best_res_norm:
-            numerator = Polynomial(num_basis, a)
-            denominator = Polynomial(denom_basis, b)
-            best_sol = [numerator, denominator]
-            best_res_norm = res_norm
-
-        if history:
-            hist.append({'fit': fit, 'cond':cond})
-        if verbose:
-            printer.print_iter(it = it, delta_fit = delta_fit, res_norm = res_norm, cond = cond)
-        if delta_fit < xtol:
-            break
-
-        denom = np.abs(denom * Qb)
-        denom[denom == 0.] = 1.
-        fit_old = fit
-
-    if history:
-        return best_sol + [hist]
-    else:
-        return best_sol
diff --git a/polyrat/skiter_stabilized.py b/polyrat/skiter_stabilized.py
new file mode 100644
index 0000000..9c42172
--- /dev/null
+++ b/polyrat/skiter_stabilized.py
@@ -0,0 +1,142 @@
+r""" A stabilized variant of the Sanathanan-Koerner iteration
+
+"""
+
+import numpy as np
+from numpy.linalg import LinAlgError
+from iterprinter import IterationPrinter
+from .skiter import _minimize_2_norm, _minimize_inf_norm
+from .arnoldi import *
+from .rational import *
+
+
+
+def skfit_stabilized(X, y, num_degree, denom_degree, maxiter = 20, verbose = True,
+    xtol = 1e-7, history = False, denom0 = None, norm = 2):
+    r""" The Stabilized Sanathanan-Koerner Iteration
+
+
+    Parameters
+    ----------
+    X: np.array (M,dim)
+
+    y: np.array (M,)
+
+
+    Returns
+    -------
+
+    """
+
+    if history:
+        hist = []
+
+    if denom0 is None:
+        denom = np.ones(X.shape[0], dtype = X.dtype)
+    else:
+        assert denom0.shape[0] == X.shape[0]
+        denom = denom0
+
+
+    if np.isclose(norm, 2):
+        linearized_solution = _minimize_2_norm
+    elif ~np.isfinite(norm):
+        linearized_solution = _minimize_inf_norm
+    else:
+        raise NotImplementedError
+
+    if verbose:
+        printer = IterationPrinter(it = '4d', res_norm = '21.15e', delta_fit = '8.2e', cond = '8.2e')
+        printer.print_header(it = 'iter', res_norm = 'residual norm', delta_fit = 'delta fit', cond = 'cond')
+
+    # As there are no guarantees about convergence,
+    # we record the best iteration
+    best_res_norm = np.inf
+    best_sol = None
+
+    # For comparison with current iterate to determine termination
+    fit_old = np.zeros(y.shape[0], dtype = X.dtype)
+
+    for it in range(maxiter):
+        try:
+            num_basis = ArnoldiPolynomialBasis(X, num_degree, weight = 1./denom)
+            denom_basis = ArnoldiPolynomialBasis(X, denom_degree, weight = 1./denom)
+            P = num_basis.vandermonde_X
+            Q = denom_basis.vandermonde_X
+
+            A = np.hstack([P, np.multiply(-y[:,None], Q) ])
+            x, cond = linearized_solution(A)
+            a = x[:P.shape[1]]
+            b = x[-Q.shape[1]:]
+
+            Pa = P @ a
+            Qb = Q @ b
+
+            fit = Pa/Qb
+
+            delta_fit = np.linalg.norm( (fit - fit_old).flatten(), norm)
+            res_norm = np.linalg.norm( (fit - y).flatten(), norm)
+
+        except (LinAlgError, ValueError) as e:
+            if verbose: print(e)
+            break
+
+
+        # If we have improved the fit, append this
+        if res_norm < best_res_norm:
+            numerator = Polynomial(num_basis, a)
+            denominator = Polynomial(denom_basis, b)
+            best_sol = [numerator, denominator]
+            best_res_norm = res_norm
+
+        if history:
+            hist.append({'fit': fit, 'cond':cond})
+
+        if verbose:
+            printer.print_iter(it = it, delta_fit = delta_fit, res_norm = res_norm, cond = cond)
+
+        if delta_fit < xtol:
+            break
+
+        denom = np.abs(denom * Qb)
+        denom[denom == 0.] = 1.
+        fit_old = fit
+
+    if history:
+        return best_sol + [hist]
+    else:
+        return best_sol
+
+
+
+class StabilizedSKRationalApproximation(RationalApproximation, RationalRatio):
+    r"""
+
+    Parameters
+    ----------
+
+    """
+
+    def __init__(self, num_degree, denom_degree, norm = 2, maxiter = 20, verbose = True, xtol = 1e-7):
+
+        RationalApproximation.__init__(self, num_degree, denom_degree)
+        self.norm = norm
+        self.xtol = float(xtol)
+
+        self.maxiter = int(maxiter)
+        self.verbose = verbose
+
+        self.numerator = None
+        self.denominator = None
+
+    def fit(self, X, y, denom0 = None):
+        X = np.array(X)
+        y = np.array(y)
+        assert X.shape[0] == y.shape[0], "X and y do not have the same number of rows"
+
+        self.numerator, self.denominator, self.hist = skfit_stabilized(
+            X, y, self.num_degree, self.denom_degree,
+            maxiter = self.maxiter, verbose = self.verbose, norm = self.norm,
+            history = True, xtol = self.xtol, denom0 = denom0,
+            )
+
diff --git a/tests/test_rational.py b/tests/test_rational.py
index ada8f46..f86f432 100644
--- a/tests/test_rational.py
+++ b/tests/test_rational.py
@@ -4,61 +4,8 @@
 import pytest
 
 
-@pytest.mark.parametrize("M", [1000])
-@pytest.mark.parametrize("num_degree", [5, [3,4]])
-@pytest.mark.parametrize("denom_degree", [5, [5,3]])
-@pytest.mark.parametrize("dim", [1, 2])
-@pytest.mark.parametrize("Basis",
-    [LegendrePolynomialBasis,
-    ArnoldiPolynomialBasis])
-@pytest.mark.parametrize("seed", [0])
-@pytest.mark.parametrize("norm", [2])
-@pytest.mark.parametrize("refine", [True, False])
-@pytest.mark.parametrize("rebase", [True, False])
-@pytest.mark.parametrize("complex_", [True, False])
-
-def test_skfit_exact(M, dim, num_degree, denom_degree, refine, norm, Basis, seed, complex_, rebase):
-    r"""
-    When the data is a *exactly* a rational function of the specified degree,
-    SK iteration should recover it exactly (modulo conditioning issues)
-    """
-
-    X, y = random_data(M, dim, complex_, seed)
-
-    # Exit without error if testing a total degree problem
-    try:
-        num_degree = int(num_degree)
-    except (TypeError, ValueError):
-        if len(num_degree) != dim: return
-    try:
-        denom_degree = int(denom_degree)
-    except (TypeError, ValueError):
-        if len(denom_degree) != dim: return
-
-    # Generate exact fit
-    P = LegendrePolynomialBasis(X, num_degree).vandermonde_X
-    Q = LegendrePolynomialBasis(X, denom_degree).vandermonde_X
-
-    # coefficients
-    a = np.random.randn(P.shape[1])
-    b = np.random.randn(Q.shape[1])
-    if complex_:
-        a = a + 1j*np.random.randn(*a.shape)
-        b = b + 1j*np.random.randn(*b.shape)
-
-    y = (P @ a)/(Q @ b)
-
-    sk = SKRationalApproximation(num_degree, denom_degree, refine = refine,
-        norm = norm, Basis = Basis, rebase = rebase, verbose = True)
-
-    sk.fit(X, y)
-
-    err = np.linalg.norm(sk(X) - y)
-    print(f" error : {err:8.2e}")
-    assert err < 5e-8, "Expected an exact fit"
 
 
 if __name__ == '__main__':
-    test_skfit_exact(1000, 2, 5, 5, True, ArnoldiPolynomialBasis, 0, complex_ = False, rebase = True)
     pass
     #test_skfit_rebase()
     #test_minimize_1_norm()
diff --git a/tests/test_rational_ratio.py b/tests/test_rational_ratio.py
index dbc0f3a..b4b7e9b 100644
--- a/tests/test_rational_ratio.py
+++ b/tests/test_rational_ratio.py
@@ -19,7 +19,7 @@ def test_rational_jacobian_real_arnoldi():
     num_degree = 20
     denom_degree = 20
 
-    sk = SKRationalApproximation(num_degree, denom_degree, refine = False, maxiter = 5)
+    sk = StabilizedSKRationalApproximation(num_degree, denom_degree, maxiter = 5)
     sk.fit(X, y)
 
     P = sk.P
@@ -43,7 +43,7 @@ def test_rational_jacobian_complex_arnoldi():
     num_degree = 20
     denom_degree = 20
 
-    sk = SKRationalApproximation(num_degree, denom_degree, refine = False, maxiter = 5)
+    sk = StabilizedSKRationalApproximation(num_degree, denom_degree, maxiter = 5)
     sk.fit(X, y)
 
     P = sk.P
@@ -123,7 +123,7 @@ def test_rational_ratio_inf_complex():
     #X, y = random_data(M, dim, complex_, seed)
     X, y = absolute_value(M, complex_)
 
-    sk = SKRationalApproximation(num_degree, denom_degree, refine = False, maxiter = 5, rebase= True)
+    sk = StabilizedSKRationalApproximation(num_degree, denom_degree, maxiter = 5)
     sk.fit(X, y)
     print(sk.a)
     print(sk.b)
diff --git a/tests/test_skiter.py b/tests/test_skiter.py
index 5bfbad6..6984cde 100644
--- a/tests/test_skiter.py
+++ b/tests/test_skiter.py
@@ -1,7 +1,12 @@
+
+
 import numpy as np
 from polyrat import *
 from polyrat.skiter import _minimize_inf_norm_real, _minimize_inf_norm_complex
+import pytest
+from .test_data import *
+
 
 def test_minimize_inf_norm_real():
     np.random.seed(0)
     A = np.random.randn(100, 10)
@@ -34,12 +39,64 @@ def test_minimize_inf_norm_complex():
 
     assert obj <= obj_new
 
 
-def test_skfit_rebase():
-    X = np.random.randn(50,1) #+ 1j*np.random.randn(100,1)
-    #y = np.random.randn(100,) + 1j*np.random.randn(100,)
-    y = np.abs(X).flatten()
-    skfit_rebase(X, y, 4, 4, norm = np.inf)
+
+
+
+@pytest.mark.parametrize("M", [1000])
+@pytest.mark.parametrize("num_degree", [5, [3,4]])
+@pytest.mark.parametrize("denom_degree", [5, [5,3]])
+@pytest.mark.parametrize("dim", [1, 2])
+@pytest.mark.parametrize("Basis",
+    [LegendrePolynomialBasis,
+    ArnoldiPolynomialBasis])
+@pytest.mark.parametrize("seed", [0])
+@pytest.mark.parametrize("norm", [2])
+@pytest.mark.parametrize("refine", [True, False])
+@pytest.mark.parametrize("complex_", [True, False])
+
+def test_skfit_exact(M, dim, num_degree, denom_degree, refine, norm, Basis, seed, complex_):
+    r"""
+    When the data is *exactly* a rational function of the specified degree,
+    SK iteration should recover it exactly (modulo conditioning issues)
+    """
+
+    X, y = random_data(M, dim, complex_, seed)
+
+    # Exit without error if testing a total degree problem
+    try:
+        num_degree = int(num_degree)
+    except (TypeError, ValueError):
+        if len(num_degree) != dim: return
+    try:
+        denom_degree = int(denom_degree)
+    except (TypeError, ValueError):
+        if len(denom_degree) != dim: return
+
+    # Generate exact fit
+    P = LegendrePolynomialBasis(X, num_degree).vandermonde_X
+    Q = LegendrePolynomialBasis(X, denom_degree).vandermonde_X
+
+    # coefficients
+    a = np.random.randn(P.shape[1])
+    b = np.random.randn(Q.shape[1])
+    if complex_:
+        a = a + 1j*np.random.randn(*a.shape)
+        b = b + 1j*np.random.randn(*b.shape)
+
+    y = (P @ a)/(Q @ b)
+
+    sk = SKRationalApproximation(num_degree, denom_degree,
+        norm = norm, Basis = Basis, verbose = True)
+
+    sk.fit(X, y)
+    if refine:
+        sk.refine(X, y)
+
+    err = np.linalg.norm(sk(X) - y)
+    print(f" error : {err:8.2e}")
+    assert err < 5e-8, "Expected an exact fit"
+
+
 
 if __name__ == '__main__':
     #test_minimize_inf_norm_complex()
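
Usage note (editor's sketch, not part of the patch): the new public entry points added above are skfit_stabilized() and the StabilizedSKRationalApproximation class, re-exported through the new star import in polyrat/__init__.py. A minimal fit of |x| on [-1, 1] (illustrative data, not taken from this diff) would look like:

    import numpy as np
    from polyrat import StabilizedSKRationalApproximation

    # Sample points, shape (M, dim), and target values, shape (M,)
    X = np.linspace(-1, 1, 500).reshape(-1, 1)
    y = np.abs(X).flatten()

    # Degree-(10, 10) rational approximant; fit() dispatches to skfit_stabilized,
    # which rebuilds an Arnoldi basis with weight 1/denom at every iteration
    sk = StabilizedSKRationalApproximation(10, 10, maxiter = 20, verbose = False)
    sk.fit(X, y)

    # Evaluating the approximant mirrors tests/test_skiter.py above
    residual = np.linalg.norm(sk(X) - y)

Calling sk(X) evaluates the fitted numerator/denominator pair, exactly as test_skfit_exact does; the optional denom0 argument to fit() seeds the iteration with a custom initial denominator weight, and norm=np.inf selects the infinity-norm linearized solve.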