Refactored Stabilized SK iter
jeffrey-hokanson committed Nov 12, 2020
1 parent c149ced commit 33284e4
Showing 7 changed files with 239 additions and 204 deletions.
1 change: 1 addition & 0 deletions polyrat/__init__.py
@@ -17,4 +17,5 @@
from .aaa import *
from .paaa import *
from .skiter import *
from .skiter_stabilized import *
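
With this export, the stabilized iteration becomes part of the package's public surface. A minimal sketch of the import this line enables (hypothetical usage; the names are taken from polyrat/skiter_stabilized.py at the bottom of this commit):

    # Assumes this commit's polyrat is installed; both names come from
    # the new skiter_stabilized module re-exported here.
    from polyrat import skfit_stabilized, StabilizedSKRationalApproximation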

58 changes: 0 additions & 58 deletions polyrat/rational.py
@@ -4,7 +4,6 @@
import scipy.optimize
from .basis import *
from .polynomial import *
from .skiter import *
from .rational_ratio import *
from copy import deepcopy

@@ -89,63 +88,6 @@ def refine(self, X, y, norm = 2, verbose = False, **kwargs):
        # print(f"final residual norm {res_norm:21.15e}")


class SKRationalApproximation(RationalApproximation, RationalRatio):
    r"""
    Parameters
    ----------
    """

    def __init__(self, num_degree, denom_degree, refine = False, norm = 2,
            Basis = None, rebase = True, maxiter = 20, verbose = True, xtol = 1e-7):

        RationalApproximation.__init__(self, num_degree, denom_degree)
        self._refine = refine
        self.norm = norm
        self.xtol = float(xtol)
        #if self.norm != 2:
        #    raise NotImplementedError

        self.maxiter = int(maxiter)
        self.verbose = verbose
        self.rebase = rebase
        if Basis is None:
            Basis = LegendrePolynomialBasis

        self.Basis = Basis

        self.numerator = None
        self.denominator = None

    def fit(self, X, y, denom0 = None):
        X = np.array(X)
        y = np.array(y)
        assert X.shape[0] == y.shape[0], "X and y do not have the same number of rows"

        if self.rebase:
            self.numerator, self.denominator, self.hist = skfit_rebase(
                X, y, self.num_degree, self.denom_degree,
                maxiter = self.maxiter, verbose = self.verbose, norm = self.norm,
                history = True, xtol = self.xtol, denom0 = denom0,
            )
        else:
            num_basis = self.Basis(X, self.num_degree)
            denom_basis = self.Basis(X, self.denom_degree)
            P = num_basis.vandermonde_X
            Q = denom_basis.vandermonde_X

            a, b, self.hist = skfit(y, P, Q, maxiter = self.maxiter, verbose = self.verbose,
                norm = self.norm, history = True, xtol = self.xtol, denom0 = denom0)

            self.numerator = Polynomial(num_basis, a)
            self.denominator = Polynomial(denom_basis, b)

        if self._refine:
            self.refine(X, y, norm = self.norm)




class RationalBarycentric(RationalFunction):
r"""
116 changes: 31 additions & 85 deletions polyrat/skiter.py
@@ -7,10 +7,10 @@
from .basis import *
from .arnoldi import *
from .polynomial import *
from .rational import *
from iterprinter import IterationPrinter



def _minimize_2_norm(A):
r"""
Solve the optimization problem
@@ -240,101 +240,47 @@ def skfit(y, P, Q, maxiter = 20, verbose = True, history = False, denom0 = None,
    return best_sol
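
The helper _minimize_2_norm is unchanged by this commit and collapsed out of the diff. Based only on how it is called here (x, cond = linearized_solution(A)), a plausible sketch of its behavior is the classic total-least-squares step; the code below is an assumption, not polyrat's actual implementation:

    import numpy as np

    def _minimize_2_norm_sketch(A):
        # Hypothetical stand-in for polyrat's _minimize_2_norm (body not shown
        # in this diff): return the unit 2-norm x minimizing ||A x||_2, i.e.
        # the right singular vector for the smallest singular value, together
        # with the condition number of A, matching `x, cond = linearized_solution(A)`.
        U, s, VH = np.linalg.svd(A, full_matrices = False)
        return VH[-1].conj(), s[0] / s[-1]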



Removed from this file (it reappears in the new polyrat/skiter_stabilized.py below, renamed skfit_stabilized):

def skfit_rebase(X, y, num_degree, denom_degree, maxiter = 20, verbose = True,
        xtol = 1e-7, history = False, denom0 = None, norm = 2):
    r""" The SK-iteration, but at each step use Vandermonde with Arnoldi to construct a new basis

    Parameters
    ----------
    X: np.array (M, dim)
    y: np.array (M,)

    Returns
    -------
    """

    if history:
        hist = []

    if denom0 is None:
        denom = np.ones(X.shape[0], dtype = X.dtype)
    else:
        assert denom0.shape[0] == X.shape[0]
        denom = denom0

    if np.isclose(norm, 2):
        linearized_solution = _minimize_2_norm
    elif ~np.isfinite(norm):
        linearized_solution = _minimize_inf_norm
    else:
        raise NotImplementedError

    if verbose:
        printer = IterationPrinter(it = '4d', res_norm = '21.15e', delta_fit = '8.2e', cond = '8.2e')
        printer.print_header(it = 'iter', res_norm = 'residual norm', delta_fit = 'delta fit', cond = 'cond')

    # As there are no guarantees about convergence,
    # we record the best iteration
    best_res_norm = np.inf
    best_sol = None

    # For comparison with the current iterate to determine termination
    fit_old = np.zeros(y.shape[0], dtype = X.dtype)

    for it in range(maxiter):
        try:
            num_basis = ArnoldiPolynomialBasis(X, num_degree, weight = 1./denom)
            denom_basis = ArnoldiPolynomialBasis(X, denom_degree, weight = 1./denom)
            P = num_basis.vandermonde_X
            Q = denom_basis.vandermonde_X
            #P, RP, _ = vandermonde_arnoldi_CGS(X, num_degree, weight = 1./denom)
            #Q, RQ, _ = vandermonde_arnoldi_CGS(X, denom_degree, weight = 1./denom)

            A = np.hstack([P, np.multiply(-y[:,None], Q) ])
            x, cond = linearized_solution(A)
            a = x[:P.shape[1]]
            b = x[-Q.shape[1]:]

            Pa = P @ a
            Qb = Q @ b

            fit = Pa/Qb

            delta_fit = np.linalg.norm(fit - fit_old, norm)
            res_norm = np.linalg.norm(fit - y, norm)

        except (LinAlgError, ValueError) as e:
            if verbose: print(e)
            break

        # If we have improved the fit, record this iterate
        if res_norm < best_res_norm:
            numerator = Polynomial(num_basis, a)
            denominator = Polynomial(denom_basis, b)
            best_sol = [numerator, denominator]
            best_res_norm = res_norm

        if history:
            hist.append({'fit': fit, 'cond': cond})

        if verbose:
            printer.print_iter(it = it, delta_fit = delta_fit, res_norm = res_norm, cond = cond)

        if delta_fit < xtol:
            break

        denom = np.abs(denom * Qb)
        denom[denom == 0.] = 1.
        fit_old = fit

    if history:
        return best_sol + [hist]
    else:
        return best_sol

Added to this file (moved from polyrat/rational.py above, with the refine and rebase options dropped):

class SKRationalApproximation(RationalApproximation, RationalRatio):
    r"""
    Parameters
    ----------
    """

    def __init__(self, num_degree, denom_degree, norm = 2,
            Basis = None, maxiter = 20, verbose = True, xtol = 1e-7):

        RationalApproximation.__init__(self, num_degree, denom_degree)
        self.norm = norm
        self.xtol = float(xtol)
        #if self.norm != 2:
        #    raise NotImplementedError

        self.maxiter = int(maxiter)
        self.verbose = verbose
        if Basis is None:
            Basis = LegendrePolynomialBasis

        self.Basis = Basis

        self.numerator = None
        self.denominator = None

    def fit(self, X, y, denom0 = None):
        X = np.array(X)
        y = np.array(y)
        assert X.shape[0] == y.shape[0], "X and y do not have the same number of rows"

        num_basis = self.Basis(X, self.num_degree)
        denom_basis = self.Basis(X, self.denom_degree)
        P = num_basis.vandermonde_X
        Q = denom_basis.vandermonde_X

        a, b, self.hist = skfit(y, P, Q, maxiter = self.maxiter, verbose = self.verbose,
            norm = self.norm, history = True, xtol = self.xtol, denom0 = denom0)

        self.numerator = Polynomial(num_basis, a)
        self.denominator = Polynomial(denom_basis, b)
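
For orientation, the linearized subproblem that both skfit and the rebased variant solve at step k can be sketched as follows; this is a reading of the code above, not text from the commit. With q_{k-1} the previous denominator evaluated at the sample points x_i,

    \min_{a,\,b}\ \Bigl\| \frac{1}{|q_{k-1}(x_i)|} \bigl( [P a]_i - y_i\, [Q b]_i \bigr) \Bigr\|
    \quad \text{subject to a normalization such as } \|(a, b)\|_2 = 1,

which corresponds to the matrix A = [P, -diag(y) Q] handed to linearized_solution. The stabilization is that P and Q are rebuilt every iteration by Vandermonde with Arnoldi under the weight 1/|q_{k-1}| (weight = 1./denom above), so their columns stay close to orthonormal and the condition number reported in the cond column stays under control.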
142 changes: 142 additions & 0 deletions polyrat/skiter_stabilized.py
@@ -0,0 +1,142 @@
r""" A stabilized variant of the Sanathanan-Koerner iteration
"""

import numpy as np
from numpy.linalg import LinAlgError
from iterprinter import IterationPrinter
from .skiter import _minimize_2_norm, _minimize_inf_norm
from .arnoldi import *
from .rational import *



def skfit_stabilized(X, y, num_degree, denom_degree, maxiter = 20, verbose = True,
xtol = 1e-7, history = False, denom0 = None, norm = 2):
r""" The Stabilized Sanathanan-Koerner Iteration
Parameters
----------
X: np.array (M,dim)
y: np.array (M,)
Returns
-------
"""

if history:
hist = []

if denom0 is None:
denom = np.ones(X.shape[0], dtype = X.dtype)
else:
assert denom0.shape[0] == X.shape[0]
denom = denom0


if np.isclose(norm, 2):
linearized_solution = _minimize_2_norm
elif ~np.isfinite(norm):
linearized_solution = _minimize_inf_norm
else:
raise NotImplementedError

if verbose:
printer = IterationPrinter(it = '4d', res_norm = '21.15e', delta_fit = '8.2e', cond = '8.2e')
printer.print_header(it = 'iter', res_norm = 'residual norm', delta_fit = 'delta fit', cond = 'cond')

# As there are no guarntees about convergence,
# we record the best iteration
best_res_norm = np.inf
best_sol = None

# For comparison with current iterate to determine termination
fit_old = np.zeros(y.shape[0], dtype = X.dtype)

for it in range(maxiter):
try:
num_basis = ArnoldiPolynomialBasis(X, num_degree, weight = 1./denom)
denom_basis = ArnoldiPolynomialBasis(X, denom_degree, weight = 1./denom)
P = num_basis.vandermonde_X
Q = denom_basis.vandermonde_X

A = np.hstack([P, np.multiply(-y[:,None], Q) ])
x, cond = linearized_solution(A)
a = x[:P.shape[1]]
b = x[-Q.shape[1]:]

Pa = P @ a
Qb = Q @ b

fit = Pa/Qb

delta_fit = np.linalg.norm( (fit - fit_old).flatten(), norm)
res_norm = np.linalg.norm( (fit - y).flatten(), norm)

except (LinAlgError, ValueError) as e:
if verbose: print(e)
break


# If we have improved the fit, append this
if res_norm < best_res_norm:
numerator = Polynomial(num_basis, a)
denominator = Polynomial(denom_basis, b)
best_sol = [numerator, denominator]
best_res_norm = res_norm

if history:
hist.append({'fit': fit, 'cond':cond})

if verbose:
printer.print_iter(it = it, delta_fit = delta_fit, res_norm = res_norm, cond = cond)

if delta_fit < xtol:
break

denom = np.abs(denom * Qb)
denom[denom == 0.] = 1.
fit_old = fit

if history:
return best_sol + [hist]
else:
return best_sol



class StabilizedSKRationalApproximation(RationalApproximation, RationalRatio):
r"""
Parameters
----------
"""

def __init__(self, num_degree, denom_degree, norm = 2, maxiter = 20, verbose = True, xtol = 1e-7):

RationalApproximation.__init__(self, num_degree, denom_degree)
self.norm = norm
self.xtol = float(xtol)

self.maxiter = int(maxiter)
self.verbose = verbose

self.numerator = None
self.denominator = None

def fit(self, X, y, denom0 = None):
X = np.array(X)
y = np.array(y)
assert X.shape[0] == y.shape[0], "X and y do not have the same number of rows"

self.numerator, self.denominator, self.hist = skfit_stabilized(
X, y, self.num_degree, self.denom_degree,
maxiter = self.maxiter, verbose = self.verbose, norm = self.norm,
history = True, xtol = self.xtol, denom0 = denom0,
)
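
A quick end-to-end sketch of the new class on synthetic data (a hypothetical example, not part of the commit; it assumes numpy and that Polynomial instances are callable on X, consistent with their use as numerator and denominator here):

    import numpy as np
    from polyrat import StabilizedSKRationalApproximation

    # Hypothetical test problem: the Runge function on [-1, 1]
    X = np.linspace(-1, 1, 500).reshape(-1, 1)    # shape (M, dim), as fit expects
    y = 1. / (1. + 25. * X[:, 0]**2)              # shape (M,)

    rat = StabilizedSKRationalApproximation(10, 10, verbose = False)
    rat.fit(X, y)

    # numerator and denominator are Polynomial objects over the final Arnoldi bases;
    # evaluating their ratio recovers the fitted rational approximant.
    err = rat.numerator(X) / rat.denominator(X) - y
    print(np.linalg.norm(err))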
