Skip to content

Commit

Permalink
Miscellaneous cleanup (#95)
Browse files Browse the repository at this point in the history
* Removed the unused `bverbose` debug-verbosity option and its dead print paths
* Promoted `alpha0` from a module-level constant to a keyword argument of `fit` (default 10), threaded through to `get_parameters`
* Renamed the mixed-case variable `RMStraj` to `rmstraj` for naming consistency
* Cleaned up stale comments and extraneous whitespace
  • Loading branch information
pgkirsch committed Jul 9, 2021
1 parent 9966aaa commit 5b2b316
Show file tree
Hide file tree
Showing 5 changed files with 13 additions and 42 deletions.
1 change: 0 additions & 1 deletion gpfit/classes.py
Expand Up @@ -31,7 +31,6 @@ def max_affine(x, ba):

# augment data with column of ones
X = np.hstack((np.ones((npt, 1)), x))

y, partition = np.dot(X, ba).max(1), np.dot(X, ba).argmax(1)

# The not-sparse sparse version
Expand Down
11 changes: 5 additions & 6 deletions gpfit/fit.py
Expand Up @@ -6,7 +6,6 @@
from .print_fit import print_isma, print_sma, print_ma
from .constraint_set import FitConstraintSet

ALPHA0 = 10
CLASSES = {
"ISMA": implicit_softmax_affine,
"SMA": softmax_affine,
Expand All @@ -15,7 +14,7 @@


# pylint: disable=invalid-name
def get_parameters(ftype, K, xdata, ydata):
def get_parameters(ftype, K, xdata, ydata, alpha0):
"Perform least-squares fitting optimization."

ydata_col = ydata.reshape(ydata.size, 1)
Expand All @@ -28,9 +27,9 @@ def residual(params):
return r, drdp

if ftype == "ISMA":
params, _ = levenberg_marquardt(residual, hstack((ba, ALPHA0*ones(K))))
params, _ = levenberg_marquardt(residual, hstack((ba, alpha0*ones(K))))
elif ftype == "SMA":
params, _ = levenberg_marquardt(residual, hstack((ba, ALPHA0)))
params, _ = levenberg_marquardt(residual, hstack((ba, alpha0)))
else:
params, _ = levenberg_marquardt(residual, ba)

Expand All @@ -40,7 +39,7 @@ def residual(params):
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=import-error
def fit(xdata, ydata, K, ftype="ISMA"):
def fit(xdata, ydata, K, ftype="ISMA", alpha0=10):
"""
Fit a log-convex function to multivariate data, returning a FitConstraintSet
Expand Down Expand Up @@ -81,7 +80,7 @@ def fit(xdata, ydata, K, ftype="ISMA"):
fitdata["lb%d" % i] = exp(min(xdata.T[i]))
fitdata["ub%d" % i] = exp(max(xdata.T[i]))

params = get_parameters(ftype, K, xdata, ydata)
params = get_parameters(ftype, K, xdata, ydata, alpha0)

# A: exponent parameters, B: coefficient parameters
A = params[[i for i in range(K*(d+1)) if i % (d + 1) != 0]]
Expand Down
7 changes: 0 additions & 7 deletions gpfit/initialize.py
Expand Up @@ -23,9 +23,6 @@ def get_initial_parameters(x, y, K):
2D array [(dimx+1) x k]
"""
defaults = {}
defaults['bverbose'] = False
options = defaults

npt, dimx = x.shape

Expand Down Expand Up @@ -60,7 +57,6 @@ def get_initial_parameters(x, y, K):
sortdistind = sqdists[:, k].argsort()

i = sum(inds) # number of points in partition
iinit = i

if i < dimx+1:
# obviously, at least need dimx+1 points. fill these in before
Expand All @@ -73,9 +69,6 @@ def get_initial_parameters(x, y, K):
i = i+1
inds[sortdistind[i]] = 1

if options['bverbose']:
print("Initialization: Added %s points to partition %s to "
"maintain full rank for local fitting." % (i-iinit, k))
# now create the local fit
b[:, k] = lstsq(X[inds.nonzero()], y[inds.nonzero()], rcond=-1)[0][:, 0]
# Rank condition specified to default for python upgrades
Expand Down
16 changes: 8 additions & 8 deletions gpfit/least_squares.py
Expand Up @@ -45,7 +45,7 @@ def levenberg_marquardt(residfun, initparams,
-------
params: np.array (1D)
Parameter vector that locally minimizes norm(residfun, 2)
RMStraj: np.array
rmstraj: np.array
History of RMS errors after each step (first point is initialization)
"""

Expand Down Expand Up @@ -84,7 +84,7 @@ def levenberg_marquardt(residfun, initparams,
diagJJ = sum(J*J, 0).T
zeropad = np.zeros((nparam, 1))
lamb = lambdainit
RMStraj = [rms]
rmstraj = [rms]

# Display info for 1st iter
if verbose:
Expand All @@ -107,8 +107,8 @@ def levenberg_marquardt(residfun, initparams,
print('Reached maxtime (%s seconds)' % maxtime)
break
elif (itr >= 2 and
abs(RMStraj[itr] - RMStraj[itr-2]) <
RMStraj[itr]*tolrms):
abs(rmstraj[itr] - rmstraj[itr-2]) <
rmstraj[itr]*tolrms):
# Should really only allow this exit case
# if trust region constraint is slack
if verbose:
Expand Down Expand Up @@ -143,7 +143,7 @@ def levenberg_marquardt(residfun, initparams,
# Check function value at trialp
trialr, trialJ = residfun(trialp)
trialrms = norm(trialr)/np.sqrt(npt)
RMStraj.append(trialrms)
rmstraj.append(trialrms)

# Accept or reject trial params
if trialrms < rms:
Expand Down Expand Up @@ -176,9 +176,9 @@ def levenberg_marquardt(residfun, initparams,
prev_trial_accepted = False
params_updated = False

assert len(RMStraj) == itr + 1
RMStraj = np.array(RMStraj)
assert len(rmstraj) == itr + 1
rmstraj = np.array(rmstraj)
if verbose:
print('Final RMS: ' + repr(rms))

return params, RMStraj
return params, rmstraj
20 changes: 0 additions & 20 deletions gpfit/logsumexp.py
Expand Up @@ -29,8 +29,6 @@ def lse_implicit(x, alpha):
2D array [nPoints x nDim]
"""

bverbose = False

tol = 10*spacing(1)
npt, nx = x.shape

Expand All @@ -42,12 +40,7 @@ def lse_implicit(x, alpha):
m = x.max(1) # maximal x values
# distance from m; note h <= 0 for all entries
h = x - (tile(m, (nx, 1))).T
# (unless x has infs; in this case h has nans;
# can prob deal w/ this gracefully)
# should also deal with alpha==inf case gracefully

L = zeros((npt,)) # initial guess. note y = m + L

Lmat = (tile(L, (nx, 1))).T

# initial eval
Expand Down Expand Up @@ -75,10 +68,6 @@ def lse_implicit(x, alpha):
# update inds that need to be evaluated
i[i] = abs(f[i]) > tol

if bverbose:
print('lse_implicit converged in ' +
repr(neval) + ' newton-raphson steps')

y = m + L

dydx = alphaexpo/(tile(sumalphaexpo, (nx, 1))).T
Expand Down Expand Up @@ -112,21 +101,12 @@ def lse_scaled(x, alpha):
"""

_, n = x.shape

m = x.max(axis=1) # maximal x values

h = x - (tile(m, (n, 1))).T # distance from m; note h <= 0 for all entries
# (unless x has infs; in this case h has nans;
# can prob deal w/ this gracefully)
# should also deal with alpha==inf case gracefully

expo = exp(alpha*h)
sumexpo = expo.sum(axis=1)

L = log(sumexpo)/alpha

y = L + m

dydx = expo/(tile(sumexpo, (n, 1))).T
# note that sum(dydx,2)==1, i.e. dydx is a probability distribution
dydalpha = ((h*expo).sum(axis=1)/sumexpo - L)/alpha
Expand Down

0 comments on commit 5b2b316

Please sign in to comment.