ENH: Get rid of hard-coded lbfgs. Closes #988. #1364

Merged 1 commit on Feb 6, 2014
40 changes: 17 additions & 23 deletions statsmodels/tsa/ar_model.py
@@ -453,7 +453,7 @@ def select_order(self, maxlag, ic, trend='c', method='mle'):
         return bestlag
 
     def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
-            transparams=True, start_params=None, solver=None, maxiter=35,
+            transparams=True, start_params=None, solver='lbfgs', maxiter=35,
             full_output=1, disp=1, callback=None, **kwargs):
         """
         Fit the unconditional maximum likelihood of an AR(p) process.
@@ -491,14 +491,11 @@ def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
         start_params : array-like, optional
             A first guess on the parameters. Default is cmle estimates.
         solver : str or None, optional
-            Solver to be used. The default is 'l_bfgs' (limited memory Broyden-
-            Fletcher-Goldfarb-Shanno). Other choices are 'bfgs', 'newton'
-            (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' - (conjugate gradient),
-            'ncg' (non-conjugate gradient), and 'powell'.
-            The limited memory BFGS uses m=30 to approximate the Hessian,
-            projected gradient tolerance of 1e-7 and factr = 1e3. These
-            cannot currently be changed for l_bfgs. See notes for more
-            information.
+            Solver to be used if method is 'mle'. The default is 'lbfgs'
+            (limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
+            are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
+            'cg' (conjugate gradient), 'ncg' (Newton conjugate gradient),
+            and 'powell'.
         maxiter : int, optional
             The maximum number of function evaluations. Default is 35.
         tol : float
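
For reviewers trying this out, here is a minimal usage sketch of the new interface under this branch's API; the simulated series, seed, and lag order are invented for illustration and are not part of this diff:

```python
import numpy as np
from statsmodels.tsa.ar_model import AR

np.random.seed(12345)
y = np.cumsum(np.random.randn(250))  # toy random-walk series, illustration only

# method='mle' now defaults to solver='lbfgs' instead of a hard-coded
# fmin_l_bfgs_b call; any of the documented solvers can be swapped in.
res_default = AR(y).fit(maxlag=2, method='mle')          # uses 'lbfgs'
res_nm = AR(y).fit(maxlag=2, method='mle', solver='nm')  # Nelder-Mead instead
print(res_default.params)
print(res_nm.params)
```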
@@ -560,14 +557,14 @@ def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
         self.Y = Y
         self.X = X
 
-        if solver:
-            solver = solver.lower()
         if method == "cmle":  # do OLS
             arfit = OLS(Y,X).fit()
             params = arfit.params
             self.nobs = nobs - k_ar
             self.sigma2 = arfit.ssr/arfit.nobs  # needed for predict fcasterr
-        if method == "mle":
+
+        elif method == "mle":
+            solver = solver.lower()
             self.nobs = nobs
             if start_params is None:
                 start_params = OLS(Y,X).fit().params
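
A consequence of the if/elif restructuring worth noting: solver handling is now scoped to the 'mle' branch, so a 'cmle' fit goes straight to OLS and never touches the solver argument. A short sketch of both paths (toy data, not from this PR):

```python
import numpy as np
from statsmodels.tsa.ar_model import AR

np.random.seed(0)
y = np.cumsum(np.random.randn(200))  # toy data, illustration only

# 'cmle' is plain OLS; solver.lower() now runs only inside the
# elif method == "mle" branch, so 'cmle' ignores solver entirely.
res_cmle = AR(y).fit(maxlag=2, method='cmle')
res_mle = AR(y).fit(maxlag=2, method='mle')  # solver consulted here only
```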
@@ -578,20 +575,17 @@ def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
                                           k_trend + k_ar))
                 start_params = self._invtransparams(start_params)
             loglike = lambda params : -self.loglike(params)
-            if solver == None:  # use limited memory bfgs
-                bounds = [(None,)*2]*(k_ar+k)
-                mlefit = optimize.fmin_l_bfgs_b(loglike, start_params,
-                    approx_grad=True, m=12, pgtol=1e-8, factr=1e2,
-                    bounds=bounds, iprint=disp)
-                self.mlefit = mlefit
-                params = mlefit[0]
-            else:
-                mlefit = super(AR, self).fit(start_params=start_params,
+            if solver == 'lbfgs':
+                kwargs.setdefault('pgtol', 1e-8)
+                kwargs.setdefault('factr', 1e2)
+                kwargs.setdefault('m', 12)
+                kwargs.setdefault('approx_grad', True)
+            mlefit = super(AR, self).fit(start_params=start_params,
                     method=solver, maxiter=maxiter,
                     full_output=full_output, disp=disp,
                     callback = callback, **kwargs)
-                self.mlefit = mlefit
-                params = mlefit.params
+
+            params = mlefit.params
             if self.transparams:
                 params = self._transparams(params)
                 self.transparams = False  # turn off now for other results
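
The kwargs.setdefault calls are what make the lbfgs tuning parameters user-overridable: a caller-supplied value survives, and only missing keys get the previously hard-coded defaults. A standalone sketch of that pattern:

```python
# Illustration only: how setdefault preserves caller-supplied values.
kwargs = {'pgtol': 1e-5}            # as if the user passed pgtol to fit()
kwargs.setdefault('pgtol', 1e-8)    # no-op: the user's 1e-5 is kept
kwargs.setdefault('factr', 1e2)     # filled in: user did not supply it
kwargs.setdefault('m', 12)          # filled in
kwargs.setdefault('approx_grad', True)
# -> pgtol stays 1e-05; factr, m, and approx_grad get the defaults
print(kwargs)
```

So a call like AR(y).fit(method='mle', pgtol=1e-5) can now tune the projected gradient tolerance, which the old hard-coded fmin_l_bfgs_b call explicitly did not allow.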