ENH: Get rid of hard-coded lbfgs. Closes #988.
jseabold committed Feb 6, 2014
1 parent 482c17d commit 3ae69df
Showing 1 changed file with 17 additions and 23 deletions.
statsmodels/tsa/ar_model.py (40 changes: 17 additions & 23 deletions)
@@ -453,7 +453,7 @@ def select_order(self, maxlag, ic, trend='c', method='mle'):
         return bestlag

     def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
-            transparams=True, start_params=None, solver=None, maxiter=35,
+            transparams=True, start_params=None, solver='lbfgs', maxiter=35,
             full_output=1, disp=1, callback=None, **kwargs):
         """
         Fit the unconditional maximum likelihood of an AR(p) process.
@@ -491,14 +491,11 @@ def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
         start_params : array-like, optional
             A first guess on the parameters. Default is cmle estimates.
         solver : str or None, optional
-            Solver to be used. The default is 'l_bfgs' (limited memory Broyden-
-            Fletcher-Goldfarb-Shanno). Other choices are 'bfgs', 'newton'
-            (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' - (conjugate gradient),
-            'ncg' (non-conjugate gradient), and 'powell'.
-            The limited memory BFGS uses m=30 to approximate the Hessian,
-            projected gradient tolerance of 1e-7 and factr = 1e3. These
-            cannot currently be changed for l_bfgs. See notes for more
-            information.
+            Solver to be used if method is 'mle'. The default is 'lbfgs'
+            (limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
+            are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
+            'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
+            and 'powell'.
         maxiter : int, optional
             The maximum number of function evaluations. Default is 35.
         tol : float
@@ -560,14 +557,14 @@ def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
         self.Y = Y
         self.X = X

-
+        if solver:
+            solver = solver.lower()
         if method == "cmle":     # do OLS
             arfit = OLS(Y,X).fit()
             params = arfit.params
             self.nobs = nobs - k_ar
             self.sigma2 = arfit.ssr/arfit.nobs #needed for predict fcasterr
-        if method == "mle":
-            solver = solver.lower()
+        elif method == "mle":
             self.nobs = nobs
         if start_params is None:
             start_params = OLS(Y,X).fit().params
@@ -578,20 +575,17 @@ def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
                                              k_trend + k_ar))
                 start_params = self._invtransparams(start_params)
             loglike = lambda params : -self.loglike(params)
-            if solver == None:  # use limited memory bfgs
-                bounds = [(None,)*2]*(k_ar+k)
-                mlefit = optimize.fmin_l_bfgs_b(loglike, start_params,
-                    approx_grad=True, m=12, pgtol=1e-8, factr=1e2,
-                    bounds=bounds, iprint=disp)
-                self.mlefit = mlefit
-                params = mlefit[0]
-            else:
-                mlefit = super(AR, self).fit(start_params=start_params,
+            if solver == 'lbfgs':
+                kwargs.setdefault('pgtol', 1e-8)
+                kwargs.setdefault('factr', 1e2)
+                kwargs.setdefault('m', 12)
+                kwargs.setdefault('approx_grad', True)
+            mlefit = super(AR, self).fit(start_params=start_params,
                            method=solver, maxiter=maxiter,
                            full_output=full_output, disp=disp,
                            callback = callback, **kwargs)
-                self.mlefit = mlefit
-                params = mlefit.params
+
+            params = mlefit.params
             if self.transparams:
                 params = self._transparams(params)
                 self.transparams = False # turn off now for other results
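The change above drops the special-cased optimize.fmin_l_bfgs_b branch and routes every solver, including the new 'lbfgs' default, through the generic LikelihoodModel.fit dispatch, so the previously hard-coded l-bfgs tuning values become overridable defaults. A minimal self-contained sketch of that kwargs.setdefault pattern in plain SciPy; the fit function below is illustrative, not statsmodels API:

    import numpy as np
    from scipy import optimize

    def fit(objective, start_params, solver='lbfgs', **kwargs):
        # Solver-specific tuning values are soft defaults: setdefault fills
        # a key only when the caller has not supplied a value of their own.
        if solver.lower() == 'lbfgs':
            kwargs.setdefault('pgtol', 1e-8)
            kwargs.setdefault('factr', 1e2)
            kwargs.setdefault('m', 12)
            kwargs.setdefault('approx_grad', True)
            xopt, fval, info = optimize.fmin_l_bfgs_b(objective, start_params,
                                                      **kwargs)
            return xopt
        raise ValueError("unknown solver %r" % solver)

    # Toy objective with minimum at (1, 2); factr is overridden by the
    # caller, the remaining l-bfgs defaults still apply.
    xhat = fit(lambda p: np.sum((p - np.array([1.0, 2.0]))**2), np.zeros(2),
               factr=1e7)

Because setdefault never clobbers an existing key, a user-supplied pgtol, factr, or m now reaches the optimizer unchanged, which the old hard-coded call made impossible.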

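With the hard-coded branch gone, any solver listed in the docstring goes through the same code path. A hedged usage sketch; endog stands for an assumed 1-d array-like time series, not data from this commit:

    from statsmodels.tsa.ar_model import AR

    # endog: assumed 1-d time series; omitting solver now selects 'lbfgs'
    res = AR(endog).fit(maxlag=4, method='mle', solver='newton')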