Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

MAINT: Remove deprecated code for 0.6 #2035

Merged
merged 10 commits into from
Oct 9, 2014
61 changes: 18 additions & 43 deletions statsmodels/base/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -1074,7 +1074,7 @@ def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
return cov_p

#TODO: make sure this works as needed for GLMs
def t_test(self, r_matrix, q_matrix=None, cov_p=None, scale=None,
def t_test(self, r_matrix, cov_p=None, scale=None,
use_t=None):
"""
        Compute a t-test for each linear hypothesis of the form Rb = q
Expand All @@ -1083,16 +1083,12 @@ def t_test(self, r_matrix, q_matrix=None, cov_p=None, scale=None,
----------
r_matrix : array-like, str, tuple
- array : If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions.
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), since q_matrix is
deprecated.
q_matrix : array-like or scalar, optional
This is deprecated. See `r_matrix` and the examples for more
information on new usage. Can be either a scalar or a length p
row vector. If omitted and r_matrix is an array, `q_matrix` is
assumed to be a conformable array of zeros.
- tuple : A tuple of arrays in the form (R, q). If q is given,
can be either a scalar or a length p row vector.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
Expand Down Expand Up @@ -1158,12 +1154,6 @@ def t_test(self, r_matrix, q_matrix=None, cov_p=None, scale=None,
patsy.DesignInfo.linear_constraint
"""
from patsy import DesignInfo
if q_matrix is not None:
from warnings import warn
warn("The `q_matrix` keyword is deprecated and will be removed "
"in 0.6.0. See the documentation for the new API",
FutureWarning)
r_matrix = (r_matrix, q_matrix)
names = self.model.data.param_names
LC = DesignInfo(names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
Expand Down Expand Up @@ -1212,8 +1202,7 @@ def t_test(self, r_matrix, q_matrix=None, cov_p=None, scale=None,
df_denom=df_resid,
distribution='norm')

def f_test(self, r_matrix, q_matrix=None, cov_p=None, scale=1.0,
invcov=None):
def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
"""
Compute the F-test for a joint linear hypothesis.

Expand All @@ -1224,16 +1213,12 @@ def f_test(self, r_matrix, q_matrix=None, cov_p=None, scale=1.0,
----------
r_matrix : array-like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors.
test and k is the number of regressors. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), since q_matrix is
deprecated.
q_matrix : array-like
This is deprecated. See `r_matrix` and the examples for more
information on new usage. Can be either a scalar or a length p
row vector. If omitted and r_matrix is an array, `q_matrix` is
assumed to be a conformable array of zeros.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length k row vector.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
Expand Down Expand Up @@ -1310,30 +1295,26 @@ def f_test(self, r_matrix, q_matrix=None, cov_p=None, scale=1.0,
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
"""
res = self.wald_test(r_matrix, q_matrix=q_matrix, cov_p=cov_p,
scale=scale, invcov=invcov, use_f=True)
res = self.wald_test(r_matrix, cov_p=cov_p, scale=scale,
invcov=invcov, use_f=True)
return res

#TODO: untested for GLMs?
def wald_test(self, r_matrix, q_matrix=None, cov_p=None, scale=1.0,
invcov=None, use_f=None):
def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,
use_f=None):
"""
Compute a Wald-test for a joint linear hypothesis.

Parameters
----------
r_matrix : array-like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors.
test and k is the number of regressors. It is assumed that the
linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), since q_matrix is
deprecated.
q_matrix : array-like
This is deprecated. See `r_matrix` and the examples for more
information on new usage. Can be either a scalar or a length p
row vector. If omitted and r_matrix is an array, `q_matrix` is
assumed to be a conformable array of zeros.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length p row vector.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
Expand Down Expand Up @@ -1376,12 +1357,6 @@ def wald_test(self, r_matrix, q_matrix=None, cov_p=None, scale=1.0,
use_f = (hasattr(self, 'use_t') and self.use_t)

from patsy import DesignInfo
if q_matrix is not None:
from warnings import warn
warn("The `q_matrix` keyword is deprecated and will be removed "
"in 0.6.0. See the documentation for the new API",
FutureWarning)
r_matrix = (r_matrix, q_matrix)
names = self.model.data.param_names
LC = DesignInfo(names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
Expand Down
18 changes: 0 additions & 18 deletions statsmodels/discrete/discrete_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2438,17 +2438,6 @@ def get_margeff(self, at='overall', method='dydx', atexog=None,
from statsmodels.discrete.discrete_margins import DiscreteMargins
return DiscreteMargins(self, (at, method, atexog, dummy, count))


def margeff(self, at='overall', method='dydx', atexog=None, dummy=False,
count=False):
"""DEPRECATED: marginal effects, use get_margeff instead
"""
import warnings
warnings.warn("This method is deprecated and will be removed in 0.6.0."
" Use get_margeff instead", FutureWarning)
return self.get_margeff(at, method, atexog, dummy, count)


def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
"""Summarize the Regression Results
Expand Down Expand Up @@ -2726,13 +2715,6 @@ def summary(self, yname=None, xname=None, title=None, alpha=.05,
return smry
summary.__doc__ = DiscreteResults.summary.__doc__

@cache_readonly
def resid(self):
import warnings
warnings.warn("This attribute is deprecated and will be removed in "
"0.6.0. Use resid_dev instead.", FutureWarning)
return self.resid_dev

@cache_readonly
def resid_dev(self):
"""
Expand Down
32 changes: 18 additions & 14 deletions statsmodels/distributions/tests/test_edgeworth.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,22 +74,24 @@ def test_too_few_cumulants(self):
assert_raises(ValueError, ExpandedNormal, [1])

def test_coefficients(self):
# 3rd order in n**(1/2)
ne3 = ExpandedNormal([0., 1., 1.])
assert_allclose(ne3._coef, [1., 0., 0., 1./6])
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
# 3rd order in n**(1/2)
ne3 = ExpandedNormal([0., 1., 1.])
assert_allclose(ne3._coef, [1., 0., 0., 1./6])

# 4th order in n**(1/2)
ne4 = ExpandedNormal([0., 1., 1., 1.])
assert_allclose(ne4._coef, [1., 0., 0., 1./6, 1./24, 0., 1./72])
# 4th order in n**(1/2)
ne4 = ExpandedNormal([0., 1., 1., 1.])
assert_allclose(ne4._coef, [1., 0., 0., 1./6, 1./24, 0., 1./72])

# 5th order
ne5 = ExpandedNormal([0., 1., 1., 1., 1.])
assert_allclose(ne5._coef, [1., 0., 0., 1./6, 1./24, 1./120,
1./72, 1./144, 0., 1./1296])
# 5th order
ne5 = ExpandedNormal([0., 1., 1., 1., 1.])
assert_allclose(ne5._coef, [1., 0., 0., 1./6, 1./24, 1./120,
1./72, 1./144, 0., 1./1296])

# adding trailing zeroes increases the order
ne33 = ExpandedNormal([0., 1., 1., 0.])
assert_allclose(ne33._coef, [1., 0., 0., 1./6, 0., 0., 1./72])
# adding trailing zeroes increases the order
ne33 = ExpandedNormal([0., 1., 1., 0.])
assert_allclose(ne33._coef, [1., 0., 0., 1./6, 0., 0., 1./72])

def test_normal(self):
# with two cumulants, it's just a gaussian
Expand All @@ -101,7 +103,9 @@ def test_chi2_moments(self):
# construct the expansion for \chi^2
N, df = 6, 15
cum = [_chi2_cumulant(n+1, df) for n in range(N)]
ne = ExpandedNormal(cum, name='edgw_chi2')
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
ne = ExpandedNormal(cum, name='edgw_chi2')

# compare the moments
assert_allclose([_chi2_moment(n, df) for n in range(N)],
Expand Down
2 changes: 1 addition & 1 deletion statsmodels/nonparametric/api.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from .kde import KDE, KDEUnivariate
from .kde import KDEUnivariate
from .smoothers_lowess import lowess
from . import bandwidths

Expand Down
9 changes: 1 addition & 8 deletions statsmodels/nonparametric/kde.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,13 +265,6 @@ def evaluate(self, point):
return self.kernel.density(self.endog, point)


class KDE(KDEUnivariate):
def __init__(self, endog):
self.endog = np.asarray(endog)
warnings.warn("KDE is deprecated and will be removed in 0.6, "
"use KDEUnivariate instead", FutureWarning)


#### Kernel Density Estimator Functions ####

def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
Expand Down Expand Up @@ -460,7 +453,7 @@ def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=N
X = np.asarray(X)
X = X[np.logical_and(X>clip[0], X<clip[1])] # won't work for two columns.
# will affect underlying data?

# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()

Expand Down
8 changes: 5 additions & 3 deletions statsmodels/stats/tests/test_pairwise.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

Author: Josef Perktold
"""

import warnings
from statsmodels.compat.python import BytesIO, asbytes, range
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
Expand Down Expand Up @@ -248,8 +248,10 @@ def test_incorrect_output(self):
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1] * 10)

# group_order doesn't select all observations, only one group left
assert_raises(ValueError, MultiComparison, np.array([1] * 10),
[1, 2] * 5, group_order=[1])
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
assert_raises(ValueError, MultiComparison, np.array([1] * 10),
[1, 2] * 5, group_order=[1])

# group_order doesn't select all observations,
# we do tukey_hsd with reduced set of observations
Expand Down
12 changes: 0 additions & 12 deletions statsmodels/tools/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -444,18 +444,6 @@ def fullrank(X, r=None):
value.append(V[:, order[i]])
return np.asarray(np.transpose(value)).astype(np.float64)

StepFunction = np.deprecate(StepFunction,
old_name='statsmodels.tools.tools.StepFunction',
new_name='statsmodels.distributions.StepFunction')
monotone_fn_inverter = np.deprecate(monotone_fn_inverter,
old_name='statsmodels.tools.tools'
'.monotone_fn_inverter',
new_name='statsmodels.distributions'
'.monotone_fn_inverter')
ECDF = np.deprecate(ECDF,
old_name='statsmodels.tools.tools.ECDF',
new_name='statsmodels.distributions.ECDF')


def unsqueeze(data, axis, oldshape):
"""
Expand Down
41 changes: 8 additions & 33 deletions statsmodels/tsa/arima_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,8 +58,7 @@
The endogenous variable.
order : iterable
The (p,q) order of the model for the number of AR parameters,
differences, and MA parameters to use. Though optional, the order
keyword in fit is deprecated and it is recommended to give order here.
differences, and MA parameters to use.
exog : array-like, optional
        An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
Expand Down Expand Up @@ -441,19 +440,14 @@ class ARMA(tsbase.TimeSeriesModel):
"extra_sections" : _armax_notes %
{"Model" : "ARMA"}}

def __init__(self, endog, order=None, exog=None, dates=None, freq=None,
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
super(ARMA, self).__init__(endog, exog, dates, freq, missing=missing)
exog = self.data.exog # get it after it's gone through processing
if order is None:
import warnings
warnings.warn("In the next release order will not be optional "
"in the model constructor.", FutureWarning)
else:
_check_estimable(len(self.endog), sum(order))
self.k_ar = k_ar = order[0]
self.k_ma = k_ma = order[1]
self.k_lags = max(k_ar, k_ma+1)
_check_estimable(len(self.endog), sum(order))
self.k_ar = k_ar = order[0]
self.k_ma = k_ma = order[1]
self.k_lags = max(k_ar, k_ma+1)
if exog is not None:
if exog.ndim == 1:
exog = exog[:, None]
Expand Down Expand Up @@ -806,7 +800,7 @@ def loglike_css(self, params, set_sigma2=True):
llf = -nobs/2.*(log(2*pi) + log(sigma2)) - ssr/(2*sigma2)
return llf

def fit(self, order=None, start_params=None, trend='c', method="css-mle",
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Expand Down Expand Up @@ -877,25 +871,6 @@ def fit(self, order=None, start_params=None, trend='c', method="css-mle",
r, order = 'F')

"""
if order is not None:
import warnings
warnings.warn("The order argument to fit is deprecated. "
"Please use the model constructor argument order. "
"This will overwrite any order given in the model "
"constructor.", FutureWarning)

_check_estimable(len(self.endog), sum(order))
# get model order and constants
self.k_ar = int(order[0])
self.k_ma = int(order[1])
self.k_lags = max(k_ar, k_ma + 1)
else:
try:
assert hasattr(self, "k_ar")
assert hasattr(self, "k_ma")
except:
raise ValueError("Please give order to the model constructor "
"before calling fit.")
k_ar = self.k_ar
k_ma = self.k_ma

Expand Down Expand Up @@ -1123,7 +1098,7 @@ def fit(self, start_params=None, trend='c', method="css-mle",
r, order = 'F')

"""
arima_fit = super(ARIMA, self).fit(None, start_params, trend,
arima_fit = super(ARIMA, self).fit(start_params, trend,
method, transparams, solver,
maxiter, full_output, disp,
callback, **kwargs)
Expand Down
Loading