MAINT: Relax test tolerance for future compat
Relax very tight test coverage
bashtage committed Aug 5, 2020
1 parent 257fae3 commit 11e0a6f
Showing 20 changed files with 137 additions and 161 deletions.
38 changes: 19 additions & 19 deletions statsmodels/base/model.py
@@ -1,6 +1,7 @@
 from statsmodels.compat.python import lzip
 
 from functools import reduce
+import warnings
 
 import numpy as np
 from scipy import stats
@@ -543,9 +544,8 @@ def hess(params, *args):
                 Hinv = eigvecs.dot(np.diag(1.0 / eigvals)).dot(eigvecs.T)
                 Hinv = np.asfortranarray((Hinv + Hinv.T) / 2.0)
             else:
-                from warnings import warn
-                warn('Inverting hessian failed, no bse or cov_params '
-                     'available', HessianInversionWarning)
+                warnings.warn('Inverting hessian failed, no bse or cov_params '
+                              'available', HessianInversionWarning)
                 Hinv = None
 
         if 'cov_type' in kwargs:
@@ -562,10 +562,10 @@ def hess(params, *args):
         mlefit.mle_retvals = retvals
         if isinstance(retvals, dict):
             if warn_convergence and not retvals['converged']:
-                from warnings import warn
                 from statsmodels.tools.sm_exceptions import ConvergenceWarning
-                warn("Maximum Likelihood optimization failed to converge. "
-                     "Check mle_retvals", ConvergenceWarning)
+                warnings.warn("Maximum Likelihood optimization failed to "
+                              "converge. Check mle_retvals",
+                              ConvergenceWarning)
 
         mlefit.mle_settings = optim_settings
         return mlefit
@@ -967,7 +967,6 @@ def fit(self, start_params=None, method='nm', maxiter=500, full_output=1,
                                         for i in range(-k_miss)])
             else:
                 # I do not want to raise after we have already fit()
-                import warnings
                 warnings.warn('more exog_names than parameters', ValueWarning)
 
         return genericmlefit
@@ -1084,7 +1083,6 @@ def predict(self, exog=None, transform=True, *args, **kwargs):
                        '{0}'.format(str(str(exc))))
                 raise exc.__class__(msg)
             if orig_exog_len > len(exog) and not is_dict:
-                import warnings
                 if exog_index is None:
                     warnings.warn('nan values have been dropped', ValueWarning)
                 else:
@@ -1353,24 +1351,30 @@ def bse(self):
             bse_ = np.empty(len(self.params))
             bse_[:] = np.nan
         else:
-            bse_ = np.sqrt(np.diag(self.cov_params()))
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore", RuntimeWarning)
+                bse_ = np.sqrt(np.diag(self.cov_params()))
         return bse_
 
     @cached_value
     def tvalues(self):
         """
         Return the t-statistic for a given parameter estimate.
         """
-        return self.params / self.bse
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", RuntimeWarning)
+            return self.params / self.bse
 
     @cached_value
     def pvalues(self):
         """The two-tailed p values for the t-stats of the params."""
-        if self.use_t:
-            df_resid = getattr(self, 'df_resid_inference', self.df_resid)
-            return stats.t.sf(np.abs(self.tvalues), df_resid) * 2
-        else:
-            return stats.norm.sf(np.abs(self.tvalues)) * 2
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", RuntimeWarning)
+            if self.use_t:
+                df_resid = getattr(self, 'df_resid_inference', self.df_resid)
+                return stats.t.sf(np.abs(self.tvalues), df_resid) * 2
+            else:
+                return stats.norm.sf(np.abs(self.tvalues)) * 2
 
     def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
                    other=None):
@@ -1563,7 +1567,6 @@ def t_test(self, r_matrix, cov_p=None, scale=None, use_t=None):
         ==============================================================================
         """
         if scale is not None:
-            import warnings
             warnings.warn('scale is has no effect and is deprecated. It will'
                           'be removed in the next version.',
                           DeprecationWarning)
@@ -1715,7 +1718,6 @@ def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
         <F test: F=array([[ 144.17976065]]), p=6.322026217355609e-08, df_denom=9, df_num=3>
         """
         if scale != 1.0:
-            import warnings
             warnings.warn('scale is has no effect and is deprecated. It will'
                           'be removed in the next version.',
                           DeprecationWarning)
@@ -1786,7 +1788,6 @@ def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,
         where the rank of the covariance of the noise is not full.
         """
         if scale != 1.0:
-            import warnings
             warnings.warn('scale is has no effect and is deprecated. It will'
                           'be removed in the next version.',
                           DeprecationWarning)
@@ -1828,7 +1829,6 @@ def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,
             invcov = np.linalg.pinv(cov_p)
             J_ = np.linalg.matrix_rank(cov_p)
             if J_ < J:
-                import warnings
                 warnings.warn('covariance of constraints does not have full '
                               'rank. The number of constraints is %d, but '
                               'rank is %d' % (J, J_), ValueWarning)
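The catch_warnings blocks added to bse, tvalues and pvalues above are the heart of the change: a parameter that is constrained to zero or dropped for collinearity has a standard error of exactly 0, so params / bse (and the scipy.stats calls on the resulting NaNs) emit NumPy RuntimeWarnings. A minimal self-contained sketch of the pattern, with made-up params/bse values rather than statsmodels results:

import warnings

import numpy as np

params = np.array([1.5, 0.0])
bse = np.array([0.5, 0.0])  # zero std. error for a constrained parameter

with warnings.catch_warnings():
    # Silence only RuntimeWarning (0/0 -> nan) inside this block; warnings of
    # any other category raised here would still reach the caller.
    warnings.simplefilter("ignore", RuntimeWarning)
    tvalues = params / bse

print(tvalues)  # [ 3. nan], printed without a RuntimeWarning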
18 changes: 4 additions & 14 deletions statsmodels/base/tests/test_generic_methods.py
@@ -109,19 +109,14 @@ def test_zero_constrained(self):
         assert_equal(res1.bse[drop_index], 0)
         # OSX has many slight failures on this test
         tol = 1e-8 if PLATFORM_OSX else 1e-10
-        with pytest.warns(RuntimeWarning, match="invalid value encountered"):
-            # division by zero in bse
-            tvals1 = res1.tvalues[keep_index_p]
+        tvals1 = res1.tvalues[keep_index_p]
         assert_allclose(tvals1, res2.tvalues, rtol=tol, atol=tol)
 
         # See gh5993
         if PLATFORM_LINUX32 or SCIPY_GT_14:
             pvals1 = res1.pvalues[keep_index_p]
         else:
-            with pytest.warns(RuntimeWarning,
-                              match="invalid value encountered"):
-                # passing NaN into scipy.stats functions
-                pvals1 = res1.pvalues[keep_index_p]
+            pvals1 = res1.pvalues[keep_index_p]
         assert_allclose(pvals1, res2.pvalues, rtol=tol, atol=tol)
 
         if hasattr(res1, 'resid'):
@@ -259,19 +254,14 @@ def test_zero_collinear(self):
         assert_allclose(res1.params[drop_index], 0, rtol=1e-10)
         assert_allclose(res1.bse[keep_index_p], res2.bse, rtol=1e-8)
         assert_allclose(res1.bse[drop_index], 0, rtol=1e-10)
-        with pytest.warns(RuntimeWarning, match="invalid value"):
-            # zero in bse, so division by zero warning
-            tvals1 = res1.tvalues[keep_index_p]
+        tvals1 = res1.tvalues[keep_index_p]
         assert_allclose(tvals1, res2.tvalues, rtol=5e-8)
 
         # See gh5993
         if PLATFORM_LINUX32 or SCIPY_GT_14:
             pvals1 = res1.pvalues[keep_index_p]
         else:
-            with pytest.warns(RuntimeWarning,
-                              match="invalid value encountered"):
-                # passing NaN into scipy.stats functions
-                pvals1 = res1.pvalues[keep_index_p]
+            pvals1 = res1.pvalues[keep_index_p]
         assert_allclose(pvals1, res2.pvalues, rtol=1e-6, atol=1e-30)
 
         if hasattr(res1, 'resid'):
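The pytest.warns wrappers above are removed rather than relaxed because pytest.warns(RuntimeWarning) fails with "DID NOT WARN" once the results properties suppress the warning internally. A self-contained illustration of that behaviour; quiet_divide is a hypothetical stand-in for the new properties, not statsmodels code:

import warnings

import numpy as np


def quiet_divide(a, b):
    # Hypothetical helper mirroring the new behaviour: the division warning
    # is silenced inside, so callers never observe a RuntimeWarning.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        return a / b


def test_quiet_divide():
    # Wrapping the call in pytest.warns(RuntimeWarning) would now fail;
    # asserting on the returned value is the appropriate check.
    result = quiet_divide(np.array([0.0]), np.array([0.0]))
    assert np.isnan(result).all()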
2 changes: 1 addition & 1 deletion statsmodels/nonparametric/linbin.pyx
@@ -20,7 +20,7 @@ def fast_linbin(np.ndarray[DOUBLE] X, double a, double b, int M, int trunc=1):
         Py_ssize_t i, li_i
         int nobs = X.shape[0]
         double delta = (b - a)/(M - 1)
-        np.ndarray[DOUBLE] gcnts = np.zeros(M, np.float)
+        np.ndarray[DOUBLE] gcnts = np.zeros(M, float)
         np.ndarray[DOUBLE] lxi = (X - a)/delta
         np.ndarray[INT] li = lxi.astype(int)
         np.ndarray[DOUBLE] rem = lxi - li
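This is the "future compat" part of the commit title: np.float is only an alias for the Python builtin float (a 64-bit dtype) and the alias is deprecated in newer NumPy releases, so passing the builtin is equivalent and warning-free. A small sketch of the equivalence in plain Python rather than Cython:

import numpy as np

# np.zeros(M, float) produces exactly the array the old np.zeros(M, np.float)
# call did, because np.float was merely an alias for the builtin float.
gcnts = np.zeros(5, float)
assert gcnts.dtype == np.float64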
5 changes: 4 additions & 1 deletion statsmodels/regression/dimred.py
@@ -1,8 +1,11 @@
+import warnings
+
 import numpy as np
 import pandas as pd
 
 from statsmodels.base import model
 import statsmodels.base.wrapper as wrap
+from statsmodels.tools.sm_exceptions import ConvergenceWarning
 
 
 class _DimReductionRegression(model.Model):
@@ -688,7 +691,7 @@ def fit(self, start_params=None, maxiter=200, gtol=1e-4):
             g = self.score(params.ravel())
             gn = np.sqrt(np.sum(g * g))
             msg = "CovReduce optimization did not converge, |g|=%f" % gn
-            warnings.warn(msg)
+            warnings.warn(msg, ConvergenceWarning)
 
         results = DimReductionResults(self, params, eigs=None)
         results.llf = llf
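Giving the warning an explicit category means the non-convergence message is no longer a bare UserWarning, so callers and test suites can filter or escalate it precisely. A short sketch of why that matters; the message text here is just an example:

import warnings

from statsmodels.tools.sm_exceptions import ConvergenceWarning

with warnings.catch_warnings():
    # Escalate only convergence problems to errors; unrelated warnings raised
    # in the same block are left alone.
    warnings.simplefilter("error", ConvergenceWarning)
    try:
        warnings.warn("CovReduce optimization did not converge, |g|=0.1",
                      ConvergenceWarning)
    except ConvergenceWarning:
        print("convergence failure surfaced as an exception")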
11 changes: 6 additions & 5 deletions statsmodels/stats/dist_dependence_measures.py
@@ -14,13 +14,14 @@
     Annals of Statistics, Vol. 35 No. 6, pp. 2769-2794.
 """
-import numpy as np
-import warnings
 from collections import namedtuple
+import warnings
 
+import numpy as np
 from scipy.spatial.distance import pdist, squareform
 from scipy.stats import norm
 
+from statsmodels.tools.sm_exceptions import HypothesisTestWarning
 
 DistDependStat = namedtuple(
     "DistDependStat",
@@ -131,10 +132,10 @@ def distance_covariance_test(x, y, B=None, method="auto"):
     # to the asymptotic approximation.
     if chosen_method == "emp" and pval in [0, 1]:
         msg = (
-            "p-value was {} when using the empirical method. ".format(pval)
-            + "The asymptotic approximation will be used instead"
+            f"p-value was {pval} when using the empirical method. "
+            "The asymptotic approximation will be used instead"
         )
-        warnings.warn(msg)
+        warnings.warn(msg, HypothesisTestWarning)
         _, pval = _asymptotic_pvalue(stats)
 
     return test_statistic, pval, chosen_method
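Besides attaching the HypothesisTestWarning category, the message is rebuilt with an f-string instead of str.format plus concatenation; both forms produce identical text, as this small check shows (the pval value is made up):

pval = 0.0

old_msg = (
    "p-value was {} when using the empirical method. ".format(pval)
    + "The asymptotic approximation will be used instead"
)
new_msg = (
    f"p-value was {pval} when using the empirical method. "
    "The asymptotic approximation will be used instead"
)
assert old_msg == new_msg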