Merge pull request #6478 from bashtage/0.11.cherry
MAINT: Cherry pick recent pulls for 0.11
bashtage committed Jan 28, 2020
2 parents 2e0af07 + f8c238e commit fb05389
Showing 14 changed files with 107 additions and 55 deletions.
1 change: 1 addition & 0 deletions lint.sh
@@ -90,6 +90,7 @@ if [ "$LINT" == true ]; then
statsmodels/graphics/api.py \
statsmodels/graphics/functional.py \
statsmodels/graphics/tests/test_agreement.py \
statsmodels/graphics/tests/test_boxplots.py \
statsmodels/graphics/tests/test_correlation.py \
statsmodels/graphics/tests/test_functional.py \
statsmodels/graphics/tests/test_gofplots.py \
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -5,7 +5,7 @@ requires = [
"cython>=0.29.14",
"numpy==1.14.5; python_version=='3.5'",
"numpy==1.14.5; python_version=='3.6'",
"numpy==1.14.5; python_version>='3.7'",
"numpy==1.16.1; python_version=='3.7'",
"numpy==1.17.5; python_version>='3.8'",
"scipy>=1.0",
]
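The `requires` pins above use PEP 508 environment markers, so each interpreter builds against its oldest supported NumPy (1.16.1 for Python 3.7, 1.17.5 for 3.8 and later). One way to sanity-check such a marker is the third-party `packaging` library; this is purely illustrative and not part of the build itself:

```python
from packaging.markers import Marker

# Evaluate the same kind of marker used in pyproject.toml against a chosen environment.
m = Marker("python_version == '3.7'")
print(m.evaluate({"python_version": "3.7"}))   # True  -> the 1.16.1 pin applies
print(m.evaluate({"python_version": "3.8"}))   # False -> the >= 3.8 pin applies instead
```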
1 change: 1 addition & 0 deletions setup.cfg
@@ -42,6 +42,7 @@ filterwarnings =
error:The default value of lags:FutureWarning
error::pandas.core.common.SettingWithCopyWarning
error:non-integer arg n is deprecated:DeprecationWarning
error:Creating an ndarray:numpy.VisibleDeprecationWarning
markers =
example: mark a test that runs example code
matplotlib: mark a test that requires matplotlib
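The new `filterwarnings` entry turns NumPy's "Creating an ndarray from ragged nested sequences" `VisibleDeprecationWarning` into a test error, which appears to be what the `boxplots.py` change further down sidesteps. A toy illustration (exact behaviour depends on the NumPy version; recent releases warn, the newest raise a `ValueError` outright):

```python
import warnings
import numpy as np

ragged = [np.arange(3), np.arange(5)]        # sequences of different lengths
with warnings.catch_warnings():
    warnings.simplefilter("error")           # mimic the error:: filter above
    try:
        np.asarray(ragged)                   # implicit ragged-array creation
    except Exception as exc:
        print(type(exc).__name__)            # VisibleDeprecationWarning (or ValueError)

# Taking sizes element-wise avoids constructing an ndarray at all:
print(max(np.size(arr) for arr in ragged))   # 5
```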
12 changes: 6 additions & 6 deletions statsmodels/graphics/boxplots.py
@@ -13,8 +13,9 @@


def violinplot(data, ax=None, labels=None, positions=None, side='both',
show_boxplot=True, plot_opts={}):
"""Make a violin plot of each dataset in the `data` sequence.
show_boxplot=True, plot_opts=None):
"""
Make a violin plot of each dataset in the `data` sequence.
A violin plot is a boxplot combined with a kernel density estimate of the
probability density function per point.
@@ -123,8 +124,8 @@ def violinplot(data, ax=None, labels=None, positions=None, side='both',
.. plot:: plots/graphics_boxplot_violinplot.py
"""

if np.size(data) == 0:
plot_opts = {} if plot_opts is None else plot_opts
if max([np.size(arr) for arr in data]) == 0:
msg = "No Data to make Violin: Try again!"
raise ValueError(msg)

@@ -141,8 +142,7 @@ def violinplot(data, ax=None, labels=None, positions=None, side='both',

# Plot violins.
for pos_data, pos in zip(data, positions):
xvals, violin = _single_violin(ax, pos, pos_data, width, side,
plot_opts)
_single_violin(ax, pos, pos_data, width, side, plot_opts)

if show_boxplot:
ax.boxplot(data, notch=1, positions=positions, vert=1)
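The signature change above also replaces the mutable default `plot_opts={}` with the usual `None` sentinel: a dict default is created once at definition time and shared between calls, so the function now builds a fresh dict per call. A minimal sketch of the pattern with a hypothetical helper:

```python
def plot(data, plot_opts=None):
    # Create a per-call dict instead of reusing a single default object.
    plot_opts = {} if plot_opts is None else plot_opts
    plot_opts.setdefault('label_rotation', 0)
    return plot_opts

print(plot([1, 2, 3]))   # {'label_rotation': 0}
print(plot([1, 2, 3]))   # a fresh dict each call, not the one from the previous call
```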
82 changes: 55 additions & 27 deletions statsmodels/graphics/tests/test_boxplots.py
@@ -10,61 +10,89 @@
pass


@pytest.mark.matplotlib
def test_violinplot_beanplot(close_figures):
@pytest.fixture(scope="module")
def age_and_labels():
# Test violinplot and beanplot with the same dataset.
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]

age = [data.exog['age'][data.endog == id] for id in party_ID]
return age, labels

fig = plt.figure()
ax = fig.add_subplot(111)

@pytest.mark.matplotlib
def test_violinplot(age_and_labels, close_figures):
age, labels = age_and_labels

fig, ax = plt.subplots(1, 1)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})


fig = plt.figure()
ax = fig.add_subplot(111)
@pytest.mark.matplotlib
def test_violinplot_bw_factor(age_and_labels, close_figures):
age, labels = age_and_labels

fig, ax = plt.subplots(1, 1)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30,
'bw_factor':.2})
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30,
'bw_factor': .2})


@pytest.mark.matplotlib
def test_beanplot(age_and_labels, close_figures):
age, labels = age_and_labels

fig = plt.figure()
ax = fig.add_subplot(111)
fig, ax = plt.subplots(1, 1)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})


@pytest.mark.matplotlib
def test_beanplot_jitter(age_and_labels, close_figures):
age, labels = age_and_labels

fig = plt.figure()
ax = fig.add_subplot(111)
fig, ax = plt.subplots(1, 1)
beanplot(age, ax=ax, labels=labels, jitter=True,
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})

fig = plt.figure()
ax = fig.add_subplot(111)

@pytest.mark.matplotlib
def test_beanplot_side_right(age_and_labels, close_figures):
age, labels = age_and_labels

fig, ax = plt.subplots(1, 1)
beanplot(age, ax=ax, labels=labels, jitter=True, side='right',
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})

fig = plt.figure()
ax = fig.add_subplot(111)

@pytest.mark.matplotlib
def test_beanplot_side_left(age_and_labels, close_figures):
age, labels = age_and_labels

fig, ax = plt.subplots(1, 1)
beanplot(age, ax=ax, labels=labels, jitter=True, side='left',
plot_opts={'cutoff_val': 5, 'cutoff_type': 'abs',
'label_fontsize': 'small',
'label_rotation': 30})

fig = plt.figure()
ax = fig.add_subplot(111)

@pytest.mark.matplotlib
def test_beanplot_legend_text(age_and_labels, close_figures):
age, labels = age_and_labels

fig, ax = plt.subplots(1, 1)
beanplot(age, ax=ax, labels=labels,
plot_opts={'bean_legend_text': 'text'})
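The refactor above replaces one monolithic test with a module-scoped fixture plus one test per plotting option, so the ANES data is prepared once and each failure points at a single feature. A minimal sketch of that pattern (names are hypothetical):

```python
import pytest

@pytest.fixture(scope="module")
def shared_data():
    # Built once per test module and reused by every test that requests it.
    return list(range(7))

def test_length(shared_data):
    assert len(shared_data) == 7

def test_first_element(shared_data):
    assert shared_data[0] == 0
```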
2 changes: 1 addition & 1 deletion statsmodels/nonparametric/bandwidths.py
@@ -170,7 +170,7 @@ def select_bandwidth(x, bw, kernel):
bandwidth = bandwidth_funcs[bw](x, kernel)
if bandwidth == 0:
# eventually this can fall back on another selection criterion.
err = "Selected KDE bandwidth is 0. Cannot estiamte density."
err = "Selected KDE bandwidth is 0. Cannot estimate density."
raise RuntimeError(err)
else:
return bandwidth
3 changes: 3 additions & 0 deletions statsmodels/regression/linear_model.py
@@ -1776,6 +1776,9 @@ def fvalue(self):
@cache_readonly
def f_pvalue(self):
"""The p-value of the F-statistic."""
# Special case for df_model 0
if self.df_model == 0:
return np.full_like(self.fvalue, np.nan)
return stats.f.sf(self.fvalue, self.df_model, self.df_resid)

@cache_readonly
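With a constant-only design, `df_model` is 0 and the F-test is undefined, so `f_pvalue` now short-circuits to NaN instead of calling `stats.f.sf` with zero degrees of freedom; `np.full_like(self.fvalue, np.nan)` presumably keeps the result the same shape and dtype as the F-statistic. A small sketch of the user-facing behaviour, mirroring the new test in `test_regression.py` below:

```python
import numpy as np
from statsmodels.regression.linear_model import OLS

y = np.random.standard_normal(200)
x = np.ones((200, 1))            # constant only, so df_model == 0
res = OLS(y, x).fit()

print(res.fvalue)                # nan -- the F-statistic is undefined here
print(res.f_pvalue)              # nan, without a divide-by-zero RuntimeWarning
```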
10 changes: 10 additions & 0 deletions statsmodels/regression/tests/test_regression.py
@@ -1351,3 +1351,13 @@ def test_bool_regressor(reset_randomstate):
bool_res = OLS(endog, exog).fit()
res = OLS(endog, exog.astype(np.double)).fit()
assert_allclose(bool_res.params, res.params)


def test_ols_constant(reset_randomstate):
y = np.random.standard_normal((200))
x = np.ones((200, 1))
res = OLS(y, x).fit()
with pytest.warns(None) as recording:
assert np.isnan(res.fvalue)
assert np.isnan(res.f_pvalue)
assert len(recording) == 0
9 changes: 6 additions & 3 deletions statsmodels/stats/libqsturng/qsturng_.py
@@ -821,20 +821,23 @@ def _psturng(q, r, v):
if q < 0.:
raise ValueError('q should be >= 0')

opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)
def opt_func(p, r, v):
return np.squeeze(abs(_qsturng(p, r, v) - q))

if v == 1:
if q < _qsturng(.9, r, 1):
return .1
elif q > _qsturng(.999, r, 1):
return .001
return 1. - fminbound(opt_func, .9, .999, args=(r,v))
soln = 1. - fminbound(opt_func, .9, .999, args=(r,v))
return np.atleast_1d(soln)
else:
if q < _qsturng(.1, r, v):
return .9
elif q > _qsturng(.999, r, v):
return .001
return 1. - fminbound(opt_func, .1, .999, args=(r,v))
soln = 1. - fminbound(opt_func, .1, .999, args=(r,v))
return np.atleast_1d(soln)

_vpsturng = np.vectorize(_psturng)
_vpsturng.__doc__ = """vector version of psturng"""
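`fminbound` returns a plain scalar, so wrapping the result in `np.atleast_1d` gives `_psturng` a consistent one-dimensional return type, while the `np.squeeze` inside `opt_func` keeps the objective scalar even if `_qsturng` hands back an array. A toy sketch of the types involved (the objective below is a stand-in, not the real `_qsturng` inversion):

```python
import numpy as np
from scipy.optimize import fminbound

def opt_func(p):
    # Hypothetical objective: minimise the distance to 0.95.
    return np.squeeze(abs(p - 0.95))

soln = fminbound(opt_func, 0.9, 0.999)   # a plain Python float
print(np.atleast_1d(1.0 - soln))         # array([0.05...]) -- always one-dimensional
```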
9 changes: 6 additions & 3 deletions statsmodels/stats/tests/test_pairwise.py
@@ -258,11 +258,14 @@ def setup_class(cls):

def test_incorrect_output(self):
# too few groups
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1, 2] * 4)
with pytest.raises(ValueError):
MultiComparison(np.array([1] * 10), [1, 2] * 4)
# too many groups
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1, 2] * 6)
with pytest.raises(ValueError):
MultiComparison(np.array([1] * 10), [1, 2] * 6)
# just one group
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1] * 10)
with pytest.raises(ValueError):
MultiComparison(np.array([1] * 10), [1] * 10)

# group_order does not select all observations, only one group left
with warnings.catch_warnings(record=True) as w:
@@ -38,10 +38,9 @@ def test_brockwell_davis_ex533():
# through by sigma^2
arma_process_acovf /= sigma2
unconditional_variance /= sigma2
out = np.array(_arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf))
acovf = np.array(out[0])
acovf2 = np.array(out[1])
transformed_acovf = _arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf)
acovf, acovf2 = (np.array(arr) for arr in transformed_acovf)

# `acovf` is an m^2 x m^2 matrix, where m = max(p, q)
# but it is only valid for the autocovariances of the first m observations
@@ -144,10 +143,9 @@ def test_brockwell_davis_ex534():
[7.17133, 6.44139, 5.06027], atol=1e-5)

# Next, get the autocovariance of the transformed process
out = np.array(_arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf))
acovf = np.array(out[0])
acovf2 = np.array(out[1])
transformed_acovf = _arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf)
acovf, acovf2 = (np.array(arr) for arr in transformed_acovf)
# See test_brockwell_davis_ex533 for details on acovf vs acovf2

# Test acovf
@@ -226,8 +224,9 @@ def test_innovations_algo_filter_kalman_filter(ar_params, ma_params, sigma2):

# Innovations algorithm approach
arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
acovf, acovf2 = np.array(_arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf / sigma2))
transformed_acov = _arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf / sigma2)
acovf, acovf2 = (np.array(mv) for mv in transformed_acov)
theta, r = _arma_innovations.darma_innovations_algo_fast(
nobs, ar_params, ma_params, acovf, acovf2)
u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
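The test changes above stop wrapping the whole return value in a single `np.array` call and instead convert each element separately; assuming the Cython routine returns two buffers of different shapes, building one ndarray from the pair would hit the same ragged-array deprecation guarded against in `setup.cfg`. A minimal sketch with stand-in data:

```python
import numpy as np

# Stand-ins for the two buffers the routine is assumed to return.
transformed_acovf = (np.zeros((4, 4)), np.zeros((2, 3)))

# Converting each element separately keeps both arrays intact.
acovf, acovf2 = (np.array(arr) for arr in transformed_acovf)
print(acovf.shape, acovf2.shape)   # (4, 4) (2, 3)

# np.array(transformed_acovf) would instead try to combine the two shapes
# into one ndarray, which newer NumPy deprecates or rejects.
```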
2 changes: 1 addition & 1 deletion statsmodels/tsa/tests/test_arima.py
@@ -2686,7 +2686,7 @@ def test_arma_repeated_fit():
arma = ARMA(x, (1, 1))
res = arma.fit(trend='c', disp=-1)
repeat = arma.fit(trend='c', disp=-1)
rtol = 1e-5 if PLATFORM_WIN32 else 1e-7
rtol = 1e-4 if PLATFORM_WIN32 else 1e-7
assert_allclose(res.params, repeat.params, rtol=rtol)
assert isinstance(res.summary().as_text(), str)
assert isinstance(repeat.summary().as_text(), str)
4 changes: 3 additions & 1 deletion tools/ci/azure_template.yml
@@ -22,6 +22,8 @@ jobs:
python.version: '3.6'
Python37:
python.version: '3.7'
Python38:
python.version: '3.8'
maxParallel: 4

steps:
@@ -37,7 +39,7 @@
displayName: 'Install dependencies'
- script: |
python setup.py build_ext --inplace -q
pip install -e .
displayName: 'Build Cython Extensions'
- script: |
6 changes: 4 additions & 2 deletions tools/ci/docbuild.sh
@@ -65,9 +65,11 @@ if [[ -z "$TRAVIS_TAG" ]]; then
doctr deploy --built-docs docs/build/html/ --deploy-repo statsmodels/statsmodels.github.io devel > /dev/null;
else
if [[ "$TRAVIS_TAG" != *"dev"* ]]; then # do not push on dev tags
doctr deploy --build-tags --built-docs docs/build/html/ --deploy-repo statsmodels/statsmodels.github.io "$TRAVIS_TAG";
echo doctr deploy --build-tags --built-docs docs/build/html/ --deploy-repo statsmodels/statsmodels.github.io "$TRAVIS_TAG" > /dev/null;
doctr deploy --build-tags --built-docs docs/build/html/ --deploy-repo statsmodels/statsmodels.github.io "$TRAVIS_TAG" > /dev/null;
if [[ "$TRAVIS_TAG" != *"rc"* ]]; then # do not push on main on rc
doctr deploy --build-tags --built-docs docs/build/html/ --deploy-repo statsmodels/statsmodels.github.io stable;
echo doctr deploy --build-tags --built-docs docs/build/html/ --deploy-repo statsmodels/statsmodels.github.io stable > /dev/null;
doctr deploy --build-tags --built-docs docs/build/html/ --deploy-repo statsmodels/statsmodels.github.io stable > /dev/null;
fi;
fi;
fi;
