CLN: Fixes for future SciPy and pandas
Silence new warnings produced by pandas 1.0+
Ensure starting values respect bounds in HoltWinters
bashtage committed Jan 15, 2020
1 parent 76da0ea commit ecf5379
Showing 12 changed files with 46 additions and 23 deletions.
1 change: 1 addition & 0 deletions setup.cfg
@@ -40,6 +40,7 @@ filterwarnings =
     error:recarray support has been deprecated:FutureWarning
     error:The value returned will change to a:FutureWarning
     error:The default value of lags:FutureWarning
+    error:Support for multi-dimensional:DeprecationWarning
 markers =
     example: mark a test that runs example code
     matplotlib: mark a test that requires matplotlib
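
Note: this entry makes pytest raise the pandas 1.0 multi-dimensional-indexing deprecation as an error, so any use that slips back in fails the suite instead of printing a warning. A minimal sketch of the same escalation outside pytest (the Series and the indexing are illustrative; under pandas 1.0 the tuple key emits the targeted DeprecationWarning):

    import warnings

    import pandas as pd

    # Escalate the same warning the new setup.cfg entry targets.
    warnings.filterwarnings(
        "error",
        message="Support for multi-dimensional",
        category=DeprecationWarning,
    )

    ser = pd.Series([1.0, 2.0, 3.0])
    try:
        ser[None, :]  # multi-dim key on a Series: deprecated in pandas 1.0
    except DeprecationWarning as exc:
        print("escalated to error:", exc)
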
2 changes: 1 addition & 1 deletion statsmodels/base/data.py
@@ -587,7 +587,7 @@ def attach_dates(self, result):
         squeezed = result.squeeze()
         k_endog = np.array(self.ynames, ndmin=1).shape[0]
         if k_endog > 1 and squeezed.shape == (k_endog,):
-            squeezed = squeezed[None, :]
+            squeezed = np.asarray(squeezed)[None, :]
         # May be zero-dim, for example in the case of forecast one step in tsa
         if squeezed.ndim < 2:
             return Series(squeezed, index=self.predict_dates)
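
Note: pandas 1.0 deprecates multi-dimensional keys on Series, so `squeezed[None, :]` warns whenever `squeeze()` returns a Series; indexing the underlying ndarray keeps the row-vector reshape warning-free. A sketch of the before/after (names illustrative):

    import numpy as np
    import pandas as pd

    squeezed = pd.Series([1.0, 2.0, 3.0])

    # Deprecated under pandas 1.0+:
    #   row = squeezed[None, :]
    # The committed fix: convert to an ndarray before indexing.
    row = np.asarray(squeezed)[None, :]
    print(row.shape)  # (1, 3)
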
5 changes: 3 additions & 2 deletions statsmodels/base/tests/test_generic_methods.py
@@ -13,6 +13,7 @@
 from statsmodels.compat.pandas import assert_series_equal, assert_index_equal
 from statsmodels.compat.platform import (PLATFORM_OSX, PLATFORM_LINUX32,
                                          PLATFORM_WIN32)
+from statsmodels.compat.scipy import SCIPY_GT_14

 import numpy as np
 import pandas as pd
@@ -114,7 +115,7 @@ def test_zero_constrained(self):
         assert_allclose(tvals1, res2.tvalues, rtol=tol, atol=tol)

         # See gh5993
-        if PLATFORM_LINUX32:
+        if PLATFORM_LINUX32 or SCIPY_GT_14:
             pvals1 = res1.pvalues[keep_index_p]
         else:
             with pytest.warns(RuntimeWarning,
@@ -264,7 +265,7 @@ def test_zero_collinear(self):
         assert_allclose(tvals1, res2.tvalues, rtol=5e-8)

         # See gh5993
-        if PLATFORM_LINUX32:
+        if PLATFORM_LINUX32 or SCIPY_GT_14:
             pvals1 = res1.pvalues[keep_index_p]
         else:
             with pytest.warns(RuntimeWarning,
1 change: 1 addition & 0 deletions statsmodels/compat/scipy.py
@@ -6,6 +6,7 @@
 SCIPY_11 = (LooseVersion(scipy.__version__) < LooseVersion('1.2.0') and
             LooseVersion(scipy.__version__) >= LooseVersion('1.1.0'))

+SCIPY_GT_14 = LooseVersion(scipy.__version__) >= LooseVersion('1.5')

 def _next_regular(target):
     """
8 changes: 6 additions & 2 deletions statsmodels/graphics/factorplots.py
@@ -130,14 +130,18 @@ def interaction_plot(x, trace, response, func=np.mean, ax=None, plottype='b',
         for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
             # trace label
             label = str(group['trace'].values[0])
-            ax.plot(group['x'], group['response'], color=colors[i],
+            x = np.asarray(group['x'])
+            response = np.asarray(group['response'])
+            ax.plot(x, response, color=colors[i],
                     marker=markers[i], label=label,
                     linestyle=linestyles[i], **kwargs)
     elif plottype == 'line' or plottype == 'l':
         for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
             # trace label
             label = str(group['trace'].values[0])
-            ax.plot(group['x'], group['response'], color=colors[i],
+            x = np.asarray(group['x'])
+            response = np.asarray(group['response'])
+            ax.plot(x, response, color=colors[i],
                     label=label, linestyle=linestyles[i], **kwargs)
     elif plottype == 'scatter' or plottype == 's':
         for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
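
Note: most of the plotting hunks in this commit share one pattern: materialize pandas columns as ndarrays before handing them to `ax.plot`. At the time, matplotlib's 1-D input check indexed its arguments with a multi-dimensional key, which pandas 1.0 deprecates on Series, so plotting plain arrays silences the warning at the source. A sketch of the pattern (data and column names illustrative):

    import matplotlib.pyplot as plt
    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"x": [1, 2, 3], "response": [2.0, 4.0, 8.0]})

    fig, ax = plt.subplots()
    # Convert up front so matplotlib only ever sees ndarrays.
    ax.plot(np.asarray(df["x"]), np.asarray(df["response"]), marker="o")
    plt.show()
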
16 changes: 9 additions & 7 deletions statsmodels/graphics/regressionplots.py
@@ -406,15 +406,17 @@ def plot_partregress(endog, exog_i, exog_others, data=None,
     # all arrays or pandas-like

     if RHS_isemtpy:
+        endog = np.asarray(endog)
+        exog_i = np.asarray(exog_i)
         ax.plot(endog, exog_i, 'o', **kwargs)
         fitted_line = OLS(endog, exog_i).fit()
         x_axis_endog_name = 'x' if isinstance(exog_i, np.ndarray) else exog_i.name
         y_axis_endog_name = 'y' if isinstance(endog, np.ndarray) else endog.design_info.column_names[0]
     else:
         res_yaxis = OLS(endog, RHS).fit()
         res_xaxis = OLS(exog_i, RHS).fit()
-        xaxis_resid = res_xaxis.resid
-        yaxis_resid = res_yaxis.resid
+        xaxis_resid = np.asarray(res_xaxis.resid)
+        yaxis_resid = np.asarray(res_yaxis.resid)
         x_axis_endog_name = res_xaxis.model.endog_names
         y_axis_endog_name = res_yaxis.model.endog_names
         ax.plot(xaxis_resid, yaxis_resid, 'o', **kwargs)
@@ -428,11 +430,11 @@ def plot_partregress(endog, exog_i, exog_others, data=None,
     ax.set_ylabel("e(%s | X)" % y_axis_endog_name)
     ax.set_title('Partial Regression Plot', **title_kwargs)

-    #NOTE: if we want to get super fancy, we could annotate if a point is
-    #clicked using this widget
-    #http://stackoverflow.com/questions/4652439/
-    #is-there-a-matplotlib-equivalent-of-matlabs-datacursormode/
-    #4674445#4674445
+    # NOTE: if we want to get super fancy, we could annotate if a point is
+    # clicked using this widget
+    # http://stackoverflow.com/questions/4652439/
+    # is-there-a-matplotlib-equivalent-of-matlabs-datacursormode/
+    # 4674445#4674445
     if obs_labels is True:
         if data is not None:
             obs_labels = data.index
4 changes: 2 additions & 2 deletions statsmodels/graphics/tests/test_regressionplots.py
@@ -140,9 +140,9 @@ class TestPlotFormula(TestPlotPandas):
     def test_one_column_exog(self, close_figures):
         from statsmodels.formula.api import ols
         res = ols("y~var1-1", data=self.data).fit()
-        fig = plot_regress_exog(res, "var1")
+        plot_regress_exog(res, "var1")
         res = ols("y~var1", data=self.data).fit()
-        fig = plot_regress_exog(res, "var1")
+        plot_regress_exog(res, "var1")


 class TestABLine(object):
2 changes: 1 addition & 1 deletion statsmodels/sandbox/predict_functional.py
@@ -426,7 +426,7 @@ def _glm_basic_scr(result, exog, alpha):
     # Calculate kappa_0 (formula 42 from Sun et al)
     bz = np.linalg.solve(B.T, exog.T).T
     bz /= np.sqrt(n)
-    bz /= sigma[:, None]
+    bz /= np.asarray(sigma)[:, None]
     bzd = np.diff(bz, 1, axis=0)
     bzdn = (bzd**2).sum(1)
     kappa_0 = np.sqrt(bzdn).sum()
7 changes: 7 additions & 0 deletions statsmodels/sandbox/tests/test_predict_functional.py
@@ -63,6 +63,8 @@ def test_formula(self, close_figures):
         plt.clf()
         fig = plt.figure()
         ax = plt.axes([0.1, 0.1, 0.7, 0.8])
+        fvals1, pr1 = np.asarray(fvals1), np.asarray(pr1)
+        fvals2, pr2 = np.asarray(fvals2), np.asarray(pr2)
         plt.plot(fvals1, pr1, '-', label='x4=B')
         plt.plot(fvals2, pr2, '-', label='x4=C')
         ha, lb = ax.get_legend_handles_labels()
@@ -110,6 +112,7 @@ def test_lm_contrast(self, close_figures):
         plt.clf()
         fig = plt.figure()
         ax = plt.axes([0.1, 0.1, 0.67, 0.8])
+        fvals, pr = np.asarray(fvals), np.asarray(pr)
         plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
         plt.plot(fvals, 4 - fvals, '-', label="Truth", color='lime', lw=4)
         plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
@@ -146,6 +149,7 @@ def test_glm_formula_contrast(self, close_figures):
         plt.clf()
         fig = plt.figure()
         ax = plt.axes([0.1, 0.1, 0.67, 0.8])
+        fvals, pr = np.asarray(fvals), np.asarray(pr)
         plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
         plt.plot(fvals, 0.2 - 0.1*fvals, '-', label="Truth", color='lime', lw=4)
         plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
@@ -259,6 +263,9 @@ def test_glm_formula(self, close_figures):
         plt.clf()
         fig = plt.figure()
         ax = plt.axes([0.1, 0.1, 0.7, 0.8])
+        fvals1, pr1 = np.asarray(fvals1), np.asarray(pr1)
+        fvals2, pr2 = np.asarray(fvals2), np.asarray(pr2)
+        exact1, exact2 = np.asarray(exact1), np.asarray(exact2)
         plt.plot(fvals1, pr1, '-', label='x3=B')
         plt.plot(fvals2, pr2, '-', label='x3=C')
         plt.plot(fvals1, exact1, '-', label='x3=B (exact)')
2 changes: 1 addition & 1 deletion statsmodels/tsa/ar_model.py
@@ -2093,7 +2093,7 @@ def plot_diagnostics(self, lags=10, fig=None, figsize=None):
         hold_back = self.model.hold_back
         x = hold_back + np.arange(self.resid.shape[0])
         std_resid = resid / np.sqrt(self.sigma2)
-        ax.plot(x, std_resid)
+        ax.plot(np.asarray(x), np.asarray(std_resid))
         ax.hlines(0, x[0], x[-1], alpha=0.5)
         ax.set_xlim(x[0], x[-1])
         ax.set_title('Standardized residual')
19 changes: 14 additions & 5 deletions statsmodels/tsa/holtwinters.py
@@ -680,12 +680,13 @@ def fit(self, smoothing_level=None, smoothing_slope=None, smoothing_seasonal=Non
             # using guesstimates for the levels
             txi = xi & np.array([True, True, True, False, False, True] + [False] * m)
             txi = txi.astype(np.bool)
-            bounds = np.array([(0.0, 1.0), (0.0, 1.0), (0.0, 1.0),
-                               (0.0, None), (0.0, None), (0.0, 1.0)] + [(None, None), ] * m)
+            bounds = ([(0.0, 1.0), (0.0, 1.0), (0.0, 1.0), (0.0, None),
+                       (0.0, None), (0.0, 1.0)] + [(None, None), ] * m)
             args = (txi.astype(np.uint8), p, y, lvls, b, s, m, self.nobs,
                     max_seen)
             if start_params is None and np.any(txi) and use_brute:
-                res = brute(func, bounds[txi], args, Ns=20,
+                _bounds = [b for b, flag in zip(bounds, txi) if flag]
+                res = brute(func, _bounds, args, Ns=20,
                             full_output=True, finish=None)
                 p[txi], max_seen, _, _ = res
             else:
@@ -708,14 +709,22 @@ def fit(self, smoothing_level=None, smoothing_slope=None, smoothing_seasonal=Non
                 # Take a deeper look in the local minimum we are in to find the best
                 # solution to parameters, maybe hop around to try escape the local
                 # minimum we may be in.
+                _bounds = [b for b, flag in zip(bounds, xi) if flag]
                 res = basinhopping(func, p[xi],
-                                   minimizer_kwargs={'args': args, 'bounds': bounds[xi]},
+                                   minimizer_kwargs={'args': args, 'bounds': _bounds},
                                    stepsize=0.01)
                 success = res.lowest_optimization_result.success
             else:
                 # Take a deeper look in the local minimum we are in to find the best
                 # solution to parameters
-                res = minimize(func, p[xi], args=args, bounds=bounds[xi])
+                _bounds = [b for b, flag in zip(bounds, xi) if flag]
+                lb, ub = np.asarray(_bounds).T.astype(np.float)
+                initial_p = p[xi]
+                loc = p[xi] < lb
+                initial_p[loc] = lb[loc]
+                loc = p[xi] > ub
+                initial_p[loc] = ub[loc]
+                res = minimize(func, initial_p, args=args, bounds=_bounds)
                 success = res.success

             if not success:
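
Note: with `bounds` now a plain list, the free-parameter subset is selected with a comprehension instead of boolean array indexing, and the starting point is clipped into the feasible box before `minimize` runs; this is the "starting values respect bounds" half of the commit. A sketch of the clipping in isolation (objective and numbers illustrative; `None` bounds become nan under the float cast, and nan comparisons are False, so unbounded ends are never clipped):

    import numpy as np
    from scipy.optimize import minimize

    def func(params):
        # Stand-in objective; the real one scores HoltWinters fits.
        return float(np.sum((params - 0.3) ** 2))

    bounds = [(0.0, 1.0), (0.0, 1.0), (0.0, None)]
    p = np.array([1.2, -0.1, 0.5])     # guesses that violate the box

    lb, ub = np.asarray(bounds, dtype=object).T.astype(float)
    initial_p = p.copy()
    initial_p[p < lb] = lb[p < lb]
    initial_p[p > ub] = ub[p > ub]     # here: [1.0, 0.0, 0.5]

    res = minimize(func, initial_p, bounds=bounds)
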
2 changes: 0 additions & 2 deletions statsmodels/tsa/tests/test_holtwinters.py
@@ -323,8 +323,6 @@ def test_holt_damp_R(self):
                    447.2614880558126]
         assert_allclose(fit.forecast(10), desired, atol=1e-4)

-
-
     def test_hw_seasonal(self):
         fit1 = ExponentialSmoothing(self.aust, seasonal_periods=4,
                                     trend='additive',
