From f248fe9810c3b134986ec5504161d25f33af23d3 Mon Sep 17 00:00:00 2001
From: Brock Mendel
Date: Thu, 23 May 2019 17:24:43 -0700
Subject: [PATCH 1/4] CLN: missing whitespace after punctuation in statespace/tests

---
 .../tsa/statespace/tests/test_collapsed.py    |  26 +-
 .../tsa/statespace/tests/test_concentrated.py |   2 +-
 .../statespace/tests/test_dynamic_factor.py   |   2 +-
 .../tests/test_exact_diffuse_filtering.py     |  10 +-
 .../tsa/statespace/tests/test_mlemodel.py     |  44 +-
 .../statespace/tests/test_representation.py   |  94 ++--
 .../tsa/statespace/tests/test_sarimax.py      | 422 +++++++++---------
 7 files changed, 300 insertions(+), 300 deletions(-)

diff --git a/statsmodels/tsa/statespace/tests/test_collapsed.py b/statsmodels/tsa/statespace/tests/test_collapsed.py
index 181a2261df5..1a9e5e318d0 100644
--- a/statsmodels/tsa/statespace/tests/test_collapsed.py
+++ b/statsmodels/tsa/statespace/tests/test_collapsed.py
@@ -82,20 +82,20 @@ def test_using_collapsed(self):

     def test_forecasts(self):
         assert_allclose(
-            self.results_a.forecasts[0,:],
-            self.results_b.forecasts[0,:],
+            self.results_a.forecasts[0, :],
+            self.results_b.forecasts[0, :],
         )

     def test_forecasts_error(self):
         assert_allclose(
-            self.results_a.forecasts_error[0,:],
-            self.results_b.forecasts_error[0,:]
+            self.results_a.forecasts_error[0, :],
+            self.results_b.forecasts_error[0, :]
         )

     def test_forecasts_error_cov(self):
         assert_allclose(
-            self.results_a.forecasts_error_cov[0,0,:],
-            self.results_b.forecasts_error_cov[0,0,:]
+            self.results_a.forecasts_error_cov[0, 0, :],
+            self.results_b.forecasts_error_cov[0, 0, :]
         )

     def test_filtered_state(self):
@@ -454,7 +454,7 @@ def setup_class(cls, which='mixed', *args, **kwargs):
         # Data
         dta = datasets.macrodata.load_pandas().data
         dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS')
-        obs = np.log(dta[['realgdp','realcons','realinv']]).diff().iloc[1:] * 400
+        obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:] * 400

         if which == 'all':
             obs.iloc[:50, :] = np.nan
@@ -526,20 +526,20 @@ def test_using_collapsed(self):

     def test_forecasts(self):
         assert_allclose(
-            self.results_a.forecasts[0,:],
-            self.results_b.forecasts[0,:],
+            self.results_a.forecasts[0, :],
+            self.results_b.forecasts[0, :],
         )

     def test_forecasts_error(self):
         assert_allclose(
-            self.results_a.forecasts_error[0,:],
-            self.results_b.forecasts_error[0,:]
+            self.results_a.forecasts_error[0, :],
+            self.results_b.forecasts_error[0, :]
         )

     def test_forecasts_error_cov(self):
         assert_allclose(
-            self.results_a.forecasts_error_cov[0,0,:],
-            self.results_b.forecasts_error_cov[0,0,:]
+            self.results_a.forecasts_error_cov[0, 0, :],
+            self.results_b.forecasts_error_cov[0, 0, :]
        )

     def test_filtered_state(self):
diff --git a/statsmodels/tsa/statespace/tests/test_concentrated.py b/statsmodels/tsa/statespace/tests/test_concentrated.py
index 94536d961d9..4fd35a49d02 100644
--- a/statsmodels/tsa/statespace/tests/test_concentrated.py
+++ b/statsmodels/tsa/statespace/tests/test_concentrated.py
@@ -164,7 +164,7 @@ def check_concentrated_scale(filter_univariate=False, missing=False, **kwargs):
     dta['dln_inc'] = np.log(dta['inc']).diff()
     dta['dln_consump'] = np.log(dta['consump']).diff()

-    endog = dta.loc['1960-04-01':'1978-10-01',['dln_inv', 'dln_inc']]
+    endog = dta.loc['1960-04-01':'1978-10-01', ['dln_inv', 'dln_inc']]

     # Optionally add some missing observations
     if missing:
diff --git a/statsmodels/tsa/statespace/tests/test_dynamic_factor.py b/statsmodels/tsa/statespace/tests/test_dynamic_factor.py
index e58872e6aca..077da271212 100644
--- a/statsmodels/tsa/statespace/tests/test_dynamic_factor.py
+++ b/statsmodels/tsa/statespace/tests/test_dynamic_factor.py
@@ -621,7 +621,7 @@ def test_misspecification():
     endog = np.arange(20).reshape(10, 2)

     # Too few endog
-    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog[:,0], k_factors=0, factor_order=0)
+    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog[:, 0], k_factors=0, factor_order=0)

     # Too many factors
     assert_raises(ValueError, dynamic_factor.DynamicFactor, endog, k_factors=2, factor_order=1)
diff --git a/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py b/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
index bbe38dbd749..056d029bc48 100644
--- a/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
+++ b/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
@@ -179,7 +179,7 @@ def model_common_level(endog=None, params=None, restricted=False):
 def model_var1(endog=None, params=None, measurement_error=False, init=None):
     if endog is None:
         endog = (np.log(
-            macrodata[['realgdp','realcons']]).iloc[:21].diff().iloc[1:] * 400)
+            macrodata[['realgdp', 'realcons']]).iloc[:21].diff().iloc[1:] * 400)
     if params is None:
         params = np.r_[0.5, 0.3, 0.2, 0.4, 2**0.5, 0, 3**0.5]
     if measurement_error:
@@ -200,7 +200,7 @@ def model_var1(endog=None, params=None, measurement_error=False, init=None):
 def model_dfm(endog=None, params=None, factor_order=2):
     if endog is None:
         endog = (np.log(
-            macrodata[['realgdp','realcons']]).iloc[:21].diff().iloc[1:] * 400)
+            macrodata[['realgdp', 'realcons']]).iloc[:21].diff().iloc[1:] * 400)
     if params is None:
         params = np.r_[0.5, 1., 1.5, 2., 0.9, 0.1]
@@ -746,7 +746,7 @@ class CheckVAR1Missing(CheckVAR1):
     @classmethod
     def setup_class(cls, **kwargs):
         endog = (np.log(
-            macrodata[['realgdp','realcons']]).iloc[:21].diff().iloc[1:] * 400)
+            macrodata[['realgdp', 'realcons']]).iloc[:21].diff().iloc[1:] * 400)
         endog.iloc[0:5, 0] = np.nan
         endog.iloc[8:12, :] = np.nan
         kwargs['endog'] = endog
@@ -964,13 +964,13 @@ def test_irrelevant_state():
     endog = macrodata.infl

     spec = {
-        'freq_seasonal': [{'period':8, 'harmonics': 6},
+        'freq_seasonal': [{'period': 8, 'harmonics': 6},
                           {'period': 36, 'harmonics': 6}]
     }

     # Approximate diffuse version
     mod = UnobservedComponents(endog, 'llevel', **spec)
-    mod.ssm.initialization = Initialization(mod.k_states,'approximate_diffuse')
+    mod.ssm.initialization = Initialization(mod.k_states, 'approximate_diffuse')
     res = mod.smooth([3.4, 7.2, 0.01, 0.01])

     # Exact diffuse version
diff --git a/statsmodels/tsa/statespace/tests/test_mlemodel.py b/statsmodels/tsa/statespace/tests/test_mlemodel.py
index 2193d05aae2..b16403e5d5e 100644
--- a/statsmodels/tsa/statespace/tests/test_mlemodel.py
+++ b/statsmodels/tsa/statespace/tests/test_mlemodel.py
@@ -40,7 +40,7 @@ def get_dummy_mod(fit=True, pandas=False):
         endog = pd.Series(endog, index=index)
         exog = pd.Series(exog, index=index)

-    mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
+    mod = sarimax.SARIMAX(endog, exog=exog, order=(0, 0, 0), time_varying_regression=True, mle_regression=False)

     if fit:
         with warnings.catch_warnings():
@@ -161,7 +161,7 @@ def test_fit_misc():
     true = results_sarimax.wpi1_stationary
     endog = np.diff(true['data'])[1:]

-    mod = sarimax.SARIMAX(endog, order=(1,0,1), trend='c')
+    mod = sarimax.SARIMAX(endog, order=(1, 0, 1), trend='c')

     # Test optim_hessian={'opg','oim','approx'}
     with warnings.catch_warnings():
@@ -189,14 +189,14 @@ def test_score_misc():
 def test_from_formula():
-    assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1,2,3))
+    assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1, 2, 3))


 def test_score_analytic_ar1():
     # Test the score against the analytic score for an AR(1) model with 2
     # observations
     # Let endog = [1, 0.5], params=[0, 1]
-    mod = sarimax.SARIMAX([1, 0.5], order=(1,0,0))
+    mod = sarimax.SARIMAX([1, 0.5], order=(1, 0, 0))

     def partial_phi(phi, sigma2):
         return -0.5 * (phi**2 + 2*phi*sigma2 - 1) / (sigma2 * (1 - phi**2))
@@ -284,10 +284,10 @@ def partial_transform_sigma2(sigma2):
     params = np.r_[0.5, 1.]

     def hessian(phi, sigma2):
-        hessian = np.zeros((2,2))
-        hessian[0,0] = (-phi**2 - 1) / (phi**2 - 1)**2
-        hessian[1,0] = hessian[0,1] = -1 / (2 * sigma2**2)
-        hessian[1,1] = (sigma2 + phi - 1.25) / sigma2**3
+        hessian = np.zeros((2, 2))
+        hessian[0, 0] = (-phi**2 - 1) / (phi**2 - 1)**2
+        hessian[1, 0] = hessian[0, 1] = -1 / (2 * sigma2**2)
+        hessian[1, 1] = (sigma2 + phi - 1.25) / sigma2**3
         return hessian

     analytic_hessian = hessian(params[0], params[1])
@@ -331,7 +331,7 @@ def test_cov_params():

 def test_transform():
     # The transforms in MLEModel are noops
-    mod = MLEModel([1,2], **kwargs)
+    mod = MLEModel([1, 2], **kwargs)

     # Test direct transform, untransform
     assert_allclose(mod.transform_params([2, 3]), [2, 3])
@@ -380,7 +380,7 @@ def test_filter():

 def test_params():
-    mod = MLEModel([1,2], **kwargs)
+    mod = MLEModel([1, 2], **kwargs)

     # By default start_params raises NotImplementedError
     assert_raises(NotImplementedError, lambda: mod.start_params)
@@ -415,7 +415,7 @@ def test_results(pandas=False):

 def test_predict():
     dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
-    endog = pd.Series([1,2], index=dates)
+    endog = pd.Series([1, 2], index=dates)
     mod = MLEModel(endog, **kwargs)
     res = mod.filter([])
@@ -432,14 +432,14 @@ def test_predict():
     # assert_raises(ValueError, res.predict, dynamic='1982-01-01')

     # Test for passing a string to predict when dates are not set
-    mod = MLEModel([1,2], **kwargs)
+    mod = MLEModel([1, 2], **kwargs)
     res = mod.filter([])
     assert_raises(KeyError, res.predict, dynamic='string')


 def test_forecast():
     # Numpy
-    mod = MLEModel([1,2], **kwargs)
+    mod = MLEModel([1, 2], **kwargs)
     res = mod.filter([])
     forecast = res.forecast(steps=10)
     assert_allclose(forecast, np.ones((10,)) * 2)
@@ -447,7 +447,7 @@ def test_forecast():

     # Pandas
     index = pd.date_range('1960-01-01', periods=2, freq='MS')
-    mod = MLEModel(pd.Series([1,2], index=index), **kwargs)
+    mod = MLEModel(pd.Series([1, 2], index=index), **kwargs)
     res = mod.filter([])
     assert_allclose(res.forecast(steps=10), np.ones((10,)) * 2)
     assert_allclose(res.forecast(steps='1960-12-01'), np.ones((10,)) * 2)
@@ -456,7 +456,7 @@ def test_forecast():

 def test_summary():
     dates = pd.date_range(start='1980-01-01', end='1984-01-01', freq='AS')
-    endog = pd.Series([1,2,3,4,5], index=dates)
+    endog = pd.Series([1, 2, 3, 4, 5], index=dates)
     mod = MLEModel(endog, **kwargs)
     res = mod.filter([])
@@ -528,15 +528,15 @@ def test_basic_endog():
     assert_raises(ValueError, mod.filter, [])

     # Check that different iterable types give the expected result
-    endog = [1.,2.]
+    endog = [1., 2.]
     mod = check_endog(endog, **kwargs)
     mod.filter([])

-    endog = [[1.],[2.]]
+    endog = [[1.], [2.]]
     mod = check_endog(endog, **kwargs)
     mod.filter([])

-    endog = (1.,2.)
+    endog = (1., 2.)
     mod = check_endog(endog, **kwargs)
     mod.filter([])
@@ -553,7 +553,7 @@ def test_numpy_endog():
         assert_equal(mod.data.orig_endog.base is not endog, True)
     endog[0] = 2
     # there is no link to mod.endog
-    assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
+    assert_equal(mod.endog, np.r_[1, 2].reshape(2, 1))
     # there remains a link to mod.data.orig_endog
     assert_equal(mod.data.orig_endog, endog)
@@ -565,7 +565,7 @@ def test_numpy_endog():
     assert_raises(TypeError, check_endog, endog, **kwargs)

     # Example : 1-dim array, both C- and F-contiguous, length 2
-    endog = np.array([1.,2.])
+    endog = np.array([1., 2.])
     assert_equal(endog.ndim, 1)
     assert_equal(endog.flags['C_CONTIGUOUS'], True)
     assert_equal(endog.flags['F_CONTIGUOUS'], True)
@@ -667,7 +667,7 @@ def test_pandas_endog():
     # Example (failure): pandas.DataFrame with 2 columns
     endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates)
     # raises error because 2-columns means k_endog=2, but the design matrix
-    # set in **kwargs is shaped (1,1)
+    # set in **kwargs is shaped (1, 1)
     assert_raises(ValueError, check_endog, endog, **kwargs)

     # Check behavior of the link maintained between passed `endog` and
@@ -679,7 +679,7 @@ def test_pandas_endog():
     assert_equal(mod.data.orig_endog.values.base is not endog, True)
     endog.iloc[0, 0] = 2
     # there is no link to mod.endog
-    assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
+    assert_equal(mod.endog, np.r_[1, 2].reshape(2, 1))
     # there remains a link to mod.data.orig_endog
     assert_allclose(mod.data.orig_endog, endog)
diff --git a/statsmodels/tsa/statespace/tests/test_representation.py b/statsmodels/tsa/statespace/tests/test_representation.py
index a2eb76ff6b3..031d1914d2d 100644
--- a/statsmodels/tsa/statespace/tests/test_representation.py
+++ b/statsmodels/tsa/statespace/tests/test_representation.py
@@ -523,7 +523,7 @@ class TestClark1989PartialMissing(Clark1989):
     def setup_class(cls):
         super(TestClark1989PartialMissing, cls).setup_class()
         endog = cls.model.endog
-        endog[1,-51:] = np.NaN
+        endog[1, -51:] = np.NaN
         cls.model.bind(endog)
         cls.results = cls.run_filter()
@@ -537,7 +537,7 @@ def test_filtered_state(self):

     def test_predicted_state(self):
         assert_allclose(
-            self.results.predicted_state.T[1:], clark1989_results.iloc[:,1:],
+            self.results.predicted_state.T[1:], clark1989_results.iloc[:, 1:],
             atol=1e-8
         )
@@ -556,7 +556,7 @@ def set_designs():
         mod['designs'] = 1

     def set_designs2():
-        mod['designs',0,0] = 1
+        mod['designs', 0, 0] = 1

     def set_designs3():
         mod[0] = 1
@@ -567,22 +567,22 @@ def set_designs3():

     # Test invalid __getitem__
     assert_raises(IndexError, lambda: mod['designs'])
-    assert_raises(IndexError, lambda: mod['designs',0,0,0])
+    assert_raises(IndexError, lambda: mod['designs', 0, 0, 0])
     assert_raises(IndexError, lambda: mod[0])

     # Test valid __setitem__, __getitem__
-    assert_equal(mod.design[0,0,0], 0)
-    mod['design',0,0,0] = 1
+    assert_equal(mod.design[0, 0, 0], 0)
+    mod['design', 0, 0, 0] = 1
     assert_equal(mod['design'].sum(), 1)
-    assert_equal(mod.design[0,0,0], 1)
-    assert_equal(mod['design',0,0,0], 1)
+    assert_equal(mod.design[0, 0, 0], 1)
+    assert_equal(mod['design', 0, 0, 0], 1)

     # Test valid __setitem__, __getitem__ with unspecified time index
     mod['design'] = np.zeros(mod['design'].shape)
-    assert_equal(mod.design[0,0], 0)
-    mod['design',0,0] = 1
-    assert_equal(mod.design[0,0], 1)
-    assert_equal(mod['design',0,0], 1)
+    assert_equal(mod.design[0, 0], 0)
+    mod['design', 0, 0] = 1
+    assert_equal(mod.design[0, 0], 1)
+    assert_equal(mod['design', 0, 0], 1)


 def test_representation():
@@ -595,7 +595,7 @@ def zero_kstates():
     # Test an invalid endogenous array
     def empty_endog():
-        endog = np.zeros((0,0))
+        endog = np.zeros((0, 0))
         mod = Representation(endog, k_states=2)
     assert_raises(ValueError, empty_endog)
@@ -603,7 +603,7 @@ def empty_endog():
     # wide format: k_endog x nobs)
     nobs = 10
     k_endog = 2
-    endog = np.asfortranarray(np.arange(nobs*k_endog).reshape(k_endog,nobs)*1.)
+    endog = np.asfortranarray(np.arange(nobs*k_endog).reshape(k_endog, nobs)*1.)
     mod = Representation(endog, k_states=2)
     assert_equal(mod.nobs, nobs)
     assert_equal(mod.k_endog, k_endog)
@@ -612,7 +612,7 @@ def empty_endog():
     # tall format: nobs x k_endog)
     nobs = 10
     k_endog = 2
-    endog = np.arange(nobs*k_endog).reshape(nobs,k_endog)*1.
+    endog = np.arange(nobs*k_endog).reshape(nobs, k_endog)*1.
     mod = Representation(endog, k_states=2)
     assert_equal(mod.nobs, nobs)
     assert_equal(mod.k_endog, k_endog)
@@ -629,31 +629,31 @@ def test_bind():
     mod = Representation(2, k_states=2)

     # Test invalid endogenous array (it must be ndarray)
-    assert_raises(ValueError, lambda: mod.bind([1,2,3,4]))
+    assert_raises(ValueError, lambda: mod.bind([1, 2, 3, 4]))

     # Test valid (nobs x 1) endogenous array
-    mod.bind(np.arange(10).reshape((5,2))*1.)
+    mod.bind(np.arange(10).reshape((5, 2))*1.)
     assert_equal(mod.nobs, 5)

     # Test valid (k_endog x 0) endogenous array
-    mod.bind(np.zeros((0,2),dtype=np.float64))
+    mod.bind(np.zeros((0, 2), dtype=np.float64))

     # Test invalid (3-dim) endogenous array
-    assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2,2,3)*1.))
+    assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2, 2, 3)*1.))

     # Test valid F-contiguous
-    mod.bind(np.asfortranarray(np.arange(10).reshape(2,5)))
+    mod.bind(np.asfortranarray(np.arange(10).reshape(2, 5)))
     assert_equal(mod.nobs, 5)

     # Test valid C-contiguous
-    mod.bind(np.arange(10).reshape(5,2))
+    mod.bind(np.arange(10).reshape(5, 2))
     assert_equal(mod.nobs, 5)

     # Test invalid F-contiguous
-    assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(5,2))))
+    assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(5, 2))))

     # Test invalid C-contiguous
-    assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(2,5)))
+    assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(2, 5)))


 def test_initialization():
@@ -674,7 +674,7 @@ def test_initialization():
     # Test invalid initial_state
     initial_state = np.zeros(10,)
     assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
-    initial_state = np.zeros((10,10))
+    initial_state = np.zeros((10, 10))
     assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))

     # Test invalid initial_state_cov
@@ -743,7 +743,7 @@ def test_cython():

     # Force creating new ?Statespace and ?KalmanFilter, by changing the
     # time-varying character of an array
-    mod.design = np.zeros((1,1,2))
+    mod.design = np.zeros((1, 1, 2))
     mod._initialize_filter()
     assert_equal(mod._kalman_filter == kf, False)
     kf = mod._kalman_filters['d']
@@ -758,7 +758,7 @@ def test_cython():

 def test_filter():
     # Tests of invalid calls to the filter function

-    endog = np.ones((10,1))
+    endog = np.ones((10, 1))
     mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
     mod['design', :] = 1
     mod['selection', :] = 1
@@ -772,7 +772,7 @@ def test_filter():

 def test_loglike():
     # Tests of invalid calls to the loglike function

-    endog = np.ones((10,1))
+    endog = np.ones((10, 1))
     mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
     mod['design', :] = 1
     mod['selection', :] = 1
@@ -789,10 +789,10 @@ def test_predict():
     warnings.simplefilter("always")

-    endog = np.ones((10,1))
+    endog = np.ones((10, 1))
     mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
     mod['design', :] = 1
-    mod['obs_intercept'] = np.zeros((1,10))
+    mod['obs_intercept'] = np.zeros((1, 10))
     mod['selection', :] = 1
     mod['state_cov', :] = 1
@@ -828,7 +828,7 @@ def test_predict():
     # Check that dynamic > nobs is a warning
     with warnings.catch_warnings(record=True) as w:
-        res.predict(end=11, dynamic=11, obs_intercept=np.zeros((1,1)))
+        res.predict(end=11, dynamic=11, obs_intercept=np.zeros((1, 1)))
         message = ('Dynamic prediction specified to begin during'
                    ' out-of-sample forecasting period, and so has no'
                    ' effect.')
@@ -836,7 +836,7 @@ def test_predict():

     # Check for a warning when providing a non-used statespace matrix
     with warnings.catch_warnings(record=True) as w:
-        res.predict(end=res.nobs+1, design=True, obs_intercept=np.zeros((1,1)))
+        res.predict(end=res.nobs+1, design=True, obs_intercept=np.zeros((1, 1)))
         message = ('Model has time-invariant design matrix, so the design'
                    ' argument to `predict` has been ignored.')
         assert_equal(str(w[0].message), message)
@@ -856,7 +856,7 @@ def test_predict():
                   obs_intercept=np.zeros(2))

     # Check that start=None gives start=0 and end=None gives end=nobs
-    assert_equal(res.predict().forecasts.shape, (1,res.nobs))
+    assert_equal(res.predict().forecasts.shape, (1, res.nobs))

     # Check that dynamic=True begins dynamic prediction immediately
     # TODO just a smoke test
@@ -909,15 +909,15 @@ def test_predict():
     # is given
     mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
     mod['design', :] = 1
-    mod['obs_cov'] = np.zeros((1,1,10))
+    mod['obs_cov'] = np.zeros((1, 1, 10))
     mod['selection', :] = 1
     mod['state_cov', :] = 1
     res = mod.filter()

     assert_raises(ValueError, res.predict, end=res.nobs+1,
-                  obs_cov=np.zeros((1,1)))
+                  obs_cov=np.zeros((1, 1)))
     assert_raises(ValueError, res.predict, end=res.nobs+1,
-                  obs_cov=np.zeros((1,1,2)))
+                  obs_cov=np.zeros((1, 1, 2)))


 def test_standardized_forecasts_error():
@@ -934,13 +934,13 @@ def test_standardized_forecasts_error():
     )
     data['lgdp'] = np.log(data['GDP'])

-    # Fit an ARIMA(1,1,0) to log GDP
-    mod = sarimax.SARIMAX(data['lgdp'], order=(1,1,0))
+    # Fit an ARIMA(1, 1, 0) to log GDP
+    mod = sarimax.SARIMAX(data['lgdp'], order=(1, 1, 0))
     res = mod.fit(disp=-1)

     standardized_forecasts_error = (
         res.filter_results.forecasts_error[0] /
-        np.sqrt(res.filter_results.forecasts_error_cov[0,0])
+        np.sqrt(res.filter_results.forecasts_error_cov[0, 0])
     )

     assert_allclose(
@@ -1028,7 +1028,7 @@ def test_simulate():
     assert_raises(ValueError, mod.simulate, nsimulations+1, measurement_shocks,
                   state_shocks)

-    # ARMA(1,1): phi = [0.1], theta = [0.5], sigma^2 = 2
+    # ARMA(1, 1): phi = [0.1], theta = [0.5], sigma^2 = 2
     phi = 0.1
     theta = 0.5
     mod = sarimax.SARIMAX([0], order=(1, 0, 1))
@@ -1042,7 +1042,7 @@ def test_simulate():

     assert_allclose(actual, desired)

-    # SARIMAX(1,0,1)x(1,0,1,4), this time using the results object call
+    # SARIMAX(1, 0, 1)x(1, 0, 1, 4), this time using the results object call
     mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1, 0, 1),
                           seasonal_order=(1, 0, 1, 4))
     res = mod.filter([0.1, 0.5, 0.2, -0.3, 1])
@@ -1136,7 +1136,7 @@ def test_impulse_responses():
     # a multivariate or empty "impulse" is sent
     mod = KalmanFilter(k_endog=1, k_states=1)
     assert_raises(ValueError, mod.impulse_responses, impulse=1)
-    assert_raises(ValueError, mod.impulse_responses, impulse=[1,1])
+    assert_raises(ValueError, mod.impulse_responses, impulse=[1, 1])
     assert_raises(ValueError, mod.impulse_responses, impulse=[])

     # Univariate model with two uncorrelated shocks
@@ -1151,23 +1151,23 @@ def test_impulse_responses():
     actual = mod.impulse_responses(steps=10, impulse=0)
     assert_allclose(actual, desired)

-    actual = mod.impulse_responses(steps=10, impulse=[1,0])
+    actual = mod.impulse_responses(steps=10, impulse=[1, 0])
     assert_allclose(actual, desired)

     actual = mod.impulse_responses(steps=10, impulse=1)
     assert_allclose(actual, desired)

-    actual = mod.impulse_responses(steps=10, impulse=[0,1])
+    actual = mod.impulse_responses(steps=10, impulse=[0, 1])
     assert_allclose(actual, desired)

     # In this case (with sigma=sigma^2=1), orthogonalized is the same as not
     actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
     assert_allclose(actual, desired)

-    actual = mod.impulse_responses(steps=10, impulse=[1,0], orthogonalized=True)
+    actual = mod.impulse_responses(steps=10, impulse=[1, 0], orthogonalized=True)
     assert_allclose(actual, desired)

-    actual = mod.impulse_responses(steps=10, impulse=[0,1], orthogonalized=True)
+    actual = mod.impulse_responses(steps=10, impulse=[0, 1], orthogonalized=True)
     assert_allclose(actual, desired)

     # Univariate model with two correlated shocks
@@ -1220,7 +1220,7 @@ def test_impulse_responses():
     assert_allclose(actual, np.c_[zeros, ones])

     # AR(1) model generates a geometrically declining series
-    mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1,0,0))
+    mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1, 0, 0))
     phi = 0.5
     mod.update([phi, 1])
@@ -1238,7 +1238,7 @@ def test_impulse_responses():

 def test_missing():
     # Datasets
-    endog = np.arange(10).reshape(10,1)
+    endog = np.arange(10).reshape(10, 1)
     endog_pre_na = np.ascontiguousarray(np.c_[
         endog.copy() * np.nan, endog.copy() * np.nan, endog, endog])
     endog_post_na = np.ascontiguousarray(np.c_[
diff --git a/statsmodels/tsa/statespace/tests/test_sarimax.py b/statsmodels/tsa/statespace/tests/test_sarimax.py
index 022d8048d36..e2f48523932 100644
--- a/statsmodels/tsa/statespace/tests/test_sarimax.py
+++ b/statsmodels/tsa/statespace/tests/test_sarimax.py
@@ -1063,32 +1063,32 @@ def test_init_keys_replicate(self):

 class Test_ar(SARIMAXCoverageTest):
-    # // AR: (p,0,0) x (0,0,0,0)
-    # arima wpi, arima(3,0,0) noconstant vce(oim)
+    # // AR: (p, 0, 0) x (0, 0, 0, 0)
+    # arima wpi, arima(3, 0, 0) noconstant vce(oim)
     # save_results 1
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
+        kwargs['order'] = (3, 0, 0)
         super(Test_ar, cls).setup_class(0, *args, **kwargs)


 class Test_ar_as_polynomial(SARIMAXCoverageTest):
-    # // AR: (p,0,0) x (0,0,0,0)
-    # arima wpi, arima(3,0,0) noconstant vce(oim)
+    # // AR: (p, 0, 0) x (0, 0, 0, 0)
+    # arima wpi, arima(3, 0, 0) noconstant vce(oim)
     # save_results 1
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = ([1,1,1],0,0)
+        kwargs['order'] = ([1, 1, 1], 0, 0)
         super(Test_ar_as_polynomial, cls).setup_class(0, *args, **kwargs)


 class Test_ar_trend_c(SARIMAXCoverageTest):
     # // 'c'
-    # arima wpi c, arima(3,0,0) noconstant vce(oim)
+    # arima wpi c, arima(3, 0, 0) noconstant vce(oim)
     # save_results 2
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
+        kwargs['order'] = (3, 0, 0)
         kwargs['trend'] = 'c'
         super(Test_ar_trend_c, cls).setup_class(1, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1097,11 +1097,11 @@ def setup_class(cls, *args, **kwargs):
 class Test_ar_trend_ct(SARIMAXCoverageTest):
     # // 'ct'
-    # arima wpi c t, arima(3,0,0) noconstant vce(oim)
+    # arima wpi c t, arima(3, 0, 0) noconstant vce(oim)
     # save_results 3
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
+        kwargs['order'] = (3, 0, 0)
         kwargs['trend'] = 'ct'
         super(Test_ar_trend_ct, cls).setup_class(2, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1109,58 +1109,58 @@ def setup_class(cls, *args, **kwargs):

 class Test_ar_trend_polynomial(SARIMAXCoverageTest):
-    # // polynomial [1,0,0,1]
-    # arima wpi c t3, arima(3,0,0) noconstant vce(oim)
+    # // polynomial [1, 0, 0, 1]
+    # arima wpi c t3, arima(3, 0, 0) noconstant vce(oim)
     # save_results 4
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
-        kwargs['trend'] = [1,0,0,1]
+        kwargs['order'] = (3, 0, 0)
+        kwargs['trend'] = [1, 0, 0, 1]
         super(Test_ar_trend_polynomial, cls).setup_class(3, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
         cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2]


 class Test_ar_diff(SARIMAXCoverageTest):
-    # // AR and I(d): (p,d,0) x (0,0,0,0)
-    # arima wpi, arima(3,2,0) noconstant vce(oim)
+    # // AR and I(d): (p, d, 0) x (0, 0, 0, 0)
+    # arima wpi, arima(3, 2, 0) noconstant vce(oim)
     # save_results 5
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,2,0)
+        kwargs['order'] = (3, 2, 0)
         super(Test_ar_diff, cls).setup_class(4, *args, **kwargs)


 class Test_ar_seasonal_diff(SARIMAXCoverageTest):
-    # // AR and I(D): (p,0,0) x (0,D,0,s)
-    # arima wpi, arima(3,0,0) sarima(0,2,0,4) noconstant vce(oim)
+    # // AR and I(D): (p, 0, 0) x (0, D, 0, s)
+    # arima wpi, arima(3, 0, 0) sarima(0, 2, 0, 4) noconstant vce(oim)
     # save_results 6
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
-        kwargs['seasonal_order'] = (0,2,0,4)
+        kwargs['order'] = (3, 0, 0)
+        kwargs['seasonal_order'] = (0, 2, 0, 4)
         super(Test_ar_seasonal_diff, cls).setup_class(5, *args, **kwargs)


 class Test_ar_diffuse(SARIMAXCoverageTest):
     # // AR and diffuse initialization
-    # arima wpi, arima(3,0,0) noconstant vce(oim) diffuse
+    # arima wpi, arima(3, 0, 0) noconstant vce(oim) diffuse
     # save_results 7
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
+        kwargs['order'] = (3, 0, 0)
         kwargs['initialization'] = 'approximate_diffuse'
         kwargs['initial_variance'] = 1e9
         super(Test_ar_diffuse, cls).setup_class(6, *args, **kwargs)


 class Test_ar_no_enforce(SARIMAXCoverageTest):
-    # // AR: (p,0,0) x (0,0,0,0)
-    # arima wpi, arima(3,0,0) noconstant vce(oim)
+    # // AR: (p, 0, 0) x (0, 0, 0, 0)
+    # arima wpi, arima(3, 0, 0) noconstant vce(oim)
     # save_results 1
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
+        kwargs['order'] = (3, 0, 0)
         kwargs['enforce_stationarity'] = False
         kwargs['enforce_invertibility'] = False
         kwargs['initial_variance'] = 1e9
@@ -1189,11 +1189,11 @@ def test_init_keys_replicate(self):

 class Test_ar_exogenous(SARIMAXCoverageTest):
     # // ARX
-    # arima wpi x, arima(3,0,0) noconstant vce(oim)
+    # arima wpi x, arima(3, 0, 0) noconstant vce(oim)
     # save_results 8
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
+        kwargs['order'] = (3, 0, 0)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         super(Test_ar_exogenous, cls).setup_class(7, *args, **kwargs)
@@ -1201,11 +1201,11 @@ def setup_class(cls, *args, **kwargs):

 class Test_ar_exogenous_in_state(SARIMAXCoverageTest):
     # // ARX
-    # arima wpi x, arima(3,0,0) noconstant vce(oim)
+    # arima wpi x, arima(3, 0, 0) noconstant vce(oim)
     # save_results 8
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,0)
+        kwargs['order'] = (3, 0, 0)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         kwargs['mle_regression'] = False
@@ -1238,86 +1238,86 @@ def test_regression_coefficient(self):

 class Test_ma(SARIMAXCoverageTest):
-    # // MA: (0,0,q) x (0,0,0,0)
-    # arima wpi, arima(0,0,3) noconstant vce(oim)
+    # // MA: (0, 0, q) x (0, 0, 0, 0)
+    # arima wpi, arima(0, 0, 3) noconstant vce(oim)
     # save_results 9
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,3)
+        kwargs['order'] = (0, 0, 3)
         super(Test_ma, cls).setup_class(8, *args, **kwargs)


 class Test_ma_as_polynomial(SARIMAXCoverageTest):
-    # // MA: (0,0,q) x (0,0,0,0)
-    # arima wpi, arima(0,0,3) noconstant vce(oim)
+    # // MA: (0, 0, q) x (0, 0, 0, 0)
+    # arima wpi, arima(0, 0, 3) noconstant vce(oim)
     # save_results 9
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,[1,1,1])
+        kwargs['order'] = (0, 0, [1, 1, 1])
         super(Test_ma_as_polynomial, cls).setup_class(8, *args, **kwargs)


 class Test_ma_trend_c(SARIMAXCoverageTest):
     # // 'c'
-    # arima wpi c, arima(0,0,3) noconstant vce(oim)
+    # arima wpi c, arima(0, 0, 3) noconstant vce(oim)
     # save_results 10
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,3)
+        kwargs['order'] = (0, 0, 3)
         kwargs['trend'] = 'c'
         super(Test_ma_trend_c, cls).setup_class(9, *args, **kwargs)


 class Test_ma_trend_ct(SARIMAXCoverageTest):
     # // 'ct'
-    # arima wpi c t, arima(0,0,3) noconstant vce(oim)
+    # arima wpi c t, arima(0, 0, 3) noconstant vce(oim)
     # save_results 11
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,3)
+        kwargs['order'] = (0, 0, 3)
         kwargs['trend'] = 'ct'
         super(Test_ma_trend_ct, cls).setup_class(10, *args, **kwargs)


 class Test_ma_trend_polynomial(SARIMAXCoverageTest):
-    # // polynomial [1,0,0,1]
-    # arima wpi c t3, arima(0,0,3) noconstant vce(oim)
+    # // polynomial [1, 0, 0, 1]
+    # arima wpi c t3, arima(0, 0, 3) noconstant vce(oim)
     # save_results 12
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,3)
-        kwargs['trend'] = [1,0,0,1]
+        kwargs['order'] = (0, 0, 3)
+        kwargs['trend'] = [1, 0, 0, 1]
         super(Test_ma_trend_polynomial, cls).setup_class(11, *args, **kwargs)


 class Test_ma_diff(SARIMAXCoverageTest):
-    # // MA and I(d): (0,d,q) x (0,0,0,0)
-    # arima wpi, arima(0,2,3) noconstant vce(oim)
+    # // MA and I(d): (0, d, q) x (0, 0, 0, 0)
+    # arima wpi, arima(0, 2, 3) noconstant vce(oim)
     # save_results 13
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,2,3)
+        kwargs['order'] = (0, 2, 3)
         super(Test_ma_diff, cls).setup_class(12, *args, **kwargs)


 class Test_ma_seasonal_diff(SARIMAXCoverageTest):
-    # // MA and I(D): (p,0,0) x (0,D,0,s)
-    # arima wpi, arima(0,0,3) sarima(0,2,0,4) noconstant vce(oim)
+    # // MA and I(D): (p, 0, 0) x (0, D, 0, s)
+    # arima wpi, arima(0, 0, 3) sarima(0, 2, 0, 4) noconstant vce(oim)
     # save_results 14
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,3)
-        kwargs['seasonal_order'] = (0,2,0,4)
+        kwargs['order'] = (0, 0, 3)
+        kwargs['seasonal_order'] = (0, 2, 0, 4)
         super(Test_ma_seasonal_diff, cls).setup_class(13, *args, **kwargs)


 class Test_ma_diffuse(SARIMAXCoverageTest):
     # // MA and diffuse initialization
-    # arima wpi, arima(0,0,3) noconstant vce(oim) diffuse
+    # arima wpi, arima(0, 0, 3) noconstant vce(oim) diffuse
     # save_results 15
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,3)
+        kwargs['order'] = (0, 0, 3)
         kwargs['initialization'] = 'approximate_diffuse'
         kwargs['initial_variance'] = 1e9
         super(Test_ma_diffuse, cls).setup_class(14, *args, **kwargs)
@@ -1325,33 +1325,33 @@ def setup_class(cls, *args, **kwargs):

 class Test_ma_exogenous(SARIMAXCoverageTest):
     # // MAX
-    # arima wpi x, arima(0,0,3) noconstant vce(oim)
+    # arima wpi x, arima(0, 0, 3) noconstant vce(oim)
     # save_results 16
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,3)
+        kwargs['order'] = (0, 0, 3)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         super(Test_ma_exogenous, cls).setup_class(15, *args, **kwargs)


 class Test_arma(SARIMAXCoverageTest):
-    # // ARMA: (p,0,q) x (0,0,0,0)
-    # arima wpi, arima(3,0,3) noconstant vce(oim)
+    # // ARMA: (p, 0, q) x (0, 0, 0, 0)
+    # arima wpi, arima(3, 0, 3) noconstant vce(oim)
     # save_results 17
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,3)
+        kwargs['order'] = (3, 0, 3)
         super(Test_arma, cls).setup_class(16, *args, **kwargs)


 class Test_arma_trend_c(SARIMAXCoverageTest):
     # // 'c'
-    # arima wpi c, arima(3,0,2) noconstant vce(oim)
+    # arima wpi c, arima(3, 0, 2) noconstant vce(oim)
     # save_results 18
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,2)
+        kwargs['order'] = (3, 0, 2)
         kwargs['trend'] = 'c'
         super(Test_arma_trend_c, cls).setup_class(17, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1360,11 +1360,11 @@ def setup_class(cls, *args, **kwargs):

 class Test_arma_trend_ct(SARIMAXCoverageTest):
     # // 'ct'
-    # arima wpi c t, arima(3,0,2) noconstant vce(oim)
+    # arima wpi c t, arima(3, 0, 2) noconstant vce(oim)
     # save_results 19
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,2)
+        kwargs['order'] = (3, 0, 2)
         kwargs['trend'] = 'ct'
         super(Test_arma_trend_ct, cls).setup_class(18, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1372,57 +1372,57 @@ def setup_class(cls, *args, **kwargs):

 class Test_arma_trend_polynomial(SARIMAXCoverageTest):
-    # // polynomial [1,0,0,1]
-    # arima wpi c t3, arima(3,0,2) noconstant vce(oim)
+    # // polynomial [1, 0, 0, 1]
+    # arima wpi c t3, arima(3, 0, 2) noconstant vce(oim)
     # save_results 20
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,2)
-        kwargs['trend'] = [1,0,0,1]
+        kwargs['order'] = (3, 0, 2)
+        kwargs['trend'] = [1, 0, 0, 1]
         super(Test_arma_trend_polynomial, cls).setup_class(19, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
         cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2]


 class Test_arma_diff(SARIMAXCoverageTest):
-    # // ARMA and I(d): (p,d,q) x (0,0,0,0)
-    # arima wpi, arima(3,2,2) noconstant vce(oim)
+    # // ARMA and I(d): (p, d, q) x (0, 0, 0, 0)
+    # arima wpi, arima(3, 2, 2) noconstant vce(oim)
     # save_results 21
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,2,2)
+        kwargs['order'] = (3, 2, 2)
         super(Test_arma_diff, cls).setup_class(20, *args, **kwargs)


 class Test_arma_seasonal_diff(SARIMAXCoverageTest):
-    # // ARMA and I(D): (p,0,q) x (0,D,0,s)
-    # arima wpi, arima(3,0,2) sarima(0,2,0,4) noconstant vce(oim)
+    # // ARMA and I(D): (p, 0, q) x (0, D, 0, s)
+    # arima wpi, arima(3, 0, 2) sarima(0, 2, 0, 4) noconstant vce(oim)
     # save_results 22
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,2)
-        kwargs['seasonal_order'] = (0,2,0,4)
+        kwargs['order'] = (3, 0, 2)
+        kwargs['seasonal_order'] = (0, 2, 0, 4)
         super(Test_arma_seasonal_diff, cls).setup_class(21, *args, **kwargs)


 class Test_arma_diff_seasonal_diff(SARIMAXCoverageTest):
-    # // ARMA and I(d) and I(D): (p,d,q) x (0,D,0,s)
-    # arima wpi, arima(3,2,2) sarima(0,2,0,4) noconstant vce(oim)
+    # // ARMA and I(d) and I(D): (p, d, q) x (0, D, 0, s)
+    # arima wpi, arima(3, 2, 2) sarima(0, 2, 0, 4) noconstant vce(oim)
     # save_results 23
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,2,2)
-        kwargs['seasonal_order'] = (0,2,0,4)
+        kwargs['order'] = (3, 2, 2)
+        kwargs['seasonal_order'] = (0, 2, 0, 4)
         super(Test_arma_diff_seasonal_diff, cls).setup_class(22, *args, **kwargs)


 class Test_arma_diffuse(SARIMAXCoverageTest):
     # // ARMA and diffuse initialization
-    # arima wpi, arima(3,0,2) noconstant vce(oim) diffuse
+    # arima wpi, arima(3, 0, 2) noconstant vce(oim) diffuse
     # save_results 24
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,2)
+        kwargs['order'] = (3, 0, 2)
         kwargs['initialization'] = 'approximate_diffuse'
         kwargs['initial_variance'] = 1e9
         super(Test_arma_diffuse, cls).setup_class(23, *args, **kwargs)
@@ -1430,46 +1430,46 @@ def setup_class(cls, *args, **kwargs):

 class Test_arma_exogenous(SARIMAXCoverageTest):
     # // ARMAX
-    # arima wpi x, arima(3,0,2) noconstant vce(oim)
+    # arima wpi x, arima(3, 0, 2) noconstant vce(oim)
     # save_results 25
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,0,2)
+        kwargs['order'] = (3, 0, 2)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         super(Test_arma_exogenous, cls).setup_class(24, *args, **kwargs)


 class Test_seasonal_ar(SARIMAXCoverageTest):
-    # // SAR: (0,0,0) x (P,0,0,s)
-    # arima wpi, sarima(3,0,0,4) noconstant vce(oim)
+    # // SAR: (0, 0, 0) x (P, 0, 0, s)
+    # arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim)
     # save_results 26
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,0,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 0, 4)
         super(Test_seasonal_ar, cls).setup_class(25, *args, **kwargs)


 class Test_seasonal_ar_as_polynomial(SARIMAXCoverageTest):
-    # // SAR: (0,0,0) x (P,0,0,s)
-    # arima wpi, sarima(3,0,0,4) noconstant vce(oim)
+    # // SAR: (0, 0, 0) x (P, 0, 0, s)
+    # arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim)
     # save_results 26
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = ([1,1,1],0,0,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = ([1, 1, 1], 0, 0, 4)
         super(Test_seasonal_ar_as_polynomial, cls).setup_class(25, *args, **kwargs)


 class Test_seasonal_ar_trend_c(SARIMAXCoverageTest):
     # // 'c'
-    # arima wpi c, sarima(3,0,0,4) noconstant vce(oim)
+    # arima wpi c, sarima(3, 0, 0, 4) noconstant vce(oim)
     # save_results 27
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,0,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 0, 4)
         kwargs['trend'] = 'c'
         super(Test_seasonal_ar_trend_c, cls).setup_class(26, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1478,12 +1478,12 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_ar_trend_ct(SARIMAXCoverageTest):
     # // 'ct'
-    # arima wpi c t, sarima(3,0,0,4) noconstant vce(oim)
+    # arima wpi c t, sarima(3, 0, 0, 4) noconstant vce(oim)
     # save_results 28
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,0,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 0, 4)
         kwargs['trend'] = 'ct'
         super(Test_seasonal_ar_trend_ct, cls).setup_class(27, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1491,49 +1491,49 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_ar_trend_polynomial(SARIMAXCoverageTest):
-    # // polynomial [1,0,0,1]
-    # arima wpi c t3, sarima(3,0,0,4) noconstant vce(oim)
+    # // polynomial [1, 0, 0, 1]
+    # arima wpi c t3, sarima(3, 0, 0, 4) noconstant vce(oim)
     # save_results 29
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,0,4)
-        kwargs['trend'] = [1,0,0,1]
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 0, 4)
+        kwargs['trend'] = [1, 0, 0, 1]
         super(Test_seasonal_ar_trend_polynomial, cls).setup_class(28, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
         cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2]


 class Test_seasonal_ar_diff(SARIMAXCoverageTest):
-    # // SAR and I(d): (0,d,0) x (P,0,0,s)
-    # arima wpi, arima(0,2,0) sarima(3,0,0,4) noconstant vce(oim)
+    # // SAR and I(d): (0, d, 0) x (P, 0, 0, s)
+    # arima wpi, arima(0, 2, 0) sarima(3, 0, 0, 4) noconstant vce(oim)
     # save_results 30
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,2,0)
-        kwargs['seasonal_order'] = (3,0,0,4)
+        kwargs['order'] = (0, 2, 0)
+        kwargs['seasonal_order'] = (3, 0, 0, 4)
         super(Test_seasonal_ar_diff, cls).setup_class(29, *args, **kwargs)


 class Test_seasonal_ar_seasonal_diff(SARIMAXCoverageTest):
-    # // SAR and I(D): (0,0,0) x (P,D,0,s)
-    # arima wpi, sarima(3,2,0,4) noconstant vce(oim)
+    # // SAR and I(D): (0, 0, 0) x (P, D, 0, s)
+    # arima wpi, sarima(3, 2, 0, 4) noconstant vce(oim)
     # save_results 31
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,2,0,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 2, 0, 4)
         super(Test_seasonal_ar_seasonal_diff, cls).setup_class(30, *args, **kwargs)


 class Test_seasonal_ar_diffuse(SARIMAXCoverageTest):
     # // SAR and diffuse initialization
-    # arima wpi, sarima(3,0,0,4) noconstant vce(oim) diffuse
+    # arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim) diffuse
     # save_results 32
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,0,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 0, 4)
         kwargs['initialization'] = 'approximate_diffuse'
         kwargs['initial_variance'] = 1e9
         super(Test_seasonal_ar_diffuse, cls).setup_class(31, *args, **kwargs)
@@ -1541,12 +1541,12 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_ar_exogenous(SARIMAXCoverageTest):
     # // SARX
-    # arima wpi x, sarima(3,0,0,4) noconstant vce(oim)
+    # arima wpi x, sarima(3, 0, 0, 4) noconstant vce(oim)
     # save_results 33
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,0,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 0, 4)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         super(Test_seasonal_ar_exogenous, cls).setup_class(32, *args, **kwargs)
@@ -1554,34 +1554,34 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_ma(SARIMAXCoverageTest):
     # // SMA
-    # arima wpi, sarima(0,0,3,4) noconstant vce(oim)
+    # arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim)
     # save_results 34
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,0,3,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 0, 3, 4)
         super(Test_seasonal_ma, cls).setup_class(33, *args, **kwargs)


 class Test_seasonal_ma_as_polynomial(SARIMAXCoverageTest):
     # // SMA
-    # arima wpi, sarima(0,0,3,4) noconstant vce(oim)
+    # arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim)
     # save_results 34
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,0,[1,1,1],4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 0, [1, 1, 1], 4)
         super(Test_seasonal_ma_as_polynomial, cls).setup_class(33, *args, **kwargs)


 class Test_seasonal_ma_trend_c(SARIMAXCoverageTest):
     # // 'c'
-    # arima wpi c, sarima(0,0,3,4) noconstant vce(oim)
+    # arima wpi c, sarima(0, 0, 3, 4) noconstant vce(oim)
     # save_results 35
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,0,3,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 0, 3, 4)
         kwargs['trend'] = 'c'
         kwargs['decimal'] = 3
         super(Test_seasonal_ma_trend_c, cls).setup_class(34, *args, **kwargs)
@@ -1589,59 +1589,59 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_ma_trend_ct(SARIMAXCoverageTest):
     # // 'ct'
-    # arima wpi c t, sarima(0,0,3,4) noconstant vce(oim)
+    # arima wpi c t, sarima(0, 0, 3, 4) noconstant vce(oim)
     # save_results 36
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,0,3,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 0, 3, 4)
         kwargs['trend'] = 'ct'
         super(Test_seasonal_ma_trend_ct, cls).setup_class(35, *args, **kwargs)


 class Test_seasonal_ma_trend_polynomial(SARIMAXCoverageTest):
-    # // polynomial [1,0,0,1]
-    # arima wpi c t3, sarima(0,0,3,4) noconstant vce(oim)
+    # // polynomial [1, 0, 0, 1]
+    # arima wpi c t3, sarima(0, 0, 3, 4) noconstant vce(oim)
     # save_results 37
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,0,3,4)
-        kwargs['trend'] = [1,0,0,1]
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 0, 3, 4)
+        kwargs['trend'] = [1, 0, 0, 1]
         kwargs['decimal'] = 3
         super(Test_seasonal_ma_trend_polynomial, cls).setup_class(36, *args, **kwargs)


 class Test_seasonal_ma_diff(SARIMAXCoverageTest):
-    # // SMA and I(d): (0,d,0) x (0,0,Q,s)
-    # arima wpi, arima(0,2,0) sarima(0,0,3,4) noconstant vce(oim)
+    # // SMA and I(d): (0, d, 0) x (0, 0, Q, s)
+    # arima wpi, arima(0, 2, 0) sarima(0, 0, 3, 4) noconstant vce(oim)
     # save_results 38
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,2,0)
-        kwargs['seasonal_order'] = (0,0,3,4)
+        kwargs['order'] = (0, 2, 0)
+        kwargs['seasonal_order'] = (0, 0, 3, 4)
         super(Test_seasonal_ma_diff, cls).setup_class(37, *args, **kwargs)


 class Test_seasonal_ma_seasonal_diff(SARIMAXCoverageTest):
-    # // SMA and I(D): (0,0,0) x (0,D,Q,s)
-    # arima wpi, sarima(0,2,3,4) noconstant vce(oim)
+    # // SMA and I(D): (0, 0, 0) x (0, D, Q, s)
+    # arima wpi, sarima(0, 2, 3, 4) noconstant vce(oim)
     # save_results 39
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,2,3,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 2, 3, 4)
         super(Test_seasonal_ma_seasonal_diff, cls).setup_class(38, *args, **kwargs)


 class Test_seasonal_ma_diffuse(SARIMAXCoverageTest):
     # // SMA and diffuse initialization
-    # arima wpi, sarima(0,0,3,4) noconstant vce(oim) diffuse
+    # arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim) diffuse
     # save_results 40
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,0,3,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 0, 3, 4)
         kwargs['initialization'] = 'approximate_diffuse'
         kwargs['initial_variance'] = 1e9
         super(Test_seasonal_ma_diffuse, cls).setup_class(39, *args, **kwargs)
@@ -1649,36 +1649,36 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_ma_exogenous(SARIMAXCoverageTest):
     # // SMAX
-    # arima wpi x, sarima(0,0,3,4) noconstant vce(oim)
+    # arima wpi x, sarima(0, 0, 3, 4) noconstant vce(oim)
     # save_results 41
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (0,0,3,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (0, 0, 3, 4)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         super(Test_seasonal_ma_exogenous, cls).setup_class(40, *args, **kwargs)


 class Test_seasonal_arma(SARIMAXCoverageTest):
-    # // SARMA: (0,0,0) x (P,0,Q,s)
-    # arima wpi, sarima(3,0,2,4) noconstant vce(oim)
+    # // SARMA: (0, 0, 0) x (P, 0, Q, s)
+    # arima wpi, sarima(3, 0, 2, 4) noconstant vce(oim)
     # save_results 42
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,2,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 2, 4)
         super(Test_seasonal_arma, cls).setup_class(41, *args, **kwargs)


 class Test_seasonal_arma_trend_c(SARIMAXCoverageTest):
     # // 'c'
-    # arima wpi c, sarima(3,0,2,4) noconstant vce(oim)
+    # arima wpi c, sarima(3, 0, 2, 4) noconstant vce(oim)
     # save_results 43
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,2,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 2, 4)
         kwargs['trend'] = 'c'
         super(Test_seasonal_arma_trend_c, cls).setup_class(42, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1687,12 +1687,12 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_arma_trend_ct(SARIMAXCoverageTest):
     # // 'ct'
-    # arima wpi c t, sarima(3,0,2,4) noconstant vce(oim)
+    # arima wpi c t, sarima(3, 0, 2, 4) noconstant vce(oim)
     # save_results 44
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,2,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 2, 4)
         kwargs['trend'] = 'ct'
         super(Test_seasonal_arma_trend_ct, cls).setup_class(43, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1700,14 +1700,14 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_arma_trend_polynomial(SARIMAXCoverageTest):
-    # // polynomial [1,0,0,1]
-    # arima wpi c t3, sarima(3,0,2,4) noconstant vce(oim)
+    # // polynomial [1, 0, 0, 1]
+    # arima wpi c t3, sarima(3, 0, 2, 4) noconstant vce(oim)
     # save_results 45
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,2,4)
-        kwargs['trend'] = [1,0,0,1]
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 2, 4)
+        kwargs['trend'] = [1, 0, 0, 1]
         kwargs['decimal'] = 3
         super(Test_seasonal_arma_trend_polynomial, cls).setup_class(44, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1730,35 +1730,35 @@ def test_results(self):

 class Test_seasonal_arma_diff(SARIMAXCoverageTest):
-    # // SARMA and I(d): (0,d,0) x (P,0,Q,s)
-    # arima wpi, arima(0,2,0) sarima(3,0,2,4) noconstant vce(oim)
+    # // SARMA and I(d): (0, d, 0) x (P, 0, Q, s)
+    # arima wpi, arima(0, 2, 0) sarima(3, 0, 2, 4) noconstant vce(oim)
     # save_results 46
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,2,0)
-        kwargs['seasonal_order'] = (3,0,2,4)
+        kwargs['order'] = (0, 2, 0)
+        kwargs['seasonal_order'] = (3, 0, 2, 4)
         super(Test_seasonal_arma_diff, cls).setup_class(45, *args, **kwargs)


 class Test_seasonal_arma_seasonal_diff(SARIMAXCoverageTest):
-    # // SARMA and I(D): (0,0,0) x (P,D,Q,s)
-    # arima wpi, sarima(3,2,2,4) noconstant vce(oim)
+    # // SARMA and I(D): (0, 0, 0) x (P, D, Q, s)
+    # arima wpi, sarima(3, 2, 2, 4) noconstant vce(oim)
     # save_results 47
    @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,2,2,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 2, 2, 4)
         super(Test_seasonal_arma_seasonal_diff, cls).setup_class(46, *args, **kwargs)


 class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest):
-    # // SARMA and I(d) and I(D): (0,d,0) x (P,D,Q,s)
-    # arima wpi, arima(0,2,0) sarima(3,2,2,4) noconstant vce(oim)
+    # // SARMA and I(d) and I(D): (0, d, 0) x (P, D, Q, s)
+    # arima wpi, arima(0, 2, 0) sarima(3, 2, 2, 4) noconstant vce(oim)
     # save_results 48
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,2,0)
-        kwargs['seasonal_order'] = (3,2,2,4)
+        kwargs['order'] = (0, 2, 0)
+        kwargs['seasonal_order'] = (3, 2, 2, 4)
         super(Test_seasonal_arma_diff_seasonal_diff, cls).setup_class(47, *args, **kwargs)

     def test_results(self):
@@ -1779,12 +1779,12 @@ def test_results(self):

 class Test_seasonal_arma_diffuse(SARIMAXCoverageTest):
     # // SARMA and diffuse initialization
-    # arima wpi, sarima(3,0,2,4) noconstant vce(oim) diffuse
+    # arima wpi, sarima(3, 0, 2, 4) noconstant vce(oim) diffuse
     # save_results 49
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,2,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 2, 4)
         kwargs['decimal'] = 3
         kwargs['initialization'] = 'approximate_diffuse'
         kwargs['initial_variance'] = 1e9
@@ -1793,12 +1793,12 @@ def setup_class(cls, *args, **kwargs):

 class Test_seasonal_arma_exogenous(SARIMAXCoverageTest):
     # // SARMAX
-    # arima wpi x, sarima(3,0,2,4) noconstant vce(oim)
+    # arima wpi x, sarima(3, 0, 2, 4) noconstant vce(oim)
     # save_results 50
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (0,0,0)
-        kwargs['seasonal_order'] = (3,0,2,4)
+        kwargs['order'] = (0, 0, 0)
+        kwargs['seasonal_order'] = (3, 0, 2, 4)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         super(Test_seasonal_arma_exogenous, cls).setup_class(49, *args, **kwargs)
@@ -1806,12 +1806,12 @@ def setup_class(cls, *args, **kwargs):

 class Test_sarimax_exogenous(SARIMAXCoverageTest):
     # // SARIMAX and exogenous
-    # arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
+    # arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
     # save_results 51
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,2,2)
-        kwargs['seasonal_order'] = (3,2,2,4)
+        kwargs['order'] = (3, 2, 2)
+        kwargs['seasonal_order'] = (3, 2, 2, 4)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         super(Test_sarimax_exogenous, cls).setup_class(50, *args, **kwargs)
@@ -1826,12 +1826,12 @@ def test_results_params(self):

 class Test_sarimax_exogenous_not_hamilton(SARIMAXCoverageTest):
     # // SARIMAX and exogenous
-    # arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
+    # arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
     # save_results 51
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,2,2)
-        kwargs['seasonal_order'] = (3,2,2,4)
+        kwargs['order'] = (3, 2, 2)
+        kwargs['seasonal_order'] = (3, 2, 2, 4)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         kwargs['hamilton_representation'] = False
@@ -1841,12 +1841,12 @@ def setup_class(cls, *args, **kwargs):

 class Test_sarimax_exogenous_diffuse(SARIMAXCoverageTest):
     # // SARIMAX and exogenous diffuse
-    # arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim) diffuse
+    # arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim) diffuse
     # save_results 52
     @classmethod
     def setup_class(cls, *args, **kwargs):
-        kwargs['order'] = (3,2,2)
-        kwargs['seasonal_order'] = (3,2,2,4)
+        kwargs['order'] = (3, 2, 2)
+        kwargs['seasonal_order'] = (3, 2, 2, 4)
         endog = results_sarimax.wpi1_data
         kwargs['exog'] = (endog - np.floor(endog))**2
         kwargs['decimal'] = 2
@@ -1859,7 +1859,7 @@ class Test_arma_exog_trend_polynomial_missing(SARIMAXCoverageTest):
     # // ARMA and exogenous and trend polynomial and missing
     # gen wpi2 = wpi
     # replace wpi2 = . in 10/19
-    # arima wpi2 x c t3, arima(3,0,2) noconstant vce(oim)
+    # arima wpi2 x c t3, arima(3, 0, 2) noconstant vce(oim)
     # save_results 53
     @classmethod
     def setup_class(cls, *args, **kwargs):
@@ -1869,8 +1869,8 @@ def setup_class(cls, *args, **kwargs):
         endog[9:19] = np.nan
         endog = endog[1:] - endog[:-1]
         endog[9] = np.nan
-        kwargs['order'] = (3,0,2)
-        kwargs['trend'] = [0,0,0,1]
+        kwargs['order'] = (3, 0, 2)
+        kwargs['trend'] = [0, 0, 0, 1]
         kwargs['decimal'] = 1
         super(Test_arma_exog_trend_polynomial_missing, cls).setup_class(52, endog=endog, *args, **kwargs)
         # Modify true params to convert from mean to intercept form
@@ -1883,7 +1883,7 @@ def test_simple_time_varying():
     # are not time-varying, and in fact the regression fit is perfect
     endog = np.arange(100)*1.0
     exog = 2*endog
-    mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
+    mod = sarimax.SARIMAX(endog, exog=exog, order=(0, 0, 0), time_varying_regression=True, mle_regression=False)

     # Ignore the warning that MLE doesn't converge
     with warnings.catch_warnings():
@@ -1900,32 +1900,32 @@ def test_simple_time_varying():

 def test_invalid_time_varying():
-    assert_raises(ValueError, sarimax.SARIMAX, endog=[1,2,3], mle_regression=True, time_varying_regression=True)
+    assert_raises(ValueError, sarimax.SARIMAX, endog=[1, 2, 3], mle_regression=True, time_varying_regression=True)


 def test_manual_stationary_initialization():
     endog = results_sarimax.wpi1_data

     # Create the first model to compare against
-    mod1 = sarimax.SARIMAX(endog, order=(3,0,0))
-    res1 = mod1.filter([0.5,0.2,0.1,1])
+    mod1 = sarimax.SARIMAX(endog, order=(3, 0, 0))
+    res1 = mod1.filter([0.5, 0.2, 0.1, 1])

     # Create a second model with "known" initialization
-    mod2 = sarimax.SARIMAX(endog, order=(3,0,0))
+    mod2 = sarimax.SARIMAX(endog, order=(3, 0, 0))
     mod2.ssm.initialize_known(res1.filter_results.initial_state,
                               res1.filter_results.initial_state_cov)
-    res2 = mod2.filter([0.5,0.2,0.1,1])
+    res2 = mod2.filter([0.5, 0.2, 0.1, 1])

     # Create a third model with "known" initialization, but specified in kwargs
-    mod3 = sarimax.SARIMAX(endog, order=(3,0,0),
+    mod3 = sarimax.SARIMAX(endog, order=(3, 0, 0),
                            initialization='known',
                            initial_state=res1.filter_results.initial_state,
                            initial_state_cov=res1.filter_results.initial_state_cov)
-    res3 = mod3.filter([0.5,0.2,0.1,1])
+    res3 = mod3.filter([0.5, 0.2, 0.1, 1])

     # Create the fourth model with stationary initialization specified in kwargs
-    mod4 = sarimax.SARIMAX(endog, order=(3,0,0), initialization='stationary')
-    res4 = mod4.filter([0.5,0.2,0.1,1])
+    mod4 = sarimax.SARIMAX(endog, order=(3, 0, 0), initialization='stationary')
+    res4 = mod4.filter([0.5, 0.2, 0.1, 1])

     # Just test a couple of things to make sure the results are the same
     assert_almost_equal(res1.llf, res2.llf)
@@ -1945,29 +1945,29 @@ def test_manual_approximate_diffuse_initialization():
     endog = results_sarimax.wpi1_data

     # Create the first model to compare against
-    mod1 = sarimax.SARIMAX(endog, order=(3,0,0))
+    mod1 = sarimax.SARIMAX(endog, order=(3, 0, 0))
     mod1.ssm.initialize_approximate_diffuse(1e9)
-    res1 = mod1.filter([0.5,0.2,0.1,1])
+    res1 = mod1.filter([0.5, 0.2, 0.1, 1])

     # Create a second model with "known" initialization
-    mod2 = sarimax.SARIMAX(endog, order=(3,0,0))
+    mod2 = sarimax.SARIMAX(endog, order=(3, 0, 0))
     mod2.ssm.initialize_known(res1.filter_results.initial_state,
                               res1.filter_results.initial_state_cov)
-    res2 = mod2.filter([0.5,0.2,0.1,1])
+    res2 = mod2.filter([0.5, 0.2, 0.1, 1])

     # Create a third model with "known" initialization, but specified in kwargs
-    mod3 = sarimax.SARIMAX(endog, order=(3,0,0),
+    mod3 = sarimax.SARIMAX(endog, order=(3, 0, 0),
                            initialization='known',
                            initial_state=res1.filter_results.initial_state,
                            initial_state_cov=res1.filter_results.initial_state_cov)
-    res3 = mod3.filter([0.5,0.2,0.1,1])
+    res3 = mod3.filter([0.5, 0.2, 0.1, 1])

     # Create the fourth model with approximate diffuse initialization specified
     # in kwargs
-    mod4 = sarimax.SARIMAX(endog, order=(3,0,0),
+    mod4 = sarimax.SARIMAX(endog, order=(3, 0, 0),
                            initialization='approximate_diffuse',
                            initial_variance=1e9)
-    res4 = mod4.filter([0.5,0.2,0.1,1])
+    res4 = mod4.filter([0.5, 0.2, 0.1, 1])

     # Just test a couple of things to make sure the results are the same
     assert_almost_equal(res1.llf, res2.llf)
@@ -1986,8 +1986,8 @@ def test_manual_approximate_diffuse_initialization():
 def test_results():
     endog = results_sarimax.wpi1_data

-    mod = sarimax.SARIMAX(endog, order=(1,0,1))
-    res = mod.filter([0.5,-0.5,1], cov_type='oim')
+    mod = sarimax.SARIMAX(endog, order=(1, 0, 1))
+    res = mod.filter([0.5, -0.5, 1], cov_type='oim')

     assert_almost_equal(res.arroots, 2.)
     assert_almost_equal(res.maroots, 2.)
@@ -2073,7 +2073,7 @@ def test_predict_custom_index():

 def test_arima000():
-    # Test an ARIMA(0,0,0) with measurement error model (i.e. just estimating
just estimating # a variance term) np.random.seed(328423) nobs = 50 From 7b8a2daa566a2fbae8fc3f41ff4b5306a4f706d5 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Thu, 23 May 2019 17:27:53 -0700 Subject: [PATCH 2/4] CLN: over-indent E127 in statespace/tests --- .../tsa/statespace/tests/test_collapsed.py | 6 ++--- .../tsa/statespace/tests/test_kalman.py | 26 +++++++++---------- .../statespace/tests/test_representation.py | 4 +-- .../tsa/statespace/tests/test_sarimax.py | 10 +++---- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/statsmodels/tsa/statespace/tests/test_collapsed.py b/statsmodels/tsa/statespace/tests/test_collapsed.py index 1a9e5e318d0..822eba918b8 100644 --- a/statsmodels/tsa/statespace/tests/test_collapsed.py +++ b/statsmodels/tsa/statespace/tests/test_collapsed.py @@ -56,10 +56,10 @@ def setup_class(cls, dtype=float, alternate_timing=False, **kwargs): # Update matrices with test parameters cls.model['design'] = np.array([[0.5, 0.2], - [0, 0.8], - [1, -0.5]]) + [0, 0.8], + [1, -0.5]]) cls.model['transition'] = np.array([[0.4, 0.5], - [1, 0]]) + [1, 0]]) cls.model['obs_cov'] = np.diag([0.2, 1.1, 0.5]) cls.model['state_cov'] = np.diag([2., 1]) diff --git a/statsmodels/tsa/statespace/tests/test_kalman.py b/statsmodels/tsa/statespace/tests/test_kalman.py index 0a9ecff122b..a33f257ae41 100644 --- a/statsmodels/tsa/statespace/tests/test_kalman.py +++ b/statsmodels/tsa/statespace/tests/test_kalman.py @@ -89,23 +89,23 @@ def setup_class(cls, dtype=float, conserve_memory=0, loglikelihood_burn=0): cls.k_states = k_states = 4 # dimension of state space # transition matrix cls.transition = np.zeros((k_states, k_states, 1), - dtype=dtype, order="F") + dtype=dtype, order="F") cls.transition[([0, 0, 1, 1, 2, 3], - [0, 3, 1, 2, 1, 3], - [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1] + [0, 3, 1, 2, 1, 3], + [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1] # state intercept cls.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F") # selection matrix cls.selection = np.asfortranarray(np.eye(k_states)[:, :, None], - dtype=dtype) + dtype=dtype) # state covariance matrix cls.state_cov = np.zeros((k_states, k_states, 1), - dtype=dtype, order="F") + dtype=dtype, order="F") # Initialization: Diffuse priors cls.initial_state = np.zeros((k_states,), dtype=dtype, order="F") cls.initial_state_cov = np.asfortranarray(np.eye(k_states)*100, - dtype=dtype) + dtype=dtype) # Update matrices with given parameters (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array( @@ -336,7 +336,7 @@ def setup_class(cls, dtype=float, nforecast=100, conserve_memory=0): # Add missing observations to the end (to forecast) cls._obs = cls.obs cls.obs = np.array(np.r_[cls.obs[0, :], [np.nan]*nforecast], - ndmin=2, dtype=dtype, order="F") + ndmin=2, dtype=dtype, order="F") def test_filtered_state(self): assert_almost_equal( @@ -472,23 +472,23 @@ def setup_class(cls, dtype=float, conserve_memory=0, loglikelihood_burn=0): # transition matrix cls.transition = np.zeros((k_states, k_states, 1), - dtype=dtype, order="F") + dtype=dtype, order="F") cls.transition[([0, 0, 1, 1, 2, 3, 4, 5], - [0, 4, 1, 2, 1, 2, 4, 5], - [0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1] + [0, 4, 1, 2, 1, 2, 4, 5], + [0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1] # state intercept cls.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F") # selection matrix cls.selection = np.asfortranarray(np.eye(k_states)[:, :, None], - dtype=dtype) + dtype=dtype) # state covariance matrix cls.state_cov = np.zeros((k_states, k_states, 
1), - dtype=dtype, order="F") + dtype=dtype, order="F") # Initialization: Diffuse priors cls.initial_state = np.zeros((k_states,), dtype=dtype) cls.initial_state_cov = np.asfortranarray(np.eye(k_states)*100, - dtype=dtype) + dtype=dtype) # Update matrices with given parameters (sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec, diff --git a/statsmodels/tsa/statespace/tests/test_representation.py b/statsmodels/tsa/statespace/tests/test_representation.py index 031d1914d2d..3b2df58a18e 100644 --- a/statsmodels/tsa/statespace/tests/test_representation.py +++ b/statsmodels/tsa/statespace/tests/test_representation.py @@ -62,8 +62,8 @@ def setup_class(cls, dtype=float, **kwargs): cls.model.design[:, :, 0] = [1, 1, 0, 0] cls.model.transition[([0, 0, 1, 1, 2, 3], - [0, 3, 1, 2, 1, 3], - [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1] + [0, 3, 1, 2, 1, 3], + [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1] cls.model.selection = np.eye(cls.model.k_states) # Update matrices with given parameters diff --git a/statsmodels/tsa/statespace/tests/test_sarimax.py b/statsmodels/tsa/statespace/tests/test_sarimax.py index e2f48523932..4646b619136 100644 --- a/statsmodels/tsa/statespace/tests/test_sarimax.py +++ b/statsmodels/tsa/statespace/tests/test_sarimax.py @@ -48,8 +48,8 @@ def setup_class(cls): cls.result_a = cls.model_a.fit(disp=-1) cls.model_b = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c', - simple_differencing=True, - hamilton_representation=True) + simple_differencing=True, + hamilton_representation=True) cls.result_b = cls.model_b.fit(disp=-1) def test_loglike(self): @@ -97,7 +97,7 @@ class TestRealGDPARStata(object): def setup_class(cls): dlgdp = np.log(realgdp_results['value']).diff()[1:].values cls.model = sarimax.SARIMAX(dlgdp, order=(12, 0, 0), trend='n', - hamilton_representation=True) + hamilton_representation=True) # Estimated by Stata params = [ .40725515, .18782621, -.01514009, -.01027267, -.03642297, @@ -174,7 +174,7 @@ def setup_class(cls, true, *args, **kwargs): kwargs.setdefault('hamilton_representation', True) cls.model = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c', - *args, **kwargs) + *args, **kwargs) # Stata estimates the mean of the process, whereas SARIMAX estimates # the intercept of the process. Get the intercept. 
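# [Aside, illustration only -- not part of the patch] The E127 targeted
# by this patch is pycodestyle's "continuation line over-indented for
# visual indent": when the first argument follows the opening parenthesis
# on the same line, continuation lines must align exactly under that
# argument. A minimal sketch with hypothetical names; both forms are
# valid Python, but only the first passes the linter:
ok = dict(alpha=1,
          beta=2)           # aligned under the first argument
flagged = dict(alpha=1,
                   beta=2)  # over-indented for visual indent -> E127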
@@ -735,7 +735,7 @@ def setup_class(cls): ) cls.true_params = np.r_[true['params_exog'], true['params_ar'], - true['params_ma'], true['params_variance']] + true['params_ma'], true['params_variance']] cls.result = cls.model.filter(cls.true_params) From 0cb80861766f29b7fd1ae1d026bb60f2b6ca2301 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Thu, 23 May 2019 17:51:32 -0700 Subject: [PATCH 3/4] CLN: test_sarimax, test_save --- .../tsa/statespace/tests/test_sarimax.py | 293 ++++++++++++------ statsmodels/tsa/statespace/tests/test_save.py | 2 +- 2 files changed, 194 insertions(+), 101 deletions(-) diff --git a/statsmodels/tsa/statespace/tests/test_sarimax.py b/statsmodels/tsa/statespace/tests/test_sarimax.py index 4646b619136..a932e0216b0 100644 --- a/statsmodels/tsa/statespace/tests/test_sarimax.py +++ b/statsmodels/tsa/statespace/tests/test_sarimax.py @@ -6,28 +6,31 @@ """ from __future__ import division, absolute_import, print_function +import os +import warnings + from statsmodels.compat.platform import PLATFORM_WIN import numpy as np import pandas as pd import pytest -import os -import warnings from statsmodels.tsa.statespace import sarimax, tools from statsmodels.tsa import arima_model as arima from .results import results_sarimax from statsmodels.tools import add_constant -from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose +from numpy.testing import ( + assert_equal, assert_almost_equal, assert_raises, assert_allclose +) current_path = os.path.dirname(os.path.abspath(__file__)) -realgdp_path = 'results' + os.sep + 'results_realgdpar_stata.csv' +realgdp_path = os.path.join('results', 'results_realgdpar_stata.csv') realgdp_results = pd.read_csv(current_path + os.sep + realgdp_path) -coverage_path = 'results' + os.sep + 'results_sarimax_coverage.csv' -coverage_results = pd.read_csv(current_path + os.sep + coverage_path) +coverage_path = os.path.join('results', 'results_sarimax_coverage.csv') +coverage_results = pd.read_csv(os.path.join(current_path, coverage_path)) class TestSARIMAXStatsmodels(object): @@ -73,13 +76,14 @@ def test_mle(self): def test_bse(self): # Test the complex step approximated BSE values - bse = self.result_b._cov_params_approx(approx_complex_step=True).diagonal()**0.5 + cpa = self.result_b._cov_params_approx(approx_complex_step=True) + bse = cpa.diagonal()**0.5 assert_allclose(bse[1:-1], self.result_a.bse[1:], atol=1e-5) def test_t_test(self): import statsmodels.tools._testing as smt - #self.result_b.pvalues - #self.result_b._cache['pvalues'] += 1 # use to trigger failure + # to trigger failure, un-comment the following: + # self.result_b._cache['pvalues'] += 1 smt.check_ttest_tvalues(self.result_b) smt.check_ftest_pvalues(self.result_b) @@ -233,8 +237,9 @@ def test_bse_approx(self): # assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-1) # # finite difference, centered - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-3) # assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-3) @@ -246,7 +251,8 @@ def test_bse_oim(self): def test_bse_robust(self): robust_oim_bse = self.result.cov_params_robust_oim.diagonal()**0.5 - robust_approx_bse = self.result.cov_params_robust_approx.diagonal()**0.5 + cpra = self.result.cov_params_robust_approx + robust_approx_bse = cpra.diagonal()**0.5 
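# [Aside, illustration only -- not part of the patch] `cpra` above shows
# this patch's recurring fix for E501 (line longer than 79 characters):
# bind a long attribute chain to a short local name, then operate on the
# short name. The same bind-then-use shape appears later as `cpa`, `tps`
# and `k_exog`. Self-contained sketch with a made-up covariance matrix:
#     import numpy as np
#     cov = np.diag([4.0, 9.0])        # stand-in for a cov_params matrix
#     bse = cov.diagonal()**0.5        # standard errors: array([2., 3.])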
true_robust_bse = np.r_[ self.true['se_ar_robust'], self.true['se_ma_robust'] ] @@ -270,7 +276,7 @@ def setup_class(cls, **kwargs): results_sarimax.wpi1_diffuse['initial_variance'] ) super(TestARIMADiffuse, cls).setup_class(results_sarimax.wpi1_diffuse, - **kwargs) + **kwargs) def test_bse(self): # test defaults @@ -299,8 +305,9 @@ def test_bse_approx(self): # assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4) # # finite difference, centered : failure - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4) # assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4) @@ -366,7 +373,8 @@ def test_bse(self): assert_equal(self.result._cov_approx_centered, False) # default covariance type (opg) assert_allclose(self.result.bse[1], self.true['se_ar_opg'], atol=1e-6) - assert_allclose(self.result.bse[2:4], self.true['se_ma_opg'], atol=1e-5) + assert_allclose(self.result.bse[2:4], self.true['se_ma_opg'], + atol=1e-5) def test_bse_approx(self): # complex step @@ -387,8 +395,9 @@ def test_bse_approx(self): # assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-2) # # finite difference, centered - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-3) # assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-3) @@ -455,7 +464,8 @@ def test_bse(self): assert_equal(self.result._cov_approx_centered, False) # default covariance type (opg) assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6) - assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'], atol=1e-6) + assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'], + atol=1e-6) def test_bse_approx(self): # complex step @@ -473,13 +483,16 @@ def test_bse_approx(self): # bse = self.result._cov_params_approx( # approx_complex_step=False).diagonal()**0.5 # assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2) - # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-2) + # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], + # atol=1e-2) # # finite difference, centered - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4) - # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-4) + # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], + # atol=1e-4) def test_bse_oim(self): # OIM covariance type @@ -509,7 +522,8 @@ def test_bse(self): assert_equal(self.result._cov_approx_centered, False) # default covariance type (opg) assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6) - assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'], atol=1e-6) + assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'], + atol=1e-6) def test_bse_approx(self): # complex step @@ -527,13 +541,16 @@ def test_bse_approx(self): # bse = self.result._cov_params_approx( # approx_complex_step=False).diagonal()**0.5 # assert_allclose(bse[0], 
self.true['se_ma_oim'], atol=1e-2) - # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-2) + # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], + # atol=1e-2) # # finite difference, centered - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4) - # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-4) + # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], + # atol=1e-4) def test_bse_oim(self): # OIM covariance type @@ -579,7 +596,8 @@ def test_bse(self): assert_equal(self.result._cov_approx_centered, False) # default covariance type (opg) assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6) - assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'], atol=1e-6) + assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'], + atol=1e-6) def test_bse_approx(self): # complex step @@ -596,13 +614,16 @@ def test_bse_approx(self): # bse = self.result._cov_params_approx( # approx_complex_step=False).diagonal()**0.5 # assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2) - # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-2) + # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], + # atol=1e-2) # # finite difference, centered : failure with NaNs - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4) - # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-4) + # assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], + # atol=1e-4) def test_bse_oim(self): # OIM covariance type @@ -663,7 +684,8 @@ def test_bse(self): assert_equal(self.result._cov_approx_complex_step, True) assert_equal(self.result._cov_approx_centered, False) # default covariance type (opg) - assert_allclose(self.result.bse[0:2], self.true['se_exog_opg'], atol=1e-4) + assert_allclose(self.result.bse[0:2], self.true['se_exog_opg'], + atol=1e-4) assert_allclose(self.result.bse[2], self.true['se_ar_opg'], atol=1e-6) assert_allclose(self.result.bse[3], self.true['se_ma_opg'], atol=1e-6) @@ -689,8 +711,9 @@ def test_bse_approx(self): # assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-2) # # finite difference, centered - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[0], self.true['se_exog_oim'][0], rtol=1) # assert_allclose(bse[1], self.true['se_exog_oim'][1], atol=1e-2) # assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-2) @@ -791,15 +814,17 @@ def test_bse_approx(self): # with warnings.catch_warnings(): # warnings.simplefilter("ignore") - # # finite difference, non-centered : failure (catastrophic cancellation) + # # finite difference, non-centered : + # # failure (catastrophic cancellation) # bse = self.result._cov_params_approx( # approx_complex_step=False).diagonal()**0.5 # assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-3) # assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-2) # # finite difference, 
centered : failure (nan) - # bse = self.result._cov_params_approx( - # approx_complex_step=False, approx_centered=True).diagonal()**0.5 + # cpa = self.result._cov_params_approx( + # approx_complex_step=False, approx_centered=True) + # bse = cpa.diagonal()**0.5 # assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-3) # assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-3) @@ -928,7 +953,9 @@ def setup_class(cls, i, decimal=4, endog=None, *args, **kwargs): # Loglikelihood, parameters cls.true_loglike = coverage_results.loc[i]['llf'] - cls.true_params = np.array([float(x) for x in coverage_results.loc[i]['parameters'].split(',')]) + cls.true_params = np.array([ + float(x) for x in coverage_results.loc[i]['parameters'].split(',')] + ) # Stata reports the standard deviation; make it the variance cls.true_params[-1] = cls.true_params[-1]**2 @@ -953,36 +980,48 @@ def test_loglike(self): def test_start_params(self): # just a quick test that start_params isn't throwing an exception # (other than related to invertibility) - stat, inv = self.model.enforce_stationarity, self.model.enforce_invertibility + stat = self.model.enforce_stationarity + inv = self.model.enforce_invertibility self.model.enforce_stationarity = False self.model.enforce_invertibility = False self.model.start_params - self.model.enforce_stationarity, self.model.enforce_invertibility = stat, inv + self.model.enforce_stationarity = stat + self.model.enforce_invertibility = inv def test_transform_untransform(self): - stat, inv = self.model.enforce_stationarity, self.model.enforce_invertibility + model = self.model + stat, inv = model.enforce_stationarity, model.enforce_invertibility true_constrained = self.true_params # Sometimes the parameters given by Stata are not stationary and / or # invertible, so we need to skip those transformations for those # parameter sets - self.model.update(self.true_params) - contracted_polynomial_seasonal_ar = self.model.polynomial_seasonal_ar[self.model.polynomial_seasonal_ar.nonzero()] - self.model.enforce_stationarity = ( - (self.model.k_ar == 0 or tools.is_invertible(np.r_[1, -self.model.polynomial_ar[1:]])) and - (len(contracted_polynomial_seasonal_ar) <= 1 or tools.is_invertible(np.r_[1, -contracted_polynomial_seasonal_ar[1:]])) + model.update(self.true_params) + + par = model.polynomial_ar + psar = model.polynomial_seasonal_ar + contracted_psar = psar[psar.nonzero()] + model.enforce_stationarity = ( + (model.k_ar == 0 or tools.is_invertible(np.r_[1, -par[1:]])) and + (len(contracted_psar) <= 1 or + tools.is_invertible(np.r_[1, -contracted_psar[1:]])) ) - contracted_polynomial_seasonal_ma = self.model.polynomial_seasonal_ma[self.model.polynomial_seasonal_ma.nonzero()] - self.model.enforce_invertibility = ( - (self.model.k_ma == 0 or tools.is_invertible(np.r_[1, self.model.polynomial_ma[1:]])) and - (len(contracted_polynomial_seasonal_ma) <= 1 or tools.is_invertible(np.r_[1, contracted_polynomial_seasonal_ma[1:]])) + + pma = model.polynomial_ma + psma = model.polynomial_seasonal_ma + contracted_psma = psma[psma.nonzero()] + model.enforce_invertibility = ( + (model.k_ma == 0 or tools.is_invertible(np.r_[1, pma[1:]])) and + (len(contracted_psma) <= 1 or + tools.is_invertible(np.r_[1, contracted_psma[1:]])) ) - unconstrained = self.model.untransform_params(true_constrained) - constrained = self.model.transform_params(unconstrained) + unconstrained = model.untransform_params(true_constrained) + constrained = model.transform_params(unconstrained) assert_almost_equal(constrained, 
true_constrained, 4) - self.model.enforce_stationarity, self.model.enforce_invertibility = stat, inv + model.enforce_stationarity = stat + model.enforce_invertibility = inv def test_results(self): self.result = self.model.filter(self.true_params) @@ -1021,7 +1060,7 @@ def test_predict(self): # Test forecasts if self.model.k_exog == 0: predict = result.predict(start=self.model.nobs, - end=self.model.nobs+10, dynamic=-10) + end=self.model.nobs+10, dynamic=-10) assert_equal(predict.shape, (11,)) predict = result.predict(start=self.model.nobs, @@ -1033,7 +1072,8 @@ def test_predict(self): forecast = result.forecast(10) assert_equal(forecast.shape, (10,)) else: - exog = np.r_[[0]*self.model.k_exog*11].reshape(11, self.model.k_exog) + k_exog = self.model.k_exog + exog = np.r_[[0]*k_exog*11].reshape(11, k_exog) predict = result.predict(start=self.model.nobs, end=self.model.nobs+10, dynamic=-10, @@ -1044,7 +1084,7 @@ def test_predict(self): end=self.model.nobs+10, dynamic=-10, exog=exog) - exog = np.r_[[0]*self.model.k_exog].reshape(1, self.model.k_exog) + exog = np.r_[[0]*k_exog].reshape(1, k_exog) forecast = result.forecast(exog=exog) assert_equal(forecast.shape, (1,)) @@ -1091,8 +1131,10 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 0, 0) kwargs['trend'] = 'c' super(Test_ar_trend_c, cls).setup_class(1, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[0] = (1 - cls.true_params[1:4].sum()) * cls.true_params[0] + tps = cls.true_params + cls.true_params[0] = (1 - tps[1:4].sum()) * tps[0] class Test_ar_trend_ct(SARIMAXCoverageTest): @@ -1104,8 +1146,10 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 0, 0) kwargs['trend'] = 'ct' super(Test_ar_trend_ct, cls).setup_class(2, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] class Test_ar_trend_polynomial(SARIMAXCoverageTest): @@ -1117,8 +1161,10 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 0, 0) kwargs['trend'] = [1, 0, 0, 1] super(Test_ar_trend_polynomial, cls).setup_class(3, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] class Test_ar_diff(SARIMAXCoverageTest): @@ -1354,8 +1400,10 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 0, 2) kwargs['trend'] = 'c' super(Test_arma_trend_c, cls).setup_class(17, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:1] = (1 - cls.true_params[1:4].sum()) * cls.true_params[:1] + tps = cls.true_params + cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1] class Test_arma_trend_ct(SARIMAXCoverageTest): @@ -1367,8 +1415,10 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 0, 2) kwargs['trend'] = 'ct' super(Test_arma_trend_ct, cls).setup_class(18, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] class Test_arma_trend_polynomial(SARIMAXCoverageTest): @@ -1380,8 +1430,10 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 0, 2) kwargs['trend'] = [1, 0, 0, 1] 
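# [Aside, illustration only -- not part of the patch] The repeated
# "convert from mean to intercept form" adjustments in these setup
# methods follow from the AR identity: if
#     (1 - phi_1*L - ... - phi_p*L**p) * (y_t - mu) = eps_t,
# then the equivalent intercept is c = (1 - sum(phi_i)) * mu. Stata's
# arima reports the mean mu, while SARIMAX's trend constant is c. A
# worked check with made-up numbers:
#     phi = [0.5, 0.2, 0.1]        # hypothetical AR(3) coefficients
#     mu = 2.0                     # mean form
#     c = (1 - sum(phi)) * mu      # 0.2 * 2.0 = 0.4, intercept form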
super(Test_arma_trend_polynomial, cls).setup_class(19, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] class Test_arma_diff(SARIMAXCoverageTest): @@ -1413,7 +1465,8 @@ class Test_arma_diff_seasonal_diff(SARIMAXCoverageTest): def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 2, 2) kwargs['seasonal_order'] = (0, 2, 0, 4) - super(Test_arma_diff_seasonal_diff, cls).setup_class(22, *args, **kwargs) + super(Test_arma_diff_seasonal_diff, cls).setup_class( + 22, *args, **kwargs) class Test_arma_diffuse(SARIMAXCoverageTest): @@ -1459,7 +1512,8 @@ class Test_seasonal_ar_as_polynomial(SARIMAXCoverageTest): def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 0, 0) kwargs['seasonal_order'] = ([1, 1, 1], 0, 0, 4) - super(Test_seasonal_ar_as_polynomial, cls).setup_class(25, *args, **kwargs) + super(Test_seasonal_ar_as_polynomial, cls).setup_class( + 25, *args, **kwargs) class Test_seasonal_ar_trend_c(SARIMAXCoverageTest): @@ -1472,8 +1526,10 @@ def setup_class(cls, *args, **kwargs): kwargs['seasonal_order'] = (3, 0, 0, 4) kwargs['trend'] = 'c' super(Test_seasonal_ar_trend_c, cls).setup_class(26, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:1] = (1 - cls.true_params[1:4].sum()) * cls.true_params[:1] + tps = cls.true_params + cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1] class Test_seasonal_ar_trend_ct(SARIMAXCoverageTest): @@ -1487,7 +1543,8 @@ def setup_class(cls, *args, **kwargs): kwargs['trend'] = 'ct' super(Test_seasonal_ar_trend_ct, cls).setup_class(27, *args, **kwargs) # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] class Test_seasonal_ar_trend_polynomial(SARIMAXCoverageTest): @@ -1499,9 +1556,12 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 0, 0) kwargs['seasonal_order'] = (3, 0, 0, 4) kwargs['trend'] = [1, 0, 0, 1] - super(Test_seasonal_ar_trend_polynomial, cls).setup_class(28, *args, **kwargs) + super(Test_seasonal_ar_trend_polynomial, cls).setup_class( + 28, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] class Test_seasonal_ar_diff(SARIMAXCoverageTest): @@ -1523,7 +1583,8 @@ class Test_seasonal_ar_seasonal_diff(SARIMAXCoverageTest): def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 0, 0) kwargs['seasonal_order'] = (3, 2, 0, 4) - super(Test_seasonal_ar_seasonal_diff, cls).setup_class(30, *args, **kwargs) + super(Test_seasonal_ar_seasonal_diff, cls).setup_class( + 30, *args, **kwargs) class Test_seasonal_ar_diffuse(SARIMAXCoverageTest): @@ -1571,7 +1632,8 @@ class Test_seasonal_ma_as_polynomial(SARIMAXCoverageTest): def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 0, 0) kwargs['seasonal_order'] = (0, 0, [1, 1, 1], 4) - super(Test_seasonal_ma_as_polynomial, cls).setup_class(33, *args, **kwargs) + super(Test_seasonal_ma_as_polynomial, cls).setup_class( + 33, *args, **kwargs) class Test_seasonal_ma_trend_c(SARIMAXCoverageTest): @@ -1609,7 +1671,8 @@ def setup_class(cls, *args, **kwargs): kwargs['seasonal_order'] = (0, 0, 3, 4) kwargs['trend'] 
= [1, 0, 0, 1] kwargs['decimal'] = 3 - super(Test_seasonal_ma_trend_polynomial, cls).setup_class(36, *args, **kwargs) + super(Test_seasonal_ma_trend_polynomial, cls).setup_class( + 36, *args, **kwargs) class Test_seasonal_ma_diff(SARIMAXCoverageTest): @@ -1631,7 +1694,8 @@ class Test_seasonal_ma_seasonal_diff(SARIMAXCoverageTest): def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 0, 0) kwargs['seasonal_order'] = (0, 2, 3, 4) - super(Test_seasonal_ma_seasonal_diff, cls).setup_class(38, *args, **kwargs) + super(Test_seasonal_ma_seasonal_diff, cls).setup_class( + 38, *args, **kwargs) class Test_seasonal_ma_diffuse(SARIMAXCoverageTest): @@ -1681,8 +1745,10 @@ def setup_class(cls, *args, **kwargs): kwargs['seasonal_order'] = (3, 0, 2, 4) kwargs['trend'] = 'c' super(Test_seasonal_arma_trend_c, cls).setup_class(42, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:1] = (1 - cls.true_params[1:4].sum()) * cls.true_params[:1] + tps = cls.true_params + cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1] class Test_seasonal_arma_trend_ct(SARIMAXCoverageTest): @@ -1694,9 +1760,12 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 0, 0) kwargs['seasonal_order'] = (3, 0, 2, 4) kwargs['trend'] = 'ct' - super(Test_seasonal_arma_trend_ct, cls).setup_class(43, *args, **kwargs) + super(Test_seasonal_arma_trend_ct, cls).setup_class( + 43, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] class Test_seasonal_arma_trend_polynomial(SARIMAXCoverageTest): @@ -1709,9 +1778,12 @@ def setup_class(cls, *args, **kwargs): kwargs['seasonal_order'] = (3, 0, 2, 4) kwargs['trend'] = [1, 0, 0, 1] kwargs['decimal'] = 3 - super(Test_seasonal_arma_trend_polynomial, cls).setup_class(44, *args, **kwargs) + super(Test_seasonal_arma_trend_polynomial, cls).setup_class( + 44, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[:2] = (1 - cls.true_params[2:5].sum()) * cls.true_params[:2] + tps = cls.true_params + cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2] def test_results(self): self.result = self.model.filter(self.true_params) @@ -1748,7 +1820,8 @@ class Test_seasonal_arma_seasonal_diff(SARIMAXCoverageTest): def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 0, 0) kwargs['seasonal_order'] = (3, 2, 2, 4) - super(Test_seasonal_arma_seasonal_diff, cls).setup_class(46, *args, **kwargs) + super(Test_seasonal_arma_seasonal_diff, cls).setup_class( + 46, *args, **kwargs) class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest): @@ -1759,7 +1832,8 @@ class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest): def setup_class(cls, *args, **kwargs): kwargs['order'] = (0, 2, 0) kwargs['seasonal_order'] = (3, 2, 2, 4) - super(Test_seasonal_arma_diff_seasonal_diff, cls).setup_class(47, *args, **kwargs) + super(Test_seasonal_arma_diff_seasonal_diff, cls).setup_class( + 47, *args, **kwargs) def test_results(self): self.result = self.model.filter(self.true_params) @@ -1801,7 +1875,8 @@ def setup_class(cls, *args, **kwargs): kwargs['seasonal_order'] = (3, 0, 2, 4) endog = results_sarimax.wpi1_data kwargs['exog'] = (endog - np.floor(endog))**2 - super(Test_seasonal_arma_exogenous, cls).setup_class(49, *args, **kwargs) + super(Test_seasonal_arma_exogenous, cls).setup_class( + 49, *args, **kwargs) class 
Test_sarimax_exogenous(SARIMAXCoverageTest): @@ -1836,12 +1911,14 @@ def setup_class(cls, *args, **kwargs): kwargs['exog'] = (endog - np.floor(endog))**2 kwargs['hamilton_representation'] = False kwargs['simple_differencing'] = False - super(Test_sarimax_exogenous_not_hamilton, cls).setup_class(50, *args, **kwargs) + super(Test_sarimax_exogenous_not_hamilton, cls).setup_class( + 50, *args, **kwargs) class Test_sarimax_exogenous_diffuse(SARIMAXCoverageTest): # // SARIMAX and exogenous diffuse - # arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim) diffuse + # arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim) + # diffuse # save_results 52 @classmethod def setup_class(cls, *args, **kwargs): @@ -1852,7 +1929,8 @@ def setup_class(cls, *args, **kwargs): kwargs['decimal'] = 2 kwargs['initialization'] = 'approximate_diffuse' kwargs['initial_variance'] = 1e9 - super(Test_sarimax_exogenous_diffuse, cls).setup_class(51, *args, **kwargs) + super(Test_sarimax_exogenous_diffuse, cls).setup_class( + 51, *args, **kwargs) class Test_arma_exog_trend_polynomial_missing(SARIMAXCoverageTest): @@ -1872,9 +1950,12 @@ def setup_class(cls, *args, **kwargs): kwargs['order'] = (3, 0, 2) kwargs['trend'] = [0, 0, 0, 1] kwargs['decimal'] = 1 - super(Test_arma_exog_trend_polynomial_missing, cls).setup_class(52, endog=endog, *args, **kwargs) + super(Test_arma_exog_trend_polynomial_missing, cls).setup_class( + 52, endog=endog, *args, **kwargs) + # Modify true params to convert from mean to intercept form - cls.true_params[0] = (1 - cls.true_params[2:5].sum()) * cls.true_params[0] + tps = cls.true_params + cls.true_params[0] = (1 - tps[2:5].sum()) * tps[0] # Miscellaneous coverage tests @@ -1883,7 +1964,12 @@ def test_simple_time_varying(): # are not time-varying, and in fact the regression fit is perfect endog = np.arange(100)*1.0 exog = 2*endog - mod = sarimax.SARIMAX(endog, exog=exog, order=(0, 0, 0), time_varying_regression=True, mle_regression=False) + mod = sarimax.SARIMAX( + endog, + exog=exog, + order=(0, 0, 0), + time_varying_regression=True, + mle_regression=False) # Ignore the warning that MLE doesn't converge with warnings.catch_warnings(): @@ -1891,7 +1977,7 @@ def test_simple_time_varying(): res = mod.fit(disp=-1) # Test that the estimated variances of the errors are essentially zero - # 5 digits necessary to accommodate 32-bit numpy / scipy with OpenBLAS 0.2.18 + # 5 digits necessary to accommodate 32-bit numpy/scipy with OpenBLAS 0.2.18 assert_almost_equal(res.params, [0, 0], 5) # Test that the time-varying coefficients are all 0.5 (except the first @@ -1900,7 +1986,12 @@ def test_simple_time_varying(): def test_invalid_time_varying(): - assert_raises(ValueError, sarimax.SARIMAX, endog=[1, 2, 3], mle_regression=True, time_varying_regression=True) + assert_raises( + ValueError, + sarimax.SARIMAX, + endog=[1, 2, 3], + mle_regression=True, + time_varying_regression=True) def test_manual_stationary_initialization(): @@ -1917,10 +2008,11 @@ def test_manual_stationary_initialization(): res2 = mod2.filter([0.5, 0.2, 0.1, 1]) # Create a third model with "known" initialization, but specified in kwargs - mod3 = sarimax.SARIMAX(endog, order=(3, 0, 0), - initialization='known', - initial_state=res1.filter_results.initial_state, - initial_state_cov=res1.filter_results.initial_state_cov) + mod3 = sarimax.SARIMAX( + endog, order=(3, 0, 0), + initialization='known', + initial_state=res1.filter_results.initial_state, + initial_state_cov=res1.filter_results.initial_state_cov) res3 = 
mod3.filter([0.5, 0.2, 0.1, 1]) # Create the forth model with stationary initialization specified in kwargs @@ -1956,10 +2048,11 @@ def test_manual_approximate_diffuse_initialization(): res2 = mod2.filter([0.5, 0.2, 0.1, 1]) # Create a third model with "known" initialization, but specified in kwargs - mod3 = sarimax.SARIMAX(endog, order=(3, 0, 0), - initialization='known', - initial_state=res1.filter_results.initial_state, - initial_state_cov=res1.filter_results.initial_state_cov) + mod3 = sarimax.SARIMAX( + endog, order=(3, 0, 0), + initialization='known', + initial_state=res1.filter_results.initial_state, + initial_state_cov=res1.filter_results.initial_state_cov) res3 = mod3.filter([0.5, 0.2, 0.1, 1]) # Create the forth model with approximate diffuse initialization specified @@ -2053,6 +2146,7 @@ def test_misc_exog(): order=(1, 1, 0)) +@pytest.mark.smoke def test_datasets(): # Test that some unusual types of datasets work @@ -2060,7 +2154,7 @@ def test_datasets(): endog = np.random.binomial(1, 0.5, size=100) exog = np.random.binomial(1, 0.5, size=100) mod = sarimax.SARIMAX(endog, exog=exog, order=(1, 0, 0)) - res = mod.fit(disp=-1) + mod.fit(disp=-1) def test_predict_custom_index(): @@ -2209,7 +2303,6 @@ def check_concentrated_scale(filter_univariate=False): getattr(res_orig.filter_results, name)) # Test filter / smoother output - scale = res_conc.scale d = res_conc.loglikelihood_burn filter_attr = ['predicted_state', 'filtered_state', 'forecasts', diff --git a/statsmodels/tsa/statespace/tests/test_save.py b/statsmodels/tsa/statespace/tests/test_save.py index 566cf2fe718..5dcd6dc5dea 100644 --- a/statsmodels/tsa/statespace/tests/test_save.py +++ b/statsmodels/tsa/statespace/tests/test_save.py @@ -24,7 +24,7 @@ def temp_filename(): try: os.close(fd) os.unlink(filename) - except Exception as e: + except Exception: print("Couldn't close or delete file " "{filename}.".format(filename=filename)) From 388d1d4e3253693b330bbc53edcc9710fa70d3cd Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Thu, 23 May 2019 18:26:29 -0700 Subject: [PATCH 4/4] CLN: strict linting for statespace --- lint.sh | 5 +- .../tsa/statespace/tests/test_collapsed.py | 48 ++- .../tsa/statespace/tests/test_concentrated.py | 1 - .../statespace/tests/test_dynamic_factor.py | 342 ++++++++++++------ .../tests/test_exact_diffuse_filtering.py | 24 +- .../tsa/statespace/tests/test_mlemodel.py | 106 ++++-- .../tsa/statespace/tests/test_options.py | 36 +- .../tsa/statespace/tests/test_pickle.py | 3 +- .../statespace/tests/test_representation.py | 46 ++- 9 files changed, 419 insertions(+), 192 deletions(-) diff --git a/lint.sh b/lint.sh index 02397b86564..8486608583c 100755 --- a/lint.sh +++ b/lint.sh @@ -84,10 +84,7 @@ if [ "$LINT" == true ]; then statsmodels/tsa/regime_switching/ \ statsmodels/tsa/vector_ar/dynamic.py \ statsmodels/tsa/vector_ar/tests/results/ \ - statsmodels/tsa/statespace/*.py \ - statsmodels/tsa/statespace/tests/results/ \ - statsmodels/tsa/statespace/tests/test_var.py \ - statsmodels/tsa/statespace/tests/test_varmax.py \ + statsmodels/tsa/statespace/ \ statsmodels/tsa/tests/results/ \ statsmodels/conftest.py \ statsmodels/tools/sm_exceptions.py \ diff --git a/statsmodels/tsa/statespace/tests/test_collapsed.py b/statsmodels/tsa/statespace/tests/test_collapsed.py index 822eba918b8..0fb87ed1196 100644 --- a/statsmodels/tsa/statespace/tests/test_collapsed.py +++ b/statsmodels/tsa/statespace/tests/test_collapsed.py @@ -226,7 +226,8 @@ def setup_class(cls, dtype=float, **kwargs): class 
TestTrivariateConventionalAlternate(TestTrivariateConventional): @classmethod def setup_class(cls, *args, **kwargs): - super(TestTrivariateConventionalAlternate, cls).setup_class(alternate_timing=True, *args, **kwargs) + super(TestTrivariateConventionalAlternate, cls).setup_class( + alternate_timing=True, *args, **kwargs) def test_using_alterate(self): assert(self.model._kalman_filter.filter_timing == 1) @@ -235,7 +236,8 @@ def test_using_alterate(self): class TestTrivariateConventionalPartialMissing(Trivariate): @classmethod def setup_class(cls, dtype=float, **kwargs): - super(TestTrivariateConventionalPartialMissing, cls).setup_class(dtype, **kwargs) + super(TestTrivariateConventionalPartialMissing, cls).setup_class( + dtype, **kwargs) n_disturbance_variates = ( (cls.model.k_endog + cls.model.k_posdef) * cls.model.nobs ) @@ -261,10 +263,12 @@ def setup_class(cls, dtype=float, **kwargs): ) -class TestTrivariateConventionalPartialMissingAlternate(TestTrivariateConventionalPartialMissing): +class TestTrivariateConventionalPartialMissingAlternate( + TestTrivariateConventionalPartialMissing): @classmethod def setup_class(cls, *args, **kwargs): - super(TestTrivariateConventionalPartialMissingAlternate, cls).setup_class(alternate_timing=True, *args, **kwargs) + super(TestTrivariateConventionalPartialMissingAlternate, + cls).setup_class(alternate_timing=True, *args, **kwargs) def test_using_alterate(self): assert(self.model._kalman_filter.filter_timing == 1) @@ -273,7 +277,8 @@ def test_using_alterate(self): class TestTrivariateConventionalAllMissing(Trivariate): @classmethod def setup_class(cls, dtype=float, **kwargs): - super(TestTrivariateConventionalAllMissing, cls).setup_class(dtype, **kwargs) + super(TestTrivariateConventionalAllMissing, cls).setup_class( + dtype, **kwargs) n_disturbance_variates = ( (cls.model.k_endog + cls.model.k_posdef) * cls.model.nobs ) @@ -299,10 +304,12 @@ def setup_class(cls, dtype=float, **kwargs): ) -class TestTrivariateConventionalAllMissingAlternate(TestTrivariateConventionalAllMissing): +class TestTrivariateConventionalAllMissingAlternate( + TestTrivariateConventionalAllMissing): @classmethod def setup_class(cls, *args, **kwargs): - super(TestTrivariateConventionalAllMissingAlternate, cls).setup_class(alternate_timing=True, *args, **kwargs) + super(TestTrivariateConventionalAllMissingAlternate, cls).setup_class( + alternate_timing=True, *args, **kwargs) def test_using_alterate(self): assert(self.model._kalman_filter.filter_timing == 1) @@ -337,7 +344,8 @@ def setup_class(cls, dtype=float, **kwargs): class TestTrivariateUnivariateAlternate(TestTrivariateUnivariate): @classmethod def setup_class(cls, *args, **kwargs): - super(TestTrivariateUnivariateAlternate, cls).setup_class(alternate_timing=True, *args, **kwargs) + super(TestTrivariateUnivariateAlternate, cls).setup_class( + alternate_timing=True, *args, **kwargs) def test_using_alterate(self): assert(self.model._kalman_filter.filter_timing == 1) @@ -346,7 +354,8 @@ def test_using_alterate(self): class TestTrivariateUnivariatePartialMissing(Trivariate): @classmethod def setup_class(cls, dtype=float, **kwargs): - super(TestTrivariateUnivariatePartialMissing, cls).setup_class(dtype, **kwargs) + super(TestTrivariateUnivariatePartialMissing, cls).setup_class( + dtype, **kwargs) n_disturbance_variates = ( (cls.model.k_endog + cls.model.k_posdef) * cls.model.nobs ) @@ -372,10 +381,12 @@ def setup_class(cls, dtype=float, **kwargs): ) -class 
TestTrivariateUnivariatePartialMissingAlternate(TestTrivariateUnivariatePartialMissing): +class TestTrivariateUnivariatePartialMissingAlternate( + TestTrivariateUnivariatePartialMissing): @classmethod def setup_class(cls, *args, **kwargs): - super(TestTrivariateUnivariatePartialMissingAlternate, cls).setup_class(alternate_timing=True, *args, **kwargs) + super(TestTrivariateUnivariatePartialMissingAlternate, + cls).setup_class(alternate_timing=True, *args, **kwargs) def test_using_alterate(self): assert(self.model._kalman_filter.filter_timing == 1) @@ -384,7 +395,8 @@ def test_using_alterate(self): class TestTrivariateUnivariateAllMissing(Trivariate): @classmethod def setup_class(cls, dtype=float, **kwargs): - super(TestTrivariateUnivariateAllMissing, cls).setup_class(dtype, **kwargs) + super(TestTrivariateUnivariateAllMissing, cls).setup_class( + dtype, **kwargs) n_disturbance_variates = ( (cls.model.k_endog + cls.model.k_posdef) * cls.model.nobs ) @@ -410,10 +422,12 @@ def setup_class(cls, dtype=float, **kwargs): ) -class TestTrivariateUnivariateAllMissingAlternate(TestTrivariateUnivariateAllMissing): +class TestTrivariateUnivariateAllMissingAlternate( + TestTrivariateUnivariateAllMissing): @classmethod def setup_class(cls, *args, **kwargs): - super(TestTrivariateUnivariateAllMissingAlternate, cls).setup_class(alternate_timing=True, *args, **kwargs) + super(TestTrivariateUnivariateAllMissingAlternate, cls).setup_class( + alternate_timing=True, *args, **kwargs) def test_using_alterate(self): assert(self.model._kalman_filter.filter_timing == 1) @@ -453,8 +467,10 @@ def collapse(cls, obs, **kwargs): def setup_class(cls, which='mixed', *args, **kwargs): # Data dta = datasets.macrodata.load_pandas().data - dta.index = pd.date_range(start='1959-01-01', end='2009-7-01', freq='QS') - obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:] * 400 + dta.index = pd.date_range(start='1959-01-01', + end='2009-7-01', freq='QS') + levels = dta[['realgdp', 'realcons', 'realinv']] + obs = np.log(levels).diff().iloc[1:] * 400 if which == 'all': obs.iloc[:50, :] = np.nan diff --git a/statsmodels/tsa/statespace/tests/test_concentrated.py b/statsmodels/tsa/statespace/tests/test_concentrated.py index 4fd35a49d02..80fcc6e67cd 100644 --- a/statsmodels/tsa/statespace/tests/test_concentrated.py +++ b/statsmodels/tsa/statespace/tests/test_concentrated.py @@ -104,7 +104,6 @@ def test_concentrated_predict_sarimax(): nobs = 30 np.random.seed(28953) endog = np.random.normal(size=nobs) - kwargs = {} # Typical model out = get_sarimax_models(endog) diff --git a/statsmodels/tsa/statespace/tests/test_dynamic_factor.py b/statsmodels/tsa/statespace/tests/test_dynamic_factor.py index 077da271212..c789f5d0691 100644 --- a/statsmodels/tsa/statespace/tests/test_dynamic_factor.py +++ b/statsmodels/tsa/statespace/tests/test_dynamic_factor.py @@ -5,23 +5,23 @@ License: Simplified-BSD """ from __future__ import division, absolute_import, print_function +import os +import re +import warnings import numpy as np +from numpy.testing import assert_equal, assert_raises, assert_allclose import pandas as pd import pytest -import os -import re -import warnings from statsmodels.tsa.statespace import dynamic_factor from .results import results_varmax, results_dynamic_factor -from numpy.testing import assert_equal, assert_raises, assert_allclose from statsmodels.iolib.summary import forg current_path = os.path.dirname(os.path.abspath(__file__)) -output_path = 'results' + os.sep + 'results_dynamic_factor_stata.csv' -output_results = 
pd.read_csv(current_path + os.sep + output_path) +output_path = os.path.join('results', 'results_dynamic_factor_stata.csv') +output_results = pd.read_csv(os.path.join(current_path, output_path)) class CheckDynamicFactor(object): @@ -58,11 +58,13 @@ def test_params(self): # Similarly a smoke test for param_names assert_equal(len(self.model.start_params), len(self.model.param_names)) # Finally make sure the transform and untransform do their job - actual = self.model.transform_params(self.model.untransform_params(self.model.start_params)) + actual = self.model.transform_params( + self.model.untransform_params(self.model.start_params)) assert_allclose(actual, self.model.start_params) # Also in the case of enforce stationarity = False self.model.enforce_stationarity = False - actual = self.model.transform_params(self.model.untransform_params(self.model.start_params)) + actual = self.model.transform_params( + self.model.untransform_params(self.model.start_params)) self.model.enforce_stationarity = True assert_allclose(actual, self.model.start_params) @@ -72,14 +74,22 @@ def test_results(self, close_figures): warnings.simplefilter("ignore") self.results.summary() - # Test cofficient matrix creation (via a different, more direct, method) + # Test cofficient matrix creation + # (via a different, more direct, method) if self.model.factor_order > 0: - coefficients = np.array(self.results.params[self.model._params_factor_transition]).reshape(self.model.k_factors, self.model.k_factors * self.model.factor_order) + model = self.model + k_factors = model.k_factors + pft_params = self.results.params[model._params_factor_transition] + coefficients = np.array(pft_params).reshape( + k_factors, k_factors * model.factor_order) coefficient_matrices = np.array([ - coefficients[:self.model.k_factors, i*self.model.k_factors:(i+1)*self.model.k_factors] + coefficients[:self.model.k_factors, + i*self.model.k_factors:(i+1)*self.model.k_factors] for i in range(self.model.factor_order) ]) - assert_equal(self.results.coefficient_matrices_var, coefficient_matrices) + assert_equal( + self.results.coefficient_matrices_var, + coefficient_matrices) else: assert_equal(self.results.coefficient_matrices_var, None) @@ -100,11 +110,12 @@ def test_no_enforce(self): assert_allclose(results.llf, self.results.llf, rtol=1e-5) def test_mle(self, init_powell=True): - with warnings.catch_warnings(record=True) as w: + with warnings.catch_warnings(record=True): warnings.simplefilter('always') start_params = self.model.start_params if init_powell: - results = self.model.fit(method='powell', maxiter=100, disp=False) + results = self.model.fit(method='powell', + maxiter=100, disp=False) start_params = results.params results = self.model.fit(start_params, maxiter=1000, disp=False) results = self.model.fit(results.params, method='nm', maxiter=1000, @@ -133,7 +144,8 @@ def test_predict(self, **kwargs): def test_dynamic_predict(self, **kwargs): # Tests predict + dynamic predict + forecast assert_allclose( - self.results.predict(end='1982-10-01', dynamic='1961-01-01', **kwargs), + self.results.predict(end='1982-10-01', dynamic='1961-01-01', + **kwargs), self.true['dynamic_predict'], atol=1e-6) @@ -145,9 +157,12 @@ class TestDynamicFactor(CheckDynamicFactor): @classmethod def setup_class(cls): true = results_dynamic_factor.lutkepohl_dfm.copy() - true['predict'] = output_results.iloc[1:][['predict_dfm_1', 'predict_dfm_2', 'predict_dfm_3']] - true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_dfm_1', 'dyn_predict_dfm_2', 
'dyn_predict_dfm_3']] - super(TestDynamicFactor, cls).setup_class(true, k_factors=1, factor_order=2) + true['predict'] = output_results.iloc[1:][[ + 'predict_dfm_1', 'predict_dfm_2', 'predict_dfm_3']] + true['dynamic_predict'] = output_results.iloc[1:][[ + 'dyn_predict_dfm_1', 'dyn_predict_dfm_2', 'dyn_predict_dfm_3']] + super(TestDynamicFactor, cls).setup_class( + true, k_factors=1, factor_order=2) def test_bse_approx(self): bse = self.results._cov_params_approx().diagonal()**0.5 @@ -161,9 +176,12 @@ class TestDynamicFactor2(CheckDynamicFactor): @classmethod def setup_class(cls): true = results_dynamic_factor.lutkepohl_dfm2.copy() - true['predict'] = output_results.iloc[1:][['predict_dfm2_1', 'predict_dfm2_2', 'predict_dfm2_3']] - true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_dfm2_1', 'dyn_predict_dfm2_2', 'dyn_predict_dfm2_3']] - super(TestDynamicFactor2, cls).setup_class(true, k_factors=2, factor_order=1) + true['predict'] = output_results.iloc[1:][[ + 'predict_dfm2_1', 'predict_dfm2_2', 'predict_dfm2_3']] + true['dynamic_predict'] = output_results.iloc[1:][[ + 'dyn_predict_dfm2_1', 'dyn_predict_dfm2_2', 'dyn_predict_dfm2_3']] + super(TestDynamicFactor2, cls).setup_class( + true, k_factors=2, factor_order=1) def test_mle(self): # Stata's MLE on this model doesn't converge, so no reason to check @@ -195,50 +213,60 @@ def test_summary(self): params = self.true['params'] # Make sure we have the right number of tables - assert_equal(len(tables), 2 + self.model.k_endog + self.model.k_factors + 1) + assert_equal( + len(tables), + 2 + self.model.k_endog + self.model.k_factors + 1) # Check the model overview table - assert_equal(re.search(r'Model:.*DynamicFactor\(factors=2, order=1\)', tables[0]) is None, False) + assert re.search( + r'Model:.*DynamicFactor\(factors=2, order=1\)', + tables[0]) # For each endogenous variable, check the output for i in range(self.model.k_endog): offset_loading = self.model.k_factors * i - offset_var = self.model.k_factors * self.model.k_endog table = tables[i + 2] # -> Make sure we have the right table / table name name = self.model.endog_names[i] - assert_equal(re.search('Results for equation %s' % name, table) is None, False) + assert re.search('Results for equation %s' % name, table) # -> Make sure it's the right size assert_equal(len(table.split('\n')), 7) # -> Check that we have the right coefficients - assert_equal(re.search('loading.f1 +' + forg(params[offset_loading + 0], prec=4), table) is None, False) - assert_equal(re.search('loading.f2 +' + forg(params[offset_loading + 1], prec=4), table) is None, False) + assert re.search( + 'loading.f1 +' + forg(params[offset_loading + 0], prec=4), + table) + assert re.search( + 'loading.f2 +' + forg(params[offset_loading + 1], prec=4), + table) # For each factor, check the output for i in range(self.model.k_factors): - offset = self.model.k_endog * (self.model.k_factors + 1) + i * self.model.k_factors + offset = (self.model.k_endog * (self.model.k_factors + 1) + + i * self.model.k_factors) table = tables[self.model.k_endog + i + 2] # -> Make sure we have the right table / table name name = self.model.endog_names[i] - assert_equal(re.search('Results for factor equation f%d' % (i+1), table) is None, False) + assert re.search('Results for factor equation f%d' % (i+1), table) # -> Make sure it's the right size assert_equal(len(table.split('\n')), 7) # -> Check that we have the right coefficients - assert_equal(re.search('L1.f1 +' + forg(params[offset + 0], prec=4), table) is None, False) - 
assert_equal(re.search('L1.f2 +' + forg(params[offset + 1], prec=4), table) is None, False) + assert re.search('L1.f1 +' + forg(params[offset + 0], prec=4), + table) + assert re.search('L1.f2 +' + forg(params[offset + 1], prec=4), + table) # Check the Error covariance matrix output table = tables[2 + self.model.k_endog + self.model.k_factors] # -> Make sure we have the right table / table name name = self.model.endog_names[i] - assert_equal(re.search('Error covariance matrix', table) is None, False) + assert re.search('Error covariance matrix', table) # -> Make sure it's the right size assert_equal(len(table.split('\n')), 8) @@ -246,7 +274,9 @@ def test_summary(self): # -> Check that we have the right coefficients offset = self.model.k_endog * self.model.k_factors for i in range(self.model.k_endog): - assert_equal(re.search('sigma2.%s +%s' % (self.model.endog_names[i], forg(params[offset + i], prec=4)), table) is None, False) + iname = self.model.endog_names[i] + iparam = forg(params[offset + i], prec=4) + assert re.search('sigma2.%s +%s' % (iname, iparam), table) class TestDynamicFactor_exog1(CheckDynamicFactor): @@ -256,10 +286,17 @@ class TestDynamicFactor_exog1(CheckDynamicFactor): @classmethod def setup_class(cls): true = results_dynamic_factor.lutkepohl_dfm_exog1.copy() - true['predict'] = output_results.iloc[1:][['predict_dfm_exog1_1', 'predict_dfm_exog1_2', 'predict_dfm_exog1_3']] - true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_dfm_exog1_1', 'dyn_predict_dfm_exog1_2', 'dyn_predict_dfm_exog1_3']] + true['predict'] = output_results.iloc[1:][[ + 'predict_dfm_exog1_1', + 'predict_dfm_exog1_2', + 'predict_dfm_exog1_3']] + true['dynamic_predict'] = output_results.iloc[1:][[ + 'dyn_predict_dfm_exog1_1', + 'dyn_predict_dfm_exog1_2', + 'dyn_predict_dfm_exog1_3']] exog = np.ones((75, 1)) - super(TestDynamicFactor_exog1, cls).setup_class(true, k_factors=1, factor_order=1, exog=exog) + super(TestDynamicFactor_exog1, cls).setup_class( + true, k_factors=1, factor_order=1, exog=exog) def test_predict(self): exog = np.ones((16, 1)) @@ -282,21 +319,30 @@ class TestDynamicFactor_exog2(CheckDynamicFactor): @classmethod def setup_class(cls): true = results_dynamic_factor.lutkepohl_dfm_exog2.copy() - true['predict'] = output_results.iloc[1:][['predict_dfm_exog2_1', 'predict_dfm_exog2_2', 'predict_dfm_exog2_3']] - true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_dfm_exog2_1', 'dyn_predict_dfm_exog2_2', 'dyn_predict_dfm_exog2_3']] + true['predict'] = output_results.iloc[1:][[ + 'predict_dfm_exog2_1', + 'predict_dfm_exog2_2', + 'predict_dfm_exog2_3']] + true['dynamic_predict'] = output_results.iloc[1:][[ + 'dyn_predict_dfm_exog2_1', + 'dyn_predict_dfm_exog2_2', + 'dyn_predict_dfm_exog2_3']] exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]] - super(TestDynamicFactor_exog2, cls).setup_class(true, k_factors=1, factor_order=1, exog=exog) + super(TestDynamicFactor_exog2, cls).setup_class( + true, k_factors=1, factor_order=1, exog=exog) def test_bse_approx(self): bse = self.results._cov_params_approx().diagonal()**0.5 assert_allclose(bse**2, self.true['var_oim'], atol=1e-5) def test_predict(self): - exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]] + exog = np.c_[np.ones((16, 1)), + (np.arange(75, 75+16) + 2)[:, np.newaxis]] super(TestDynamicFactor_exog2, self).test_predict(exog=exog) def test_dynamic_predict(self): - exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]] + exog = np.c_[np.ones((16, 1)), + 
(np.arange(75, 75+16) + 2)[:, np.newaxis]] super(TestDynamicFactor_exog2, self).test_dynamic_predict(exog=exog) def test_summary(self): @@ -307,10 +353,13 @@ def test_summary(self): params = self.true['params'] # Make sure we have the right number of tables - assert_equal(len(tables), 2 + self.model.k_endog + self.model.k_factors + 1) + assert_equal( + len(tables), + 2 + self.model.k_endog + self.model.k_factors + 1) # Check the model overview table - assert_equal(re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)', tables[0]) is None, False) + assert re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)', + tables[0]) assert_equal(re.search(r'.*2 regressors', tables[0]) is None, False) # For each endogenous variable, check the output @@ -321,37 +370,45 @@ def test_summary(self): # -> Make sure we have the right table / table name name = self.model.endog_names[i] - assert_equal(re.search('Results for equation %s' % name, table) is None, False) + assert re.search('Results for equation %s' % name, table) # -> Make sure it's the right size assert_equal(len(table.split('\n')), 8) # -> Check that we have the right coefficients - assert_equal(re.search('loading.f1 +' + forg(params[offset_loading + 0], prec=4), table) is None, False) - assert_equal(re.search('beta.const +' + forg(params[offset_exog + i*2 + 0], prec=4), table) is None, False) - assert_equal(re.search('beta.x1 +' + forg(params[offset_exog + i*2 + 1], prec=4), table) is None, False) + assert re.search( + 'loading.f1 +' + forg(params[offset_loading + 0], prec=4), + table) + assert re.search( + 'beta.const +' + forg(params[offset_exog + i*2 + 0], prec=4), + table) + assert re.search( + 'beta.x1 +' + forg(params[offset_exog + i*2 + 1], prec=4), + table) # For each factor, check the output for i in range(self.model.k_factors): - offset = self.model.k_endog * (self.model.k_factors + 3) + i * self.model.k_factors + offset = (self.model.k_endog * (self.model.k_factors + 3) + + i * self.model.k_factors) table = tables[self.model.k_endog + i + 2] # -> Make sure we have the right table / table name name = self.model.endog_names[i] - assert_equal(re.search('Results for factor equation f%d' % (i+1), table) is None, False) + assert re.search('Results for factor equation f%d' % (i+1), table) # -> Make sure it's the right size assert_equal(len(table.split('\n')), 6) # -> Check that we have the right coefficients - assert_equal(re.search('L1.f1 +' + forg(params[offset + 0], prec=4), table) is None, False) + assert re.search('L1.f1 +' + forg(params[offset + 0], prec=4), + table) # Check the Error covariance matrix output table = tables[2 + self.model.k_endog + self.model.k_factors] # -> Make sure we have the right table / table name name = self.model.endog_names[i] - assert_equal(re.search('Error covariance matrix', table) is None, False) + assert re.search('Error covariance matrix', table) # -> Make sure it's the right size assert_equal(len(table.split('\n')), 8) @@ -359,7 +416,9 @@ def test_summary(self): # -> Check that we have the right coefficients offset = self.model.k_endog * (self.model.k_factors + 2) for i in range(self.model.k_endog): - assert_equal(re.search('sigma2.%s +%s' % (self.model.endog_names[i], forg(params[offset + i], prec=4)), table) is None, False) + iname = self.model.endog_names[i] + iparam = forg(params[offset + i], prec=4) + assert re.search('sigma2.%s +%s' % (iname, iparam), table) class TestDynamicFactor_general_errors(CheckDynamicFactor): @@ -373,16 +432,23 @@ class 
     @classmethod
     def setup_class(cls):
         true = results_dynamic_factor.lutkepohl_dfm_gen.copy()
-        true['predict'] = output_results.iloc[1:][['predict_dfm_gen_1', 'predict_dfm_gen_2', 'predict_dfm_gen_3']]
-        true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_dfm_gen_1', 'dyn_predict_dfm_gen_2', 'dyn_predict_dfm_gen_3']]
-        super(TestDynamicFactor_general_errors, cls).setup_class(true, k_factors=1, factor_order=1, error_var=True, error_order=1, error_cov_type='unstructured')
+        true['predict'] = output_results.iloc[1:][[
+            'predict_dfm_gen_1', 'predict_dfm_gen_2', 'predict_dfm_gen_3']]
+        true['dynamic_predict'] = output_results.iloc[1:][[
+            'dyn_predict_dfm_gen_1',
+            'dyn_predict_dfm_gen_2',
+            'dyn_predict_dfm_gen_3']]
+        super(TestDynamicFactor_general_errors, cls).setup_class(
+            true, k_factors=1, factor_order=1, error_var=True,
+            error_order=1, error_cov_type='unstructured')
 
     def test_bse_approx(self):
         bse = self.results._cov_params_approx().diagonal()
         assert_allclose(bse[:3], self.true['var_oim'][:3], atol=1e-5)
         assert_allclose(bse[-10:], self.true['var_oim'][-10:], atol=3e-4)
 
-    @pytest.mark.skip("Known failure, no sequence of optimizers has been found which can achieve the maximum.")
+    @pytest.mark.skip("Known failure, no sequence of optimizers has been "
+                      "found which can achieve the maximum.")
     def test_mle(self):
         # The following gets us to llf=546.53, which is still not good enough
         # llf = 300.842477412
@@ -411,11 +477,15 @@ def test_summary(self):
         params = self.true['params']
 
         # Make sure we have the right number of tables
-        assert_equal(len(tables), 2 + self.model.k_endog + self.model.k_factors + self.model.k_endog + 1)
+        assert_equal(
+            len(tables),
+            2 + self.model.k_endog + self.model.k_factors +
+            self.model.k_endog + 1)
 
         # Check the model overview table
-        assert_equal(re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)', tables[0]) is None, False)
-        assert_equal(re.search(r'.*VAR\(1\) errors', tables[0]) is None, False)
+        assert re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)',
+                         tables[0])
+        assert re.search(r'.*VAR\(1\) errors', tables[0])
 
         # For each endogenous variable, check the output
         for i in range(self.model.k_endog):
@@ -424,37 +494,42 @@ def test_summary(self):
 
             # -> Make sure we have the right table / table name
             name = self.model.endog_names[i]
-            assert_equal(re.search('Results for equation %s' % name, table) is None, False)
+            assert re.search('Results for equation %s' % name, table)
 
             # -> Make sure it's the right size
             assert_equal(len(table.split('\n')), 6)
 
             # -> Check that we have the right coefficients
-            assert_equal(re.search('loading.f1 +' + forg(params[offset_loading + 0], prec=4), table) is None, False)
+            pattern = 'loading.f1 +' + forg(params[offset_loading + 0], prec=4)
+            assert re.search(pattern, table)
 
         # For each factor, check the output
         for i in range(self.model.k_factors):
-            offset = self.model.k_endog * self.model.k_factors + 6 + i * self.model.k_factors
+            offset = (self.model.k_endog * self.model.k_factors +
                      6 + i * self.model.k_factors)
             table = tables[2 + self.model.k_endog + i]
 
             # -> Make sure we have the right table / table name
             name = self.model.endog_names[i]
-            assert_equal(re.search('Results for factor equation f%d' % (i+1), table) is None, False)
+            assert re.search('Results for factor equation f%d' % (i+1), table)
 
             # -> Make sure it's the right size
             assert_equal(len(table.split('\n')), 6)
 
             # -> Check that we have the right coefficients
-            assert_equal(re.search('L1.f1 +' + forg(params[offset + 0], prec=4), table) is None, False)
+            assert re.search('L1.f1 +' + forg(params[offset + 0], prec=4),
+                             table)
 
         # For each error equation, check the output
         for i in range(self.model.k_endog):
-            offset = self.model.k_endog * (self.model.k_factors + i) + 6 + self.model.k_factors
+            offset = (self.model.k_endog * (self.model.k_factors + i) +
+                      6 + self.model.k_factors)
             table = tables[2 + self.model.k_endog + self.model.k_factors + i]
 
             # -> Make sure we have the right table / table name
             name = self.model.endog_names[i]
-            assert_equal(re.search(r'Results for error equation e\(%s\)' % name, table) is None, False)
+            assert re.search(r'Results for error equation e\(%s\)' % name,
+                             table)
 
             # -> Make sure it's the right size
             assert_equal(len(table.split('\n')), 8)
@@ -462,26 +537,41 @@ def test_summary(self):
             # -> Check that we have the right coefficients
             for j in range(self.model.k_endog):
                 name = self.model.endog_names[j]
-                assert_equal(re.search(r'L1.e\(%s\) +%s' % (name, forg(params[offset + j], prec=4)), table) is None, False)
+                pattern = r'L1.e\(%s\) +%s' % (name, forg(params[offset + j],
+                                                          prec=4))
+                assert re.search(pattern, table)
 
         # Check the Error covariance matrix output
-        table = tables[2 + self.model.k_endog + self.model.k_factors + self.model.k_endog]
+        table = tables[2 + self.model.k_endog +
+                       self.model.k_factors + self.model.k_endog]
 
         # -> Make sure we have the right table / table name
         name = self.model.endog_names[i]
-        assert_equal(re.search('Error covariance matrix', table) is None, False)
+        assert re.search('Error covariance matrix', table)
 
         # -> Make sure it's the right size
         assert_equal(len(table.split('\n')), 11)
 
         # -> Check that we have the right coefficients
         offset = self.model.k_endog * self.model.k_factors
-        assert_equal(re.search('sqrt.var.dln_inv +' + forg(params[offset + 0], prec=4), table) is None, False)
-        assert_equal(re.search('sqrt.cov.dln_inv.dln_inc +' + forg(params[offset + 1], prec=4), table) is None, False)
-        assert_equal(re.search('sqrt.var.dln_inc +' + forg(params[offset + 2], prec=4), table) is None, False)
-        assert_equal(re.search('sqrt.cov.dln_inv.dln_consump +' + forg(params[offset + 3], prec=4), table) is None, False)
-        assert_equal(re.search('sqrt.cov.dln_inc.dln_consump +' + forg(params[offset + 4], prec=4), table) is None, False)
-        assert_equal(re.search('sqrt.var.dln_consump +' + forg(params[offset + 5], prec=4), table) is None, False)
+        assert re.search(
+            'sqrt.var.dln_inv +' + forg(params[offset + 0], prec=4),
+            table)
+        assert re.search(
+            'sqrt.cov.dln_inv.dln_inc +' + forg(params[offset + 1], prec=4),
+            table)
+        assert re.search(
+            'sqrt.var.dln_inc +' + forg(params[offset + 2], prec=4),
+            table)
+        assert re.search(
+            'sqrt.cov.dln_inv.dln_consump +' + forg(params[offset+3], prec=4),
+            table)
+        assert re.search(
+            'sqrt.cov.dln_inc.dln_consump +' + forg(params[offset+4], prec=4),
+            table)
+        assert re.search(
+            'sqrt.var.dln_consump +' + forg(params[offset + 5], prec=4),
+            table)
 
 
 class TestDynamicFactor_ar2_errors(CheckDynamicFactor):
@@ -495,21 +585,28 @@ class TestDynamicFactor_ar2_errors(CheckDynamicFactor):
     @classmethod
     def setup_class(cls):
         true = results_dynamic_factor.lutkepohl_dfm_ar2.copy()
-        true['predict'] = output_results.iloc[1:][['predict_dfm_ar2_1', 'predict_dfm_ar2_2', 'predict_dfm_ar2_3']]
-        true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_dfm_ar2_1', 'dyn_predict_dfm_ar2_2', 'dyn_predict_dfm_ar2_3']]
-        super(TestDynamicFactor_ar2_errors, cls).setup_class(true, k_factors=1, factor_order=1, error_order=2)
+        true['predict'] = output_results.iloc[1:][[
+            'predict_dfm_ar2_1', 'predict_dfm_ar2_2', 'predict_dfm_ar2_3']]
+        true['dynamic_predict'] = output_results.iloc[1:][[
+            'dyn_predict_dfm_ar2_1',
+            'dyn_predict_dfm_ar2_2',
+            'dyn_predict_dfm_ar2_3']]
+        super(TestDynamicFactor_ar2_errors, cls).setup_class(
+            true, k_factors=1, factor_order=1, error_order=2)
 
     def test_bse_approx(self):
         bse = self.results._cov_params_approx().diagonal()
         assert_allclose(bse, self.true['var_oim'], atol=1e-5)
 
     def test_mle(self):
-        with warnings.catch_warnings(record=True) as w:
+        with warnings.catch_warnings(record=True):
             # Depending on the system, this test can reach a greater precision,
             # but for cross-platform results keep it at 1e-2
             mod = self.model
             res1 = mod.fit(maxiter=100, optim_score='approx', disp=False)
-            res = mod.fit(res1.params, method='nm', maxiter=10000, optim_score='approx', disp=False)
+            res = mod.fit(
+                res1.params, method='nm', maxiter=10000,
+                optim_score='approx', disp=False)
             assert_allclose(res.llf, self.results.llf, atol=1e-2)
 
 
@@ -521,10 +618,16 @@ class TestDynamicFactor_scalar_error(CheckDynamicFactor):
     @classmethod
     def setup_class(cls):
         true = results_dynamic_factor.lutkepohl_dfm_scalar.copy()
-        true['predict'] = output_results.iloc[1:][['predict_dfm_scalar_1', 'predict_dfm_scalar_2', 'predict_dfm_scalar_3']]
-        true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_dfm_scalar_1', 'dyn_predict_dfm_scalar_2', 'dyn_predict_dfm_scalar_3']]
+        true['predict'] = output_results.iloc[1:][[
+            'predict_dfm_scalar_1', 'predict_dfm_scalar_2',
+            'predict_dfm_scalar_3']]
+        true['dynamic_predict'] = output_results.iloc[1:][[
+            'dyn_predict_dfm_scalar_1', 'dyn_predict_dfm_scalar_2',
+            'dyn_predict_dfm_scalar_3']]
         exog = np.ones((75, 1))
-        super(TestDynamicFactor_scalar_error, cls).setup_class(true, k_factors=1, factor_order=1, exog=exog, error_cov_type='scalar')
+        super(TestDynamicFactor_scalar_error, cls).setup_class(
+            true, k_factors=1, factor_order=1,
+            exog=exog, error_cov_type='scalar')
 
     def test_bse_approx(self):
         bse = self.results._cov_params_approx().diagonal()
@@ -536,7 +639,8 @@ def test_predict(self):
 
     def test_dynamic_predict(self):
         exog = np.ones((16, 1))
-        super(TestDynamicFactor_scalar_error, self).test_dynamic_predict(exog=exog)
+        super(TestDynamicFactor_scalar_error,
+              self).test_dynamic_predict(exog=exog)
 
 
 class TestStaticFactor(CheckDynamicFactor):
@@ -546,9 +650,12 @@ class TestStaticFactor(CheckDynamicFactor):
     @classmethod
     def setup_class(cls):
         true = results_dynamic_factor.lutkepohl_sfm.copy()
-        true['predict'] = output_results.iloc[1:][['predict_sfm_1', 'predict_sfm_2', 'predict_sfm_3']]
-        true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_sfm_1', 'dyn_predict_sfm_2', 'dyn_predict_sfm_3']]
-        super(TestStaticFactor, cls).setup_class(true, k_factors=1, factor_order=0)
+        true['predict'] = output_results.iloc[1:][[
+            'predict_sfm_1', 'predict_sfm_2', 'predict_sfm_3']]
+        true['dynamic_predict'] = output_results.iloc[1:][[
+            'dyn_predict_sfm_1', 'dyn_predict_sfm_2', 'dyn_predict_sfm_3']]
+        super(TestStaticFactor, cls).setup_class(
+            true, k_factors=1, factor_order=0)
 
     def test_bse_approx(self):
         bse = self.results._cov_params_approx().diagonal()
@@ -568,21 +675,27 @@ class TestSUR(CheckDynamicFactor):
     @classmethod
     def setup_class(cls):
         true = results_dynamic_factor.lutkepohl_sur.copy()
-        true['predict'] = output_results.iloc[1:][['predict_sur_1', 'predict_sur_2', 'predict_sur_3']]
-        true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_sur_1', 'dyn_predict_sur_2', 'dyn_predict_sur_3']]
+        true['predict'] = output_results.iloc[1:][[
+            'predict_sur_1', 'predict_sur_2', 'predict_sur_3']]
+        true['dynamic_predict'] = output_results.iloc[1:][[
+            'dyn_predict_sur_1', 'dyn_predict_sur_2', 'dyn_predict_sur_3']]
         exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]]
-        super(TestSUR, cls).setup_class(true, k_factors=0, factor_order=0, exog=exog, error_cov_type='unstructured')
+        super(TestSUR, cls).setup_class(
+            true, k_factors=0, factor_order=0,
+            exog=exog, error_cov_type='unstructured')
 
     def test_bse_approx(self):
         bse = self.results._cov_params_approx().diagonal()
         assert_allclose(bse[:6], self.true['var_oim'][:6], atol=1e-5)
 
     def test_predict(self):
-        exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]]
+        exog = np.c_[np.ones((16, 1)),
+                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
         super(TestSUR, self).test_predict(exog=exog)
 
     def test_dynamic_predict(self):
-        exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]]
+        exog = np.c_[np.ones((16, 1)),
+                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
         super(TestSUR, self).test_dynamic_predict(exog=exog)
 
 
@@ -595,22 +708,31 @@ class TestSUR_autocorrelated_errors(CheckDynamicFactor):
     @classmethod
     def setup_class(cls):
         true = results_dynamic_factor.lutkepohl_sur_auto.copy()
-        true['predict'] = output_results.iloc[1:][['predict_sur_auto_1', 'predict_sur_auto_2']]
-        true['dynamic_predict'] = output_results.iloc[1:][['dyn_predict_sur_auto_1', 'dyn_predict_sur_auto_2']]
+        true['predict'] = output_results.iloc[1:][[
+            'predict_sur_auto_1', 'predict_sur_auto_2']]
+        true['dynamic_predict'] = output_results.iloc[1:][[
+            'dyn_predict_sur_auto_1', 'dyn_predict_sur_auto_2']]
         exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]]
-        super(TestSUR_autocorrelated_errors, cls).setup_class(true, k_factors=0, factor_order=0, exog=exog, error_order=1, error_var=True, error_cov_type='diagonal', included_vars=['dln_inv', 'dln_inc'])
+        super(TestSUR_autocorrelated_errors, cls).setup_class(
+            true, k_factors=0, factor_order=0, exog=exog,
+            error_order=1, error_var=True,
+            error_cov_type='diagonal',
+            included_vars=['dln_inv', 'dln_inc'])
 
     def test_bse_approx(self):
         bse = self.results._cov_params_approx().diagonal()
         assert_allclose(bse, self.true['var_oim'], atol=1e-5)
 
     def test_predict(self):
-        exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]]
+        exog = np.c_[np.ones((16, 1)),
+                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
         super(TestSUR_autocorrelated_errors, self).test_predict(exog=exog)
 
     def test_dynamic_predict(self):
-        exog = np.c_[np.ones((16, 1)), (np.arange(75, 75+16) + 2)[:, np.newaxis]]
-        super(TestSUR_autocorrelated_errors, self).test_dynamic_predict(exog=exog)
+        exog = np.c_[np.ones((16, 1)),
+                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
+        super(TestSUR_autocorrelated_errors,
+              self).test_dynamic_predict(exog=exog)
 
     def test_mle(self):
         super(TestSUR_autocorrelated_errors, self).test_mle(init_powell=False)
@@ -621,23 +743,35 @@ def test_misspecification():
     endog = np.arange(20).reshape(10, 2)
 
     # Too few endog
-    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog[:, 0], k_factors=0, factor_order=0)
+    assert_raises(
+        ValueError,
+        dynamic_factor.DynamicFactor, endog[:, 0], k_factors=0, factor_order=0)
 
     # Too many factors
-    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog, k_factors=2, factor_order=1)
+    assert_raises(
+        ValueError,
+        dynamic_factor.DynamicFactor, endog, k_factors=2, factor_order=1)
 
     # Bad error_cov_type specification
-    assert_raises(ValueError, dynamic_factor.DynamicFactor, endog, k_factors=1, factor_order=1, order=(1, 0), error_cov_type='')
+    assert_raises(
+        ValueError,
+        dynamic_factor.DynamicFactor,
+        endog,
+        k_factors=1, factor_order=1, order=(1, 0), error_cov_type='')
 
 
 def test_miscellaneous():
     # Initialization with 1-dimensional exog array
     exog = np.arange(75)
     mod = CheckDynamicFactor()
-    mod.setup_class(true=None, k_factors=1, factor_order=1, exog=exog, filter=False)
-    exog = pd.Series(np.arange(75), index=pd.date_range(start='1960-04-01', end='1978-10-01', freq='QS'))
+    mod.setup_class(true=None, k_factors=1, factor_order=1,
+                    exog=exog, filter=False)
+    exog = pd.Series(np.arange(75),
+                     index=pd.date_range(start='1960-04-01',
+                                         end='1978-10-01', freq='QS'))
     mod = CheckDynamicFactor()
-    mod.setup_class(true=None, k_factors=1, factor_order=1, exog=exog, filter=False)
+    mod.setup_class(
+        true=None, k_factors=1, factor_order=1, exog=exog, filter=False)
 
 
 def test_predict_custom_index():
diff --git a/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py b/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
index 056d029bc48..9cc786bec02 100644
--- a/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
+++ b/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
@@ -178,8 +178,8 @@ def model_common_level(endog=None, params=None, restricted=False):
 
 def model_var1(endog=None, params=None, measurement_error=False, init=None):
     if endog is None:
-        endog = (np.log(
-            macrodata[['realgdp', 'realcons']]).iloc[:21].diff().iloc[1:] * 400)
+        levels = macrodata[['realgdp', 'realcons']]
+        endog = np.log(levels).iloc[:21].diff().iloc[1:] * 400
     if params is None:
         params = np.r_[0.5, 0.3, 0.2, 0.4, 2**0.5, 0, 3**0.5]
     if measurement_error:
@@ -199,8 +199,8 @@ def model_dfm(endog=None, params=None, factor_order=2):
     if endog is None:
-        endog = (np.log(
-            macrodata[['realgdp', 'realcons']]).iloc[:21].diff().iloc[1:] * 400)
+        levels = macrodata[['realgdp', 'realcons']]
+        endog = np.log(levels).iloc[:21].diff().iloc[1:] * 400
     if params is None:
         params = np.r_[0.5, 1., 1.5, 2., 0.9, 0.1]
@@ -623,8 +623,10 @@ def setup_class(cls, *args, **kwargs):
         # Get the approximate diffuse results
         kappa = cls.approximate_diffuse_variance
         if init_approx is None:
-            init_approx = Initialization(cls.ssm.k_states,
-                'approximate_diffuse', approximate_diffuse_variance=kappa)
+            init_approx = Initialization(
+                cls.ssm.k_states,
+                'approximate_diffuse',
+                approximate_diffuse_variance=kappa)
         cls.ssm.initialize(init_approx)
         cls.results_b = cls.ssm.smooth()
@@ -735,7 +737,8 @@ def test_smoothed_measurement_disturbance_cov(self, rtol_diffuse=None):
 
 class TestVAR1MeasurementError_KFAS(CheckKFASMixin, CheckVAR1MeasurementError):
-    results_path = os.path.join(current_path, 'results',
+    results_path = os.path.join(
+        current_path, 'results',
         'results_exact_initial_var1_measurement_error_R.csv')
 
 
@@ -745,8 +748,8 @@ class CheckVAR1Missing(CheckVAR1):
     @classmethod
     def setup_class(cls, **kwargs):
-        endog = (np.log(
-            macrodata[['realgdp', 'realcons']]).iloc[:21].diff().iloc[1:] * 400)
+        levels = macrodata[['realgdp', 'realcons']]
+        endog = np.log(levels).iloc[:21].diff().iloc[1:] * 400
         endog.iloc[0:5, 0] = np.nan
         endog.iloc[8:12, :] = np.nan
         kwargs['endog'] = endog
@@ -970,7 +973,8 @@ def test_irrelevant_state():
 
     # Approximate diffuse version
     mod = UnobservedComponents(endog, 'llevel', **spec)
-    mod.ssm.initialization = Initialization(mod.k_states, 'approximate_diffuse')
+    mod.ssm.initialization = Initialization(mod.k_states,
+                                            'approximate_diffuse')
     res = mod.smooth([3.4, 7.2, 0.01, 0.01])
 
     # Exact diffuse version
diff --git a/statsmodels/tsa/statespace/tests/test_mlemodel.py b/statsmodels/tsa/statespace/tests/test_mlemodel.py
index b16403e5d5e..a3a3f8ec12b 100644
--- a/statsmodels/tsa/statespace/tests/test_mlemodel.py
+++ b/statsmodels/tsa/statespace/tests/test_mlemodel.py
@@ -6,18 +6,22 @@
 """
 from __future__ import division, absolute_import, print_function
 
-import numpy as np
-import pandas as pd
 import os
 import re
-
 import warnings
+
+import numpy as np
+import pandas as pd
+import pytest
+
 from statsmodels.tsa.statespace import (sarimax, varmax, kalman_filter,
                                         kalman_smoother)
 from statsmodels.tsa.statespace.mlemodel import MLEModel, MLEResultsWrapper
 from statsmodels.datasets import nile
-from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
-from statsmodels.tsa.statespace.tests.results import results_sarimax, results_var_misc
+from numpy.testing import (
+    assert_almost_equal, assert_equal, assert_allclose, assert_raises)
+from statsmodels.tsa.statespace.tests.results import (
+    results_sarimax, results_var_misc)
 
 current_path = os.path.dirname(os.path.abspath(__file__))
 
@@ -40,7 +44,9 @@ def get_dummy_mod(fit=True, pandas=False):
         endog = pd.Series(endog, index=index)
         exog = pd.Series(exog, index=index)
 
-    mod = sarimax.SARIMAX(endog, exog=exog, order=(0, 0, 0), time_varying_regression=True, mle_regression=False)
+    mod = sarimax.SARIMAX(
+        endog, exog=exog, order=(0, 0, 0),
+        time_varying_regression=True, mle_regression=False)
 
     if fit:
         with warnings.catch_warnings():
@@ -113,7 +119,9 @@ def test_wrapping():
 
     # The defaults are as follows:
     assert_equal(mod.ssm.filter_method, kalman_filter.FILTER_CONVENTIONAL)
-    assert_equal(mod.ssm.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
+    assert_equal(
+        mod.ssm.stability_method,
+        kalman_filter.STABILITY_FORCE_SYMMETRY)
     assert_equal(mod.ssm.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
     assert_equal(mod.ssm.smoother_output, kalman_smoother.SMOOTHER_ALL)
 
@@ -166,8 +174,10 @@ def test_fit_misc():
     # Test optim_hessian={'opg','oim','approx'}
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
-        res1 = mod.fit(method='ncg', disp=0, optim_hessian='opg', optim_complex_step=False)
-        res2 = mod.fit(method='ncg', disp=0, optim_hessian='oim', optim_complex_step=False)
+        res1 = mod.fit(method='ncg', disp=0, optim_hessian='opg',
+                       optim_complex_step=False)
+        res2 = mod.fit(method='ncg', disp=0, optim_hessian='oim',
+                       optim_complex_step=False)
 
     # Check that the Hessians broadly result in the same optimum
     assert_allclose(res1.llf, res2.llf, rtol=1e-2)
@@ -177,10 +187,11 @@ def test_fit_misc():
         warnings.simplefilter("ignore")
         res_params = mod.fit(disp=-1, return_params=True)
 
-    # 5 digits necessary to accommodate 32-bit numpy / scipy with OpenBLAS 0.2.18
+    # 5 digits necessary to accommodate 32-bit numpy/scipy with OpenBLAS 0.2.18
     assert_almost_equal(res_params, [0, 0], 5)
 
 
+@pytest.mark.smoke
 def test_score_misc():
     mod, res = get_dummy_mod()
 
@@ -266,7 +277,7 @@ def partial_transform_sigma2(sigma2):
     approx_fd_centered = (
         mod.score(uparams, transformed=False,
                   approx_complex_step=False, approx_centered=True))
-    assert_allclose(approx_fd, analytic_score, atol=1e-5)
+    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
 
     harvey_cs = mod.score(uparams, transformed=False,
                           method='harvey', approx_complex_step=True)
@@ -307,26 +318,57 @@ def test_cov_params():
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         res = mod.fit(res.params, disp=-1, cov_type='none')
-        assert_equal(res.cov_kwds['description'], 'Covariance matrix not calculated.')
+        assert_equal(
+            res.cov_kwds['description'],
+            'Covariance matrix not calculated.')
+
         res = mod.fit(res.params, disp=-1, cov_type='approx')
         assert_equal(res.cov_type, 'approx')
-        assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical (complex-step) differentiation.')
+        assert_equal(
+            res.cov_kwds['description'],
+            'Covariance matrix calculated using numerical (complex-step) '
+            'differentiation.')
+
         res = mod.fit(res.params, disp=-1, cov_type='oim')
         assert_equal(res.cov_type, 'oim')
-        assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the observed information matrix (complex-step) described in Harvey (1989).')
+        assert_equal(
+            res.cov_kwds['description'],
+            'Covariance matrix calculated using the observed information '
+            'matrix (complex-step) described in Harvey (1989).')
+
         res = mod.fit(res.params, disp=-1, cov_type='opg')
         assert_equal(res.cov_type, 'opg')
-        assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the outer product of gradients (complex-step).')
+        assert_equal(
+            res.cov_kwds['description'],
+            'Covariance matrix calculated using the outer product of '
+            'gradients (complex-step).')
+
         res = mod.fit(res.params, disp=-1, cov_type='robust')
         assert_equal(res.cov_type, 'robust')
-        assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
+        assert_equal(
+            res.cov_kwds['description'],
+            'Quasi-maximum likelihood covariance matrix used for robustness '
+            'to some misspecifications; calculated using the observed '
+            'information matrix (complex-step) described in Harvey (1989).')
+
         res = mod.fit(res.params, disp=-1, cov_type='robust_oim')
         assert_equal(res.cov_type, 'robust_oim')
-        assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
+        assert_equal(
+            res.cov_kwds['description'],
+            'Quasi-maximum likelihood covariance matrix used for robustness '
+            'to some misspecifications; calculated using the observed '
+            'information matrix (complex-step) described in Harvey (1989).')
+
         res = mod.fit(res.params, disp=-1, cov_type='robust_approx')
         assert_equal(res.cov_type, 'robust_approx')
-        assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using numerical (complex-step) differentiation.')
-        assert_raises(NotImplementedError, mod.fit, res.params, disp=-1, cov_type='invalid_cov_type')
+        assert_equal(
+            res.cov_kwds['description'],
+            'Quasi-maximum likelihood covariance matrix used for robustness '
+            'to some misspecifications; calculated using numerical '
+            '(complex-step) differentiation.')
+
+        with pytest.raises(NotImplementedError):
+            mod.fit(res.params, disp=-1, cov_type='invalid_cov_type')
 
 
 def test_transform():
@@ -451,7 +493,8 @@ def test_forecast():
     res = mod.filter([])
 
     assert_allclose(res.forecast(steps=10), np.ones((10,)) * 2)
     assert_allclose(res.forecast(steps='1960-12-01'), np.ones((10,)) * 2)
-    assert_allclose(res.get_forecast(steps=10).predicted_mean, np.ones((10,)) * 2)
+    assert_allclose(res.get_forecast(steps=10).predicted_mean,
+                    np.ones((10,)) * 2)
 
 
 def test_summary():
@@ -715,17 +758,21 @@ def test_diagnostics():
     desired = res.test_heteroskedasticity(method='breakvar')
     assert_allclose(actual, desired)
 
-    assert_raises(ValueError, res.test_heteroskedasticity, method=None, alternative='invalid')
-    assert_raises(NotImplementedError, res.test_heteroskedasticity, method='invalid')
+    with pytest.raises(ValueError):
+        res.test_heteroskedasticity(method=None, alternative='invalid')
+    with pytest.raises(NotImplementedError):
+        res.test_heteroskedasticity(method='invalid')
 
     actual = res.test_serial_correlation(method=None)
     desired = res.test_serial_correlation(method='ljungbox')
     assert_allclose(actual, desired)
 
-    assert_raises(NotImplementedError, res.test_serial_correlation, method='invalid')
+    with pytest.raises(NotImplementedError):
+        res.test_serial_correlation(method='invalid')
 
     # Smoke tests for other options
-    actual = res.test_heteroskedasticity(method=None, alternative='d', use_f=False)
+    actual = res.test_heteroskedasticity(method=None, alternative='d',
+                                         use_f=False)
     desired = res.test_serial_correlation(method='boxpierce')
 
@@ -739,7 +786,8 @@ def test_diagnostics_nile_eviews():
     niledata = nile.data.load_pandas().data
     niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
 
-    mod = MLEModel(niledata['volume'], k_states=1,
+    mod = MLEModel(
+        niledata['volume'], k_states=1,
         initialization='approximate_diffuse', initial_variance=1e15,
         loglikelihood_burn=1)
     mod.ssm['design', 0, 0] = 1
@@ -766,7 +814,8 @@ def test_diagnostics_nile_durbinkoopman():
     niledata = nile.data.load_pandas().data
     niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
 
-    mod = MLEModel(niledata['volume'], k_states=1,
+    mod = MLEModel(
+        niledata['volume'], k_states=1,
        initialization='approximate_diffuse', initial_variance=1e15,
        loglikelihood_burn=1)
     mod.ssm['design', 0, 0] = 1
@@ -794,13 +843,14 @@ def test_diagnostics_nile_durbinkoopman():
     assert_allclose(actual, [0.61], atol=1e-2)
 
 
+@pytest.mark.smoke
 def test_prediction_results():
     # Just smoke tests for the PredictionResults class, which is copied from
     # elsewhere in Statsmodels
     mod, res = get_dummy_mod()
     predict = res.get_prediction()
-    summary_frame = predict.summary_frame()
+    predict.summary_frame()
 
 
 def test_lutkepohl_information_criteria():
@@ -814,7 +864,7 @@ def test_lutkepohl_information_criteria():
     dta['dln_consump'] = np.log(dta['consump']).diff()
 
     endog = dta.loc['1960-04-01':'1978-10-01',
-        ['dln_inv', 'dln_inc', 'dln_consump']]
+                    ['dln_inv', 'dln_inc', 'dln_consump']]
 
     # AR model - SARIMAX
     # (use loglikelihood_burn=1 to mimic conditional MLE used by Stata's var
diff --git a/statsmodels/tsa/statespace/tests/test_options.py b/statsmodels/tsa/statespace/tests/test_options.py
index f652eee3c95..3a0bf9ab329 100644
--- a/statsmodels/tsa/statespace/tests/test_options.py
+++ b/statsmodels/tsa/statespace/tests/test_options.py
@@ -81,7 +81,8 @@ def test_filter_methods(self):
         assert_equal(model.filter_method, FILTER_CONVENTIONAL)
 
         model.filter_collapsed = True
-        assert_equal(model.filter_method, FILTER_CONVENTIONAL | FILTER_COLLAPSED)
+        assert_equal(model.filter_method,
+                     FILTER_CONVENTIONAL | FILTER_COLLAPSED)
         model.filter_conventional = False
         assert_equal(model.filter_method, FILTER_COLLAPSED)
 
@@ -90,7 +91,8 @@ def test_filter_methods(self):
         assert_equal(model.filter_method, FILTER_AUGMENTED)
 
         # Try setting via boolean via method
-        model.set_filter_method(filter_conventional=True, filter_augmented=False)
+        model.set_filter_method(filter_conventional=True,
+                                filter_augmented=False)
         assert_equal(model.filter_method, FILTER_CONVENTIONAL)
 
         # Try setting and unsetting all
@@ -117,7 +119,8 @@ def test_inversion_methods(self):
         model.invert_univariate = True
         assert_equal(model.inversion_method, INVERT_UNIVARIATE)
         model.invert_cholesky = True
-        assert_equal(model.inversion_method, INVERT_UNIVARIATE | INVERT_CHOLESKY)
+        assert_equal(model.inversion_method,
+                     INVERT_UNIVARIATE | INVERT_CHOLESKY)
         model.invert_univariate = False
         assert_equal(model.inversion_method, INVERT_CHOLESKY)
 
@@ -126,8 +129,10 @@ def test_inversion_methods(self):
         assert_equal(model.inversion_method, INVERT_LU)
 
         # Try setting via boolean via method
-        model.set_inversion_method(invert_cholesky=True, invert_univariate=True, invert_lu=False)
-        assert_equal(model.inversion_method, INVERT_UNIVARIATE | INVERT_CHOLESKY)
+        model.set_inversion_method(invert_cholesky=True,
+                                   invert_univariate=True, invert_lu=False)
+        assert_equal(model.inversion_method,
+                     INVERT_UNIVARIATE | INVERT_CHOLESKY)
 
         # Try setting and unsetting all
         model.inversion_method = 0
@@ -179,7 +184,8 @@ def test_conserve_memory(self):
         model.memory_no_forecast = True
         assert_equal(model.conserve_memory, MEMORY_NO_FORECAST)
         model.memory_no_filtered = True
-        assert_equal(model.conserve_memory, MEMORY_NO_FORECAST | MEMORY_NO_FILTERED)
+        assert_equal(model.conserve_memory,
+                     MEMORY_NO_FORECAST | MEMORY_NO_FILTERED)
         model.memory_no_forecast = False
         assert_equal(model.conserve_memory, MEMORY_NO_FILTERED)
 
@@ -188,7 +194,8 @@ def test_conserve_memory(self):
         assert_equal(model.conserve_memory, MEMORY_NO_PREDICTED)
 
         # Try setting via boolean via method
-        model.set_conserve_memory(memory_no_filtered=True, memory_no_predicted=False)
+        model.set_conserve_memory(memory_no_filtered=True,
+                                  memory_no_predicted=False)
         assert_equal(model.conserve_memory, MEMORY_NO_FILTERED)
 
         # Try setting and unsetting all
@@ -222,7 +229,8 @@ def test_smoother_outputs(self):
         model.smoother_state = True
         assert_equal(model.smoother_output, SMOOTHER_STATE)
         model.smoother_disturbance = True
-        assert_equal(model.smoother_output, SMOOTHER_STATE | SMOOTHER_DISTURBANCE)
+        assert_equal(model.smoother_output,
+                     SMOOTHER_STATE | SMOOTHER_DISTURBANCE)
         model.smoother_state = False
         assert_equal(model.smoother_output, SMOOTHER_DISTURBANCE)
 
@@ -231,7 +239,8 @@ def test_smoother_outputs(self):
         assert_equal(model.smoother_output, SMOOTHER_DISTURBANCE_COV)
 
         # Try setting via boolean via method
-        model.set_smoother_output(smoother_disturbance=True, smoother_disturbance_cov=False)
+        model.set_smoother_output(smoother_disturbance=True,
+                                  smoother_disturbance_cov=False)
         assert_equal(model.smoother_output, SMOOTHER_DISTURBANCE)
 
         # Try setting and unsetting all
@@ -256,5 +265,10 @@ def test_simulation_outputs(self):
         # TODO test changing simulation options in SimulationSmoothResults
         # instance
-        assert_equal(self.model.get_simulation_output(SIMULATION_STATE), SIMULATION_STATE)
-        assert_equal(self.model.get_simulation_output(simulate_state=True, simulate_disturbance=True), SIMULATION_ALL)
+        assert_equal(
+            self.model.get_simulation_output(SIMULATION_STATE),
+            SIMULATION_STATE)
+        assert_equal(
+            self.model.get_simulation_output(simulate_state=True,
+                                             simulate_disturbance=True),
+            SIMULATION_ALL)
diff --git a/statsmodels/tsa/statespace/tests/test_pickle.py b/statsmodels/tsa/statespace/tests/test_pickle.py
index 832f2b2e9ee..1615aa754c9 100644
--- a/statsmodels/tsa/statespace/tests/test_pickle.py
+++ b/statsmodels/tsa/statespace/tests/test_pickle.py
@@ -137,7 +137,8 @@ def test_kalman_filter_pickle(data):
 def test_representation_pickle():
     nobs = 10
     k_endog = 2
-    endog = np.asfortranarray(np.arange(nobs * k_endog).reshape(k_endog, nobs) * 1.)
+    arr = np.arange(nobs * k_endog).reshape(k_endog, nobs) * 1.
+    endog = np.asfortranarray(arr)
     mod = Representation(endog, k_states=2)
     pkl_mod = cPickle.loads(cPickle.dumps(mod))
 
diff --git a/statsmodels/tsa/statespace/tests/test_representation.py b/statsmodels/tsa/statespace/tests/test_representation.py
index 3b2df58a18e..e3a29ae5ed2 100644
--- a/statsmodels/tsa/statespace/tests/test_representation.py
+++ b/statsmodels/tsa/statespace/tests/test_representation.py
@@ -14,22 +14,25 @@
 """
 from __future__ import division, absolute_import, print_function
 
+import os
 import warnings
+
 import numpy as np
 import pandas as pd
 import pytest
-import os
 
 from statsmodels.tsa.statespace.representation import Representation
-from statsmodels.tsa.statespace.kalman_filter import KalmanFilter, FilterResults, PredictionResults
+from statsmodels.tsa.statespace.kalman_filter import (
+    KalmanFilter, FilterResults, PredictionResults)
 from statsmodels.tsa.statespace import tools, sarimax
 from .results import results_kalman_filter
-from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
+from numpy.testing import (
+    assert_equal, assert_almost_equal, assert_raises, assert_allclose)
 
 current_path = os.path.dirname(os.path.abspath(__file__))
 
-clark1989_path = 'results' + os.sep + 'results_clark1989_R.csv'
-clark1989_results = pd.read_csv(current_path + os.sep + clark1989_path)
+clark1989_path = os.path.join('results', 'results_clark1989_R.csv')
+clark1989_results = pd.read_csv(os.path.join(current_path, clark1989_path))
 
 
 class Clark1987(object):
@@ -590,20 +593,21 @@ def test_representation():
     # Test an invalid number of states
     def zero_kstates():
-        mod = Representation(1, 0)
+        Representation(1, 0)
     assert_raises(ValueError, zero_kstates)
 
     # Test an invalid endogenous array
     def empty_endog():
         endog = np.zeros((0, 0))
-        mod = Representation(endog, k_states=2)
+        Representation(endog, k_states=2)
     assert_raises(ValueError, empty_endog)
 
     # Test a Fortran-ordered endogenous array (which will be assumed to be in
     # wide format: k_endog x nobs)
     nobs = 10
     k_endog = 2
-    endog = np.asfortranarray(np.arange(nobs*k_endog).reshape(k_endog, nobs)*1.)
+    arr = np.arange(nobs*k_endog).reshape(k_endog, nobs)*1.
+    endog = np.asfortranarray(arr)
     mod = Representation(endog, k_states=2)
     assert_equal(mod.nobs, nobs)
     assert_equal(mod.k_endog, k_endog)
@@ -639,7 +643,8 @@ def test_bind():
     mod.bind(np.zeros((0, 2), dtype=np.float64))
 
     # Test invalid (3-dim) endogenous array
-    assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2, 2, 3)*1.))
+    with pytest.raises(ValueError):
+        mod.bind(np.arange(12).reshape(2, 2, 3)*1.)
 
     # Test valid F-contiguous
     mod.bind(np.asfortranarray(np.arange(10).reshape(2, 5)))
     assert_equal(mod.nobs, 5)
 
     # Test invalid F-contiguous
-    assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(5, 2))))
+    with pytest.raises(ValueError):
+        mod.bind(np.asfortranarray(np.arange(10).reshape(5, 2)))
 
     # Test invalid C-contiguous
     assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(2, 5)))
@@ -673,14 +679,17 @@ def test_initialization():
 
     # Test invalid initial_state
     initial_state = np.zeros(10,)
-    assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
+    with pytest.raises(ValueError):
+        mod.initialize_known(initial_state, initial_state_cov)
     initial_state = np.zeros((10, 10))
-    assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
+    with pytest.raises(ValueError):
+        mod.initialize_known(initial_state, initial_state_cov)
 
     # Test invalid initial_state_cov
     initial_state = np.zeros(2,) + 1.5
     initial_state_cov = np.eye(3)
-    assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
+    with pytest.raises(ValueError):
+        mod.initialize_known(initial_state, initial_state_cov)
 
 
 def test_no_endog():
@@ -715,7 +724,7 @@ def test_cython():
     # Test that a dKalmanFilter instance was created
     assert_equal(prefix in mod._kalman_filters, True)
     kf = mod._kalman_filters[prefix]
-    assert_equal(isinstance(kf, tools.prefix_kalman_filter_map[prefix]), True)
+    assert isinstance(kf, tools.prefix_kalman_filter_map[prefix])
 
     # Test that the default returned _kalman_filter is the above instance
     assert_equal(mod._kalman_filter, kf)
@@ -836,7 +845,8 @@ def test_predict():
     # Check for a warning when providing a non-used statespace matrix
     with warnings.catch_warnings(record=True) as w:
-        res.predict(end=res.nobs+1, design=True, obs_intercept=np.zeros((1, 1)))
+        res.predict(end=res.nobs+1, design=True,
+                    obs_intercept=np.zeros((1, 1)))
         message = ('Model has time-invariant design matrix, so the design'
                    ' argument to `predict` has been ignored.')
         assert_equal(str(w[0].message), message)
@@ -1164,10 +1174,12 @@ def test_impulse_responses():
     actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
     assert_allclose(actual, desired)
 
-    actual = mod.impulse_responses(steps=10, impulse=[1, 0], orthogonalized=True)
+    actual = mod.impulse_responses(
+        steps=10, impulse=[1, 0], orthogonalized=True)
     assert_allclose(actual, desired)
 
-    actual = mod.impulse_responses(steps=10, impulse=[0, 1], orthogonalized=True)
+    actual = mod.impulse_responses(
+        steps=10, impulse=[0, 1], orthogonalized=True)
     assert_allclose(actual, desired)
 
     # Univariate model with two correlated shocks
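---

Editor's note, not part of the patch itself: most hunks in this series make one of three mechanical conversions. `assert_equal(re.search(...) is None, False)` becomes a bare `assert re.search(...)` (a match object is truthy and `None` is falsy), the callable form of `assert_raises` becomes the `pytest.raises` context manager, and unused bindings such as `as w` on `warnings.catch_warnings(record=True)` are dropped. A minimal, self-contained sketch of the target idioms, using a hypothetical `fake_table` string in place of the statsmodels summary fixtures:

import re
import warnings

import pytest


def test_regex_assert_idiom():
    # A bare assert on the match object replaces
    # assert_equal(re.search(...) is None, False).
    fake_table = "loading.f1    0.5000\nsigma2.y1     1.2345"
    assert re.search('loading.f1 +0.5000', fake_table)


def test_raises_idiom():
    # pytest.raises as a context manager replaces the callable form
    # assert_raises(ValueError, int, 'x').
    with pytest.raises(ValueError):
        int('x')


def test_warning_record_idiom():
    # Keep the 'as w' binding only when the records are inspected;
    # the hunks above drop it wherever w was never used.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        warnings.warn('example warning')
    assert 'example warning' in str(w[-1].message)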
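The series also adds `@pytest.mark.smoke` to `test_score_misc` and `test_prediction_results`. Custom marks like this are reported as unknown unless registered; a sketch of the registration this assumes (the description text is illustrative, not taken from the repository), placed in pytest.ini or the equivalent `[tool:pytest]` section of setup.cfg:

[pytest]
markers =
    smoke: quick test that exercises a code path without checking the output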