Commit

Merge pull request #4910 from jbrockmendel/blanks
CLN: Blank Lines E301,E302,E303,E305,E306 in examples, tools, sm.base
ChadFulton committed Sep 5, 2018
2 parents 4114a8a + d640525 commit b613149
Showing 31 changed files with 93 additions and 28 deletions.
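
For reference, the flake8/pycodestyle codes in the commit title all concern blank-line conventions: E301 (one blank line expected before a method), E302 (two blank lines expected before a top-level def or class), E303 (too many blank lines), E305 (two blank lines expected after the end of a function or class definition), and E306 (one blank line expected before a nested def). The snippet below is a minimal illustrative sketch of the spacing these checks enforce; the names in it are made up for illustration and are not taken from this diff.

import numpy as np


def scale(x):
    # E302/E303: exactly two blank lines above a module-level def
    return 2 * np.asarray(x)


class Model(object):
    def fit(self):
        return scale([1.0, 2.0])

    # E301: one blank line separates methods inside a class
    def summary(self):
        return "ok"


# E305: two blank lines between the last definition and module-level code
result = Model().fit()
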
3 changes: 1 addition & 2 deletions examples/incomplete/wls_extended.py
@@ -47,7 +47,6 @@
rsquared_adj = 1 -(wls_fit.nobs)/(wls_fit.df_resid)*(1-rsquared)



#Trying to figure out what's going on in this example
#----------------------------------------------------

@@ -80,6 +79,7 @@
#@savefig wls_robust_compare.png
plt.ylim([0,2000]);


#What is going on? A more systematic look at the data
#----------------------------------------------------

@@ -109,7 +109,6 @@ def getrsq(fitresult):
endog = fitresult[1]
nobs = resid.shape[0]


rss = np.dot(resid, resid)
tss = np.var(endog)*nobs
return 1-rss/tss, rss, tss, tss-rss
2 changes: 2 additions & 0 deletions examples/python/contrasts.py
@@ -65,9 +65,11 @@

from patsy.contrasts import ContrastMatrix


def _name_levels(prefix, levels):
return ["[%s%s]" % (prefix, level) for level in levels]


class Simple(object):
def _simple_contrast(self, levels):
nlevels = len(levels)
2 changes: 2 additions & 0 deletions examples/python/formulas.py
@@ -106,6 +106,8 @@

def log_plus_1(x):
return np.log(x) + 1.


res = sm.ols(formula='Lottery ~ log_plus_1(Literacy)', data=df).fit()
print(res.params)

2 changes: 2 additions & 0 deletions examples/python/glm_formula.py
@@ -28,6 +28,8 @@

def double_it(x):
return 2 * x


formula = 'SUCCESS ~ double_it(LOWINC) + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
mod2 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod2.summary()
5 changes: 4 additions & 1 deletion examples/python/quantile_regression.py
@@ -43,10 +43,13 @@
# For convenience, we place the quantile regression results in a Pandas DataFrame, and the OLS results in a dictionary.

quantiles = np.arange(.05, .96, .1)


def fit_model(q):
res = mod.fit(q=q)
return [q, res.params['Intercept'], res.params['income']] + res.conf_int().loc['income'].tolist()



models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b','lb','ub'])

1 change: 0 additions & 1 deletion statsmodels/base/_constraints.py
@@ -128,7 +128,6 @@ def reduce(self, params):
return params.dot(self.transf_mat)



def transform_params_constraint(params, Sinv, R, q):
"""find the parameters that statisfy linear constraint from unconstraint
1 change: 0 additions & 1 deletion statsmodels/base/_penalized.py
@@ -64,7 +64,6 @@ def loglike(self, params, pen_weight=None, **kwds):

return llf


def loglikeobs(self, params, pen_weight=None, **kwds):
if pen_weight is None:
pen_weight = self.pen_weight
1 change: 1 addition & 0 deletions statsmodels/base/_penalties.py
@@ -23,6 +23,7 @@

import numpy as np


def _check_wts(weights, wts):
"""helper function for deprecating `wts`
"""
1 change: 0 additions & 1 deletion statsmodels/base/_screening.py
@@ -420,7 +420,6 @@ def screen_exog_iterator(self, exog_iterator):
exog_winner = np.column_stack(exog_winner)
res_screen_final = self.screen_exog(exog_winner, maxiter=20)


exog_winner_names = ['var%d_%d' % (bidx, idx)
for bidx, batch in enumerate(exog_idx)
for idx in batch]
2 changes: 1 addition & 1 deletion statsmodels/base/data.py
@@ -176,7 +176,6 @@ def _handle_constant(self, hasconst):
self.k_constant = int(rank_orig == rank_augm)
self.const_idx = None


@classmethod
def _drop_nans(cls, x, nan_mask):
return x[nan_mask]
@@ -569,6 +568,7 @@ def attach_ynames(self, result):
else:
return DataFrame(result, columns=self.ynames)


def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
ynames = ['y']
4 changes: 1 addition & 3 deletions statsmodels/base/elastic_net.py
@@ -365,8 +365,6 @@ class RegularizedResultsWrapper(wrap.ResultsWrapper):
'resid': 'rows',
'fittedvalues': 'rows',
}

_wrap_attrs = _attrs

wrap.populate_wrapper(RegularizedResultsWrapper,
wrap.populate_wrapper(RegularizedResultsWrapper, # noqa:E305
RegularizedResults)
1 change: 1 addition & 0 deletions statsmodels/base/l1_solvers_common.py
@@ -5,6 +5,7 @@
import numpy as np
from statsmodels.compat.python import range


def qc_results(params, alpha, score, qc_tol, qc_verbose=False):
"""
Theory dictates that one of two conditions holds:
2 changes: 1 addition & 1 deletion statsmodels/base/optimizer.py
@@ -7,6 +7,7 @@
import numpy as np
from scipy import optimize


def _check_method(method, methods):
if method not in methods:
message = "Unknown fit method %s" % method
@@ -213,7 +214,6 @@ def _fit_constrained(self, params):
"""
pass


def _fit_regularized(self, params):
#TODO: code won't necessarily be general here. 3 options.
# 1) setup for scipy.optimize.fmin_sqlsqp
8 changes: 7 additions & 1 deletion statsmodels/base/tests/test_data.py
@@ -10,6 +10,7 @@
from statsmodels.genmod import families
from statsmodels.discrete.discrete_model import Logit


#class TestDates(object):
# @classmethod
# def setup_class(cls):
@@ -20,6 +21,7 @@
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)


class TestArrays(object):
@classmethod
def setup_class(cls):
@@ -391,6 +393,7 @@ def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values[:,None])


def test_alignment():
#Fix Issue #206
from statsmodels.regression.linear_model import OLS
@@ -412,6 +415,7 @@ def test_alignment():
# which index do we get??
np.testing.assert_raises(ValueError, OLS, *(endog, exog))


class TestMultipleEqsArrays(TestArrays):
@classmethod
def setup_class(cls):
@@ -498,6 +502,7 @@ def test_attach(self):
'columns_eq'),
self.col_eq_result)


class TestMissingArray(object):
@classmethod
def setup_class(cls):
@@ -564,6 +569,7 @@ def test_extra_kwargs_1d(self):
weights = weights[idx]
np.testing.assert_array_equal(data.weights, weights)


class TestMissingPandas(object):
@classmethod
def setup_class(cls):
@@ -740,7 +746,6 @@ def test_hasconst(self):
assert_equal(res.model.k_constant, result[0])
assert_equal(res.model.data.k_constant, result[0])


@classmethod
def setup_class(cls):
# create data
@@ -797,6 +802,7 @@ def mod(y, x):
def _initialize(cls):
cls.y = cls.y_bin


class TestHasConstantLogit(CheckHasConstant):

@classmethod
6 changes: 1 addition & 5 deletions statsmodels/base/tests/test_generic_methods.py
@@ -77,7 +77,6 @@ def test_ttest_tvalues(self):
'Conf. Int. Low', 'Conf. Int. Upp.']
assert_array_equal(summf.columns.values, cols)


def test_ftest_pvalues(self):
res = self.results
use_t = res.use_t
@@ -568,7 +567,6 @@ def setup_class(cls):
cls.data = test.data.drop([0,1,2])
cls.initialize()


def test_combined(self):
res = self.res
wa = res.wald_test_terms(skip_single=False, combine_terms=['Duration', 'Weight'])
@@ -582,7 +580,6 @@

compare_waldres(res, wa, [c_const, c_d, c_w, c_dw, c_duration, c_weight])


def test_categories(self):
# test only multicolumn terms
res = self.res
@@ -632,7 +629,6 @@ def initialize(cls):
mod = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", cls.data)
cls.res = mod.fit(use_t=False)


def test_noformula(self):
endog = self.res.model.endog
exog = self.res.model.data.orig_exog
@@ -762,7 +758,6 @@ def setup_class(cls):
'C(Weight)[T.3]',
'C(Weight)[T.3] - C(Weight)[T.2]']


def test_alpha(self):
pw1 = self.res.t_test_pairwise(self.term_name, method='hommel',
factor_labels='A B C'.split())
@@ -816,6 +811,7 @@ def setup_class(cls):
'C(Weight)[3] - C(Weight)[1]',
'C(Weight)[3] - C(Weight)[2]']


class TestTTestPairwiseOLS4(CheckPairwise):

@classmethod
3 changes: 3 additions & 0 deletions statsmodels/base/tests/test_optimize.py
@@ -20,12 +20,15 @@
def dummy_func(x):
return x**2


def dummy_score(x):
return 2.*x


def dummy_hess(x):
return [[2.]]


def test_full_output_false():
# just a smoke test

5 changes: 0 additions & 5 deletions statsmodels/base/tests/test_penalized.py
@@ -165,8 +165,6 @@ def _initialize(cls):


class TestPenalizedPoissonOracle(CheckPenalizedPoisson):


@classmethod
def _initialize(cls):
y, x = cls.y, cls.x
@@ -355,7 +353,6 @@ def _initialize(cls):
cls.atol = 1e-12
cls.k_params = cls.k_nonzero


def test_cov_type(self):
res1 = self.res1
res2 = self.res2
@@ -526,7 +523,6 @@ def _initialize(cls):
cls.atol = 1e-10
cls.k_params = 4


def test_deriv(self):
res1 = self.res1
res2 = self.res2
@@ -693,7 +689,6 @@ def _initialize(cls):
# the corresponding Theil penweight seems to be 2 * nobs / sigma2_e
cls.res2 = modp.fit(pen_weight=120.74564413221599 * 1000)


pen = smpen.L2ContraintsPenalty(restriction=restriction)
mod = GLMPenalized(y, x, family=family.Gaussian(),
penal=pen)
1 change: 1 addition & 0 deletions statsmodels/base/tests/test_penalties.py
@@ -12,6 +12,7 @@
import statsmodels.base._penalties as smpen
from statsmodels.tools.numdiff import approx_fprime, approx_hess


class CheckPenalty(object):

def test_symmetry(self):
2 changes: 0 additions & 2 deletions statsmodels/base/tests/test_predict.py
@@ -31,7 +31,6 @@ def test_2d(self):
assert_equal(pred.index, np.arange(len(pred)))
assert_allclose(pred.values, fitted.values, rtol=1e-13)


def test_1d(self):
# one observation
res = self.res
@@ -47,7 +46,6 @@
assert_equal(pred.index, np.arange(1))
assert_allclose(pred, fittedm, rtol=1e-13)


# Series
pred = res.predict(data.mean())
assert_equal(pred.index, np.arange(1))
2 changes: 1 addition & 1 deletion statsmodels/base/tests/test_screening.py
@@ -49,6 +49,7 @@ def _get_poisson_data():
y = np.random.poisson(mu)
return y, x, idx_nonzero_true, beta


def test_poisson_screening():

np.random.seed(987865)
@@ -271,7 +272,6 @@ def test_glmlogit_screening():

res_screen.results_final


xnames = ['var%4d' % ii for ii in res_screen.idx_nonzero]
xnames[0] = 'const'

8 changes: 6 additions & 2 deletions statsmodels/base/tests/test_shrink_pickle.py
@@ -166,6 +166,7 @@ def setup(self):
#TODO: temporary, fixed in master
self.predict_kwds = dict(exposure=1, offset=0)


class TestRemoveDataPickleNegativeBinomial(RemoveDataPickle):

def setup(self):
@@ -176,6 +177,7 @@ def setup(self):
mod = sm.NegativeBinomial(data.endog, data.exog)
self.results = mod.fit(disp=0)


class TestRemoveDataPickleLogit(RemoveDataPickle):

def setup(self):
@@ -260,8 +262,11 @@ class TestPickleFormula4(TestPickleFormula2):
def setup(self):
self.results = sm.OLS.from_formula("Y ~ np.log(abs(A) + 1) + B * C", data=self.data).fit()

# we need log in module namespace for the following test

# we need log in module namespace for TestPickleFormula5
from numpy import log


class TestPickleFormula5(TestPickleFormula2):

def setup(self):
@@ -281,7 +286,6 @@ def setup(self):
self.results = model.fit_regularized(method='l1', disp=0, alpha=10)



if __name__ == '__main__':
for cls in [TestRemoveDataPickleOLS, TestRemoveDataPickleWLS,
TestRemoveDataPicklePoisson,
