From 4b39c9ef57b6ab849b10fa152abfa12621fbe3da Mon Sep 17 00:00:00 2001
From: Josef Perktold
Date: Wed, 3 Oct 2012 21:40:33 -0400
Subject: [PATCH] REF/TST: fix tests for alpha rescaled

---
 statsmodels/discrete/tests/test_discrete.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/statsmodels/discrete/tests/test_discrete.py b/statsmodels/discrete/tests/test_discrete.py
index 6bd9a6bef75..e692b9ff9a3 100644
--- a/statsmodels/discrete/tests/test_discrete.py
+++ b/statsmodels/discrete/tests/test_discrete.py
@@ -393,7 +393,7 @@ class TestProbitL1(CheckLikelihoodModelL1):
     def setupClass(cls):
         data = sm.datasets.spector.load()
         data.exog = sm.add_constant(data.exog, prepend=True)
-        alpha = np.array([0.1, 0.2, 0.3, 10]) / data.exog.shape[0]
+        alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
         cls.res1 = Probit(data.endog, data.exog).fit_regularized(
             method="l1", alpha=alpha, disp=0, trim_mode='auto',
             auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
@@ -413,7 +413,7 @@ def setupClass(cls):
         anes_exog = anes_data.exog
         anes_exog = sm.add_constant(anes_exog, prepend=False)
         mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
-        alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) / anes_exog.shape[0]
+        alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
         alpha[-1,:] = 0
         cls.res1 = mlogit_mod.fit_regularized(
             method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
@@ -428,7 +428,7 @@ class TestLogitL1(CheckLikelihoodModelL1):
     def setupClass(cls):
         data = sm.datasets.spector.load()
         data.exog = sm.add_constant(data.exog, prepend=True)
-        cls.alpha = 3 * np.array([0., 1., 1., 1.]) / data.exog.shape[0]
+        cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
         cls.res1 = Logit(data.endog, data.exog).fit_regularized(
             method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
             size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
@@ -450,7 +450,7 @@ def setupClass(self):
     def test_cvxopt_versus_slsqp(self):
         #Compares resutls from cvxopt to the standard slsqp
        if has_cvxopt:
-            self.alpha = 3. * np.array([0, 1, 1, 1.]) / self.data.endog.shape[0]
+            self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]
             res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
                 method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
                 trim_mode='auto')
@@ -471,7 +471,7 @@ def setupClass(cls):
         cls.alphas = np.array(
             [[0.1, 0.1, 0.1, 0.1],
              [0.4, 0.4, 0.5, 0.5],
-             [0.5, 0.5, 1, 1]]) / data.exog.shape[0]
+             [0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
         cls.res1 = DiscreteL1()
         cls.res1.sweep()