REF: divide loglike, score, hessian by nobs in LikelihoodModel.fit, tests pass
1 parent 507f94b; commit 4f79bb3fb845b01d6cd48e21717ac2d2bce58417; josef-pkt committed Sep 29, 2012
Showing with 16 additions and 15 deletions.
  1. +8 −6 statsmodels/base/model.py
  2. +8 −9 statsmodels/discrete/tests/test_discrete.py
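
Why the test alphas change along with fit: dividing the negative log-likelihood and its derivatives by nobs rescales the unpenalized objective by a positive constant, which does not move its minimizer; but once a fixed-weight L1 penalty is added, the penalty's relative weight grows by a factor of nobs unless alpha is divided by nobs as well. A minimal sketch of that equivalence, using plain NumPy/SciPy with a made-up quadratic standing in for -loglike (illustrative only, not statsmodels code):

```python
import numpy as np
from scipy.optimize import minimize

nobs = 32                                    # sample size, as in the spector data
target = np.array([1.0, -2.0, 0.5])

def neg_loglike(params):
    # made-up convex stand-in for -loglike; scales linearly with nobs
    return nobs * np.sum((params - target) ** 2)

alpha = 3.0                                  # L1 weight tuned for the *total* -loglike

def old_obj(params):                         # before: -loglike + alpha * ||params||_1
    return neg_loglike(params) + alpha * np.abs(params).sum()

def new_obj(params):                         # after: -loglike/nobs + (alpha/nobs) * ||params||_1
    return neg_loglike(params) / nobs + (alpha / nobs) * np.abs(params).sum()

x0 = np.zeros(3)
opts = {'xatol': 1e-10, 'fatol': 1e-12, 'maxiter': 10000}
res_old = minimize(old_obj, x0, method='Nelder-Mead', options=opts)
res_new = minimize(new_obj, x0, method='Nelder-Mead', options=opts)
print(np.allclose(res_old.x, res_new.x, atol=1e-5))   # True (up to solver tolerance)
```

Since new_obj is exactly old_obj / nobs, the two problems share a minimizer; only the scale of the objective the optimizer sees changes, which is why every alpha in the tests below is divided by the number of observations.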
statsmodels/base/model.py
@@ -283,10 +283,11 @@ def fit(self, start_params=None, method='newton', maxiter=100,
# user-supplied and numerically evaluated estimate: fprime doesn't take
# args in most (any?) of the optimize functions
- f = lambda params, *args: -self.loglike(params, *args)
- score = lambda params: -self.score(params)
+ nobs = self.endog.shape[0]
+ f = lambda params, *args: -self.loglike(params, *args) / nobs
+ score = lambda params: -self.score(params) / nobs
try:
- hess = lambda params: -self.hessian(params)
+ hess = lambda params: -self.hessian(params) / nobs
except:
hess = None
@@ -302,8 +303,9 @@ def fit(self, start_params=None, method='newton', maxiter=100,
fit_funcs.update(extra_fit_funcs)
if method == 'newton':
- score = lambda params: self.score(params)
- hess = lambda params: self.hessian(params)
+ score = lambda params: self.score(params) / nobs
+ hess = lambda params: self.hessian(params) / nobs
+ #TODO: why are score and hess positive?
func = fit_funcs[method]
xopt, retvals = func(f, score, start_params, fargs, kwargs,
@@ -322,7 +324,7 @@ def fit(self, start_params=None, method='newton', maxiter=100,
elif cov_params_func:
Hinv = cov_params_func(self, xopt, retvals)
elif method == 'newton' and full_output:
- Hinv = np.linalg.inv(-retvals['Hessian'])
+ Hinv = np.linalg.inv(-retvals['Hessian']) / nobs
else:
try:
Hinv = np.linalg.inv(-1 * self.hessian(xopt))
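
The extra `/ nobs` on the `Hinv` line compensates for the Newton-path scaling: `retvals['Hessian']` now holds `hessian(params) / nobs`, and since inv(-H/n) equals n * inv(-H), dividing the inverse by nobs recovers the usual covariance estimate inv(-H). A quick NumPy check with a made-up negative definite Hessian:

```python
import numpy as np

nobs = 50
rng = np.random.default_rng(0)
A = rng.standard_normal((3, 3))
H = -(A @ A.T + 3.0 * np.eye(3))           # made-up negative definite Hessian

scaled = H / nobs                          # what retvals['Hessian'] now contains
cov_old = np.linalg.inv(-H)                # covariance estimate before this commit
cov_new = np.linalg.inv(-scaled) / nobs    # the corrected expression in fit

print(np.allclose(cov_old, cov_new))       # True: inv(-H/n) / n == inv(-H)
```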
statsmodels/discrete/tests/test_discrete.py
@@ -393,7 +393,7 @@ class TestProbitL1(CheckLikelihoodModelL1):
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
- alpha = np.array([0.1, 0.2, 0.3, 10])
+ alpha = np.array([0.1, 0.2, 0.3, 10]) / data.exog.shape[0]
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
@@ -413,7 +413,7 @@ def setupClass(cls):
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
- alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
+ alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) / anes_exog.shape[0]
alpha[-1,:] = 0
cls.res1 = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
@@ -428,7 +428,7 @@ class TestLogitL1(CheckLikelihoodModelL1):
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
- cls.alpha = 3 * np.array([0, 1, 1, 1])
+ cls.alpha = 3 * np.array([0., 1., 1., 1.]) / data.exog.shape[0]
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
@@ -448,11 +448,9 @@ def setupClass(self):
self.data.exog = sm.add_constant(self.data.exog, prepend=True)
def test_cvxopt_versus_slsqp(self):
- """
- Compares resutls from cvxopt to the standard slsqp
- """
+ #Compares resutls from cvxopt to the standard slsqp
if has_cvxopt:
- self.alpha = 3 * np.array([0, 1, 1, 1])
+ self.alpha = 3. * np.array([0, 1, 1, 1.]) / self.data.endog.shape[0]
res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
trim_mode='auto')
@@ -471,8 +469,9 @@ def setupClass(cls):
data.exog = sm.add_constant(data.exog, prepend=True)
cls.model = Logit(data.endog, data.exog)
cls.alphas = np.array(
- [[0.1, 0.1, 0.1, 0.1],
- [0.4, 0.4, 0.5, 0.5], [0.5, 0.5, 1, 1]])
+ [[0.1, 0.1, 0.1, 0.1],
+ [0.4, 0.4, 0.5, 0.5],
+ [0.5, 0.5, 1, 1]]) / data.exog.shape[0]
cls.res1 = DiscreteL1()
cls.res1.sweep()
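
The practical consequence for user code mirrors these test changes: an alpha passed to fit_regularized that was calibrated against the total log-likelihood must now be divided by the number of observations to reproduce the previous estimates. A sketch following the TestLogitL1 setup above (assumes the spector dataset and the fit_regularized options shown in the tests):

```python
import numpy as np
import statsmodels.api as sm

data = sm.datasets.spector.load()
exog = sm.add_constant(data.exog, prepend=True)

alpha_old = 3 * np.array([0., 1., 1., 1.])   # weight tuned for the total loglike
alpha_new = alpha_old / exog.shape[0]        # rescaled for the per-observation objective

res = sm.Logit(data.endog, exog).fit_regularized(
    method='l1', alpha=alpha_new, disp=0, trim_mode='size',
    size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
print(res.params)
```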
