Merge branch 'release/1.2.15'
horta committed Jun 30, 2017
2 parents baf4f8f + 6c3ae6f commit b4ab5e7
Showing 21 changed files with 91 additions and 67 deletions.
1 change: 1 addition & 0 deletions .travis.yml
@@ -35,6 +35,7 @@ notifications:
email: false
deploy:
provider: pypi
+ server: https://upload.pypi.org/legacy/
user: dhorta
password:
secure: kJx9NqIGL2zSc5+R6Ktqssv6/n1whi6sB0FQKK+/SE/tNl3OnmqJ9lYF8/J2IMt1Uhz44gIWJl0uzPWUCQqghT7pSqUy1MN1yhlkxh4W0thZMc6eXEWbOHrPp0pC2GvpgDy0xys5+OzI1v1khpZMt0KP1vuSb/Z5G+wkDxqCU/ep2JObI/3GgbkNx/3I0SBKDvYmvNgq39HMwidx3YW+LgWsWsEYAJBFmKM58UkT+EXfcWOxy2K0Aqt6uULEZEYk3y39nJxOTaLGUx19wfQNQQO0ugOX6mGHKRK34mb+DFfV2VOMrV093dSyelsHq5pT9d628L+l9TEg0WlXgt3ABOagB6dBV66CNRCD29SuOBbQebKNjBV0OLX8xB0BcEG9n/H0GWkOSxP79wL1oB3kIJB6pasZ0rCvgfddyjFr900+okdXCqiXH4bPfvOMRsSGF9gNqERWbGlqs2lAnUZlkdt2l+1w2KSRTDvogGaXT1cC/T71ttnPkATJN+SagPEPGTOUYFgLGsdYY9/j0zrSXzbXujR2lTG0zAsvZ4lhGujc/EBdAZ4/di+wZfOM+EyU6pefzYVZg4q2yDkdr1GxwAPN5BB4VHRLsw2IgEyYedWHYS66vvNZ7KuuF336Y3Sp6lDXz0mL6AYixOBh46YolxlGqhcGFkpkE7I0WBQD2c4=
5 changes: 3 additions & 2 deletions glimix_core/cov/sum.py
@@ -14,15 +14,16 @@ class SumCov(FunctionReduce):
f(f_0, f_1, \dots) = f_0 + f_1 + \dots
"""

def __init__(self, covariances):
self._covariances = [c for c in covariances]
FunctionReduce.__init__(self, self._covariances, 'sum')

- def value_reduce(self, values): # pylint: disable=R0201
+ def value_reduce(self, values):  # pylint: disable=R0201
r"""Sum covariance function evaluated at `(f_0, f_1, ...)`."""
return add.reduce(list(values.values()))

- def gradient_reduce(self, _, gradients): # pylint: disable=R0201
+ def gradient_reduce(self, _, gradients):  # pylint: disable=R0201
r"""Sum of covariance function derivatives.
Returns:
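A minimal standalone sketch of what `value_reduce` computes, using `numpy.add.reduce` exactly as in the hunk above; the dict is a hypothetical stand-in for the per-covariance evaluations that `FunctionReduce` passes in:

    from numpy import add, array

    # Hypothetical evaluations of two covariance functions at the same inputs.
    values = {'cov0': array([[1.0, 0.5],
                             [0.5, 1.0]]),
              'cov1': array([[0.2, 0.0],
                             [0.0, 0.2]])}

    # SumCov.value_reduce: elementwise sum over all covariances.
    total = add.reduce(list(values.values()))
    print(total)  # [[1.2 0.5]
                  #  [0.5 1.2]]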
28 changes: 13 additions & 15 deletions glimix_core/ep/ep_linear_kernel.py
@@ -50,23 +50,21 @@ def _lml(self):
tQ = sqrt(1 - d) * Q

lml = [
- -log(L.diagonal()).sum(), #
- -0.5 * sum(log(s * S)), #
- +0.5 * sum(log(A)), #
+ -log(L.diagonal()).sum(),
+ -0.5 * sum(log(s * S)),
+ +0.5 * sum(log(A)),
# lml += 0.5 * sum(log(ttau)),
- +0.5 * dot(teta * A, dot(tQ, cho_solve(L, dot(tQ.T,
- teta * A)))), #!=
- -0.5 * dot(teta, teta / TS), #
- +dot(m, A * teta) - 0.5 * dot(m, A * ttau * m), #
- -0.5 * dot(m * A * ttau,
- dot(tQ,
- cho_solve(L, dot(tQ.T,
- 2 * A * teta - A * ttau * m)))), #
- +sum(self._moments['log_zeroth']), #
- +0.5 * sum(log(TS)), #
+ +0.5 * dot(teta * A, dot(tQ, cho_solve(L, dot(tQ.T, teta * A)))),
+ -0.5 * dot(teta, teta / TS),
+ +dot(m, A * teta) - 0.5 * dot(m, A * ttau * m),
+ -0.5 * dot(
+ m * A * ttau,
+ dot(tQ, cho_solve(L, dot(tQ.T, 2 * A * teta - A * ttau * m)))),
+ +sum(self._moments['log_zeroth']),
+ +0.5 * sum(log(TS)),
# lml -= 0.5 * sum(log(ttau)),
- -0.5 * sum(log(ctau)), #
- +0.5 * dot(ceta / TS, ttau * ceta / ctau - 2 * teta), #
+ -0.5 * sum(log(ctau)),
+ +0.5 * dot(ceta / TS, ttau * ceta / ctau - 2 * teta),
0.5 * s * d * sum(teta * A * teta)
]
lml = fsum(lml)
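One detail worth flagging in this hunk: the bracket mixes large positive and negative terms, and the reduction uses `math.fsum` (see `lml = fsum(lml)`), which tracks exact partial sums instead of accumulating rounding error the way the built-in `sum` does. A tiny standard-library illustration:

    from math import fsum

    terms = [1e16, 1.0, -1e16]
    print(sum(terms))   # 0.0 -- the 1.0 is lost to floating-point rounding
    print(fsum(terms))  # 1.0 -- fsum recovers it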
6 changes: 3 additions & 3 deletions glimix_core/ep/posterior_linear_kernel.py
@@ -1,13 +1,13 @@
from __future__ import division

from numpy import sum as npsum
- from numpy import dot, empty, sqrt, concatenate, zeros
- from scipy.linalg import cho_factor

+ from numpy import concatenate, dot, empty, sqrt, zeros
from numpy_sugar.linalg import cho_solve, ddot, dotd, sum2diag
+ from scipy.linalg import cho_factor

from .posterior import Posterior


class PosteriorLinearKernel(Posterior):
r"""EP posterior.
6 changes: 4 additions & 2 deletions glimix_core/ep/site.py
@@ -1,9 +1,11 @@
from __future__ import division

- from numpy import zeros, maximum
+ from numpy import maximum, zeros

- class Site(object): # pylint: disable=R0903

+ class Site(object):  # pylint: disable=R0903
r"""EP parameters."""

def __init__(self, n):
self.tau = zeros(n)
self.eta = zeros(n)
3 changes: 2 additions & 1 deletion glimix_core/ep/test/test_ep.py
@@ -2,7 +2,8 @@

from glimix_core.ep import EP


def test_ep():
ep = EP(10)
with pytest.raises(NotImplementedError):
- print(ep._compute_moments()) # pylint: disable=W0212
+ print(ep._compute_moments())  # pylint: disable=W0212
5 changes: 2 additions & 3 deletions glimix_core/glmm/glmm.py
@@ -6,8 +6,8 @@
from numpy import (asarray, clip, concatenate, dot, exp, inf, log, ndarray,
zeros)
from numpy.linalg import LinAlgError

from numpy_sugar import epsilon

from optimix import Function, Scalar, Vector
from optimix.optimize import BadSolutionError

@@ -232,8 +232,7 @@ def gradient(self): # pylint: disable=W0221
g = self._lml_derivatives(self._X)
ev = exp(-v)
grad = [
- g['delta'] * (ev/(1 + ev))/(1 + ev),
- g['scale'] * exp(x),
+ g['delta'] * (ev / (1 + ev)) / (1 + ev), g['scale'] * exp(x),
g['mean']
]
except (ValueError, LinAlgError) as e:
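Note on the reformatted gradient entries: they read as chain-rule factors for an unconstrained reparameterization. Assuming the surrounding code optimizes :math:`v` and :math:`x` with :math:`\delta = 1/(1 + e^{-v})` and :math:`s = e^{x}` (an inference from `ev = exp(-v)` and `exp(x)`, not confirmed by this hunk), the Jacobians are :math:`\partial\delta/\partial v = e^{-v}/(1 + e^{-v})^{2} = (e^{-v}/(1 + e^{-v})) \cdot 1/(1 + e^{-v})` and :math:`\partial s/\partial x = e^{x}`, which match `g['delta'] * (ev / (1 + ev)) / (1 + ev)` and `g['scale'] * exp(x)` term by term.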
18 changes: 14 additions & 4 deletions glimix_core/glmm/test/test_glmm.py
@@ -1,16 +1,15 @@
import pytest

+ from numpy import asarray, ascontiguousarray, dot, ones, sqrt, zeros
from numpy.random import RandomState
from numpy.testing import assert_allclose
- from numpy import ascontiguousarray, sqrt, ones, dot, zeros, asarray
from numpy_sugar.linalg import economic_qs, economic_qs_linear

from glimix_core.example import linear_eye_cov
from glimix_core.glmm import GLMM
from glimix_core.random import bernoulli_sample

from optimix import check_grad


def test_glmm_precise():
random = RandomState(0)
X = random.randn(100, 5)
Expand Down Expand Up @@ -40,6 +39,7 @@ def test_glmm_precise():

assert_allclose(check_grad(glmm), 0, atol=1e-4)


def test_glmm_delta0():
random = RandomState(0)
X = random.randn(100, 5)
@@ -57,6 +57,7 @@ def test_glmm_delta0():
assert_allclose(glmm.value(), -294.3289786264443)
assert_allclose(check_grad(glmm, step=1e-5), 0, atol=1e-2)


def test_glmm_delta1():
random = RandomState(0)
X = random.randn(100, 5)
@@ -74,6 +75,7 @@ def test_glmm_delta1():
assert_allclose(glmm.value(), -317.9043148331947)
assert_allclose(check_grad(glmm), 0, atol=1e-4)


def test_glmm_wrong_qs():
random = RandomState(0)
X = random.randn(10, 15)
@@ -86,6 +88,7 @@ def test_glmm_wrong_qs():
with pytest.raises(ValueError):
print(GLMM((nsuc, ntri), 'binomial', X, QS))


def test_glmm_optimize():
random = RandomState(0)
X = random.randn(100, 5)
@@ -116,6 +119,7 @@ def test_glmm_optimize():

assert_allclose(glmm.value(), -159.1688201218538, rtol=1e-06)


def test_glmm_optimize_low_rank():
random = RandomState(0)
X = random.randn(100, 5)
@@ -135,10 +139,11 @@ def test_glmm_optimize_low_rank():
glmm.feed().maximize(progress=False)
assert_allclose(glmm.value(), -155.4794212740998, rtol=1e-06)


def test_glmm_bernoulli_problematic():
random = RandomState(1)
N = 500
- G = random.randn(N, N+50)
+ G = random.randn(N, N + 50)
y = bernoulli_sample(0.0, G, random_state=random)
y = (y, )

@@ -160,6 +165,7 @@ def test_glmm_bernoulli_problematic():
assert_allclose(model.scale, 0.6026005889095781, rtol=1e-5)
assert_allclose(model.beta, [-0.01806123661347892])


def _stdnorm(X, axis=None, out=None):
X = ascontiguousarray(X)
if out is None:
@@ -179,6 +185,7 @@ def _stdnorm(X, axis=None, out=None):

return out


def test_glmm_binomial_pheno_list():
random = RandomState(0)
nsamples = 50
@@ -220,6 +227,7 @@ def test_glmm_scale_very_low():

assert_allclose(check_grad(glmm), 0, atol=1e-2)


def test_glmm_scale_very_high():
random = RandomState(0)
X = random.randn(100, 5)
@@ -237,6 +245,7 @@ def test_glmm_scale_very_high():

assert_allclose(check_grad(glmm), 0, atol=1e-3)


def test_glmm_delta_zero():
random = RandomState(0)
X = random.randn(100, 5)
@@ -257,6 +266,7 @@ def test_glmm_delta_zero():
assert_allclose(glmm.value(), -263.56884343483136)
assert_allclose(glmm.delta, 1)


def test_glmm_delta_one():
random = RandomState(0)
X = random.randn(100, 5)
8 changes: 6 additions & 2 deletions glimix_core/lik/test/test_lik.py
@@ -3,9 +3,10 @@
from numpy.random import RandomState
from numpy.testing import assert_allclose

- from glimix_core.lik import (BernoulliProdLik, BinomialProdLik, DeltaProdLik,
- PoissonProdLik)
from glimix_core.link import ProbitLink
+ from glimix_core.lik import (BernoulliProdLik, BinomialProdLik,
+ DeltaProdLik, PoissonProdLik)


def test_delta_prod_lik():
random = RandomState(0)
@@ -21,6 +22,7 @@ def test_delta_prod_lik():
assert_allclose(lik.mean([-1, 0, 0.5]), [-1, 0, 0.5])
assert_allclose(lik.sample([-10, 0, 0.5], random), [-10, 0, 0.5])


def test_bernoulli_prod_lik():
random = RandomState(0)

@@ -35,6 +37,7 @@ def test_bernoulli_prod_lik():
assert_allclose(lik.mean([-1, 0, 0.5]), [0.15865525, 0.5, 0.69146246])
assert_allclose(lik.sample([-10, 0, 0.5], random), [0, 1, 1])


def test_binomial_prod_lik():
random = RandomState(0)

@@ -50,6 +53,7 @@ def test_binomial_prod_lik():
assert_allclose(lik.mean([-1, 0, 0.5]), [0.15865525, 0.5, 0.69146246])
assert_allclose(lik.sample([-10, 0, 0.5], random), [0, 1, 2])


def test_poisson_prod_lik():
random = RandomState(0)

7 changes: 5 additions & 2 deletions glimix_core/link/test/test_link.py
@@ -1,26 +1,29 @@
import pytest

from numpy.testing import assert_allclose

- from glimix_core.link import ProbitLink, LogitLink, LogLink
+ from glimix_core.link import LogitLink, LogLink, ProbitLink
from glimix_core.link.link import Link


def test_probit_link():
link = ProbitLink()
assert_allclose(link.value(link.inv(3.2)), 3.2)
assert_allclose(link.latent_variance, 1.0)


def test_logit_link():
link = LogitLink()
assert_allclose(link.value(link.inv(3.2)), 3.2)
assert_allclose(link.latent_variance, 3.289868133696453)


def test_loglink_link():
link = LogLink()
assert_allclose(link.value(link.inv(3.2)), 3.2)
with pytest.raises(NotImplementedError):
print(link.latent_variance)


def test_link_interface():
link = Link()

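The assertions above are round-trip checks: each link's `value` must invert its own `inv`. A hedged standalone sketch, assuming `ProbitLink` pairs the standard normal quantile with its CDF (consistent with its `latent_variance` of 1.0); the LogitLink expectation `3.289868133696453` is :math:`\pi^2/3`, the variance of the standard logistic distribution:

    from math import pi
    from scipy.stats import norm

    # value(inv(x)) == x for a probit-style link pair.
    p = norm.cdf(3.2)                     # inverse link: latent value -> mean
    assert abs(norm.ppf(p) - 3.2) < 1e-9  # link recovers the latent value

    print(pi**2 / 3)  # 3.289868133696453, the logit link's latent variance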
17 changes: 9 additions & 8 deletions glimix_core/lmm/scan.py
@@ -1,15 +1,14 @@
from __future__ import division

- from numpy import append, dot, empty, log, full
from numpy import sum as npsum

- from tqdm import tqdm

+ from numpy import append, dot, empty, full, log
from numpy_sugar.linalg import solve
+ from tqdm import tqdm

LOG2PI = 1.837877066409345339081937709124758839607238769531250

- class FastScanner(object): # pylint: disable=R0903

+ class FastScanner(object):  # pylint: disable=R0903
r"""Fast inference over multiple covariates.
Let :math:`\tilde{\mathrm M}_i` be a column-matrix of fixed-effect
@@ -40,7 +39,7 @@ def __init__(self, y, M, QS, delta):
self._yTQdiag = [l / r for (l, r) in zip(yTQ, self._diags)]

self._a = [(i**2 / j).sum() for (i, j) in zip(yTQ, self._diags)]
- self._b = [dot(i, j.T) for(i, j) in zip(self._yTQdiag, MTQ)]
+ self._b = [dot(i, j.T) for (i, j) in zip(self._yTQdiag, MTQ)]

self._MTQdiag = [i / j for (i, j) in zip(MTQ, self._diags)]

@@ -119,8 +118,10 @@ def _fast_scan_chunk(self, markers):
beta = solve(self._C[1] - self._C[0], b11m - b00m)
effect_sizes[i] = beta[-1]

- p0 = self._a[1] - 2 * b11m.dot(beta) + beta.dot(self._C[1].dot(beta))
- p1 = self._a[0] - 2 * b00m.dot(beta) + beta.dot(self._C[0]).dot(beta)
+ p0 = self._a[1] - 2 * b11m.dot(beta) + beta.dot(
+ self._C[1].dot(beta))
+ p1 = self._a[0] - 2 * b00m.dot(beta) + beta.dot(
+ self._C[0]).dot(beta)

scale = (p0 + p1) / markers.shape[0]

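A hedged reading of the `p0`/`p1` quadratic forms, inferred from the precomputations visible in `__init__` above and not stated in the diff itself: per eigenvalue block, the scanner caches :math:`a = \mathbf y^\top\mathrm Q\mathrm D^{-1}\mathrm Q^\top\mathbf y`, :math:`\mathbf b^\top = \mathbf y^\top\mathrm Q\mathrm D^{-1}\mathrm Q^\top\mathrm M`, and :math:`\mathrm C = \mathrm M^\top\mathrm Q\mathrm D^{-1}\mathrm Q^\top\mathrm M`, with :math:`\mathrm D` the scaled eigenvalue diagonal (`self._diags`). Each `p` term then expands a weighted residual sum of squares,

:math:`a - 2\,\mathbf b^\top\boldsymbol\beta + \boldsymbol\beta^\top\mathrm C\boldsymbol\beta = (\mathbf y - \mathrm M\boldsymbol\beta)^\top \mathrm Q\mathrm D^{-1}\mathrm Q^\top (\mathbf y - \mathrm M\boldsymbol\beta),`

so evaluating a new marker only touches the cached `a`, `b`, and `C` pieces; no :math:`n \times n` solve happens inside the scan loop, which is the point of the "fast" scanner.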
2 changes: 1 addition & 1 deletion glimix_core/mean/__init__.py
@@ -26,8 +26,8 @@
:members:
"""

- from .offset import OffsetMean
from .linear import LinearMean
+ from .offset import OffsetMean
from .sum import SumMean

__all__ = ['OffsetMean', 'LinearMean', 'SumMean']
9 changes: 4 additions & 5 deletions glimix_core/mean/linear.py
@@ -1,10 +1,8 @@
from __future__ import division

- from numpy import zeros, dot
- from numpy import ascontiguousarray
+ from numpy import ascontiguousarray, dot, zeros

- from optimix import Function
- from optimix import Vector
+ from optimix import Function, Vector


class LinearMean(Function):
@@ -18,6 +16,7 @@ class LinearMean(Function):
where :math:`\boldsymbol\alpha` is a vector of effect sizes.
"""

def __init__(self, size):
Function.__init__(self, effsizes=Vector(zeros(size)))

Expand All @@ -32,7 +31,7 @@ def value(self, x):
"""
return dot(x, self.variables().get('effsizes').value)

- def gradient(self, x): # pylint: disable=R0201
+ def gradient(self, x):  # pylint: disable=R0201
r"""Gradient of the linear mean function.
Args:
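A runnable sketch of the two methods in this file: `value` is the dot product :math:`\mathbf x^\top\boldsymbol\alpha` shown above, and the gradient with respect to the effect sizes is just `x` itself (the numbers below are made up):

    from numpy import array, dot, zeros

    effsizes = zeros(3)
    effsizes[:] = [0.5, -1.0, 2.0]   # hypothetical fitted effect sizes

    x = array([[1.0, 0.0, 1.0],
               [0.0, 2.0, 0.0]])

    print(dot(x, effsizes))  # value(): [ 2.5 -2. ]
    # gradient(x) w.r.t. effsizes is x: d(dot(x, alpha))/d(alpha) = x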
