Skip to content

Commit

Permalink
give each multipledispatch-registered implementation a unique name so…
Browse files Browse the repository at this point in the history
… that docs are generated correctly. Resolves #1494.
  • Loading branch information
st-- committed Jun 4, 2020
1 parent 647ca90 commit b7c0e00
Show file tree
Hide file tree
Showing 17 changed files with 63 additions and 72 deletions.
6 changes: 2 additions & 4 deletions gpflow/conditionals/conditionals.py
@@ -1,5 +1,3 @@
# noqa: F811

import tensorflow as tf

from ..covariances import Kuf, Kuu
Expand All @@ -12,7 +10,7 @@


@conditional.register(object, InducingVariables, Kernel, object)
def _conditional(
def single_output_conditional(
Xnew: tf.Tensor,
inducing_variable: InducingVariables,
kernel: Kernel,
Expand Down Expand Up @@ -64,7 +62,7 @@ def _conditional(


@conditional.register(object, object, Kernel, object)
def _conditional(
def plain_conditional(
Xnew: tf.Tensor,
X: tf.Tensor,
kernel: Kernel,
Expand Down
5 changes: 0 additions & 5 deletions gpflow/conditionals/multioutput/conditionals.py
@@ -1,8 +1,3 @@
# flake8: ignore=F811
# noqa: ignore=F811
# flake8: F811
# noqa: F811

import tensorflow as tf

from ... import covariances
Expand Down
2 changes: 1 addition & 1 deletion gpflow/conditionals/multioutput/sample_conditionals.py
Expand Up @@ -12,7 +12,7 @@
@sample_conditional.register(
object, SharedIndependentInducingVariables, LinearCoregionalization, object
)
def _sample_conditional(
def sample_conditional__SharedIndependentInducingVariables__LinearCoregionalization(
Xnew,
inducing_variable,
kernel,
Expand Down
2 changes: 1 addition & 1 deletion gpflow/conditionals/sample_conditionals.py
Expand Up @@ -8,7 +8,7 @@

@sample_conditional.register(object, object, Kernel, object)
@sample_conditional.register(object, InducingVariables, Kernel, object)
def _sample_conditional(
def sample_conditional__InducingVariables__Kernel(
Xnew: tf.Tensor,
inducing_variable: InducingVariables,
kernel: Kernel,
Expand Down
10 changes: 5 additions & 5 deletions gpflow/covariances/kufs.py
Expand Up @@ -23,8 +23,8 @@ def Kuf_sqexp_multiscale(inducing_variable: Multiscale, kernel: SquaredExponenti


@Kuf.register(InducingPatches, Convolutional, object)
def Kuf_conv_patch(feat, kern, Xnew):
Xp = kern.get_patches(Xnew) # [N, num_patches, patch_len]
bigKzx = kern.base_kernel.K(feat.Z, Xp) # [M, N, P] -- thanks to broadcasting of kernels
Kzx = tf.reduce_sum(bigKzx * kern.weights if hasattr(kern, "weights") else bigKzx, [2])
return Kzx / kern.num_patches
def Kuf_conv_patch(inducing_variable: InducingPatches, kernel: Convolutional, Xnew):
    """Cross-covariance Kuf between inducing patches and new inputs Xnew.

    Extracts every patch from Xnew, evaluates the base kernel between the
    inducing patches Z and the extracted patches, optionally applies the
    kernel's per-patch weights, then averages over the patch axis.
    """
    patches = kernel.get_patches(Xnew)  # [N, num_patches, patch_len]
    # Base kernel broadcasts over the patch dimension, giving [M, N, P].
    k_z_patches = kernel.base_kernel.K(inducing_variable.Z, patches)
    # Weighted-convolutional kernels carry a `weights` attribute; plain ones do not.
    if hasattr(kernel, "weights"):
        k_z_patches = k_z_patches * kernel.weights
    summed = tf.reduce_sum(k_z_patches, [2])
    return summed / kernel.num_patches
6 changes: 3 additions & 3 deletions gpflow/covariances/kuus.py
Expand Up @@ -14,7 +14,7 @@ def Kuu_kernel_inducingpoints(inducing_variable: InducingPoints, kernel: Kernel,


@Kuu.register(Multiscale, SquaredExponential)
def Kuu_sqexp_multiscale(inducing_variable: Multiscale, kernel: SquaredExponential, *, jitter=0.0):
def Kuu_squaredexponential_multiscale(inducing_variable: Multiscale, kernel: SquaredExponential, *, jitter=0.0):
Zmu, Zlen = kernel.slice(inducing_variable.Z, inducing_variable.scales)
idlengthscales2 = tf.square(kernel.lengthscales + Zlen)
sc = tf.sqrt(
Expand All @@ -27,5 +27,5 @@ def Kuu_sqexp_multiscale(inducing_variable: Multiscale, kernel: SquaredExponenti


@Kuu.register(InducingPatches, Convolutional)
def Kuu_conv_patch(feat, kern, jitter=0.0):
return kern.base_kernel.K(feat.Z) + jitter * tf.eye(len(feat), dtype=default_float())
def Kuu_conv_patch(inducing_variable: InducingPatches, kernel: Convolutional, jitter=0.0):
    """Covariance Kuu of the inducing patches under the convolutional
    kernel's base kernel, with `jitter` added to the diagonal for
    numerical stability.
    """
    kzz = kernel.base_kernel.K(inducing_variable.Z)
    jitter_matrix = jitter * tf.eye(len(inducing_variable), dtype=default_float())
    return kzz + jitter_matrix
16 changes: 8 additions & 8 deletions gpflow/covariances/multioutput/kufs.py
Expand Up @@ -19,12 +19,12 @@


@Kuf.register(InducingPoints, MultioutputKernel, object)
def _Kuf(inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor):
def _Kuf__InducingPoints__MultioutputKernel(
    inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor
):
    """Full cross-covariance between the inducing points Z and Xnew.

    Evaluates the multioutput kernel with full covariance over both inputs
    and outputs; the returned tensor has shape [M, P, N, P].
    """
    return kernel(inducing_variable.Z, Xnew, full_cov=True, full_output_cov=True)


@Kuf.register(SharedIndependentInducingVariables, SharedIndependent, object)
def _Kuf(
def _Kuf__SharedIndependentInducingVariables__SharedIndependent(
inducing_variable: SharedIndependentInducingVariables,
kernel: SharedIndependent,
Xnew: tf.Tensor,
Expand All @@ -33,7 +33,7 @@ def _Kuf(


@Kuf.register(SeparateIndependentInducingVariables, SharedIndependent, object)
def _Kuf(
def _Kuf__SeparateIndependentInducingVariables__SharedIndependent(
inducing_variable: SeparateIndependentInducingVariables,
kernel: SharedIndependent,
Xnew: tf.Tensor,
Expand All @@ -44,7 +44,7 @@ def _Kuf(


@Kuf.register(SharedIndependentInducingVariables, SeparateIndependent, object)
def _Kuf(
def _Kuf__SharedIndependentInducingVariables__SeparateIndependent(
inducing_variable: SharedIndependentInducingVariables,
kernel: SeparateIndependent,
Xnew: tf.Tensor,
Expand All @@ -55,7 +55,7 @@ def _Kuf(


@Kuf.register(SeparateIndependentInducingVariables, SeparateIndependent, object)
def _Kuf(
def _Kuf__SeparateIndependentInducingVariables__SeparateIndependent(
inducing_variable: SeparateIndependentInducingVariables,
kernel: SeparateIndependent,
Xnew: tf.Tensor,
Expand All @@ -71,7 +71,7 @@ def _Kuf(
LinearCoregionalization,
object,
)
def _Kuf(
def _Kuf__Fallback__LinearCoregionalization(
inducing_variable: Union[
SeparateIndependentInducingVariables, SharedIndependentInducingVariables
],
Expand All @@ -84,7 +84,7 @@ def _Kuf(


@Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)
def _Kuf(
def _Kuf__SharedIndependentInducingVariables__LinearCoregionalization(
inducing_variable: SharedIndependentInducingVariables,
kernel: SeparateIndependent,
Xnew: tf.Tensor,
Expand All @@ -95,7 +95,7 @@ def _Kuf(


@Kuf.register(SeparateIndependentInducingVariables, LinearCoregionalization, object)
def _Kuf(inducing_variable, kernel, Xnew):
def _Kuf__SeparateIndependentInducingVariables__LinearCoregionalization(inducing_variable: SeparateIndependentInducingVariables, kernel: LinearCoregionalization, Xnew: tf.Tensor):
return tf.stack(
[Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)],
axis=0,
Expand Down
10 changes: 5 additions & 5 deletions gpflow/covariances/multioutput/kuus.py
Expand Up @@ -19,15 +19,15 @@


@Kuu.register(InducingPoints, MultioutputKernel)
def _Kuu(inducing_variable: InducingPoints, kernel: MultioutputKernel, *, jitter=0.0):
def _Kuu__InducingPoints__MultioutputKernel(
    inducing_variable: InducingPoints, kernel: MultioutputKernel, *, jitter=0.0
):
    """Covariance of the inducing points under a multioutput kernel.

    Returns the full [M, P, M, P] covariance with `jitter` added along the
    diagonal of the flattened [M*P, M*P] matrix for numerical stability.
    """
    kmm = kernel(inducing_variable.Z, full_cov=True, full_output_cov=True)  # [M, P, M, P]
    kmm_shape = tf.shape(kmm)
    # Side length of the flattened square covariance matrix: M * P.
    flat_dim = kmm_shape[0] * kmm_shape[1]
    identity = tf.eye(flat_dim, dtype=kmm.dtype)
    return kmm + jitter * tf.reshape(identity, kmm_shape)


@Kuu.register(FallbackSharedIndependentInducingVariables, SharedIndependent)
def _Kuu(
def _Kuu__FallbackSharedIndependentInducingVariables__shared(
inducing_variable: FallbackSharedIndependentInducingVariables,
kernel: SharedIndependent,
*,
Expand All @@ -39,7 +39,7 @@ def _Kuu(


@Kuu.register(FallbackSharedIndependentInducingVariables, (SeparateIndependent, IndependentLatent))
def _Kuu(
def _Kuu__FallbackSharedIndependentInducingVariables__independent(
inducing_variable: FallbackSharedIndependentInducingVariables,
kernel: Union[SeparateIndependent, IndependentLatent],
*,
Expand All @@ -53,7 +53,7 @@ def _Kuu(


@Kuu.register(FallbackSeparateIndependentInducingVariables, SharedIndependent)
def _Kuu(
def _Kuu__FallbackSeparateIndependentInducingVariables__shared(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: SharedIndependent,
*,
Expand All @@ -69,7 +69,7 @@ def _Kuu(
@Kuu.register(
FallbackSeparateIndependentInducingVariables, (SeparateIndependent, LinearCoregionalization)
)
def _Kuu(
def _Kuu__FallbackSeparateIndependentInducingVariables__independent(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: Union[SeparateIndependent, LinearCoregionalization],
*,
Expand Down
4 changes: 2 additions & 2 deletions gpflow/expectations/cross_kernels.py
Expand Up @@ -14,7 +14,7 @@
kernels.Linear,
InducingPoints,
)
def _E(p, sqexp_kern, feat1, lin_kern, feat2, nghp=None):
def _E__Gaussian__SquaredExponential__InducingPoints__Linear__InducingPoints(p, sqexp_kern, feat1, lin_kern, feat2, nghp=None):
"""
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
Expand Down Expand Up @@ -96,7 +96,7 @@ def take_with_ard(value):
kernels.SquaredExponential,
InducingPoints,
)
def _E(p, lin_kern, feat1, sqexp_kern, feat2, nghp=None):
def _E__Gaussian__Linear__InducingPoints__SquaredExponential__InducingPoints(p, lin_kern, feat1, sqexp_kern, feat2, nghp=None):
"""
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
Expand Down
10 changes: 5 additions & 5 deletions gpflow/expectations/linears.py
Expand Up @@ -12,7 +12,7 @@


@dispatch.expectation.register(Gaussian, kernels.Linear, NoneType, NoneType, NoneType)
def _E(p, kernel, _, __, ___, nghp=None):
def _E__Gaussian__Linear(p, kernel, _, __, ___, nghp=None):
"""
Compute the expectation:
<diag(K_{X, X})>_p(X)
Expand All @@ -28,7 +28,7 @@ def _E(p, kernel, _, __, ___, nghp=None):


@dispatch.expectation.register(Gaussian, kernels.Linear, InducingPoints, NoneType, NoneType)
def _E(p, kernel, inducing_variable, _, __, nghp=None):
def _E__Gaussian__Linear__InducingPoints(p, kernel, inducing_variable, _, __, nghp=None):
"""
Compute the expectation:
<K_{X, Z}>_p(X)
Expand All @@ -43,7 +43,7 @@ def _E(p, kernel, inducing_variable, _, __, nghp=None):


@dispatch.expectation.register(Gaussian, kernels.Linear, InducingPoints, mfn.Identity, NoneType)
def _E(p, kernel, inducing_variable, mean, _, nghp=None):
def _E__Gaussian__Linear__InducingPoints__Identity(p, kernel, inducing_variable, mean, _, nghp=None):
"""
Compute the expectation:
expectation[n] = <K_{Z, x_n} x_n^T>_p(x_n)
Expand All @@ -62,7 +62,7 @@ def _E(p, kernel, inducing_variable, mean, _, nghp=None):
@dispatch.expectation.register(
MarkovGaussian, kernels.Linear, InducingPoints, mfn.Identity, NoneType
)
def _E(p, kernel, inducing_variable, mean, _, nghp=None):
def _E__MarkovGaussian__Linear__InducingPoints__Identity(p, kernel, inducing_variable, mean, _, nghp=None):
"""
Compute the expectation:
expectation[n] = <K_{Z, x_n} x_{n+1}^T>_p(x_{n:n+1})
Expand All @@ -83,7 +83,7 @@ def _E(p, kernel, inducing_variable, mean, _, nghp=None):
@dispatch.expectation.register(
(Gaussian, DiagonalGaussian), kernels.Linear, InducingPoints, kernels.Linear, InducingPoints
)
def _E(p, kern1, feat1, kern2, feat2, nghp=None):
def _E__Gaussian__Linear__InducingPoints__Linear__InducingPoints(p, kern1, feat1, kern2, feat2, nghp=None):
"""
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
Expand Down
18 changes: 8 additions & 10 deletions gpflow/expectations/mean_functions.py
@@ -1,5 +1,3 @@
# noqa: F811

import tensorflow as tf

from .. import mean_functions as mfn
Expand All @@ -11,7 +9,7 @@


@dispatch.expectation.register(Gaussian, (mfn.Linear, mfn.Constant), NoneType, NoneType, NoneType)
def _E(p, mean, _, __, ___, nghp=None):
def _E__Gaussian__Linear(p, mean, _, __, ___, nghp=None):
"""
Compute the expectation:
<m(X)>_p(X)
Expand All @@ -23,7 +21,7 @@ def _E(p, mean, _, __, ___, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Constant, NoneType, mfn.Constant, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Constant__Constant(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -35,7 +33,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Constant, NoneType, mfn.MeanFunction, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Constant__MeanFunction(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -49,7 +47,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.MeanFunction, NoneType, mfn.Constant, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__MeanFunction__Constant(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -63,7 +61,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Identity, NoneType, mfn.Identity, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Identity__Identity(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -75,7 +73,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Identity, NoneType, mfn.Linear, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Identity__Linear(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -93,7 +91,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Linear, NoneType, mfn.Identity, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Linear__Identity(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -113,7 +111,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Linear, NoneType, mfn.Linear, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Linear__Linear(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand Down
14 changes: 7 additions & 7 deletions gpflow/expectations/misc.py
Expand Up @@ -16,7 +16,7 @@
@dispatch.expectation.register(
(Gaussian, MarkovGaussian), mfn.Identity, NoneType, kernels.Linear, InducingPoints
)
def _E(p, mean, _, kernel, inducing_variable, nghp=None):
def _E__Gaussian__Identity__Linear__InducingPoints(p, mean, _, kernel, inducing_variable, nghp=None):
"""
Compute the expectation:
expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
Expand All @@ -31,7 +31,7 @@ def _E(p, mean, _, kernel, inducing_variable, nghp=None):
@dispatch.expectation.register(
(Gaussian, MarkovGaussian), kernels.Kernel, InducingVariables, mfn.MeanFunction, NoneType
)
def _E(p, kernel, inducing_variable, mean, _, nghp=None):
def _E__Gaussian__Kernel__InducingVariables__MeanFunction(p, kernel, inducing_variable, mean, _, nghp=None):
"""
Compute the expectation:
expectation[n] = <K_{Z, x_n} m(x_n)>_p(x_n)
Expand All @@ -43,7 +43,7 @@ def _E(p, kernel, inducing_variable, mean, _, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Constant, NoneType, kernels.Kernel, InducingPoints)
def _E(p, constant_mean, _, kernel, inducing_variable, nghp=None):
def _E__Gaussian__ConstantMF__Kernel__InducingPoints(p, constant_mean, _, kernel, inducing_variable, nghp=None):
"""
Compute the expectation:
expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n)
Expand All @@ -59,7 +59,7 @@ def _E(p, constant_mean, _, kernel, inducing_variable, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Linear, NoneType, kernels.Kernel, InducingPoints)
def _E(p, linear_mean, _, kernel, inducing_variable, nghp=None):
def _E__Gaussian__LinearMF__Kernel__InducingPoints(p, linear_mean, _, kernel, inducing_variable, nghp=None):
"""
Compute the expectation:
expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n)
Expand All @@ -80,7 +80,7 @@ def _E(p, linear_mean, _, kernel, inducing_variable, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Identity, NoneType, kernels.Kernel, InducingPoints)
def _E(p, identity_mean, _, kernel, inducing_variable, nghp=None):
def _E__Gaussian__Identity__Kernel__InducingPoints(p, identity_mean, _, kernel, inducing_variable, nghp=None):
"""
This prevents infinite recursion for kernels that don't have specific
implementations of _expectation(p, identity_mean, None, kernel, inducing_variable).
Expand All @@ -99,7 +99,7 @@ def _E(p, identity_mean, _, kernel, inducing_variable, nghp=None):
@dispatch.expectation.register(
DiagonalGaussian, object, (InducingVariables, NoneType), object, (InducingVariables, NoneType)
)
def _E(p, obj1, feat1, obj2, feat2, nghp=None):
def _E__DiagonalGaussian__fallback(p, obj1, feat1, obj2, feat2, nghp=None):
    """Fallback for DiagonalGaussian measures: expand the diagonal
    covariance into a dense matrix and delegate to the Gaussian
    expectation rules.
    """
    full_cov_distribution = Gaussian(p.mu, tf.linalg.diag(p.cov))
    return expectation(full_cov_distribution, (obj1, feat1), (obj2, feat2), nghp=nghp)

Expand All @@ -110,7 +110,7 @@ def _E(p, obj1, feat1, obj2, feat2, nghp=None):
@dispatch.expectation.register(
MarkovGaussian, object, (InducingVariables, NoneType), object, (InducingVariables, NoneType)
)
def _E(p, obj1, feat1, obj2, feat2, nghp=None):
def _E__MarkovGaussian__fallback(p, obj1, feat1, obj2, feat2, nghp=None):
"""
Nota Bene: if only one object is passed, obj1 is
associated with x_n, whereas obj2 with x_{n+1}
Expand Down

0 comments on commit b7c0e00

Please sign in to comment.