Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

give each multipledispatch-registered implementation a unique name #1496

Open
wants to merge 7 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
6 changes: 2 additions & 4 deletions gpflow/conditionals/conditionals.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
# noqa: F811

import tensorflow as tf

from ..covariances import Kuf, Kuu
Expand All @@ -12,7 +10,7 @@


@conditional.register(object, InducingVariables, Kernel, object)
def _conditional(
def single_output_conditional(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
def single_output_conditional(
def conditional__InducingVariables__Kernel(

Xnew: tf.Tensor,
inducing_variable: InducingVariables,
kernel: Kernel,
Expand Down Expand Up @@ -64,7 +62,7 @@ def _conditional(


@conditional.register(object, object, Kernel, object)
def _conditional(
def plain_conditional(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
def plain_conditional(
def conditional__object__Kernel(

Xnew: tf.Tensor,
X: tf.Tensor,
kernel: Kernel,
Expand Down
5 changes: 0 additions & 5 deletions gpflow/conditionals/multioutput/conditionals.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,3 @@
# flake8: ignore=F811
# noqa: ignore=F811
# flake8: F811
# noqa: F811

import tensorflow as tf

from ... import covariances
Expand Down
2 changes: 1 addition & 1 deletion gpflow/conditionals/multioutput/sample_conditionals.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
@sample_conditional.register(
object, SharedIndependentInducingVariables, LinearCoregionalization, object
)
def _sample_conditional(
def sample_conditional__SharedIndependentInducingVariables__LinearCoregionalization(
Xnew,
inducing_variable,
kernel,
Expand Down
2 changes: 1 addition & 1 deletion gpflow/conditionals/sample_conditionals.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

@sample_conditional.register(object, object, Kernel, object)
@sample_conditional.register(object, InducingVariables, Kernel, object)
def _sample_conditional(
def sample_conditional__InducingVariables__Kernel(
Xnew: tf.Tensor,
inducing_variable: InducingVariables,
kernel: Kernel,
Expand Down
12 changes: 7 additions & 5 deletions gpflow/covariances/kufs.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,10 @@ def Kuf_sqexp_multiscale(inducing_variable: Multiscale, kernel: SquaredExponenti


@Kuf.register(InducingPatches, Convolutional, object)
def Kuf_conv_patch(feat, kern, Xnew):
Xp = kern.get_patches(Xnew) # [N, num_patches, patch_len]
bigKzx = kern.base_kernel.K(feat.Z, Xp) # [M, N, P] -- thanks to broadcasting of kernels
Kzx = tf.reduce_sum(bigKzx * kern.weights if hasattr(kern, "weights") else bigKzx, [2])
return Kzx / kern.num_patches
def Kuf_conv_patch(inducing_variable: InducingPatches, kernel: Convolutional, Xnew):
    """Covariance between convolutional inducing patches and new inputs.

    Extracts the patches of each row of `Xnew`, evaluates the base kernel
    between the inducing patches Z and those patches, optionally applies
    per-patch weights, and averages over the patch axis.

    :param inducing_variable: inducing patches with attribute ``Z`` ([M, patch_len]).
    :param kernel: convolutional kernel providing ``get_patches``, ``base_kernel``,
        ``num_patches`` and (optionally) ``weights``.
    :param Xnew: new input images/rows to compare against the inducing patches.
    :return: a [M, N] covariance matrix, averaged over patches.
    """
    patches = kernel.get_patches(Xnew)  # [N, num_patches, patch_len]
    # [M, N, P] -- the base kernel broadcasts over the patch axis.
    full_cov = kernel.base_kernel.K(inducing_variable.Z, patches)
    # Weighted convolutional kernels carry a `weights` attribute; plain ones do not.
    if hasattr(kernel, "weights"):
        full_cov = full_cov * kernel.weights
    summed = tf.reduce_sum(full_cov, [2])
    return summed / kernel.num_patches
10 changes: 7 additions & 3 deletions gpflow/covariances/kuus.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@ def Kuu_kernel_inducingpoints(inducing_variable: InducingPoints, kernel: Kernel,


@Kuu.register(Multiscale, SquaredExponential)
def Kuu_sqexp_multiscale(inducing_variable: Multiscale, kernel: SquaredExponential, *, jitter=0.0):
def Kuu_squaredexponential_multiscale(
inducing_variable: Multiscale, kernel: SquaredExponential, *, jitter=0.0
):
Zmu, Zlen = kernel.slice(inducing_variable.Z, inducing_variable.scales)
idlengthscales2 = tf.square(kernel.lengthscales + Zlen)
sc = tf.sqrt(
Expand All @@ -27,5 +29,7 @@ def Kuu_sqexp_multiscale(inducing_variable: Multiscale, kernel: SquaredExponenti


@Kuu.register(InducingPatches, Convolutional)
def Kuu_conv_patch(feat, kern, jitter=0.0):
return kern.base_kernel.K(feat.Z) + jitter * tf.eye(len(feat), dtype=default_float())
def Kuu_conv_patch(inducing_variable: InducingPatches, kernel: Convolutional, jitter=0.0):
    """Covariance matrix of the convolutional inducing patches.

    :param inducing_variable: inducing patches; ``Z`` holds the patch locations
        and ``len(inducing_variable)`` gives their count M.
    :param kernel: convolutional kernel whose ``base_kernel`` evaluates patch
        covariances.
    :param jitter: value added to the diagonal for numerical stability.
    :return: a [M, M] covariance matrix with jitter on the diagonal.
    """
    base_cov = kernel.base_kernel.K(inducing_variable.Z)
    jitter_diag = jitter * tf.eye(len(inducing_variable), dtype=default_float())
    return base_cov + jitter_diag
22 changes: 14 additions & 8 deletions gpflow/covariances/multioutput/kufs.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,14 @@


@Kuf.register(InducingPoints, MultioutputKernel, object)
def _Kuf(inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor):
def _Kuf__InducingPoints__MultioutputKernel(
    inducing_variable: InducingPoints, kernel: MultioutputKernel, Xnew: tf.Tensor
):
    """Kuf between inducing points and new inputs under a multioutput kernel.

    Evaluates the kernel with full covariance over both inputs and outputs,
    yielding a [M, P, N, P] tensor.
    """
    Z = inducing_variable.Z
    return kernel(Z, Xnew, full_cov=True, full_output_cov=True)  # [M, P, N, P]


@Kuf.register(SharedIndependentInducingVariables, SharedIndependent, object)
def _Kuf(
def _Kuf__SharedIndependentInducingVariables__SharedIndependent(
inducing_variable: SharedIndependentInducingVariables,
kernel: SharedIndependent,
Xnew: tf.Tensor,
Expand All @@ -33,7 +35,7 @@ def _Kuf(


@Kuf.register(SeparateIndependentInducingVariables, SharedIndependent, object)
def _Kuf(
def _Kuf__SeparateIndependentInducingVariables__SharedIndependent(
inducing_variable: SeparateIndependentInducingVariables,
kernel: SharedIndependent,
Xnew: tf.Tensor,
Expand All @@ -44,7 +46,7 @@ def _Kuf(


@Kuf.register(SharedIndependentInducingVariables, SeparateIndependent, object)
def _Kuf(
def _Kuf__SharedIndependentInducingVariables__SeparateIndependent(
inducing_variable: SharedIndependentInducingVariables,
kernel: SeparateIndependent,
Xnew: tf.Tensor,
Expand All @@ -55,7 +57,7 @@ def _Kuf(


@Kuf.register(SeparateIndependentInducingVariables, SeparateIndependent, object)
def _Kuf(
def _Kuf__SeparateIndependentInducingVariables__SeparateIndependent(
inducing_variable: SeparateIndependentInducingVariables,
kernel: SeparateIndependent,
Xnew: tf.Tensor,
Expand All @@ -71,7 +73,7 @@ def _Kuf(
LinearCoregionalization,
object,
)
def _Kuf(
def _Kuf__Fallback__LinearCoregionalization(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are you sure about the fallback here?

inducing_variable: Union[
SeparateIndependentInducingVariables, SharedIndependentInducingVariables
],
Expand All @@ -84,7 +86,7 @@ def _Kuf(


@Kuf.register(SharedIndependentInducingVariables, LinearCoregionalization, object)
def _Kuf(
def _Kuf__SharedIndependentInducingVariables__LinearCoregionalization(
inducing_variable: SharedIndependentInducingVariables,
kernel: SeparateIndependent,
Xnew: tf.Tensor,
Expand All @@ -95,7 +97,11 @@ def _Kuf(


@Kuf.register(SeparateIndependentInducingVariables, LinearCoregionalization, object)
def _Kuf(inducing_variable, kernel, Xnew):
def _Kuf__SeparateIndependentInducingVariables__LinearCoregionalization(
inducing_variable: SeparateIndependentInducingVariables,
kernel: LinearCoregionalization,
Xnew: tf.Tensor,
):
return tf.stack(
[Kuf(f, k, Xnew) for f, k in zip(inducing_variable.inducing_variable_list, kernel.kernels)],
axis=0,
Expand Down
12 changes: 7 additions & 5 deletions gpflow/covariances/multioutput/kuus.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,15 +19,17 @@


@Kuu.register(InducingPoints, MultioutputKernel)
def _Kuu(inducing_variable: InducingPoints, kernel: MultioutputKernel, *, jitter=0.0):
def _Kuu__InducingPoints__MultioutputKernel(
    inducing_variable: InducingPoints, kernel: MultioutputKernel, *, jitter=0.0
):
    """Kuu of the inducing points under a multioutput kernel, with jitter.

    :param inducing_variable: inducing points with locations ``Z``.
    :param kernel: multioutput kernel evaluated with full input and output covariance.
    :param jitter: keyword-only diagonal jitter for numerical stability.
    :return: a [M, P, M, P] tensor with `jitter` added along the diagonal of the
        flattened [M*P, M*P] view.
    """
    Kmm = kernel(inducing_variable.Z, full_cov=True, full_output_cov=True)  # [M, P, M, P]
    shape = tf.shape(Kmm)
    flat_dim = shape[0] * shape[1]
    # Identity on the flattened [M*P, M*P] matrix, reshaped back to [M, P, M, P].
    jittermat = jitter * tf.reshape(tf.eye(flat_dim, dtype=Kmm.dtype), shape)
    return Kmm + jittermat


@Kuu.register(FallbackSharedIndependentInducingVariables, SharedIndependent)
def _Kuu(
def _Kuu__FallbackSharedIndependentInducingVariables__shared(
inducing_variable: FallbackSharedIndependentInducingVariables,
kernel: SharedIndependent,
*,
Expand All @@ -39,7 +41,7 @@ def _Kuu(


@Kuu.register(FallbackSharedIndependentInducingVariables, (SeparateIndependent, IndependentLatent))
def _Kuu(
def _Kuu__FallbackSharedIndependentInducingVariables__independent(
inducing_variable: FallbackSharedIndependentInducingVariables,
kernel: Union[SeparateIndependent, IndependentLatent],
*,
Expand All @@ -53,7 +55,7 @@ def _Kuu(


@Kuu.register(FallbackSeparateIndependentInducingVariables, SharedIndependent)
def _Kuu(
def _Kuu__FallbackSeparateIndependentInducingVariables__shared(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: SharedIndependent,
*,
Expand All @@ -69,7 +71,7 @@ def _Kuu(
@Kuu.register(
FallbackSeparateIndependentInducingVariables, (SeparateIndependent, LinearCoregionalization)
)
def _Kuu(
def _Kuu__FallbackSeparateIndependentInducingVariables__independent(
inducing_variable: FallbackSeparateIndependentInducingVariables,
kernel: Union[SeparateIndependent, LinearCoregionalization],
*,
Expand Down
8 changes: 6 additions & 2 deletions gpflow/expectations/cross_kernels.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@
kernels.Linear,
InducingPoints,
)
def _E(p, sqexp_kern, feat1, lin_kern, feat2, nghp=None):
def _E__Gaussian__SquaredExponential__InducingPoints__Linear__InducingPoints(
p, sqexp_kern, feat1, lin_kern, feat2, nghp=None
):
"""
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
Expand Down Expand Up @@ -96,7 +98,9 @@ def take_with_ard(value):
kernels.SquaredExponential,
InducingPoints,
)
def _E(p, lin_kern, feat1, sqexp_kern, feat2, nghp=None):
def _E__Gaussian__Linear__InducingPoints__SquaredExponential__InducingPoints(
p, lin_kern, feat1, sqexp_kern, feat2, nghp=None
):
"""
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
Expand Down
16 changes: 11 additions & 5 deletions gpflow/expectations/linears.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@


@dispatch.expectation.register(Gaussian, kernels.Linear, NoneType, NoneType, NoneType)
def _E(p, kernel, _, __, ___, nghp=None):
def _E__Gaussian__Linear(p, kernel, _, __, ___, nghp=None):
"""
Compute the expectation:
<diag(K_{X, X})>_p(X)
Expand All @@ -28,7 +28,7 @@ def _E(p, kernel, _, __, ___, nghp=None):


@dispatch.expectation.register(Gaussian, kernels.Linear, InducingPoints, NoneType, NoneType)
def _E(p, kernel, inducing_variable, _, __, nghp=None):
def _E__Gaussian__Linear__InducingPoints(p, kernel, inducing_variable, _, __, nghp=None):
"""
Compute the expectation:
<K_{X, Z}>_p(X)
Expand All @@ -43,7 +43,9 @@ def _E(p, kernel, inducing_variable, _, __, nghp=None):


@dispatch.expectation.register(Gaussian, kernels.Linear, InducingPoints, mfn.Identity, NoneType)
def _E(p, kernel, inducing_variable, mean, _, nghp=None):
def _E__Gaussian__Linear__InducingPoints__Identity(
p, kernel, inducing_variable, mean, _, nghp=None
):
"""
Compute the expectation:
expectation[n] = <K_{Z, x_n} x_n^T>_p(x_n)
Expand All @@ -62,7 +64,9 @@ def _E(p, kernel, inducing_variable, mean, _, nghp=None):
@dispatch.expectation.register(
MarkovGaussian, kernels.Linear, InducingPoints, mfn.Identity, NoneType
)
def _E(p, kernel, inducing_variable, mean, _, nghp=None):
def _E__MarkovGaussian__Linear__InducingPoints__Identity(
p, kernel, inducing_variable, mean, _, nghp=None
):
"""
Compute the expectation:
expectation[n] = <K_{Z, x_n} x_{n+1}^T>_p(x_{n:n+1})
Expand All @@ -83,7 +87,9 @@ def _E(p, kernel, inducing_variable, mean, _, nghp=None):
@dispatch.expectation.register(
(Gaussian, DiagonalGaussian), kernels.Linear, InducingPoints, kernels.Linear, InducingPoints
)
def _E(p, kern1, feat1, kern2, feat2, nghp=None):
def _E__Gaussian__Linear__InducingPoints__Linear__InducingPoints(
p, kern1, feat1, kern2, feat2, nghp=None
):
"""
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
Expand Down
18 changes: 8 additions & 10 deletions gpflow/expectations/mean_functions.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
# noqa: F811

import tensorflow as tf

from .. import mean_functions as mfn
Expand All @@ -11,7 +9,7 @@


@dispatch.expectation.register(Gaussian, (mfn.Linear, mfn.Constant), NoneType, NoneType, NoneType)
def _E(p, mean, _, __, ___, nghp=None):
def _E__Gaussian__Linear(p, mean, _, __, ___, nghp=None):
"""
Compute the expectation:
<m(X)>_p(X)
Expand All @@ -23,7 +21,7 @@ def _E(p, mean, _, __, ___, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Constant, NoneType, mfn.Constant, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Constant__Constant(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -35,7 +33,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Constant, NoneType, mfn.MeanFunction, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Constant__MeanFunction(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -49,7 +47,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.MeanFunction, NoneType, mfn.Constant, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__MeanFunction__Constant(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -63,7 +61,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Identity, NoneType, mfn.Identity, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Identity__Identity(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -75,7 +73,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Identity, NoneType, mfn.Linear, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Identity__Linear(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -93,7 +91,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Linear, NoneType, mfn.Identity, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Linear__Identity(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand All @@ -113,7 +111,7 @@ def _E(p, mean1, _, mean2, __, nghp=None):


@dispatch.expectation.register(Gaussian, mfn.Linear, NoneType, mfn.Linear, NoneType)
def _E(p, mean1, _, mean2, __, nghp=None):
def _E__Gaussian__Linear__Linear(p, mean1, _, mean2, __, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
Expand Down