class DownsamplingKernel(Kernel):
    r"""
    Computes a covariance matrix based on the down sampling kernel between
    inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}` (we expect `d = 1`):

    .. math::
        \begin{equation*}
            k_\text{ds}(\mathbf{x_1}, \mathbf{x_2}) = c +
            (1 - \mathbf{x_1})^{1 + \delta} * (1 - \mathbf{x_2})^{1 + \delta},
        \end{equation*}

    where :math:`c` is an :attr:`offset` parameter and :math:`\delta` is a
    :attr:`power` parameter.

    Args:
        :attr:`power_prior` (:class:`gpytorch.priors.Prior`, optional):
            Prior over the power parameter (default `None`).
        :attr:`offset_prior` (:class:`gpytorch.priors.Prior`, optional):
            Prior over the offset parameter (default `None`).
        :attr:`power_constraint` (Constraint, optional):
            Constraint to place on the power parameter. Default: `Positive`.
        :attr:`offset_constraint` (Constraint, optional):
            Constraint to place on the offset parameter. Default: `Positive`.
        :attr:`active_dims` (list):
            List of data dimensions to operate on.
            `len(active_dims)` should equal `num_dimensions`.
    """

    def __init__(
        self,
        power_prior: Optional[Prior] = None,
        offset_prior: Optional[Prior] = None,
        power_constraint: Optional[Interval] = None,
        offset_constraint: Optional[Interval] = None,
        **kwargs
    ):
        super().__init__(**kwargs)

        if power_constraint is None:
            power_constraint = Positive()
        if offset_constraint is None:
            offset_constraint = Positive()

        # Raw (unconstrained) parameters; the public `power`/`offset`
        # properties apply the registered constraint transforms.
        self.register_parameter(
            name="raw_power",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )
        self.register_parameter(
            name="raw_offset",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )

        if power_prior is not None:
            self.register_prior(
                "power_prior",
                power_prior,
                lambda: self.power,
                lambda v: self._set_power(v),
            )
        self.register_constraint("raw_power", power_constraint)

        if offset_prior is not None:
            self.register_prior(
                "offset_prior",
                offset_prior,
                lambda: self.offset,
                lambda v: self._set_offset(v),
            )
        self.register_constraint("raw_offset", offset_constraint)

    @property
    def power(self) -> Tensor:
        return self.raw_power_constraint.transform(self.raw_power)

    @power.setter
    def power(self, value: Tensor) -> None:
        self._set_power(value)

    def _set_power(self, value: Tensor) -> None:
        # Accept raw floats/ints by promoting to a tensor on the same
        # device/dtype as the stored parameter.
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_power)
        self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value))

    @property
    def offset(self) -> Tensor:
        return self.raw_offset_constraint.transform(self.raw_offset)

    @offset.setter
    def offset(self, value: Tensor) -> None:
        self._set_offset(value)

    def _set_offset(self, value: Tensor) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_offset)
        self.initialize(raw_offset=self.raw_offset_constraint.inverse_transform(value))

    def forward(
        self,
        x1: Tensor,
        x2: Tensor,
        diag: Optional[bool] = False,
        last_dim_is_batch: Optional[bool] = False,
        **params
    ) -> Tensor:
        offset = self.offset.view(*self.batch_shape, 1, 1)
        exponent = 1 + self.power.view(*self.batch_shape, 1, 1)
        if last_dim_is_batch:
            x1 = x1.transpose(-1, -2).unsqueeze(-1)
            x2 = x2.transpose(-1, -2).unsqueeze(-1)
        x1_ = 1 - x1
        x2_ = 1 - x2
        if diag:
            # BUG FIX: raise each per-dimension product to `1 + power` BEFORE
            # summing over the feature dimension, so the diagonal agrees with
            # the full matrix below (which sums x1_^(1+p) * x2_^(1+p) over
            # dimensions via matmul). The previous code computed
            # ((x1_ * x2_).sum(-1))^(1 + power), which differs for d > 1.
            # The offset is reshaped to (*batch, 1) so it broadcasts against
            # the (*batch, n) diagonal instead of producing an extra dim.
            return (x1_ * x2_).pow(exponent).sum(dim=-1) + self.offset.view(
                *self.batch_shape, 1
            )
        if x1.dim() == 2 and x2.dim() == 2:
            # Non-batched case: fused offset-add + matmul.
            return torch.addmm(
                offset, x1_.pow(exponent), x2_.transpose(-2, -1).pow(exponent)
            )
        return (
            torch.matmul(x1_.pow(exponent), x2_.transpose(-2, -1).pow(exponent))
            + offset
        )
class ExpDecayKernel(Kernel):
    r"""
    Computes a covariance matrix based on the exponential decay kernel
    between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}` (we expect d = 1):

    .. math::
        \begin{equation*}
            k_\text{expdecay}(\mathbf{x_1}, \mathbf{x_2}) = w +
            \frac{\beta^{\alpha}}{(\mathbf{x_1} + \mathbf{x_2} + \beta)^{\alpha}},
        \end{equation*}

    where :math:`w` is an :attr:`offset` parameter, :math:`\beta` is a
    :attr:`lengthscale` parameter, and :math:`\alpha` is a :attr:`power`
    parameter.

    Args:
        :attr:`lengthscale_constraint` (Constraint, optional):
            Constraint to place on the lengthscale parameter. Default: `Positive`.
        :attr:`lengthscale_prior` (:class:`gpytorch.priors.Prior`):
            Prior over the lengthscale parameter (default `None`).
        :attr:`power_constraint` (Constraint, optional):
            Constraint to place on the power parameter. Default: `Positive`.
        :attr:`power_prior` (:class:`gpytorch.priors.Prior`):
            Prior over the power parameter (default `None`).
        :attr:`offset_constraint` (Constraint, optional):
            Constraint to place on the offset parameter. Default: `Positive`.
        :attr:`active_dims` (list):
            List of data dimensions to operate on.
            `len(active_dims)` should equal `num_dimensions`.
    """

    def __init__(
        self,
        power_prior: Optional[Prior] = None,
        offset_prior: Optional[Prior] = None,
        power_constraint: Optional[Interval] = None,
        offset_constraint: Optional[Interval] = None,
        **kwargs
    ):
        # `has_lengthscale=True` lets the gpytorch base class own the
        # lengthscale parameter, constraint, and prior.
        super().__init__(has_lengthscale=True, **kwargs)

        if power_constraint is None:
            power_constraint = Positive()
        if offset_constraint is None:
            offset_constraint = Positive()

        self.register_parameter(
            name="raw_power",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )
        self.register_parameter(
            name="raw_offset",
            parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
        )

        if power_prior is not None:
            self.register_prior(
                "power_prior",
                power_prior,
                lambda: self.power,
                lambda v: self._set_power(v),
            )
        # BUG FIX: `raw_power` was previously constrained with
        # `offset_constraint`; a caller-supplied `power_constraint` was
        # silently ignored. Use the power constraint here.
        self.register_constraint("raw_power", power_constraint)

        if offset_prior is not None:
            self.register_prior(
                "offset_prior",
                offset_prior,
                lambda: self.offset,
                lambda v: self._set_offset(v),
            )
        self.register_constraint("raw_offset", offset_constraint)

    @property
    def power(self) -> Tensor:
        return self.raw_power_constraint.transform(self.raw_power)

    @power.setter
    def power(self, value: Tensor) -> None:
        self._set_power(value)

    def _set_power(self, value: Tensor) -> None:
        # Accept raw floats/ints by promoting to a tensor on the same
        # device/dtype as the stored parameter.
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_power)
        self.initialize(raw_power=self.raw_power_constraint.inverse_transform(value))

    @property
    def offset(self) -> Tensor:
        return self.raw_offset_constraint.transform(self.raw_offset)

    @offset.setter
    def offset(self, value: Tensor) -> None:
        self._set_offset(value)

    def _set_offset(self, value: Tensor) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_offset)
        self.initialize(raw_offset=self.raw_offset_constraint.inverse_transform(value))

    def forward(self, x1: Tensor, x2: Tensor, **params) -> Tensor:
        offset = self.offset.view(*self.batch_shape, 1, 1)
        power = self.power.view(*self.batch_shape, 1, 1)
        x1_ = x1.div(self.lengthscale)
        x2_ = x2.div(self.lengthscale)
        # covar_dist(x1_, -x2_) yields x1/beta + x2/beta (assumes nonnegative
        # fidelity inputs — TODO confirm), so
        # (diff + 1)^(-power) = beta^power / (x1 + x2 + beta)^power.
        diff = self.covar_dist(x1_, -x2_, **params)
        res = offset + (diff + 1).pow(-power)
        return res
# All Rights Reserved


import unittest

import torch
from botorch.models.fidelity_kernels.downsampling_kernel import DownsamplingKernel
from gpytorch.priors.torch_priors import GammaPrior, NormalPrior
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase


class TestDownsamplingKernel(unittest.TestCase, BaseKernelTestCase):
    # Tests for DownsamplingKernel. BaseKernelTestCase supplies generic
    # kernel sanity checks driven by the create_* factory methods below.

    def create_kernel_no_ard(self, **kwargs):
        return DownsamplingKernel(**kwargs)

    def create_data_no_batch(self):
        return torch.rand(50, 10)

    def create_data_single_batch(self):
        return torch.rand(2, 50, 2)

    def create_data_double_batch(self):
        return torch.rand(3, 2, 50, 2)

    def test_subset_active_compute_downsampling_function(self):
        # Second column of `a` is inactive (active_dims=[0]) and must be ignored.
        a = torch.tensor([0.1, 0.2]).view(2, 1)
        a_p = torch.tensor([0.3, 0.4]).view(2, 1)
        a = torch.cat((a, a_p), 1)
        b = torch.tensor([0.2, 0.4]).view(2, 1)
        power = 1
        offset = 1

        kernel = DownsamplingKernel(active_dims=[0])
        kernel.initialize(power=power, offset=offset)
        kernel.eval()

        # Hand-computed (1 - a_i) * (1 - b_j) products.
        diff = torch.tensor([[0.72, 0.54], [0.64, 0.48]])
        actual = offset + diff.pow(1 + power)
        res = kernel(a, b).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_computes_downsampling_function(self):
        a = torch.tensor([0.1, 0.2]).view(2, 1)
        b = torch.tensor([0.2, 0.4]).view(2, 1)
        power = 1
        offset = 1

        kernel = DownsamplingKernel()
        kernel.initialize(power=power, offset=offset)
        kernel.eval()

        # Hand-computed (1 - a_i) * (1 - b_j) products.
        diff = torch.tensor([[0.72, 0.54], [0.64, 0.48]])
        actual = offset + diff.pow(1 + power)
        res = kernel(a, b).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_subset_computes_active_downsampling_function_batch(self):
        # Batched (batch_shape=[3]) evaluation with an inactive second column.
        a = torch.tensor([[0.1, 0.2, 0.2], [0.3, 0.4, 0.2], [0.5, 0.5, 0.5]]).view(
            3, 3, 1
        )
        a_p = torch.tensor([[0.1, 0.2, 0.2], [0.3, 0.4, 0.2], [0.5, 0.5, 0.5]]).view(
            3, 3, 1
        )
        a = torch.cat((a, a_p), 2)
        b = torch.tensor([[0.5, 0.6, 0.1], [0.7, 0.8, 0.2], [0.6, 0.6, 0.5]]).view(
            3, 3, 1
        )
        power = 1
        offset = 1
        kernel = DownsamplingKernel(batch_shape=torch.Size([3]), active_dims=[0])
        kernel.initialize(power=power, offset=offset)
        kernel.eval()
        res = kernel(a, b).evaluate()

        actual = torch.zeros(3, 3, 3)

        # Per-batch hand-computed (1 - a_i) * (1 - b_j) products.
        diff = torch.tensor([[0.45, 0.36, 0.81], [0.4, 0.32, 0.72], [0.4, 0.32, 0.72]])
        actual[0, :, :] = offset + diff.pow(1 + power)

        diff = torch.tensor(
            [[0.21, 0.14, 0.56], [0.18, 0.12, 0.48], [0.24, 0.16, 0.64]]
        )
        actual[1, :, :] = offset + diff.pow(1 + power)

        diff = torch.tensor([[0.2, 0.2, 0.25], [0.2, 0.2, 0.25], [0.2, 0.2, 0.25]])
        actual[2, :, :] = offset + diff.pow(1 + power)
        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_computes_downsampling_function_batch(self):
        a = torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.5]]).view(3, 2, 1)
        b = torch.tensor([[0.5, 0.6], [0.7, 0.8], [0.6, 0.6]]).view(3, 2, 1)
        power = 1
        offset = 1

        kernel = DownsamplingKernel(batch_shape=torch.Size([3]))
        kernel.initialize(power=power, offset=offset)
        kernel.eval()
        res = kernel(a, b).evaluate()

        actual = torch.zeros(3, 2, 2)

        diff = torch.tensor([[0.45, 0.36], [0.4, 0.32]])
        actual[0, :, :] = offset + diff.pow(1 + power)

        diff = torch.tensor([[0.21, 0.14], [0.18, 0.12]])
        actual[1, :, :] = offset + diff.pow(1 + power)

        diff = torch.tensor([[0.2, 0.2], [0.2, 0.2]])
        actual[2, :, :] = offset + diff.pow(1 + power)
        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_initialize_offset(self):
        kernel = DownsamplingKernel()
        kernel.initialize(offset=1)
        actual_value = torch.tensor(1.0).view_as(kernel.offset)
        self.assertLess(torch.norm(kernel.offset - actual_value), 1e-5)

    def test_initialize_offset_batch(self):
        kernel = DownsamplingKernel(batch_shape=torch.Size([2]))
        off_init = torch.tensor([1.0, 2.0])
        kernel.initialize(offset=off_init)
        actual_value = off_init.view_as(kernel.offset)
        self.assertLess(torch.norm(kernel.offset - actual_value), 1e-5)

    def test_initialize_power(self):
        kernel = DownsamplingKernel()
        kernel.initialize(power=1)
        actual_value = torch.tensor(1.0).view_as(kernel.power)
        self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)

    def test_initialize_power_batch(self):
        kernel = DownsamplingKernel(batch_shape=torch.Size([2]))
        power_init = torch.tensor([1.0, 2.0])
        kernel.initialize(power=power_init)
        actual_value = power_init.view_as(kernel.power)
        self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)

    def test_last_dim_is_batch(self):
        # Inputs are (d, n); last_dim_is_batch=True treats each data dim as a
        # batch, so the expected result matches the batched test above.
        a = (
            torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.5]])
            .view(3, 2)
            .transpose(-1, -2)
        )
        b = (
            torch.tensor([[0.5, 0.6], [0.7, 0.8], [0.6, 0.6]])
            .view(3, 2)
            .transpose(-1, -2)
        )
        power = 1
        offset = 1

        kernel = DownsamplingKernel()
        kernel.initialize(power=power, offset=offset)
        kernel.eval()
        res = kernel(a, b, last_dim_is_batch=True).evaluate()

        actual = torch.zeros(3, 2, 2)

        diff = torch.tensor([[0.45, 0.36], [0.4, 0.32]])
        actual[0, :, :] = offset + diff.pow(1 + power)

        diff = torch.tensor([[0.21, 0.14], [0.18, 0.12]])
        actual[1, :, :] = offset + diff.pow(1 + power)

        diff = torch.tensor([[0.2, 0.2], [0.2, 0.2]])
        actual[2, :, :] = offset + diff.pow(1 + power)
        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_diag_calculation(self):
        a = torch.tensor([0.1, 0.2]).view(2, 1)
        b = torch.tensor([0.2, 0.4]).view(2, 1)
        power = 1
        offset = 1

        kernel = DownsamplingKernel()
        kernel.initialize(power=power, offset=offset)
        kernel.eval()

        diff = torch.tensor([[0.72, 0.54], [0.64, 0.48]])
        actual = offset + diff.pow(1 + power)
        # diag=True returns only the diagonal of the covariance matrix.
        res = kernel(a, b, diag=True)

        self.assertLess(torch.norm(res - torch.diag(actual)), 1e-5)

    def test_initialize_power_prior(self):
        # Priors can be attached both after construction and via __init__.
        kernel = DownsamplingKernel()
        kernel.power_prior = NormalPrior(1, 1)
        self.assertTrue(isinstance(kernel.power_prior, NormalPrior))
        kernel2 = DownsamplingKernel(power_prior=GammaPrior(1, 1))
        self.assertTrue(isinstance(kernel2.power_prior, GammaPrior))

    def test_initialize_offset_prior(self):
        kernel = DownsamplingKernel()
        kernel.offset_prior = NormalPrior(1, 1)
        self.assertTrue(isinstance(kernel.offset_prior, NormalPrior))
        kernel2 = DownsamplingKernel(offset_prior=GammaPrior(1, 1))
        self.assertTrue(isinstance(kernel2.offset_prior, GammaPrior))
# Tests for ExpDecayKernel (class header and first methods reconstructed from
# the preceding mangled span so this unit is complete).

import unittest

import torch
from botorch.models.fidelity_kernels.exponential_decay_kernel import ExpDecayKernel
from gpytorch.priors.torch_priors import GammaPrior, NormalPrior
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase


class TestExpDecayKernel(unittest.TestCase, BaseKernelTestCase):
    # Tests for ExpDecayKernel. BaseKernelTestCase supplies generic kernel
    # sanity checks driven by the factory method below.

    def create_kernel_no_ard(self, **kwargs):
        return ExpDecayKernel(**kwargs)

    def test_subset_active_compute_exponential_decay_function(self):
        # Second column of `a` is inactive (active_dims=[0]) and must be ignored.
        a = torch.tensor([1.0, 2.0]).view(2, 1)
        a_p = torch.tensor([3.0, 4.0]).view(2, 1)
        a = torch.cat((a, a_p), 1)
        b = torch.tensor([2.0, 4.0]).view(2, 1)
        lengthscale = 1
        power = 1
        offset = 1

        kernel = ExpDecayKernel(active_dims=[0])
        kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
        kernel.eval()

        # Hand-computed a_i + b_j + lengthscale values.
        diff = torch.tensor([[4.0, 6.0], [5.0, 7.0]])
        actual = offset + diff.pow(-power)
        res = kernel(a, b).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_computes_exponential_decay_function(self):
        a = torch.tensor([1.0, 2.0]).view(2, 1)
        b = torch.tensor([2.0, 4.0]).view(2, 1)
        lengthscale = 1
        power = 1
        offset = 1

        kernel = ExpDecayKernel()
        kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
        kernel.eval()

        diff = torch.tensor([[4.0, 6.0], [5.0, 7.0]])
        actual = offset + torch.tensor([1.0]).div(diff.pow(power))
        res = kernel(a, b).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_subset_active_exponential_decay_function_batch(self):
        a = torch.tensor([[1.0, 0.0], [2.0, 0.0], [3.0, 0.0], [4.0, 0.0]]).view(2, 2, 2)
        b = torch.tensor([[5.0, 6.0], [7.0, 8.0]]).view(2, 2, 1)
        lengthscale = 1
        power = 1
        offset = 1

        kernel = ExpDecayKernel(batch_shape=torch.Size([2]), active_dims=[0])
        kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
        kernel.eval()

        actual = torch.zeros(2, 2, 2)

        # Per-batch hand-computed a_i + b_j + lengthscale values.
        diff = torch.tensor([[7.0, 8.0], [8.0, 9.0]])
        actual[0, :, :] = offset + torch.tensor([1.0]).div(diff.pow(power))

        diff = torch.tensor([[11.0, 12.0], [12.0, 13.0]])
        actual[1, :, :] = offset + torch.tensor([1.0]).div(diff.pow(power))

        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_computes_exponential_decay_function_batch(self):
        a = torch.tensor([[1.0, 2.0], [3.0, 4.0]]).view(2, 2, 1)
        b = torch.tensor([[5.0, 6.0], [7.0, 8.0]]).view(2, 2, 1)
        lengthscale = 1
        power = 1
        offset = 1

        kernel = ExpDecayKernel(batch_shape=torch.Size([2]))
        kernel.initialize(lengthscale=lengthscale, power=power, offset=offset)
        kernel.eval()

        actual = torch.zeros(2, 2, 2)

        diff = torch.tensor([[7.0, 8.0], [8.0, 9.0]])
        actual[0, :, :] = offset + diff.pow(-power)

        diff = torch.tensor([[11.0, 12.0], [12.0, 13.0]])
        actual[1, :, :] = offset + diff.pow(-power)

        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

    def test_initialize_lengthscale(self):
        kernel = ExpDecayKernel()
        kernel.initialize(lengthscale=1)
        actual_value = torch.tensor(1.0).view_as(kernel.lengthscale)
        self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)

    def test_initialize_lengthscale_batch(self):
        kernel = ExpDecayKernel(batch_shape=torch.Size([2]))
        ls_init = torch.tensor([1.0, 2.0])
        kernel.initialize(lengthscale=ls_init)
        actual_value = ls_init.view_as(kernel.lengthscale)
        self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)

    def test_initialize_offset(self):
        kernel = ExpDecayKernel()
        kernel.initialize(offset=1)
        actual_value = torch.tensor(1.0).view_as(kernel.offset)
        self.assertLess(torch.norm(kernel.offset - actual_value), 1e-5)

    def test_initialize_offset_batch(self):
        kernel = ExpDecayKernel(batch_shape=torch.Size([2]))
        off_init = torch.tensor([1.0, 2.0])
        kernel.initialize(offset=off_init)
        actual_value = off_init.view_as(kernel.offset)
        self.assertLess(torch.norm(kernel.offset - actual_value), 1e-5)

    def test_initialize_power(self):
        kernel = ExpDecayKernel()
        kernel.initialize(power=1)
        actual_value = torch.tensor(1.0).view_as(kernel.power)
        self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)

    def test_initialize_power_batch(self):
        kernel = ExpDecayKernel(batch_shape=torch.Size([2]))
        power_init = torch.tensor([1.0, 2.0])
        kernel.initialize(power=power_init)
        actual_value = power_init.view_as(kernel.power)
        self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)

    def test_initialize_power_prior(self):
        # Priors can be attached both after construction and via __init__.
        kernel = ExpDecayKernel()
        kernel.power_prior = NormalPrior(1, 1)
        self.assertTrue(isinstance(kernel.power_prior, NormalPrior))
        kernel2 = ExpDecayKernel(power_prior=GammaPrior(1, 1))
        self.assertTrue(isinstance(kernel2.power_prior, GammaPrior))

    def test_initialize_offset_prior(self):
        kernel = ExpDecayKernel()
        kernel.offset_prior = NormalPrior(1, 1)
        self.assertTrue(isinstance(kernel.offset_prior, NormalPrior))
        kernel2 = ExpDecayKernel(offset_prior=GammaPrior(1, 1))
        self.assertTrue(isinstance(kernel2.offset_prior, GammaPrior))