8 changes: 8 additions & 0 deletions botorch/models/fidelity/__init__.py

@@ -0,0 +1,8 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from .gp_regression_fidelity import SingleTaskMultiFidelityGP


__all__ = ["SingleTaskMultiFidelityGP"]
105 changes: 105 additions & 0 deletions botorch/models/fidelity/gp_regression_fidelity.py

@@ -0,0 +1,105 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

r"""
Multi-fidelity Gaussian Process regression models based on GPyTorch models.
"""

from typing import Optional

import torch
from botorch.exceptions import UnsupportedError
from botorch.models.fidelity_kernels.downsampling_kernel import DownsamplingKernel
from botorch.models.fidelity_kernels.exponential_decay_kernel import ExpDecayKernel
from gpytorch.kernels.rbf_kernel import RBFKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.likelihoods.likelihood import Likelihood
from gpytorch.priors.torch_priors import GammaPrior
from torch import Tensor

from ..gp_regression import SingleTaskGP


class SingleTaskMultiFidelityGP(SingleTaskGP):
    r"""A single-task multi-fidelity GP model.

    A subclass of SingleTaskGP. By default, the last two columns of train_X
    are treated as fidelity parameters: the number of training iterations and
    the training data size. The kernel is taken from
    `https://arxiv.org/abs/1903.04703`.

    Args:
        train_X: A `n x (d + s)` or `batch_shape x n x (d + s)` (batch mode)
            tensor of training features, where `s` is the number of fidelity
            parameters.
        train_Y: A `n x o` or `batch_shape x n x o` (batch mode) tensor of
            training observations.
        train_iteration_fidelity: An indicator of whether train_X includes a
            training-iteration fidelity parameter.
        train_data_fidelity: An indicator of whether train_X includes a
            downsampling (training data size) fidelity parameter. If
            train_iteration_fidelity and train_data_fidelity are both True,
            the last column of train_X is treated as the training iteration
            fidelity parameter and the second-to-last column as the training
            data size fidelity parameter. If exactly one indicator is True,
            the last column is treated as that fidelity parameter. train_X
            must contain at least one fidelity parameter.
        likelihood: A likelihood. If omitted, use a standard
            GaussianLikelihood with inferred noise level.

    Example:
        >>> train_X = torch.rand(20, 4)
        >>> train_Y = train_X.pow(2).sum(dim=-1)
        >>> model = SingleTaskMultiFidelityGP(train_X, train_Y)
    """

    def __init__(
        self,
        train_X: Tensor,
        train_Y: Tensor,
        train_iteration_fidelity: bool = True,
        train_data_fidelity: bool = True,
        likelihood: Optional[Likelihood] = None,
    ) -> None:
        train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
        num_fidelity = train_iteration_fidelity + train_data_fidelity
        ard_num_dims = train_X.shape[-1] - num_fidelity
        # The RBF kernel acts only on the non-fidelity (design) dimensions.
        active_dimsX = list(range(train_X.shape[-1] - num_fidelity))
        rbf_kernel = RBFKernel(
            ard_num_dims=ard_num_dims,
            batch_shape=self._aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
            active_dims=active_dimsX,
        )
        exp_kernel = ExpDecayKernel(
            batch_shape=self._aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
            offset_prior=GammaPrior(3.0, 6.0),
            power_prior=GammaPrior(3.0, 6.0),
        )
        ds_kernel = DownsamplingKernel(
            batch_shape=self._aug_batch_shape,
            offset_prior=GammaPrior(3.0, 6.0),
            power_prior=GammaPrior(3.0, 6.0),
        )
        # Assign each fidelity kernel to its fidelity column, then take the
        # product of the design kernel and the active fidelity kernels.
        if train_iteration_fidelity and train_data_fidelity:
            active_dimsS1 = [train_X.shape[-1] - 1]
            active_dimsS2 = [train_X.shape[-1] - 2]
            exp_kernel.active_dims = torch.tensor(active_dimsS1)
            ds_kernel.active_dims = torch.tensor(active_dimsS2)
            kernel = rbf_kernel * exp_kernel * ds_kernel
        elif train_iteration_fidelity or train_data_fidelity:
            active_dimsS = [train_X.shape[-1] - 1]
            if train_iteration_fidelity:
                exp_kernel.active_dims = torch.tensor(active_dimsS)
                kernel = rbf_kernel * exp_kernel
            else:
                ds_kernel.active_dims = torch.tensor(active_dimsS)
                kernel = rbf_kernel * ds_kernel
        else:
            raise UnsupportedError("At least one fidelity parameter is required.")
        covar_module = ScaleKernel(
            kernel,
            batch_shape=self._aug_batch_shape,
            outputscale_prior=GammaPrior(2.0, 0.15),
        )
        super().__init__(train_X=train_X, train_Y=train_Y, covar_module=covar_module)
        self.to(train_X)
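For reviewers who want to try the new model, here is a minimal sketch of fitting it with the standard BoTorch utilities. It assumes the import path added by this PR; fit_gpytorch_model and ExactMarginalLogLikelihood are the usual fitting entry points and are not part of this diff, and the data below is synthetic.

import torch
from botorch.fit import fit_gpytorch_model
from botorch.models.fidelity import SingleTaskMultiFidelityGP
from gpytorch.mlls import ExactMarginalLogLikelihood

# Two design dimensions plus two fidelity columns (iterations, data size),
# all scaled to the unit cube; illustrative data only.
train_X = torch.rand(20, 4)
train_Y = train_X[:, :2].pow(2).sum(dim=-1)

model = SingleTaskMultiFidelityGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)  # maximizes the marginal log likelihood over the kernel hyperparameters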
34 changes: 21 additions & 13 deletions botorch/models/gp_regression.py

@@ -22,6 +22,7 @@
 from gpytorch.likelihoods.noise_models import HeteroskedasticNoise
 from gpytorch.means.constant_mean import ConstantMean
 from gpytorch.models.exact_gp import ExactGP
+from gpytorch.module import Module
 from gpytorch.priors.smoothed_box_prior import SmoothedBoxPrior
 from gpytorch.priors.torch_priors import GammaPrior
 from torch import Tensor
@@ -52,7 +53,11 @@ class SingleTaskGP(BatchedMultiOutputGPyTorchModel, ExactGP):
     """

     def __init__(
-        self, train_X: Tensor, train_Y: Tensor, likelihood: Optional[Likelihood] = None
+        self,
+        train_X: Tensor,
+        train_Y: Tensor,
+        likelihood: Optional[Likelihood] = None,
+        covar_module: Optional[Module] = None,
     ) -> None:
         r"""A single-task exact GP model.

@@ -63,13 +68,14 @@ def __init__(
                 training observations.
             likelihood: A likelihood. If omitted, use a standard
                 GaussianLikelihood with inferred noise level.
+            covar_module: The covariance (kernel) module. If omitted, use a
+                Matern kernel.

         Example:
             >>> train_X = torch.rand(20, 2)
             >>> train_Y = torch.sin(train_X[:, 0]) + torch.cos(train_X[:, 1])
             >>> model = SingleTaskGP(train_X, train_Y)
         """
-        ard_num_dims = train_X.shape[-1]
         train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
         train_X, train_Y, _ = multioutput_to_batch_mode_transform(
             train_X=train_X, train_Y=train_Y, num_outputs=self._num_outputs
@@ -90,16 +96,19 @@ def __init__(
             self._is_custom_likelihood = True
         ExactGP.__init__(self, train_X, train_Y, likelihood)
         self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
-        self.covar_module = ScaleKernel(
-            MaternKernel(
-                nu=2.5,
-                ard_num_dims=ard_num_dims,
-                batch_shape=self._aug_batch_shape,
-                lengthscale_prior=GammaPrior(3.0, 6.0),
-            ),
-            batch_shape=self._aug_batch_shape,
-            outputscale_prior=GammaPrior(2.0, 0.15),
-        )
+        if covar_module is None:
+            self.covar_module = ScaleKernel(
+                MaternKernel(
+                    nu=2.5,
+                    ard_num_dims=train_X.shape[-1],
+                    batch_shape=self._aug_batch_shape,
+                    lengthscale_prior=GammaPrior(3.0, 6.0),
+                ),
+                batch_shape=self._aug_batch_shape,
+                outputscale_prior=GammaPrior(2.0, 0.15),
+            )
+        else:
+            self.covar_module = covar_module
         self.to(train_X)

     def forward(self, x: Tensor) -> MultivariateNormal:
@@ -136,7 +145,6 @@ def __init__(self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor) -> None
             >>> train_Yvar = torch.full_like(train_Y, 0.2)
             >>> model = FixedNoiseGP(train_X, train_Y, train_Yvar)
         """
-        ard_num_dims = train_X.shape[-1]
         train_X, train_Y, train_Yvar = self._set_dimensions(
             train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar
         )
@@ -156,7 +164,7 @@ def __init__(self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor) -> None
         self.covar_module = ScaleKernel(
             base_kernel=MaternKernel(
                 nu=2.5,
-                ard_num_dims=ard_num_dims,
+                ard_num_dims=train_X.shape[-1],
                 batch_shape=self._aug_batch_shape,
                 lengthscale_prior=GammaPrior(3.0, 6.0),
             ),
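The new covar_module hook is what lets SingleTaskMultiFidelityGP delegate to SingleTaskGP's constructor, and it also allows callers to swap in any GPyTorch kernel. A minimal sketch of that usage (an assumed example, not taken from this diff):

import torch
from botorch.models import SingleTaskGP
from gpytorch.kernels import RBFKernel, ScaleKernel

train_X = torch.rand(20, 2)
train_Y = torch.sin(train_X[:, 0]) + torch.cos(train_X[:, 1])

# Replace the default Matern-5/2 kernel with an ARD RBF kernel.
covar_module = ScaleKernel(RBFKernel(ard_num_dims=train_X.shape[-1]))
model = SingleTaskGP(train_X, train_Y, covar_module=covar_module)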
3 changes: 3 additions & 0 deletions test/models/fidelity/__init__.py

@@ -0,0 +1,3 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved