From b8bd2a8832105c5d6df1180e2b072a9f6c562dda Mon Sep 17 00:00:00 2001
From: Geoff Pleiss
Date: Mon, 8 Jun 2020 09:34:50 -0400
Subject: [PATCH] SM empspect init unsqueezes 1D inputs

[Fixes #1166]
---
 .travis.yml                                 |  2 +-
 environment.yml                             |  2 +
 gpytorch/kernels/spectral_mixture_kernel.py |  5 ++
 requirements.txt                            |  4 +-
 setup.py                                    |  2 +-
 test/examples/test_simple_gp_regression.py  |  6 +--
 .../test_spectral_mixture_gp_regression.py  | 49 ++++++++++---------
 7 files changed, 42 insertions(+), 28 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 7f0b0576b..7058ce5fe 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,8 +13,8 @@ env:
   - PYTORCH_VERSION=stable WITH_PYRO=true EXAMPLES=true

 install:
+  - pip install numpy; pip install scipy; pip install scikit-learn;
   - if [[ $PYTORCH_VERSION = "master" ]]; then
-      pip install numpy;
       pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html;
       python setup.py build develop;
     else
diff --git a/environment.yml b/environment.yml
index ede62072a..23a5d2e95 100644
--- a/environment.yml
+++ b/environment.yml
@@ -3,3 +3,5 @@ channels:
   - pytorch
 dependencies:
   - pytorch
+  - scikit-learn
+  - scipy
diff --git a/gpytorch/kernels/spectral_mixture_kernel.py b/gpytorch/kernels/spectral_mixture_kernel.py
index d70b14cc7..4a7e45020 100644
--- a/gpytorch/kernels/spectral_mixture_kernel.py
+++ b/gpytorch/kernels/spectral_mixture_kernel.py
@@ -158,6 +158,11 @@ def initialize_from_data_empspect(self, train_x, train_y):
         from scipy.fftpack import fft
         from scipy.integrate import cumtrapz

+        if not torch.is_tensor(train_x) or not torch.is_tensor(train_y):
+            raise RuntimeError("train_x and train_y should be tensors")
+        if train_x.ndimension() == 1:
+            train_x = train_x.unsqueeze(-1)
+
         N = train_x.size(-2)
         emp_spect = np.abs(fft(train_y.cpu().detach().numpy())) ** 2 / N
         M = math.floor(N / 2)
diff --git a/requirements.txt b/requirements.txt
index 305d31346..2caf29409 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1,3 @@
-torch>=1.3
+torch>=1.5
+scikit-learn
+scipy
diff --git a/setup.py b/setup.py
index 8066fc9fa..31c9c5d78 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@ def find_version(*file_paths):


 torch_min = "1.5"
-install_requires = [">=".join(["torch", torch_min])]
+install_requires = [">=".join(["torch", torch_min]), "scikit-learn", "scipy"]
 # if recent dev version of PyTorch is installed, no need to install stable
 try:
     import torch
diff --git a/test/examples/test_simple_gp_regression.py b/test/examples/test_simple_gp_regression.py
index 47a5a57bd..3ad4e712f 100644
--- a/test/examples/test_simple_gp_regression.py
+++ b/test/examples/test_simple_gp_regression.py
@@ -9,7 +9,7 @@
 from gpytorch.kernels import RBFKernel, ScaleKernel
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.means import ConstantMean
-from gpytorch.priors import SmoothedBoxPrior, LogNormalPrior, UniformPrior
+from gpytorch.priors import SmoothedBoxPrior, UniformPrior
 from gpytorch.constraints import Positive
 from gpytorch.test.base_test_case import BaseTestCase
 from gpytorch.test.utils import least_used_cuda_device
@@ -465,9 +465,9 @@ def test_pyro_sampling(self):

         # Register normal GPyTorch priors
         gp_model.mean_module.register_prior("mean_prior", UniformPrior(-1, 1), "constant")
-        gp_model.covar_module.base_kernel.register_prior("lengthscale_prior", UniformPrior(0.01, 0.2), "lengthscale")
+        gp_model.covar_module.base_kernel.register_prior("lengthscale_prior", UniformPrior(0.01, 0.5), "lengthscale")
         gp_model.covar_module.register_prior("outputscale_prior", UniformPrior(1, 2), "outputscale")
-        likelihood.register_prior("noise_prior", LogNormalPrior(-1.5, 0.1), "noise")
+        likelihood.register_prior("noise_prior", UniformPrior(0.05, 0.3), "noise")

         mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

diff --git a/test/examples/test_spectral_mixture_gp_regression.py b/test/examples/test_spectral_mixture_gp_regression.py
index 2267c7688..b466a0908 100644
--- a/test/examples/test_spectral_mixture_gp_regression.py
+++ b/test/examples/test_spectral_mixture_gp_regression.py
@@ -44,11 +44,14 @@


 class SpectralMixtureGPModel(gpytorch.models.ExactGP):
-    def __init__(self, train_x, train_y, likelihood):
+    def __init__(self, train_x, train_y, likelihood, empspect=False):
         super(SpectralMixtureGPModel, self).__init__(train_x, train_y, likelihood)
         self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
         self.covar_module = SpectralMixtureKernel(num_mixtures=4, ard_num_dims=1)
-        self.covar_module.initialize_from_data(train_x, train_y)
+        if empspect:
+            self.covar_module.initialize_from_data_empspect(train_x, train_y)
+        else:
+            self.covar_module.initialize_from_data(train_x, train_y)

     def forward(self, x):
         mean_x = self.mean_module(x)
@@ -70,9 +73,12 @@ def tearDown(self):
         if hasattr(self, "rng_state"):
             torch.set_rng_state(self.rng_state)

-    def test_spectral_mixture_gp_mean_abs_error(self):
+    def test_spectral_mixture_gp_mean_abs_error_empspect_init(self):
+        return self.test_spectral_mixture_gp_mean_abs_error(empspect=True)
+
+    def test_spectral_mixture_gp_mean_abs_error(self, empspect=False):
         likelihood = GaussianLikelihood(noise_prior=SmoothedBoxPrior(exp(-5), exp(3), sigma=0.1))
-        gp_model = SpectralMixtureGPModel(train_x, train_y, likelihood)
+        gp_model = SpectralMixtureGPModel(train_x, train_y, likelihood, empspect=empspect)
         mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

         # Optimize the model
@@ -81,27 +87,26 @@
         optimizer = optim.SGD(list(gp_model.parameters()), lr=0.1)
         optimizer.n_iter = 0

-        with gpytorch.settings.num_trace_samples(100):
-            for _ in range(150):
-                optimizer.zero_grad()
-                output = gp_model(train_x)
-                loss = -mll(output, train_y)
-                loss.backward()
-                optimizer.n_iter += 1
-                optimizer.step()
-
-            for param in gp_model.parameters():
-                self.assertTrue(param.grad is not None)
-                self.assertGreater(param.grad.norm().item(), 0)
-            for param in likelihood.parameters():
-                self.assertTrue(param.grad is not None)
-                self.assertGreater(param.grad.norm().item(), 0)
+        for _ in range(300):
+            optimizer.zero_grad()
+            output = gp_model(train_x)
+            loss = -mll(output, train_y)
+            loss.backward()
+            optimizer.n_iter += 1
             optimizer.step()

-        gp_model.load_state_dict(good_state_dict, strict=False)
+        for param in gp_model.parameters():
+            self.assertTrue(param.grad is not None)
+            self.assertGreater(param.grad.norm().item(), 0)
+        for param in likelihood.parameters():
+            self.assertTrue(param.grad is not None)
+            self.assertGreater(param.grad.norm().item(), 0)
+        optimizer.step()
+
+        gp_model.load_state_dict(good_state_dict, strict=False)

-        # Test the model
-        with torch.no_grad(), gpytorch.settings.max_cg_iterations(100):
+        # Test the model
+        with torch.no_grad():
             gp_model.eval()
             likelihood.eval()
             test_preds = likelihood(gp_model(test_x)).mean
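
A minimal sketch (not part of the patch) of the behavior this change enables, assuming a GPyTorch build with this commit applied and with scipy/scikit-learn installed (which is exactly why they are added to requirements.txt above); the toy data and the num_mixtures value are illustrative only:

    import math
    import torch
    from gpytorch.kernels import SpectralMixtureKernel

    # 1D training inputs: before this patch, initialize_from_data_empspect
    # read the sample count as train_x.size(-2), which is wrong for a
    # 1D tensor (#1166).
    train_x = torch.linspace(0, 1, 100)
    train_y = torch.sin(train_x * (2 * math.pi))

    kernel = SpectralMixtureKernel(num_mixtures=4)

    # With the patch, a 1D train_x is unsqueezed to shape (N, 1) internally,
    # so initializing from the empirical spectrum succeeds on 1D data.
    kernel.initialize_from_data_empspect(train_x, train_y)

    # Non-tensor inputs now fail fast with a clear error instead of
    # breaking downstream in the FFT / mixture-fitting code.
    try:
        kernel.initialize_from_data_empspect(train_x.numpy(), train_y.numpy())
    except RuntimeError as e:
        print(e)  # "train_x and train_y should be tensors"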