Skip to content

Commit

Permalink
SM empspect init unsqueezes 1D inputs
Browse files Browse the repository at this point in the history
[Fixes #1166]
  • Loading branch information
gpleiss committed Jun 8, 2020
1 parent 92e07cf commit b8bd2a8
Show file tree
Hide file tree
Showing 7 changed files with 42 additions and 28 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
Expand Up @@ -13,8 +13,8 @@ env:
- PYTORCH_VERSION=stable WITH_PYRO=true EXAMPLES=true

install:
- pip install numpy; pip install scipy; pip install scikit-learn;
- if [[ $PYTORCH_VERSION = "master" ]]; then
pip install numpy;
pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html;
python setup.py build develop;
else
Expand Down
2 changes: 2 additions & 0 deletions environment.yml
Expand Up @@ -3,3 +3,5 @@ channels:
- pytorch
dependencies:
- pytorch
- scikit-learn
- scipy
5 changes: 5 additions & 0 deletions gpytorch/kernels/spectral_mixture_kernel.py
Expand Up @@ -158,6 +158,11 @@ def initialize_from_data_empspect(self, train_x, train_y):
from scipy.fftpack import fft
from scipy.integrate import cumtrapz

if not torch.is_tensor(train_x) or not torch.is_tensor(train_y):
raise RuntimeError("train_x and train_y should be tensors")
if train_x.ndimension() == 1:
train_x = train_x.unsqueeze(-1)

N = train_x.size(-2)
emp_spect = np.abs(fft(train_y.cpu().detach().numpy())) ** 2 / N
M = math.floor(N / 2)
Expand Down
4 changes: 3 additions & 1 deletion requirements.txt
@@ -1 +1,3 @@
torch>=1.3
torch>=1.5
scikit-learn
scipy
2 changes: 1 addition & 1 deletion setup.py
Expand Up @@ -26,7 +26,7 @@ def find_version(*file_paths):


torch_min = "1.5"
install_requires = [">=".join(["torch", torch_min])]
install_requires = [">=".join(["torch", torch_min]), "scikit-learn", "scipy"]
# if recent dev version of PyTorch is installed, no need to install stable
try:
import torch
Expand Down
6 changes: 3 additions & 3 deletions test/examples/test_simple_gp_regression.py
Expand Up @@ -9,7 +9,7 @@
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior, LogNormalPrior, UniformPrior
from gpytorch.priors import SmoothedBoxPrior, UniformPrior
from gpytorch.constraints import Positive
from gpytorch.test.base_test_case import BaseTestCase
from gpytorch.test.utils import least_used_cuda_device
Expand Down Expand Up @@ -465,9 +465,9 @@ def test_pyro_sampling(self):

# Register normal GPyTorch priors
gp_model.mean_module.register_prior("mean_prior", UniformPrior(-1, 1), "constant")
gp_model.covar_module.base_kernel.register_prior("lengthscale_prior", UniformPrior(0.01, 0.2), "lengthscale")
gp_model.covar_module.base_kernel.register_prior("lengthscale_prior", UniformPrior(0.01, 0.5), "lengthscale")
gp_model.covar_module.register_prior("outputscale_prior", UniformPrior(1, 2), "outputscale")
likelihood.register_prior("noise_prior", LogNormalPrior(-1.5, 0.1), "noise")
likelihood.register_prior("noise_prior", UniformPrior(0.05, 0.3), "noise")

mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

Expand Down
49 changes: 27 additions & 22 deletions test/examples/test_spectral_mixture_gp_regression.py
Expand Up @@ -44,11 +44,14 @@


class SpectralMixtureGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
def __init__(self, train_x, train_y, likelihood, empspect=False):
super(SpectralMixtureGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
self.covar_module = SpectralMixtureKernel(num_mixtures=4, ard_num_dims=1)
self.covar_module.initialize_from_data(train_x, train_y)
if empspect:
self.covar_module.initialize_from_data(train_x, train_y)
else:
self.covar_module.initialize_from_data_empspect(train_x, train_y)

def forward(self, x):
mean_x = self.mean_module(x)
Expand All @@ -70,9 +73,12 @@ def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)

def test_spectral_mixture_gp_mean_abs_error(self):
def test_spectral_mixture_gp_mean_abs_error_empspect_init(self):
return self.test_spectral_mixture_gp_mean_abs_error(empspect=True)

def test_spectral_mixture_gp_mean_abs_error(self, empspect=False):
likelihood = GaussianLikelihood(noise_prior=SmoothedBoxPrior(exp(-5), exp(3), sigma=0.1))
gp_model = SpectralMixtureGPModel(train_x, train_y, likelihood)
gp_model = SpectralMixtureGPModel(train_x, train_y, likelihood, empspect=empspect)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

# Optimize the model
Expand All @@ -81,27 +87,26 @@ def test_spectral_mixture_gp_mean_abs_error(self):
optimizer = optim.SGD(list(gp_model.parameters()), lr=0.1)
optimizer.n_iter = 0

with gpytorch.settings.num_trace_samples(100):
for _ in range(150):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()

for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for _ in range(300):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()

gp_model.load_state_dict(good_state_dict, strict=False)
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
optimizer.step()

gp_model.load_state_dict(good_state_dict, strict=False)

# Test the model
with torch.no_grad(), gpytorch.settings.max_cg_iterations(100):
# Test the model
with torch.no_grad():
gp_model.eval()
likelihood.eval()
test_preds = likelihood(gp_model(test_x)).mean
Expand Down

0 comments on commit b8bd2a8

Please sign in to comment.