
Commit

work started on new MCMC, hps_bounds no longer required at initialization
MarcusMNoack committed Nov 10, 2023
1 parent facf510 commit 92beba5
Showing 10 changed files with 508 additions and 257,213 deletions.
128,492 changes: 19 additions & 128,473 deletions docs/source/examples/MultiTaskTest.ipynb

Large diffs are not rendered by default.

79 changes: 34 additions & 45 deletions docs/source/examples/NonEuclideanInputSpaces.ipynb
@@ -19,7 +19,7 @@
"outputs": [],
"source": [
"#install the newest version of fvgp\n",
"#!pip install fvgp==4.0.9"
"#!pip install fvgp==4.0.11"
]
},
{
@@ -56,40 +56,36 @@
"name": "stdout",
"output_type": "stream",
"text": [
"differential_evolution step 1: f(x)= 8.78181\n",
"differential_evolution step 2: f(x)= 8.78181\n",
"differential_evolution step 3: f(x)= 8.78181\n",
"differential_evolution step 4: f(x)= 8.78181\n",
"differential_evolution step 5: f(x)= 8.78181\n",
"differential_evolution step 6: f(x)= 8.78181\n",
"differential_evolution step 7: f(x)= 8.78181\n",
"differential_evolution step 8: f(x)= 8.70813\n",
"differential_evolution step 9: f(x)= 8.55749\n",
"differential_evolution step 10: f(x)= 8.45097\n",
"differential_evolution step 11: f(x)= 8.22163\n",
"differential_evolution step 12: f(x)= 8.06583\n",
"differential_evolution step 13: f(x)= 8.06583\n",
"differential_evolution step 14: f(x)= 8.06583\n",
"differential_evolution step 15: f(x)= 8.06583\n",
"differential_evolution step 16: f(x)= 8.06543\n",
"differential_evolution step 17: f(x)= 8.06543\n",
"differential_evolution step 18: f(x)= 8.0514\n",
"differential_evolution step 19: f(x)= 8.04614\n",
"differential_evolution step 20: f(x)= 8.04614\n",
"differential_evolution step 21: f(x)= 8.0449\n",
"differential_evolution step 22: f(x)= 8.0449\n",
"differential_evolution step 23: f(x)= 8.0449\n",
"differential_evolution step 24: f(x)= 8.0449\n",
"differential_evolution step 25: f(x)= 8.0449\n",
"differential_evolution step 26: f(x)= 8.0449\n",
"differential_evolution step 1: f(x)= 9.91727\n",
"differential_evolution step 2: f(x)= 9.06802\n",
"differential_evolution step 3: f(x)= 9.06802\n",
"differential_evolution step 4: f(x)= 9.06802\n",
"differential_evolution step 5: f(x)= 8.4285\n",
"differential_evolution step 6: f(x)= 8.05457\n",
"differential_evolution step 7: f(x)= 8.05457\n",
"differential_evolution step 8: f(x)= 8.05457\n",
"differential_evolution step 9: f(x)= 8.05457\n",
"differential_evolution step 10: f(x)= 8.05457\n",
"differential_evolution step 11: f(x)= 8.05457\n",
"differential_evolution step 12: f(x)= 8.05457\n",
"differential_evolution step 13: f(x)= 8.05457\n",
"differential_evolution step 14: f(x)= 8.05457\n",
"differential_evolution step 15: f(x)= 8.05171\n",
"differential_evolution step 16: f(x)= 8.05171\n",
"differential_evolution step 17: f(x)= 8.04794\n",
"differential_evolution step 18: f(x)= 8.04555\n",
"differential_evolution step 19: f(x)= 8.04489\n",
"differential_evolution step 20: f(x)= 8.04489\n",
"differential_evolution step 21: f(x)= 8.04489\n",
"differential_evolution step 22: f(x)= 8.04489\n",
"differential_evolution step 23: f(x)= 8.04489\n",
"differential_evolution step 24: f(x)= 8.04489\n",
"differential_evolution step 25: f(x)= 8.04489\n",
"differential_evolution step 26: f(x)= 8.04489\n",
"differential_evolution step 27: f(x)= 8.04489\n",
"differential_evolution step 28: f(x)= 8.04489\n",
"differential_evolution step 29: f(x)= 8.04489\n",
"differential_evolution step 30: f(x)= 8.04489\n",
"differential_evolution step 31: f(x)= 8.04489\n",
"hyperparameters: [1.43486911 0.11327405]\n",
"hyperparameters: [1.4350853 0.12085441]\n",
"prediction : [2.74]\n",
"uncertainty: [1.19786022]\n"
"uncertainty: [1.19795046]\n"
]
}
],
@@ -124,24 +120,17 @@
" return hps[0] * obj.matern_kernel_diff1(d,hps[1])\n",
" \n",
"\n",
"bounds = np.array([[0.001,100.],[0.001,100]])\n",
"\n",
"my_gp = GP(1, x_data,y_data,init_hyperparameters=np.ones((2)), \n",
" hyperparameter_bounds=bounds, gp_kernel_function=kernel, info = True)\n",
"my_gp.train()\n",
"\n",
"my_gp = GP(1, x_data,y_data,init_hyperparameters=np.ones((2)), gp_kernel_function=kernel, info = True)\n",
"\n",
"bounds = np.array([[0.001,100.],[0.001,100]])\n",
"my_gp.train(hyperparameter_bounds=bounds)\n",
"\n",
"print(\"hyperparameters: \", my_gp.hyperparameters)\n",
"print(\"prediction : \",my_gp.posterior_mean({'full'})[\"f(x)\"])\n",
"print(\"uncertainty: \",np.sqrt(my_gp.posterior_covariance({'full'})[\"v(x)\"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e409d084",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
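The diff above shows the headline API change of this commit: hyperparameter_bounds is no longer passed to the GP constructor and is handed to train() instead. A minimal sketch of that pattern on ordinary Euclidean data (the toy data and bound values below are illustrative, not taken from the notebook):

```python
import numpy as np
from fvgp import GP

# Illustrative 1-d toy data.
x_data = np.linspace(0., 10., 20).reshape(-1, 1)
y_data = np.sin(x_data[:, 0])

# After this commit, bounds may be omitted at initialization ...
my_gp = GP(1, x_data, y_data, init_hyperparameters=np.ones(2))

# ... and supplied to the training call instead.
bounds = np.array([[0.001, 100.], [0.001, 100.]])
my_gp.train(hyperparameter_bounds=bounds)

print("hyperparameters:", my_gp.hyperparameters)
```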
99 changes: 43 additions & 56 deletions docs/source/examples/SingleTaskTest.ipynb

Large diffs are not rendered by default.

128,492 changes: 19 additions & 128,473 deletions examples/MultiTaskTest.ipynb

Large diffs are not rendered by default.

79 changes: 34 additions & 45 deletions examples/NonEuclideanInputSpaces.ipynb
@@ -19,7 +19,7 @@
"outputs": [],
"source": [
"#install the newest version of fvgp\n",
"#!pip install fvgp==4.0.9"
"#!pip install fvgp==4.0.11"
]
},
{
@@ -56,40 +56,36 @@
"name": "stdout",
"output_type": "stream",
"text": [
"differential_evolution step 1: f(x)= 8.78181\n",
"differential_evolution step 2: f(x)= 8.78181\n",
"differential_evolution step 3: f(x)= 8.78181\n",
"differential_evolution step 4: f(x)= 8.78181\n",
"differential_evolution step 5: f(x)= 8.78181\n",
"differential_evolution step 6: f(x)= 8.78181\n",
"differential_evolution step 7: f(x)= 8.78181\n",
"differential_evolution step 8: f(x)= 8.70813\n",
"differential_evolution step 9: f(x)= 8.55749\n",
"differential_evolution step 10: f(x)= 8.45097\n",
"differential_evolution step 11: f(x)= 8.22163\n",
"differential_evolution step 12: f(x)= 8.06583\n",
"differential_evolution step 13: f(x)= 8.06583\n",
"differential_evolution step 14: f(x)= 8.06583\n",
"differential_evolution step 15: f(x)= 8.06583\n",
"differential_evolution step 16: f(x)= 8.06543\n",
"differential_evolution step 17: f(x)= 8.06543\n",
"differential_evolution step 18: f(x)= 8.0514\n",
"differential_evolution step 19: f(x)= 8.04614\n",
"differential_evolution step 20: f(x)= 8.04614\n",
"differential_evolution step 21: f(x)= 8.0449\n",
"differential_evolution step 22: f(x)= 8.0449\n",
"differential_evolution step 23: f(x)= 8.0449\n",
"differential_evolution step 24: f(x)= 8.0449\n",
"differential_evolution step 25: f(x)= 8.0449\n",
"differential_evolution step 26: f(x)= 8.0449\n",
"differential_evolution step 1: f(x)= 9.91727\n",
"differential_evolution step 2: f(x)= 9.06802\n",
"differential_evolution step 3: f(x)= 9.06802\n",
"differential_evolution step 4: f(x)= 9.06802\n",
"differential_evolution step 5: f(x)= 8.4285\n",
"differential_evolution step 6: f(x)= 8.05457\n",
"differential_evolution step 7: f(x)= 8.05457\n",
"differential_evolution step 8: f(x)= 8.05457\n",
"differential_evolution step 9: f(x)= 8.05457\n",
"differential_evolution step 10: f(x)= 8.05457\n",
"differential_evolution step 11: f(x)= 8.05457\n",
"differential_evolution step 12: f(x)= 8.05457\n",
"differential_evolution step 13: f(x)= 8.05457\n",
"differential_evolution step 14: f(x)= 8.05457\n",
"differential_evolution step 15: f(x)= 8.05171\n",
"differential_evolution step 16: f(x)= 8.05171\n",
"differential_evolution step 17: f(x)= 8.04794\n",
"differential_evolution step 18: f(x)= 8.04555\n",
"differential_evolution step 19: f(x)= 8.04489\n",
"differential_evolution step 20: f(x)= 8.04489\n",
"differential_evolution step 21: f(x)= 8.04489\n",
"differential_evolution step 22: f(x)= 8.04489\n",
"differential_evolution step 23: f(x)= 8.04489\n",
"differential_evolution step 24: f(x)= 8.04489\n",
"differential_evolution step 25: f(x)= 8.04489\n",
"differential_evolution step 26: f(x)= 8.04489\n",
"differential_evolution step 27: f(x)= 8.04489\n",
"differential_evolution step 28: f(x)= 8.04489\n",
"differential_evolution step 29: f(x)= 8.04489\n",
"differential_evolution step 30: f(x)= 8.04489\n",
"differential_evolution step 31: f(x)= 8.04489\n",
"hyperparameters: [1.43486911 0.11327405]\n",
"hyperparameters: [1.4350853 0.12085441]\n",
"prediction : [2.74]\n",
"uncertainty: [1.19786022]\n"
"uncertainty: [1.19795046]\n"
]
}
],
@@ -124,24 +120,17 @@
" return hps[0] * obj.matern_kernel_diff1(d,hps[1])\n",
" \n",
"\n",
"bounds = np.array([[0.001,100.],[0.001,100]])\n",
"\n",
"my_gp = GP(1, x_data,y_data,init_hyperparameters=np.ones((2)), \n",
" hyperparameter_bounds=bounds, gp_kernel_function=kernel, info = True)\n",
"my_gp.train()\n",
"\n",
"my_gp = GP(1, x_data,y_data,init_hyperparameters=np.ones((2)), gp_kernel_function=kernel, info = True)\n",
"\n",
"bounds = np.array([[0.001,100.],[0.001,100]])\n",
"my_gp.train(hyperparameter_bounds=bounds)\n",
"\n",
"print(\"hyperparameters: \", my_gp.hyperparameters)\n",
"print(\"prediction : \",my_gp.posterior_mean({'full'})[\"f(x)\"])\n",
"print(\"uncertainty: \",np.sqrt(my_gp.posterior_covariance({'full'})[\"v(x)\"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e409d084",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
99 changes: 43 additions & 56 deletions examples/SingleTaskTest.ipynb

Large diffs are not rendered by default.

74 changes: 37 additions & 37 deletions examples/gp2ScaleTest.ipynb

Large diffs are not rendered by default.

54 changes: 32 additions & 22 deletions fvgp/gp.py
@@ -39,8 +39,9 @@ class GP():
----------
input_space_dim : int
Dimensionality of the input space (D). If the input is non-Euclidean, the input dimensionality will be ignored.
x_data : np.ndarray
The input point positions. Shape (V x D), where D is the `input_space_dim`.
x_data : np.ndarray or set or list
The input point positions. Shape (V x D), where D is the `input_space_dim`. If dealing with non-Euclidean inputs
x_data should be a set or iterable, not a numpy array.
y_data : np.ndarray
The values of the data points. Shape (V,1) or (V).
init_hyperparameters : np.ndarray, optional
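The revised docstring above states that for non-Euclidean input spaces x_data should be a set or list rather than a numpy array. A hedged sketch of what such inputs can look like with a user-defined kernel over arbitrary objects (the string inputs and the distance function below are illustrative assumptions, not part of this commit):

```python
import numpy as np
from fvgp import GP

# Non-Euclidean inputs: arbitrary objects, here short strings of equal length.
x_data = ["AAB", "ABB", "BBB", "BAA"]
y_data = np.array([1.0, 2.0, 3.0, 2.5])

def hamming(a, b):
    # Illustrative distance between two strings of equal length.
    return float(sum(c1 != c2 for c1, c2 in zip(a, b)))

def kernel(x1, x2, hps, obj):
    # Build a distance matrix between the two input collections and
    # feed it to a stationary kernel, as in the notebook examples.
    d = np.array([[hamming(a, b) for b in x2] for a in x1])
    return hps[0] * obj.matern_kernel_diff1(d, hps[1])

# Initial hyperparameters are still required for non-Euclidean inputs;
# bounds can now be deferred to train().
my_gp = GP(1, x_data, y_data, init_hyperparameters=np.ones(2), gp_kernel_function=kernel)
my_gp.train(hyperparameter_bounds=np.array([[0.001, 100.], [0.001, 100.]]))
```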
@@ -239,20 +240,21 @@ def __init__(
else:
if not callable(gp_kernel_function):
raise Exception(
"For GPs on non-Eucledian input spaces you need a user-defined kernel, hyperparameters, and hyperparameter_bounds.")
"For GPs on non-Eucledian input spaces you need a user-defined kernel, hyperparameters, \
and hyperparameter_bounds.")
input_space_dim = 1
self.non_Euclidean = True

if self.non_Euclidean and (init_hyperparameters is None or hyperparameter_bounds is None):
if self.non_Euclidean and init_hyperparameters is None:
raise Exception(
"You are running fvGP on non-Euclidean inputs. Please provide initial hyperparameters and hyperparameter bounds.")
"You are running fvGP on non-Euclidean inputs. Please provide initial hyperparameters.")
if np.ndim(y_data) == 2: y_data = y_data[:, 0]
if compute_device == 'gpu':
try:
import torch
except:
raise Exception(
"You have specified the 'gpu' as your compute device. You need to install pytorch manually for this to work.")
"You have specified the 'gpu' as your compute device. You need to install pytorch\
manually for this to work.")
self.normalize_y = normalize_y
self.input_space_dim = input_space_dim
self.x_data = x_data
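The same hunk also touches the compute_device='gpu' path, which raises if pytorch is not installed. A simple way to fall back to the CPU before constructing the GP (the availability check itself is an illustration, not part of fvgp):

```python
import importlib.util
import numpy as np
from fvgp import GP

# Use the GPU only if pytorch is importable; otherwise fall back to the CPU.
compute_device = "gpu" if importlib.util.find_spec("torch") is not None else "cpu"

x_data = np.random.rand(50, 2)
y_data = np.random.rand(50)
my_gp = GP(2, x_data, y_data, compute_device=compute_device)
```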
@@ -267,29 +269,34 @@ def __init__(
self.KVinv = None
self.mcmc_info = None
self.gp2Scale = gp2Scale
self.hyperparameter_bounds = hyperparameter_bounds

if (callable(gp_kernel_function) or callable(gp_mean_function) or callable(
gp_noise_function)) and init_hyperparameters is None:
warnings.warn(
"You have provided callables for kernel, mean, or noise functions but no initial hyperparameters.",
"You have provided callables for kernel, mean, or noise \
functions but no initial hyperparameters.",
stacklevel=2)
warnings.warn("It is likley they have to be defined for a success initialization.", stacklevel=2)
warnings.warn("It is likely they have to be defined for a success initialization.", stacklevel=2)

##########################################
#######prepare hyper parameters###########
##########################################
if hyperparameter_bounds is None and not self.non_Euclidean:
if callable(gp_kernel_function):
raise Exception(
"You have not provided hyperparameter bounds but a kernel function. Your kernel needs user defined hyperparameters and bounds.")
hyperparameter_bounds = np.zeros((input_space_dim + 1, 2))
hyperparameter_bounds[0] = np.array([np.var(y_data) / 100., np.var(y_data) * 10.])
for i in range(input_space_dim):
range_xi = np.max(x_data[:, i]) - np.min(x_data[:, i])
hyperparameter_bounds[i + 1] = np.array([range_xi / 100., range_xi * 10.])
self.hyperparameter_bounds = hyperparameter_bounds
if self.hyperparameter_bounds is None:
if self.non_Euclidean:
if callable(gp_kernel_function):
warnings.warn("You have not provided hyperparameter bounds but a kernel function. \n \
Make sure you provide them tot he training.")
else:
hyperparameter_bounds = np.zeros((input_space_dim + 1, 2))
hyperparameter_bounds[0] = np.array([np.var(y_data) / 100., np.var(y_data) * 10.])
for i in range(input_space_dim):
range_xi = np.max(x_data[:, i]) - np.min(x_data[:, i])
hyperparameter_bounds[i + 1] = np.array([range_xi / 100., range_xi * 10.])
self.hyperparameter_bounds = hyperparameter_bounds

if init_hyperparameters is None:
if self.hyperparameter_bounds is None: raise Exception("hyperparameter_bounds non available.")
init_hyperparameters = np.random.uniform(low=self.hyperparameter_bounds[:, 0],
high=self.hyperparameter_bounds[:, 1],
size=len(self.hyperparameter_bounds))
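When neither bounds nor a user kernel are supplied (the Euclidean case), the code above derives default bounds from the data: the first row bounds the signal variance by [var(y)/100, 10*var(y)] and each further row bounds a per-dimension length scale by [range(x_i)/100, 10*range(x_i)]; initial hyperparameters are then drawn uniformly from those bounds. A standalone sketch of that heuristic (the function name is made up for illustration):

```python
import numpy as np

def default_hyperparameter_bounds(x_data, y_data):
    # Mirrors the default-bounds heuristic for the built-in kernel:
    # one signal-variance bound followed by one length-scale bound per dimension.
    dim = x_data.shape[1]
    bounds = np.zeros((dim + 1, 2))
    bounds[0] = np.array([np.var(y_data) / 100., np.var(y_data) * 10.])
    for i in range(dim):
        range_xi = np.max(x_data[:, i]) - np.min(x_data[:, i])
        bounds[i + 1] = np.array([range_xi / 100., range_xi * 10.])
    return bounds

# Example: default bounds and a random initial guess for 3-d data.
x = np.random.rand(20, 3)
y = np.random.rand(20)
b = default_hyperparameter_bounds(x, y)
init_hps = np.random.uniform(low=b[:, 0], high=b[:, 1], size=len(b))
print(b, init_hps)
```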
@@ -299,12 +306,14 @@
##########################################
if self.sparse_mode and self.store_inv:
warnings.warn(
"sparse_mode and store_inv enabled but they should not be used together. I'll set store_inv = False.",
"sparse_mode and store_inv enabled but they should not be used together. \
I'll set store_inv = False.",
stacklevel=2)
self.store_inv = False
if self.sparse_mode and not callable(gp_kernel_function):
warnings.warn("You have chosen to activate sparse mode. Great! \n \
But you have not supplied a kernel that is compactly supported. \n I will use an anisotropic Wendland kernel for now.",
But you have not supplied a kernel that is compactly supported. \n I will use an \
anisotropic Wendland kernel for now.",
stacklevel=2)
gp_kernel_function = self.wendland_anisotropic

@@ -314,7 +323,8 @@
if gp2Scale:
if gp2Scale_dask_client is None:
gp2Scale_dask_client = Client()
warnings.warn("gp2Scale needs a 'gp2Scale_dask_client'. Set to distributed.Client().", stacklevel=2)
warnings.warn("gp2Scale needs a 'gp2Scale_dask_client'. \
Set to distributed.Client().", stacklevel=2)
self.gp2Scale_dask_client = gp2Scale_dask_client

if not callable(gp_kernel_function):
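The final hunk only rewraps the warning text: when gp2Scale is enabled without a gp2Scale_dask_client, a default distributed.Client() is created. A hedged sketch of passing an explicit client instead (the cluster settings and data sizes below are illustrative; real gp2Scale runs target much larger data sets and may require further arguments):

```python
import numpy as np
from dask.distributed import Client
from fvgp import GP

x_data = np.random.rand(1000, 3)
y_data = np.random.rand(1000)

# Explicit client instead of letting fvgp create a default local one.
client = Client(n_workers=4, threads_per_worker=1)

my_gp = GP(3, x_data, y_data,
           init_hyperparameters=np.ones(4),
           gp2Scale=True,
           gp2Scale_dask_client=client)
```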
