
Commit

added capability of user-defined training objective functions, clean up, updated tests
MarcusMNoack committed Dec 8, 2023
1 parent 8ddeae3 commit 455840e
Showing 10 changed files with 128,957 additions and 291 deletions.
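
The headline change is support for user-defined training objective functions. Below is a minimal, hypothetical sketch of how such an objective might be supplied to training; the objective_function keyword and the neg_log_likelihood call inside the objective are assumptions for illustration, not read from this diff.

# Hypothetical usage sketch (keyword names assumed, not taken from this commit)
import numpy as np
from fvgp import GP

x_data = np.random.rand(20, 1)
y_data = np.sin(5. * x_data[:, 0])
my_gp = GP(1, x_data, y_data, init_hyperparameters=np.ones((2)))

def my_objective(hyperparameters):
    # e.g. a regularized negative log marginal likelihood (assumed helper)
    return my_gp.neg_log_likelihood(hyperparameters) + 0.1 * np.sum(hyperparameters**2)

my_gp.train(hyperparameter_bounds=np.array([[0.001, 10.], [0.001, 10.]]),
            objective_function=my_objective, max_iter=20)
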
128,580 changes: 128,541 additions & 39 deletions examples/MultiTaskTest.ipynb

Large diffs are not rendered by default.

92 changes: 47 additions & 45 deletions examples/NonEuclideanInputSpaces.ipynb
@@ -13,30 +13,21 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 1,
"id": "61c3f3bd",
"metadata": {},
"outputs": [],
"source": [
"#install the newest version of fvgp\n",
"#!pip install fvgp==4.0.11"
"#!pip install fvgp==4.0.13"
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 2,
"id": "b5399565",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The autoreload extension is already loaded. To reload it, use:\n",
" %reload_ext autoreload\n"
]
}
],
"outputs": [],
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
@@ -48,50 +39,53 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 4,
"id": "b91e69d3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"differential_evolution step 1: f(x)= 9.91727\n",
"differential_evolution step 2: f(x)= 9.06802\n",
"differential_evolution step 3: f(x)= 9.06802\n",
"differential_evolution step 4: f(x)= 9.06802\n",
"differential_evolution step 5: f(x)= 8.4285\n",
"differential_evolution step 6: f(x)= 8.05457\n",
"differential_evolution step 7: f(x)= 8.05457\n",
"differential_evolution step 8: f(x)= 8.05457\n",
"differential_evolution step 9: f(x)= 8.05457\n",
"differential_evolution step 10: f(x)= 8.05457\n",
"differential_evolution step 11: f(x)= 8.05457\n",
"differential_evolution step 12: f(x)= 8.05457\n",
"differential_evolution step 13: f(x)= 8.05457\n",
"differential_evolution step 14: f(x)= 8.05457\n",
"differential_evolution step 15: f(x)= 8.05171\n",
"differential_evolution step 16: f(x)= 8.05171\n",
"differential_evolution step 17: f(x)= 8.04794\n",
"differential_evolution step 18: f(x)= 8.04555\n",
"differential_evolution step 1: f(x)= 9.45943\n",
"differential_evolution step 2: f(x)= 9.04232\n",
"differential_evolution step 3: f(x)= 8.52913\n",
"differential_evolution step 4: f(x)= 8.20242\n",
"differential_evolution step 5: f(x)= 8.06388\n",
"differential_evolution step 6: f(x)= 8.05498\n",
"differential_evolution step 7: f(x)= 8.05063\n",
"differential_evolution step 8: f(x)= 8.05063\n",
"differential_evolution step 9: f(x)= 8.05063\n",
"differential_evolution step 10: f(x)= 8.04662\n",
"differential_evolution step 11: f(x)= 8.04662\n",
"differential_evolution step 12: f(x)= 8.04496\n",
"differential_evolution step 13: f(x)= 8.04496\n",
"differential_evolution step 14: f(x)= 8.04496\n",
"differential_evolution step 15: f(x)= 8.04491\n",
"differential_evolution step 16: f(x)= 8.04491\n",
"differential_evolution step 17: f(x)= 8.04489\n",
"differential_evolution step 18: f(x)= 8.04489\n",
"differential_evolution step 19: f(x)= 8.04489\n",
"differential_evolution step 20: f(x)= 8.04489\n",
"differential_evolution step 21: f(x)= 8.04489\n",
"differential_evolution step 22: f(x)= 8.04489\n",
"differential_evolution step 23: f(x)= 8.04489\n",
"differential_evolution step 24: f(x)= 8.04489\n",
"differential_evolution step 25: f(x)= 8.04489\n",
"differential_evolution step 26: f(x)= 8.04489\n",
"differential_evolution step 27: f(x)= 8.04489\n",
"hyperparameters: [1.4350853 0.12085441]\n",
"hyperparameters: [1.43455918 0.10760601]\n",
"prediction : [2.74]\n",
"uncertainty: [1.19795046]\n"
"uncertainty: [1.19773084]\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/marcus/Coding/fvGP/fvgp/gp.py:290: UserWarning: You have not provided hyperparameter bounds but a kernel function. \n",
" Make sure you provide hyperparameter bounds to the training.\n",
" warnings.warn(\"You have not provided hyperparameter bounds but a kernel function. \\n \\\n",
"/tmp/ipykernel_1003689/1300562773.py:33: UserWarning: No noise function or measurement noise provided. Noise variances will be set to 1% of mean(y_data).\n",
" my_gp = GP(1, x_data,y_data,init_hyperparameters=np.ones((2)), gp_kernel_function=kernel, info = True)\n"
]
}
],
"source": [
"#making the x_data a set will allow us to put any objects or structures into it.\n",
"x_data = {('hello'),('world'),('this'),('is'),('fvgp')}\n",
"x_data = [('hello'),('world'),('this'),('is'),('fvgp')]\n",
"y_data = np.array([2.,1.9,1.8,3.0,5.])\n",
"\n",
"\n",
@@ -131,13 +125,21 @@
"print(\"prediction : \",my_gp.posterior_mean({'full'})[\"f(x)\"])\n",
"print(\"uncertainty: \",np.sqrt(my_gp.posterior_covariance({'full'})[\"v(x)\"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f67d948f-c643-487d-ab5b-baf3d53f7b60",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "fvgp_dev",
"language": "python",
"name": "python3"
"name": "fvgp_dev"
},
"language_info": {
"codemirror_mode": {
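
The notebook above runs a GP over a non-Euclidean input space: the inputs are plain strings, passed as a Python list (this commit changes them from a set to a list), and the covariance is supplied through a user-defined kernel. The notebook's own kernel is collapsed in this diff, so the sketch below only illustrates the pattern; the string distance and the squared-exponential form are made up, while the GP(...) and train(...) calls mirror what the cell output shows.

# Illustrative sketch of a GP over string-valued inputs (kernel is assumed, not the notebook's)
import numpy as np
from fvgp import GP

x_data = ['hello', 'world', 'this', 'is', 'fvgp']   # arbitrary objects, not vectors
y_data = np.array([2., 1.9, 1.8, 3.0, 5.])

def string_distance(s1, s2):
    # toy distance: length difference plus character mismatches
    return abs(len(s1) - len(s2)) + sum(c1 != c2 for c1, c2 in zip(s1, s2))

def kernel(x1, x2, hps, obj):
    # x1 and x2 are lists of objects; return the full covariance matrix
    d = np.array([[string_distance(a, b) for b in x2] for a in x1])
    return hps[0] * np.exp(-d**2 / hps[1])

my_gp = GP(1, x_data, y_data, init_hyperparameters=np.ones((2)),
           gp_kernel_function=kernel, info=True)
my_gp.train(hyperparameter_bounds=np.array([[0.001, 100.], [0.001, 100.]]))
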
153 changes: 104 additions & 49 deletions examples/SingleTaskTest.ipynb

Large diffs are not rendered by default.

108 changes: 71 additions & 37 deletions examples/gp2ScaleTest.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion examples/gp2Scale_example_HPC_RunScript.py
@@ -91,7 +91,7 @@ def main():
print("===============")
print("===============")

my_gp2S.train(hps_bounds, max_iter = 20)
my_gp2S.train(hyperparameter_bounds = hps_bounds, max_iter = 20)


if __name__ == '__main__':
39 changes: 22 additions & 17 deletions fvgp/fvgp.py
@@ -6,7 +6,7 @@

class fvGP(GP):
"""
This class provides all the tools for a multi-task Gaussian Process (GP).
This class provides all the tools for a multitask Gaussian Process (GP).
This class allows for full HPC support for training. After initialization, this
class provides all the methods described for the GP class.
@@ -21,7 +21,7 @@ class provides all the methods described for the GP class.
N ... arbitrary integers (N1, N2,...)
The main logic of fvGP is that any multi-task GP is just a single-task GP
The main logic of fvGP is that any multitask GP is just a single-task GP
over a Cartesian product space of input and output space, as long as the kernel
is flexible enough, so prepare to work on your kernel. This is the best
way to give the user optimal control and power. At various instances, for instances
@@ -36,7 +36,7 @@ class provides all the methods described for the GP class.
[0.2, 0.3,1],[0.9,0.6,1]]
This has to be understood and taken into account when customizing fvGP for multi-task
This has to be understood and taken into account when customizing fvGP for multitask
use.
Parameters
@@ -50,7 +50,7 @@ class provides all the methods described for the GP class.
output_number : int
Number of output values.
x_data : np.ndarray
The input point positions. Shape (V x D), where D is the `'input_space_dim'`.
The input point positions. Shape (V x D), where D is the `input_space_dim`.
y_data : np.ndarray
The values of the data points. Shape (V,No).
init_hyperparameters : np.ndarray, optional
@@ -243,7 +243,7 @@ def __init__(
ram_economy=False,
args=None,
info=False,
):
):

self.orig_input_space_dim = input_space_dim
self.output_num, self.output_dim = output_number, output_space_dim
@@ -252,14 +252,16 @@
if not isinstance(x_data,np.ndarray):
raise Exception("Multi-task GPs on non-Euclidean input spaces are not implemented yet.")


if np.ndim(y_data) == 1: raise ValueError("The output number is 1, you can use GP for single-task GPs")
if output_number != len(y_data[0]): raise ValueError("The output number is not in agreement with the data values given")
if output_space_dim == 1 and isinstance(output_positions, np.ndarray) == False:
if np.ndim(y_data) == 1:
raise ValueError("The output number is 1, you can use GP for single-task GPs")
if output_number != len(y_data[0]):
raise ValueError("The output number is not in agreement with the data values given")
if output_space_dim == 1 and isinstance(output_positions, np.ndarray) is False:
self.output_positions = self._compute_standard_output_positions(len(x_data))
elif self.output_dim > 1 and not isinstance(output_positions, np.ndarray):
raise Exception(
"If the dimensionality of the output space is > 1, the value positions have to be given to the fvGP class")
"If the dimensionality of the output space is > 1, \
the value positions have to be given to the fvGP class")
else:
self.output_positions = output_positions

@@ -279,12 +281,15 @@ def __init__(
The default kernel needs pytorch to be installed manually.")
self.gp_deep_kernel_layer_width = gp_deep_kernel_layer_width
self.n = Network(self.iset_dim, gp_deep_kernel_layer_width)
number_of_hps = int(2. * self.iset_dim * gp_deep_kernel_layer_width + gp_deep_kernel_layer_width**2 + 2.*gp_deep_kernel_layer_width + self.iset_dim + 2.)
number_of_hps = int(2. * self.iset_dim * gp_deep_kernel_layer_width +
gp_deep_kernel_layer_width**2 + 2.*gp_deep_kernel_layer_width + self.iset_dim + 2.)
self.hps_bounds = np.zeros((number_of_hps,2))
self.hps_bounds[0] = np.array([np.var(y_data)/10.,np.var(y_data)*10.])
self.hps_bounds[1] = np.array([(np.max(x_data) - np.min(x_data)) / 100., (np.max(x_data) - np.min(x_data)) * 100.])
self.hps_bounds[1] = np.array([(np.max(x_data) - np.min(x_data)) / 100., (np.max(x_data) -
np.min(x_data)) * 100.])
self.hps_bounds[2:] = np.array([-1.,1.])
init_hps = np.random.uniform(low = self.hps_bounds[:,0], high = self.hps_bounds[:,1],size = len(self.hps_bounds))
init_hps = np.random.uniform(low = self.hps_bounds[:, 0],
high = self.hps_bounds[:, 1], size=len(self.hps_bounds))
warnings.warn("Hyperparameter bounds have been initialized automatically \
\n for the default kernel in fvgp. They will automatically used for the training.\
\n However, you can also define and provide new bounds.")
@@ -316,7 +321,6 @@ def __init__(
args=args,
info=info)

################################################################################################
def update_gp_data(
self,
x_data,
@@ -359,7 +363,8 @@ def update_gp_data(
self.output_positions = self._compute_standard_output_positions(len(x_data))
elif self.output_dim > 1 and isinstance(output_positions, np.ndarray) == False:
raise ValueError(
"If the dimensionality of the output space is > 1, the value positions have to be given to the fvGP class. EXIT"
"If the dimensionality of the output space is > 1, \
the value positions have to be given to the fvGP class. EXIT"
)
else:
self.output_positions = output_positions
@@ -417,6 +422,6 @@ def _default_multi_task_kernel(self, x1, x2, hps, obj): # pragma: no cover
hps_nn[b3_indices].reshape(self.iset_dim))
x1_nn = self.n.forward(x1)
x2_nn = self.n.forward(x2)
d = obj._get_distance_matrix(x1_nn,x2_nn)
k = signal_var * obj.matern_kernel_diff1(d,length_scale)
d = obj.get_distance_matrix(x1_nn, x2_nn)
k = signal_var * obj.matern_kernel_diff1(d, length_scale)
return k
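
The updated _default_multi_task_kernel follows the usual deep-kernel pattern: both input sets are pushed through a small network and a Matérn kernel is applied to the pairwise distances between the embeddings. The self-contained sketch below illustrates that pattern only; the two-layer tanh network and the nu = 3/2 Matérn form are assumptions, not fvGP's exact architecture.

# Generic deep-kernel sketch (architecture and Matern order assumed, not copied from fvGP)
import numpy as np

def embed(x, W1, b1, W2, b2):
    # small MLP mapping each input row to an embedding
    h = np.tanh(x @ W1 + b1)
    return np.tanh(h @ W2 + b2)

def matern_3_2(d, length_scale):
    # Matern kernel with nu = 3/2 evaluated on a distance matrix
    s = np.sqrt(3.) * d / length_scale
    return (1. + s) * np.exp(-s)

def deep_kernel(x1, x2, signal_var, length_scale, net_params):
    z1, z2 = embed(x1, *net_params), embed(x2, *net_params)
    d = np.linalg.norm(z1[:, None, :] - z2[None, :, :], axis=-1)  # pairwise distances
    return signal_var * matern_3_2(d, length_scale)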

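The docstring edits above restate fvGP's central convention: a multitask GP is treated as a single-task GP on the Cartesian product of input and output space, so every x-point is repeated once per task with the task index appended. The sketch below reproduces the docstring's own example values; the helper name is illustrative, and how fvGP orders the flattened y-values internally is not shown in this diff.

# Sketch of the product-space data layout described in the fvGP docstring
import numpy as np

def to_product_space(x_data, y_data):
    # append the task index to every x-point and flatten the outputs
    n_points, n_tasks = y_data.shape
    x_aug = np.vstack([np.hstack([x_data, np.full((n_points, 1), float(t))])
                       for t in range(n_tasks)])
    y_flat = np.concatenate([y_data[:, t] for t in range(n_tasks)])
    return x_aug, y_flat

x_data = np.array([[0.2, 0.3], [0.9, 0.6]])
y_data = np.array([[1.0, 2.0], [3.0, 4.0]])       # two tasks per point
x_aug, y_flat = to_product_space(x_data, y_data)
# x_aug == [[0.2, 0.3, 0.], [0.9, 0.6, 0.], [0.2, 0.3, 1.], [0.9, 0.6, 1.]]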