Skip to content

Commit

Permalink
Merge pull request #23 from lbl-camera/dev2023Sept26
Browse files Browse the repository at this point in the history
Dev2023 sept26
  • Loading branch information
MarcusMNoack committed Oct 12, 2023
2 parents ddf2176 + e85a330 commit 2d75e58
Show file tree
Hide file tree
Showing 12 changed files with 535 additions and 498 deletions.
2 changes: 1 addition & 1 deletion docs/source/examples/MultiTaskTest.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
"metadata": {},
"source": [
"# Multi-Task Test\n",
"At first we have to install the newest version of fvgGP"
"At first we have to install the newest version of fvGP"
]
},
{
Expand Down
15 changes: 11 additions & 4 deletions examples/MultiTaskTest.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
"metadata": {},
"source": [
"# Multi-Task Test\n",
"At first we have to install the newest version of fvgGP"
"At first we have to install the newest version of fvGP"
]
},
{
Expand All @@ -17,7 +17,7 @@
"outputs": [],
"source": [
"##first install the newest version of fvgp\n",
"#!pip install fvgp==4.0.0"
"#!pip install fvgp==4.0.5"
]
},
{
Expand Down Expand Up @@ -177,12 +177,11 @@
" #gp_kernel_function=mkernel #what happens if comment this out? (adjust bounds below)\n",
" )\n",
"#change this based on kernel choice\n",
"hps_bounds = my_gp2.hps_bounds \n",
"#hps_bounds = np.array([[0.001,10000.],[1.,1000.]])\n",
"\n",
"#my_gp2.update_gp_data(x_data3,y_data3)\n",
"print(\"Global Training in progress\")\n",
"my_gp2.train(hps_bounds, max_iter = 200)"
"my_gp2.train(max_iter = 2)"
]
},
{
Expand Down Expand Up @@ -272,6 +271,14 @@
"fig.show()\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1694322d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
Expand Down
4 changes: 2 additions & 2 deletions examples/SingleTaskTest.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
"outputs": [],
"source": [
"##first install the newest version of fvgp\n",
"#!pip install fvgp==4.0.0"
"#!pip install fvgp==4.0.5"
]
},
{
Expand Down Expand Up @@ -215,7 +215,7 @@
"outputs": [],
"source": [
"for i in range(10):\n",
" time.sleep(5)\n",
" time.sleep(2)\n",
" my_gp1.update_hyperparameters(opt_obj)\n",
" print(my_gp1.hyperparameters)\n",
" print(\"\")"
Expand Down
4 changes: 2 additions & 2 deletions examples/gp2ScaleTest.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"# gp2Scale \n",
"gp2Scale is a special setting in fvgp that combines non-stationary, compactly-supported kernels, HPC distributed computing, and sparse linear algebra to allow scale-up of exact GPs to millions of data points. Here we run a moderately-sized GP, just because we assume you might run this locally.\n",
"\n",
"I hope it is clear how cool it is what is happening here. If you have a dask client that points to a remote cluster with 500 GPUs, you will distribute the covariance matrix computation across those. The full matrix is sparse will be fast to work with in downstream operations. The algorithm only makes use of naturally-occuring sparsity, so the result is exact in contrast to Vecchia or inducing-point methods."
"I hope it is clear how cool it is what is happening here. If you have a dask client that points to a remote cluster with 500 GPUs, you will distribute the covariance matrix computation across those. The full matrix is sparse and will be fast to work with in downstream operations. The algorithm only makes use of naturally-occurring sparsity, so the result is exact in contrast to Vecchia or inducing-point methods."
]
},
{
Expand All @@ -19,7 +19,7 @@
"outputs": [],
"source": [
"##first install the newest version of fvgp\n",
"#!pip install fvgp==4.0.0"
"#!pip install fvgp==4.0.5"
]
},
{
Expand Down
4 changes: 2 additions & 2 deletions examples/gp2Scale_example_HPC_jobscript.sl
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ echo ${port}
echo "starting scheduler"
dask-scheduler --no-dashboard --no-show --host ${hn} --port ${port} &
echo "starting workers"
srun -o dask_worker_info.txt dask-worker ${hn}:${port} &
srun -o dask_worker_info.txt dask-worker ${hn}:${port} --nthreads 1 &
echo "starting gp2Scale"
python -u run_gp2ScaleGPU.py ${hn}:${port} ${number_of_workers}
python -u gp2Scale_example_HPC_RunScript.py ${hn}:${port} ${number_of_workers}

10 changes: 8 additions & 2 deletions fvgp/fvgp.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,10 @@ class provides all the methods described for the GP class.
fvgp.fvGP.gp_deep_kernel_layer_width. If you specify
another kernel, please provide
init_hyperparameters.
hyperparameter_bounds : np.ndarray, optional
A 2d numpy array of shape (N x 2), where N is the number of needed hyperparameters.
The default is None; in that case, hyperparameter_bounds have to be specified
in the train calls or default bounds are used. Those only work for the default kernel.
output_positions : np.ndarray, optional
A 3-D numpy array of shape (U x output_number x output_dim), so that for each measurement position, the outputs
are clearly defined by their positions in the output space. The default is np.array([[0],[1],[2],[3],...,[output_number - 1]]) for each
Expand Down Expand Up @@ -204,6 +208,7 @@ def __init__(
x_data,
y_data,
init_hyperparameters = None,
hyperparameter_bounds = None,
output_positions = None,
noise_variances = None,
compute_device = "cpu",
Expand Down Expand Up @@ -262,9 +267,9 @@ def __init__(
self.hps_bounds[2:] = np.array([-1.,1.])
init_hps = np.random.uniform(low = self.hps_bounds[:,0], high = self.hps_bounds[:,1],size = len(self.hps_bounds))
warnings.warn("Hyperparameter bounds have been initialized automatically \
\n for the default kernel in fvgp. They have to be provided to the training.\
\n For instance: GP.train(hyperparameter_bounds = fvgp_obj_name.hps_bounds)\
\n for the default kernel in fvgp. They will automatically be used for the training.\
\n However, you can also define and provide new bounds.")
hyperparameter_bounds = self.hps_bounds


####init GP
Expand All @@ -273,6 +278,7 @@ def __init__(
x_data,
y_data,
init_hyperparameters = init_hps,
hyperparameter_bounds = hyperparameter_bounds,
noise_variances = noise_variances,
compute_device = compute_device,
gp_kernel_function = gp_kernel_function,
Expand Down

0 comments on commit 2d75e58

Please sign in to comment.