restructuring accomplished but fvgp and gp2Scale not covered yet
MarcusMNoack committed Mar 28, 2024
1 parent d88745b commit 1de56e2
Showing 8 changed files with 743 additions and 373 deletions.
117 changes: 61 additions & 56 deletions fvgp/fvgp.py
@@ -236,7 +236,7 @@ def __init__(
self.output_num, self.output_dim = output_number, output_space_dim
###check the output dims

- if not isinstance(x_data,np.ndarray):
+ if not isinstance(x_data, np.ndarray):
raise Exception("Multi-task GPs on non-Euclidean input spaces are not implemented yet.")

if np.ndim(y_data) == 1:
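
For orientation, the constructor validated in the hunk above expects the raw multi-task data as NumPy arrays, with one column of y_data per task. A minimal sketch of that layout, with all shapes and values invented for illustration only (not fvgp defaults):

import numpy as np

# All numbers below are made-up example values.
N, input_dim, output_number = 100, 3, 2
x_data = np.random.rand(N, input_dim)                 # Euclidean inputs (must be an np.ndarray here)
y_data = np.random.rand(N, output_number)             # one column per task/output
noise_variances = np.full((N, output_number), 0.01)   # optional, one variance per measurement
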
@@ -257,63 +257,64 @@ def __init__(
self.fvgp_x_data = x_data
self.fvgp_y_data = y_data
self.fvgp_noise_variances = noise_variances
- x_data, y_data, noise_variances = self._transform_index_set(x_data,y_data,noise_variances,self.output_positions)
+ x_data, y_data, noise_variances = self._transform_index_set(x_data, y_data, noise_variances,
+ self.output_positions)
init_hps = init_hyperparameters

if gp_kernel_function is None:
gp_kernel_function = self._default_multi_task_kernel
try:
from .deep_kernel_network import Network
- except: raise Exception("You have not specified a kernel and the default kernel will be used. \n \
+ except:
+ raise Exception("You have not specified a kernel and the default kernel will be used. \n \
The default kernel needs pytorch to be installed manually.")
self.gp_deep_kernel_layer_width = gp_deep_kernel_layer_width
self.n = Network(self.iset_dim, gp_deep_kernel_layer_width)
number_of_hps = int(2. * self.iset_dim * gp_deep_kernel_layer_width +
- gp_deep_kernel_layer_width**2 + 2.*gp_deep_kernel_layer_width + self.iset_dim + 2.)
- self.hps_bounds = np.zeros((number_of_hps,2))
- self.hps_bounds[0] = np.array([np.var(y_data)/10.,np.var(y_data)*10.])
+ gp_deep_kernel_layer_width ** 2 + 2. * gp_deep_kernel_layer_width + self.iset_dim + 2.)
+ self.hps_bounds = np.zeros((number_of_hps, 2))
+ self.hps_bounds[0] = np.array([np.var(y_data) / 10., np.var(y_data) * 10.])
self.hps_bounds[1] = np.array([(np.max(x_data) - np.min(x_data)) / 100., (np.max(x_data) -
np.min(x_data)) * 100.])
- self.hps_bounds[2:] = np.array([-1.,1.])
- init_hps = np.random.uniform(low = self.hps_bounds[:, 0],
- high = self.hps_bounds[:, 1], size=len(self.hps_bounds))
+ self.hps_bounds[2:] = np.array([-1., 1.])
+ init_hps = np.random.uniform(low=self.hps_bounds[:, 0],
+ high=self.hps_bounds[:, 1], size=len(self.hps_bounds))
warnings.warn("Hyperparameter bounds have been initialized automatically \
\n for the default kernel in fvgp. They will automatically be used for training.\
\n However, you can also define and provide new bounds.")
hyperparameter_bounds = self.hps_bounds


####init GP
super().__init__(
- self.iset_dim,
- x_data,
- y_data,
- init_hyperparameters=init_hps,
- hyperparameter_bounds=hyperparameter_bounds,
- noise_variances=noise_variances,
- compute_device=compute_device,
- gp_kernel_function=gp_kernel_function,
- gp_kernel_function_grad=gp_kernel_function_grad,
- gp_mean_function=gp_mean_function,
- gp_mean_function_grad=gp_mean_function_grad,
- gp_noise_function=gp_noise_function,
- gp_noise_function_grad=gp_noise_function_grad,
- gp2Scale=gp2Scale,
- gp2Scale_dask_client=gp2Scale_dask_client,
- gp2Scale_batch_size=gp2Scale_batch_size,
- store_inv=store_inv,
- ram_economy=ram_economy,
- args=args,
- info=info)
+ self.iset_dim,
+ x_data,
+ y_data,
+ init_hyperparameters=init_hps,
+ hyperparameter_bounds=hyperparameter_bounds,
+ noise_variances=noise_variances,
+ compute_device=compute_device,
+ gp_kernel_function=gp_kernel_function,
+ gp_kernel_function_grad=gp_kernel_function_grad,
+ gp_mean_function=gp_mean_function,
+ gp_mean_function_grad=gp_mean_function_grad,
+ gp_noise_function=gp_noise_function,
+ gp_noise_function_grad=gp_noise_function_grad,
+ gp2Scale=gp2Scale,
+ gp2Scale_dask_client=gp2Scale_dask_client,
+ gp2Scale_batch_size=gp2Scale_batch_size,
+ store_inv=store_inv,
+ ram_economy=ram_economy,
+ args=args,
+ info=info)

def update_gp_data(
self,
x_data,
y_data,
- output_positions = None,
- noise_variances = None,
- overwrite = False
- ):
+ output_positions=None,
+ noise_variances=None,
+ overwrite=False
+ ):

"""
This function updates the data in the gp object instance.
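
The hunk above also sets up the default deep-kernel hyperparameters: index 0 is the signal variance, index 1 the length scale, and the remaining entries are the network weights and biases, with bounds derived from the data. A small, self-contained sketch of that bookkeeping; iset_dim, the layer width, and the data are assumed example values, not taken from fvgp:

import numpy as np

iset_dim, width = 4, 5                      # assumed example sizes
x_data = np.random.rand(100, 3)
y_data = np.random.rand(100, 2)

# 2 classic hyperparameters + all network weights and biases
number_of_hps = int(2. * iset_dim * width + width ** 2 + 2. * width + iset_dim + 2.)
assert number_of_hps == 2 + 2 * (width * iset_dim) + width ** 2 + 2 * width + iset_dim

hps_bounds = np.zeros((number_of_hps, 2))
hps_bounds[0] = [np.var(y_data) / 10., np.var(y_data) * 10.]      # signal variance
hps_bounds[1] = [(np.max(x_data) - np.min(x_data)) / 100.,
                 (np.max(x_data) - np.min(x_data)) * 100.]        # length scale
hps_bounds[2:] = [-1., 1.]                                        # network parameters
init_hps = np.random.uniform(low=hps_bounds[:, 0], high=hps_bounds[:, 1],
                             size=len(hps_bounds))
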
@@ -357,7 +358,8 @@ def update_gp_data(
######################################
#####transform to index set###########
######################################
- x_data, y_data, noise_variances = self._transform_index_set(x_data,y_data,noise_variances, self.output_positions)
+ x_data, y_data, noise_variances = self._transform_index_set(x_data, y_data, noise_variances,
+ self.output_positions)
super().update_gp_data(self.x_data, self.y_data, noise_variances, overwrite=overwrite)

################################################################################################
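
The _transform_index_set call above flattens the multi-task data onto a single index set: every input point is repeated once per task, with that task's output position appended as extra coordinates (the method itself appears in the next hunk). A standalone NumPy illustration of the idea; the shapes and the single extra coordinate per task are assumptions for this example, not the library's API:

import numpy as np

N, input_dim, output_num = 4, 2, 3                     # assumed example sizes
x_data = np.random.rand(N, input_dim)
y_data = np.random.rand(N, output_num)

# standard output positions: task index j as one extra coordinate, shape (N, output_num, 1)
output_positions = np.tile(np.arange(output_num, dtype=float), (N, 1))[:, :, None]

iset_dim = input_dim + 1
new_points = np.zeros((N * output_num, iset_dim))
new_values = np.zeros(N * output_num)
for i in range(output_num):
    new_points[i * N:(i + 1) * N] = np.column_stack([x_data, output_positions[:, i, :]])
    new_values[i * N:(i + 1) * N] = y_data[:, i]

print(new_points.shape, new_values.shape)              # (12, 3) (12,)
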
@@ -367,44 +369,47 @@ def _compute_standard_output_positions(self, point_number):
value_pos[:, j, :] = j
return value_pos

################################################################################################
################################################################################################
def _transform_index_set(self, x_data, y_data, noise_variances, output_positions):
point_number = len(x_data)
new_points = np.zeros((point_number * self.output_num, self.iset_dim))
new_values = np.zeros((point_number * self.output_num))
- if noise_variances is not None: new_variances = np.zeros((point_number * self.output_num))
- else: new_variances = None
+ if noise_variances is not None:
+ new_variances = np.zeros((point_number * self.output_num))
+ else:
+ new_variances = None
for i in range(self.output_num):
- new_points[i * point_number : (i + 1) * point_number] = \
- np.column_stack([x_data, output_positions[:, i, :]])
- new_values[i * point_number : (i + 1) * point_number] = \
- y_data[:, i]
- if noise_variances is not None: new_variances[i * point_number : (i + 1) * point_number] = \
- noise_variances[:, i]
+ new_points[i * point_number: (i + 1) * point_number] = \
+ np.column_stack([x_data, output_positions[:, i, :]])
+ #if isinstance(x_data, list):
+ # new_points
+ new_values[i * point_number: (i + 1) * point_number] = y_data[:, i]
+ if noise_variances is not None:
+ new_variances[i * point_number: (i + 1) * point_number] = noise_variances[:, i]
return new_points, new_values, new_variances

################################################################################################
################################################################################################
def _default_multi_task_kernel(self, x1, x2, hps, obj): # pragma: no cover
signal_var = hps[0]
length_scale = hps[1]
hps_nn = hps[2:]
- w1_indices = np.arange(0,self.gp_deep_kernel_layer_width * self.iset_dim)
+ w1_indices = np.arange(0, self.gp_deep_kernel_layer_width * self.iset_dim)
last = self.gp_deep_kernel_layer_width * self.iset_dim
- w2_indices = np.arange(last, last + self.gp_deep_kernel_layer_width**2)
- last = last + self.gp_deep_kernel_layer_width**2
- w3_indices = np.arange(last,last + self.gp_deep_kernel_layer_width * self.iset_dim)
+ w2_indices = np.arange(last, last + self.gp_deep_kernel_layer_width ** 2)
+ last = last + self.gp_deep_kernel_layer_width ** 2
+ w3_indices = np.arange(last, last + self.gp_deep_kernel_layer_width * self.iset_dim)
last = last + self.gp_deep_kernel_layer_width * self.iset_dim
- b1_indices = np.arange(last,last + self.gp_deep_kernel_layer_width)
+ b1_indices = np.arange(last, last + self.gp_deep_kernel_layer_width)
last = last + self.gp_deep_kernel_layer_width
- b2_indices = np.arange(last,last + self.gp_deep_kernel_layer_width)
+ b2_indices = np.arange(last, last + self.gp_deep_kernel_layer_width)
last = last + self.gp_deep_kernel_layer_width
b3_indices = np.arange(last, last + self.iset_dim)

- self.n.set_weights(hps_nn[w1_indices].reshape(self.gp_deep_kernel_layer_width ,self.iset_dim),
- hps_nn[w2_indices].reshape(self.gp_deep_kernel_layer_width , self.gp_deep_kernel_layer_width),
- hps_nn[w3_indices].reshape(self.iset_dim,self.gp_deep_kernel_layer_width))
- self.n.set_biases(hps_nn[b1_indices].reshape(self.gp_deep_kernel_layer_width ),
- hps_nn[b2_indices].reshape(self.gp_deep_kernel_layer_width ),
+ self.n.set_weights(hps_nn[w1_indices].reshape(self.gp_deep_kernel_layer_width, self.iset_dim),
+ hps_nn[w2_indices].reshape(self.gp_deep_kernel_layer_width, self.gp_deep_kernel_layer_width),
+ hps_nn[w3_indices].reshape(self.iset_dim, self.gp_deep_kernel_layer_width))
+ self.n.set_biases(hps_nn[b1_indices].reshape(self.gp_deep_kernel_layer_width),
+ hps_nn[b2_indices].reshape(self.gp_deep_kernel_layer_width),
hps_nn[b3_indices].reshape(self.iset_dim))
x1_nn = self.n.forward(x1)
x2_nn = self.n.forward(x2)
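
For reference, the index bookkeeping in _default_multi_task_kernel above partitions the flat hyperparameter vector into the three weight matrices and three bias vectors of the deep-kernel network. A hedged sketch with assumed sizes, checking that the slices consume every network parameter exactly once:

import numpy as np

iset_dim, width = 4, 5                                  # assumed example sizes
n_hps = int(2. * iset_dim * width + width ** 2 + 2. * width + iset_dim + 2.)
hps = np.random.uniform(-1., 1., n_hps)
hps_nn = hps[2:]                                        # hps[0], hps[1] are signal variance and length scale

last = 0
w1 = hps_nn[last:last + width * iset_dim].reshape(width, iset_dim); last += width * iset_dim
w2 = hps_nn[last:last + width ** 2].reshape(width, width);          last += width ** 2
w3 = hps_nn[last:last + width * iset_dim].reshape(iset_dim, width); last += width * iset_dim
b1 = hps_nn[last:last + width];                                     last += width
b2 = hps_nn[last:last + width];                                     last += width
b3 = hps_nn[last:last + iset_dim];                                  last += iset_dim
assert last == len(hps_nn)                              # every network parameter used exactly once
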
