new mcmc
MarcusMNoack committed Dec 7, 2022
1 parent 13ca465 commit a03af0d
Showing 3 changed files with 31 additions and 24 deletions.
7 changes: 5 additions & 2 deletions fvgp/gp.py
@@ -64,7 +64,7 @@ class GP():
and a `gpcam.gp_optimizer.GPOptimizer` instance. The return value is a 2-D array of shape (len(hyperparameters) x V). If None is provided,
a finite difference scheme is used.
normalize_y : bool, optional
If True, the data point values will be normalized to max(initial values) = 1. The dfault is False.
If True, the data point values will be normalized to max(initial values) = 1. The default is False.
use_inv : bool, optional
If True, the algorithm calculates and stores the inverse of the covariance matrix after each training or update of the dataset,
which makes computing the posterior covariance faster.
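
For orientation, the normalization described above simply rescales the observed values so that the largest initial value equals 1; a minimal numpy sketch of that idea (illustrative only, not fvgp's internal code):

    import numpy as np

    y = np.array([2.0, 5.0, 3.5])    # initial data point values
    y_normalized = y / np.max(y)     # max(initial values) == 1, as the docstring describes
    # use_inv=True additionally stores the inverse covariance matrix after each
    # training/update, trading extra computation and memory for faster
    # posterior-covariance evaluations.
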
@@ -433,7 +433,7 @@ def _optimize_log_likelihood_async(self,
#print("deflation radius: ",deflation_radius)
#print("local optimizer: ",local_optimizer)
#print("global optimizer: ",global_optimizer)
logger.debug("fvGP hyperparameter tuning in progress. Old hyperparameters: ", starting_hps)
logger.debug("fvGP hyperparameter tuning in progress. Old hyperparameters: {}", starting_hps)

opt_obj = HGDL(self.log_likelihood,
self.log_likelihood_gradient,
@@ -443,7 +443,9 @@ def _optimize_log_likelihood_async(self,
global_optimizer = global_optimizer,
radius = deflation_radius,
num_epochs = max_iter, constraints = constraints)
logger.debug("HGDL successfully initialized. Calling optimize()")
opt_obj.optimize(dask_client = dask_client, x0 = np.array(starting_hps).reshape(1,-1))
logger.debug("optimize() called")
return opt_obj
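
The logging change in this hunk matters because the logger here formats messages with "{}" placeholders (the same style appears in fvgp/mcmc.py below); the old call passed starting_hps as a bare extra argument with no placeholder, so it never showed up in the message. A minimal sketch of the two call styles, assuming a loguru-style logger:

    from loguru import logger

    starting_hps = [1.0, 0.5]
    logger.debug("Old hyperparameters: ", starting_hps)    # no placeholder: the value is silently dropped
    logger.debug("Old hyperparameters: {}", starting_hps)  # deferred brace formatting: the value is rendered
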
##################################################################################
def _optimize_log_likelihood(self,starting_hps,
@@ -578,6 +580,7 @@ def log_likelihood_gradient(self, hyperparameters):
------
Gradient of the negative marginal log-likelihood : np.ndarray
"""
logger.debug("log-likelihood gradient is being evaluated...")
mean = self.mean_function(self.x_data,hyperparameters,self)
b,K = self._compute_covariance_value_product(hyperparameters,self.y_data, self.variances, mean)
y = self.y_data - mean
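
For reference, log_likelihood_gradient returns the gradient of the negative marginal log-likelihood with respect to the hyperparameters. With y the centered data (data minus prior mean) and K the prior covariance, and assuming the mean itself does not depend on the hyperparameter theta_j, the standard expression is:

    d(-log L)/d(theta_j) = -0.5 * y^T K^{-1} (dK/dtheta_j) K^{-1} y + 0.5 * trace(K^{-1} dK/dtheta_j)
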
7 changes: 3 additions & 4 deletions fvgp/gp2Scale.py
@@ -394,12 +394,12 @@ def optimize_log_likelihood(self,starting_hps,
print('bounds are',hp_bounds)
res = mcmc(self.log_likelihood,hp_bounds,max_iter = max_iter, x0 = starting_hps)
hyperparameters = np.array(res["x"])
print("MCMC has found solution: ", hyperparameters, "with neg. marginal_likelihood ",res["f(x)"])
self.mcmc_info = res
print("MCMC has found solution: ", hyperparameters, "with log marginal_likelihood ",res["f(x)"])
self.hyperparameters = hyperparameters

return hyperparameters
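
Because the full sampler result is now kept as self.mcmc_info, the chain can be inspected after training; a hypothetical snippet (the instance name is illustrative, the dictionary keys come from the new return value in fvgp/mcmc.py below):

    info = my_gp2Scale.mcmc_info                                 # hypothetical instance of this class
    print(info["f(x)"], info["x"])                               # best log marginal likelihood and its hyperparameters
    print(info["distribution mean"], info["distribution var"])   # mean and variance of the sampled chain
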


def log_likelihood(self,hyperparameters = None):
"""
computes the marginal log-likelihood
@@ -419,10 +419,9 @@ def log_likelihood(self,hyperparameters = None):
y = self.y_data - mean
logdet = self.SparsePriorCovariance.logdet().result()
n = len(y)
res = (0.5 * (y.T @ x)) + (0.5 * logdet) + (0.5 * n * np.log(2.0*np.pi))
res = -(0.5 * (y.T @ x)) - (0.5 * logdet) - (0.5 * n * np.log(2.0*np.pi))
return res
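
The sign fix makes this function return the log marginal likelihood itself rather than its negative: log p(y|theta) = -0.5 (y-m)^T K^{-1} (y-m) - 0.5 log|K| - 0.5 n log(2 pi), where x above appears to play the role of K^{-1}(y - m). As a point of reference, a minimal dense-matrix sketch of the same expression (a hypothetical standalone helper; gp2Scale itself uses a distributed sparse covariance and its logdet):

    import numpy as np

    def dense_log_marginal_likelihood(K, y, mean):
        # Reference (dense) version of the formula above.
        r = y - mean                        # centered data
        x = np.linalg.solve(K, r)           # K^{-1} (y - mean)
        _, logdet = np.linalg.slogdet(K)    # log determinant of the covariance
        n = len(y)
        return -0.5 * (r @ x) - 0.5 * logdet - 0.5 * n * np.log(2.0 * np.pi)
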


##################################################################################
##################################################################################
##################################################################################
41 changes: 23 additions & 18 deletions fvgp/mcmc.py
@@ -17,33 +17,38 @@ def project_onto_bounds(x,bounds):
return x


def mcmc(func,bounds, x0 = None, distr = None, max_iter = 100, ):
######IMPORTANT: This is for minimization!!!!
def mcmc(func,bounds, x0 = None, distr = None, max_iter = 1000, ):
x = []
f = []
if x0 is None: x.append(np.random.uniform(low = bounds[:,0],high = bounds[:,1],size = len(bounds)))
else: x.append(x0)
bc = False
if distr is None: l = np.diag((np.abs(np.subtract(bounds[:,0],bounds[:,1])))/20.0)**2
counter = 0
new_func = 1e16
current_func = func(x0)
f.append(current_func)
run = True
while run:
x_new = np.random.multivariate_normal(x[-1], l)
x_new = project_onto_bounds(x_new,bounds)
x_proposal = np.random.multivariate_normal(x[-1], l)
#x_proposal = project_onto_bounds(x_proposal,bounds)
##check for constraints?
old_func = new_func
new_func = func(x_new) ####call function
max_f = max(abs(new_func),abs(old_func))
min_f = min(abs(new_func),abs(old_func))
acceptance_prob = 1./(1. + np.exp(- 50.0 * (old_func-new_func)/min_f))
#print("old f: ",old_func, " new f: ",new_func, "acceptance probability: ",acceptance_prob)
u = np.random.rand()
if u <= acceptance_prob: x.append(x_new);f.append(new_func)
else: x.append(x[counter]);f.append(old_func)
proposal_func = func(x_proposal) ####call function
acceptance_prob = proposal_func - current_func ##these are already logs
u = np.log(np.random.rand())
print("current f: ",current_func, " proposal f: ",proposal_func, "acceptance probability: ",acceptance_prob," u ",u, flush = True)
if u < acceptance_prob:
x.append(x_proposal)
f.append(proposal_func)
current_func = proposal_func
print("accepted", flush = True)
else:
x.append(x[-1])
f.append(current_func)
print(f[-1], flush = True)
logger.debug("mcmc f(x):{}",f[-1])
counter += 1
if counter >= max_iter: run = False
arg_min = np.argmin(f)
logger.debug(f"mcmc res: {f[arg_min]} at {x[arg_min]}")
return {"f(x)": f[arg_min],"x":x[arg_min],"distribution": f}
#if len(x)>201 and np.linalg.norm(np.mean(x[-100:],axis = 0)-np.mean(x[-200:-100],axis = 0)) < 0.01 * np.linalg.norm(np.mean(x[-100:],axis = 0)): run = False
arg_max = np.argmax(f)
x = np.asarray(x)
logger.debug(f"mcmc res: {f[arg_max]} at {x[arg_max]}")
return {"f(x)": f[arg_max],"x":x[arg_max],"distribution": x, "distribution mean": np.mean(x,axis = 0),"distribution var": np.var(x,axis = 0)}
