
Commit ae2c89c: Update formatting

apaleyes committed Apr 21, 2023
1 parent 8bd7d8d · commit ae2c89c

Showing 35 changed files with 1 addition and 47 deletions.
3 changes: 0 additions & 3 deletions emukit/bayesian_optimization/acquisitions/entropy_search.py
@@ -28,7 +28,6 @@ def __init__(
         proposal_function: Callable = None,
         burn_in_steps: int = 50,
     ) -> None:
-
         """
         Entropy Search acquisition function approximates the distribution of the global
         minimum and tries to decrease its entropy. See this paper for more details:
@@ -62,7 +61,6 @@ def __init__(
 
         # (unnormalized) density from which to sample the representer points to approximate pmin
         if proposal_function is None:
-
             ei = ExpectedImprovement(model)
 
             def prop_func(x):
@@ -286,7 +284,6 @@ def _sample_representer_points(self):
         return repr_points, repr_points_log
 
     def _get_proposal_function(self, model, space):
-
         # Define proposal function for multi-fidelity
         ei = ExpectedImprovement(model)
 
@@ -440,7 +440,6 @@ def _gradient_of_the_acquisition_second_term(
         else:
             B1, B2, B3 = np.zeros((q, d)), np.zeros((q, d)), np.zeros((q, d))
             for i in range(q):
-
                 # Assign helper variables needed by the gradients (See equation (6) for details)
                 ineq = [n for n in range(q) if n is not i]
                 Sigk_ik = Sigk[i, k]
@@ -26,7 +26,6 @@ def __init__(
         num_samples: int = 10,
         grid_size: int = 5000,
     ) -> None:
-
         """
         MES acquisition function approximates the distribution of the value at the global
         minimum and tries to decrease its entropy. See this paper for more details:
@@ -159,7 +158,6 @@ def __init__(
         num_samples: int = 10,
         grid_size: int = 5000,
     ) -> None:
-
         """
         MUMBO acquisition function approximates the distribution of the value at the global
         minimum and tries to decrease its entropy.
@@ -12,7 +12,6 @@
 
 class NegativeLowerConfidenceBound(Acquisition):
     def __init__(self, model: Union[IModel, IDifferentiable], beta: float = 1.0) -> None:
-
         """
         This acquisition computes the negative lower confidence bound for a given input point. This is the same
         as optimizing the upper confidence bound if we would maximize instead of minimizing the objective function.
1 change: 0 additions & 1 deletion emukit/bayesian_optimization/epmgp.py
@@ -32,7 +32,6 @@ def joint_min(mu: np.ndarray, var: np.ndarray, with_derivatives: bool = False) -
     dlogPdSigma = np.zeros((D, int(0.5 * D * (D + 1))))
     dlogPdMudMu = np.zeros((D, D, D))
     for i in range(mu.shape[0]):
-
         # logP[k] ) self._min_factor(mu, var, 0)
         a = min_factor(mu, var, i)
 
@@ -29,7 +29,6 @@ def __init__(
         batch_size: int = 1,
         acquisition_optimizer: AcquisitionOptimizerBase = None,
     ):
-
         """
         Emukit class that implement a loop for building modular Bayesian optimization
@@ -74,7 +73,6 @@ def get_results(self):
 
 class BayesianOptimizationResults:
     def __init__(self, loop_state: LoopState):
-
         """
         Emukit class that takes as input the loop state and computes some results.
@@ -23,7 +23,6 @@ def __init__(
         update_interval: int = 1,
         acquisition_optimizer: AcquisitionOptimizerBase = None,
     ):
-
         """
         Emukit class that implements a loop for building modular cost sensitive Bayesian optimization.
@@ -27,7 +27,6 @@ def __init__(
         update_interval: int = 1,
         batch_size: int = 1,
     ):
-
         """
         Emukit class that implements a loop for building Bayesian optimization with an unknown constraint.
         For more information see:
1 change: 0 additions & 1 deletion emukit/benchmarking/loop_benchmarking/random_search.py
@@ -14,7 +14,6 @@ class RandomSearch(OuterLoop):
     def __init__(
         self, space: ParameterSpace, x_init: np.ndarray = None, y_init: np.ndarray = None, cost_init: np.ndarray = None
     ):
-
         """
         Simple loop to perform random search where in each iteration points are sampled uniformly at random
         over the input space.
2 changes: 1 addition & 1 deletion emukit/core/loop/user_function_result.py
@@ -23,7 +23,7 @@ def __init__(self, X: np.ndarray, Y: np.ndarray, **kwargs) -> None:
             raise ValueError("y is expected to be 1-dimensional, actual dimensionality is {}".format(Y.ndim))
 
         self.extra_outputs = dict()
-        for (key, val) in kwargs.items():
+        for key, val in kwargs.items():
            if val.ndim != 1:
                raise ValueError("Key word arguments must be 1-dimensional but {} is {}d".format(key, val.ndim))
            self.extra_outputs[key] = val
@@ -63,7 +63,6 @@ def _sample_representer_points(self):
         return repr_points, repr_points_log
 
     def _get_proposal_function(self, model, space):
-
         # Define proposal function for multi-fidelity
         ei = ExpectedImprovement(model)
 
1 change: 0 additions & 1 deletion emukit/examples/fabolas/fabolas_model.py
@@ -9,7 +9,6 @@
 
 class FabolasKernel(GPy.kern.Kern):
     def __init__(self, input_dim, basis_func, a=1.0, b=1.0, active_dims=None):
-
         super(FabolasKernel, self).__init__(input_dim, active_dims, "fabolas_kernel")
 
         assert input_dim == 1
@@ -37,7 +37,6 @@ def __init__(
         model_update_interval: int = int(1),
         batch_size: int = 1,
     ) -> None:
-
         """
         Generic class to run Bayesian optimization with GPyRegression model.
@@ -37,7 +37,6 @@ def __init__(
         batch_size: int = 1,
         model_update_interval: int = int(1),
     ) -> None:
-
         """
         Class to run Bayesian optimization with unknown contraints with GPyRegression model.
@@ -233,7 +233,6 @@ def _build_likelihood(self):
         L = 0.0
         KL = 0.0
         for fidelity in range(self.num_layers):
-
             if (self._train_upto_fidelity != -1) and (fidelity > self._train_upto_fidelity):
                 continue
 
@@ -129,7 +129,6 @@ def __init__(
         eta: float = 0.5,
         get_logger: Callable = None,
     ):
-
         super(EPComparisonGP, self).__init__(name=name)
 
         self.N, self.D = X.shape[0], X.shape[1]
1 change: 0 additions & 1 deletion emukit/examples/profet/meta_benchmarks/meta_forrester.py
@@ -65,7 +65,6 @@ def meta_forrester(fname_objective: str) -> Tuple[UserFunctionWrapper, Parameter
     objective.load_state_dict(data["state_dict"])
 
     def objective_function(config):
-
         Ht = np.repeat(task_feature_objective[None, :], config.shape[0], axis=0)
         x = np.concatenate((config, Ht), axis=1)
         x_norm = torch.from_numpy((x - x_mean_objective) / x_std_objective).float()
1 change: 0 additions & 1 deletion emukit/examples/profet/meta_benchmarks/meta_surrogates.py
@@ -19,7 +19,6 @@ def objective_function(
         log_objective=False,
         with_noise=True,
     ):
-
         Ht = np.repeat(task_feature_objective[None, :], config.shape[0], axis=0)
         x = np.concatenate((config, Ht), axis=1)
         x_norm = torch.from_numpy((x - x_mean_objective) / x_std_objective).float()
1 change: 0 additions & 1 deletion emukit/examples/profet/train_meta_model.py
@@ -33,7 +33,6 @@
 
 
 def download_data(path, source="http://www.ml4aad.org/wp-content/uploads/2019/05/profet_data.tar.gz"):
-
     l = urlretrieve(source)[0]
 
     tar = tarfile.open(l)
4 changes: 0 additions & 4 deletions emukit/model_wrappers/gpy_quadrature_wrappers.py
@@ -182,7 +182,6 @@ def __init__(
 
         # product kernel from parameters
         if gpy_matern is None:
-
             input_dim = len(lengthscales)
             if input_dim < 1:
                 raise ValueError("'lengthscales' must contain at least 1 value.")
@@ -295,7 +294,6 @@ def __init__(
 
         # product kernel from parameters
         if gpy_matern is None:
-
             input_dim = len(lengthscales)
             if input_dim < 1:
                 raise ValueError("'lengthscales' must contain at least 1 value.")
@@ -406,7 +404,6 @@ def __init__(
 
         # product kernel from parameters
         if gpy_matern is None:
-
             input_dim = len(lengthscales)
             if input_dim < 1:
                 raise ValueError("'lengthscales' must contain at least 1 value.")
@@ -533,7 +530,6 @@ def __init__(
         variance: Optional[float] = None,
         input_dim: Optional[int] = None,
     ):
-
         if gpy_brownian is not None:
             if input_dim is not None or variance is not None:
                 warnings.warn("gpy_brownian and variance is given. The variance will be ignore.")
1 change: 0 additions & 1 deletion emukit/quadrature/kernels/quadrature_brownian.py
@@ -44,7 +44,6 @@ def __init__(
         measure: IntegrationMeasure,
         variable_names: str,
     ) -> None:
-
         if measure.input_dim != 1:
             raise ValueError(
                 "Integration measure for Brownian motion kernel must be 1-dimensional. Current dimesnion is ({}).".format(
1 change: 0 additions & 1 deletion emukit/quadrature/kernels/quadrature_kernels.py
@@ -146,7 +146,6 @@ def __init__(
         measure: IntegrationMeasure,
         variable_names: str,
     ) -> None:
-
         super().__init__(kern=kern, measure=measure, variable_names=variable_names)
 
     def _qK_unscaled(self, x2: np.ndarray, skip: List[int] = None) -> np.ndarray:
1 change: 0 additions & 1 deletion emukit/quadrature/loop/wsabil_loop.py
@@ -30,7 +30,6 @@ class WSABILLoop(OuterLoop):
     def __init__(
         self, model: WSABIL, model_updater: ModelUpdater = None, acquisition_optimizer: AcquisitionOptimizerBase = None
     ):
-
         # WSABI-L is used with uncertainty sampling.
         acquisition = UncertaintySampling(model, measure_power=1)
 
3 changes: 0 additions & 3 deletions emukit/test_functions/multi_fidelity/hartmann.py
@@ -62,7 +62,6 @@ def multi_fidelity_hartmann_3d() -> Tuple[MultiSourceFunctionWrapper, ParameterS
     delta = np.array([0.01, -0.01, -0.1, 0.1])
 
     def high(x):
-
         res = 0
         for i in range(4):
             temp = 0
@@ -73,7 +72,6 @@ def high(x):
         return res[:, None]
 
     def medium(x):
-
         alpha_m = alpha + delta
 
         res = 0
@@ -86,7 +84,6 @@ def medium(x):
         return res[:, None]
 
     def low(x):
-
         alpha_l = alpha + 2 * delta
 
         res = 0
@@ -6,7 +6,6 @@
 
 
 def test_loop_state():
-
     fcn, cs = forrester_function()
     n_init = 5
 
1 change: 0 additions & 1 deletion tests/emukit/bayesian_optimization/test_epmgp.py
@@ -4,7 +4,6 @@
 
 
 def test_joint_min():
-
     # Uniform distribution
     n_points = 5
     m = np.ones([n_points])
2 changes: 0 additions & 2 deletions tests/emukit/bayesian_optimization/test_local_penalization.py
@@ -8,7 +8,6 @@
 
 
 def test_penalization_function_shape():
-
     model = MockModel()
     lp = LocalPenalization(model)
     lp.update_batches(np.zeros((5, 1)), 1, -0.1)
@@ -18,7 +17,6 @@ def test_penalization_function_shape():
 
 
 def test_penalization_function_gradients_shape():
-
     model = MockModel()
     lp = LocalPenalization(model)
     lp.update_batches(np.zeros((5, 2)), 1, -0.1)
2 changes: 0 additions & 2 deletions tests/emukit/core/test_constraints.py
@@ -76,15 +76,13 @@ def test_inequality_constraint_no_bounds():
 
 
 def test_inequality_constraint_all_inf_bound():
-
     lower = np.array([-np.inf, 0])
     upper = np.array([np.inf, 1])
     with pytest.raises(ValueError):
         InequalityConstraint(lower, upper)
 
 
 def test_inequality_constraint_unbounded():
-
     lower = np.array([-np.inf, 0])
     upper = None
     with pytest.raises(ValueError):
@@ -63,7 +63,6 @@ def optimize(self):
 
 
 def test_batch_point_calculator(mock_model):
-
     acquisition = mock.create_autospec(Acquisition)
     acquisition_optimizer = mock.create_autospec(GradientAcquisitionOptimizer)
     acquisition_optimizer.optimize.return_value = (np.zeros((1, 1)), 0)
@@ -215,7 +215,6 @@ def wrapper_matern52_2(dim1, gpy_matern52):
 
 @pytest.mark.parametrize("wrapper", gpy_test_list)
 def test_create_emukit_model_from_gpy_model_types(wrapper):
-
     gpy_model = GPy.models.GPRegression(kernel=wrapper["gpy_kernel"], X=wrapper["data"][0], Y=wrapper["data"][1])
     emukit_gp = create_emukit_model_from_gpy_model(gpy_model=gpy_model, measure=wrapper["measure"])
 
1 change: 0 additions & 1 deletion tests/emukit/quadrature/ground_truth_integrals_methods.py
@@ -96,7 +96,6 @@ def mc_integral_var_from_measure(num_samples: int, model: WarpedBayesianQuadratu
     vZ_SAMPLES = np.zeros(num_runs)
     _, vZ = model.integrate()
     for i in range(num_runs):
-
         vZ_samples = mc_integral_var_from_measure(num_samples, model)
         vZ_SAMPLES[i] = vZ_samples
         print(".", end="", flush=True)
1 change: 0 additions & 1 deletion tests/emukit/quadrature/test_quadrature_kernels.py
@@ -586,7 +586,6 @@ def test_qkernel_gradient_values(kernel_embedding):
 
 
 def test_brownian_qkernel_raises():
-
     # measure has wrong dimensionality
     wrong_bounds = [(1, 2), (1, 2)]
     measure = LebesgueMeasure.from_bounds(bounds=wrong_bounds)
1 change: 0 additions & 1 deletion tests/emukit/quadrature/test_quadrature_models.py
@@ -210,7 +210,6 @@ def test_warped_model_transforms(model):
 
 @pytest.mark.parametrize("model", all_models_test_list)
 def test_warped_model_gradient_values(model, data):
-
     # gradient of mean
     func = lambda z: model.predict(z)[0][:, 0]
     dfunc = lambda z: model.get_prediction_gradients(z)[0].T
2 changes: 0 additions & 2 deletions tests/emukit/sensitivity/test_emukit_sensitivity.py
@@ -16,7 +16,6 @@ def space():
 
 
 def test_model_based_montecarlo_sensitivity(space):
-
     model = mock.create_autospec(IModel)
     model.predict.return_value = (0.1 * np.ones((3, 2)), np.zeros((3, 2)))
 
@@ -43,7 +42,6 @@ def test_model_based_montecarlo_sensitivity(space):
 
 
 def test_model_free_montecarlo_sensitivity(space):
-
     mock_function = lambda x: 0.1 * np.ones((3, 1))
 
     sensitivity = ModelFreeMonteCarloSensitivity(mock_function, space)
1 change: 0 additions & 1 deletion tests/emukit/test_functions/test_ishigami_function.py
@@ -4,7 +4,6 @@
 
 
 def test_ishigami_function():
-
     ishigami = Ishigami(a=5, b=0.1)
 
     assert ishigami.fidelity1(np.array([[0, 1, 0]])).shape == (1,)
