-
Notifications
You must be signed in to change notification settings - Fork 24
/
test_6-hump_camel_persistent_aposmm.py
83 lines (67 loc) · 2.98 KB
/
test_6-hump_camel_persistent_aposmm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
# """
# Runs libEnsemble on the 6-hump camel problem. Documented here:
# https://www.sfu.ca/~ssurjano/camel6.html
#
# Execute via one of the following commands (e.g. 3 workers):
# mpiexec -np 4 python3 test_6-hump_camel_persistent_aposmm.py
# python3 test_6-hump_camel_persistent_aposmm.py --nworkers 3 --comms local
# python3 test_6-hump_camel_persistent_aposmm.py --nworkers 3 --comms tcp
#
# The number of concurrent evaluations of the objective function will be 4-1=3.
# """
# Do not change these lines - they are parsed by run-tests.sh
# TESTSUITE_COMMS: mpi local tcp
# TESTSUITE_NPROCS: 3 4
import sys
import numpy as np
# Import libEnsemble items for this test
from libensemble.libE import libE
from math import gamma, pi, sqrt
from libensemble.sim_funcs.six_hump_camel import six_hump_camel as sim_f
from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f
from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
from libensemble.tests.regression_tests.common import parse_args, save_libE_output, per_worker_stream
from libensemble.tests.regression_tests.support import six_hump_camel_minima as minima
from time import time
# Parse standard libEnsemble test arguments (worker count, comms mode, etc.).
nworkers, is_master, libE_specs, _ = parse_args()
if is_master:
    start_time = time()

# APOSMM runs as a persistent generator, occupying one worker for the whole run.
if nworkers < 2:
    sys.exit("Cannot run with a persistent worker if only one worker -- aborting...")

n = 2  # problem dimension (the 6-hump camel function is 2-D)

# Simulator: evaluates the objective and its gradient at each point 'x'.
sim_specs = {'sim_f': sim_f,
             'in': ['x'],
             'out': [('f', float), ('grad', float, n)]}

# Fields the generator writes back to the history array.
gen_out = [('x', float, n), ('x_on_cube', float, n), ('sim_id', int),
           ('local_min', bool), ('local_pt', bool)]

# Generator: persistent APOSMM with the NLopt LD_MMA gradient-based local optimizer.
gen_specs = {'gen_f': gen_f,
             'in': [],
             'out': gen_out,
             'batch_mode': True,
             'initial_sample_size': 100,
             # Seed the sample near the known minima so each basin is explored.
             'sample_points': np.round(minima, 1),
             'localopt_method': 'LD_MMA',
             # rk_const scales the radius used to decide when to start local runs.
             'rk_const': 0.5*((gamma(1+(n/2))*5)**(1/n))/sqrt(pi),
             'xtol_rel': 1e-6,
             'num_active_gens': 1,
             'local_min': True,
             'dist_to_bound_multiple': 0.5,
             'max_active_runs': 6,
             'lb': np.array([-3, -2]),
             'ub': np.array([3, 2])}

alloc_specs = {'alloc_f': alloc_f, 'out': [('given_back', bool)]}

# One RNG stream per worker (plus the manager).
persis_info = per_worker_stream({}, nworkers + 1)

exit_criteria = {'sim_max': 1000}

# Perform the run
H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info,
                            alloc_specs, libE_specs)

if is_master:
    # Select all points APOSMM flagged as local minima (hoisted out of the loop).
    found_minima = H[H['local_min']]['x']
    print('[Manager]:', found_minima)
    print('[Manager]: Time taken =', time() - start_time, flush=True)

    tol = 1e-5
    for m in minima:
        # The minima are known on this test problem.
        # We use their values to test APOSMM has identified all minima.
        # Compute the squared distance from each found minimum to 'm' once.
        min_sq_dist = np.min(np.sum((found_minima - m)**2, 1))
        print(min_sq_dist, flush=True)
        assert min_sq_dist < tol, "APOSMM failed to locate minimum near " + str(m)

    save_libE_output(H, persis_info, __file__, nworkers)