Skip to content

Commit

Permalink
Improve performance of the persistent-generator allocation function
Browse files Browse the repository at this point in the history
  • Loading branch information
jmlarson1 committed Nov 9, 2017
1 parent 591a961 commit 65adace
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 35 deletions.
45 changes: 18 additions & 27 deletions code/examples/alloc_funcs/start_persistent_local_opt_gens.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,16 @@ def start_persistent_local_opt_gens(active_w, idle_w, persis_w, H, H_ind, sim_sp
for i in idle_w:
gen_info[i] = {'rand_stream': np.random.RandomState(i)}

# If i is idle, but in persistent mode, and its calculated values have
# returned, give them back to i. Otherwise, give nothing to i
for i in persis_w[PERSIS_GEN_TAG]:
if i in persis_w['last_index']:
if np.all(H['returned'][persis_w['last_index'][i]]):
b = persis_w['last_index'].pop(i)
comm.send(obj=H[['x','grad','f']][b], dest=i, tag=PERSIS_ADV)
gen_info[i]['run_order'].append(b)


for w in persis_w[PERSIS_SIM_TAG].union(persis_w[PERSIS_GEN_TAG]):
if comm.Iprobe(source=w, tag=MPI.ANY_TAG, status=status):
D_recv = comm.recv(source=w, tag=MPI.ANY_TAG, status=status)
Expand All @@ -61,42 +71,23 @@ def start_persistent_local_opt_gens(active_w, idle_w, persis_w, H, H_ind, sim_sp
if len(D_recv['calc_out']):
H['local_min'][np.where(np.equal(D_recv['calc_out']['x'],H['x']).all(1))[0]] = True

# If i is idle, but in persistent mode, and its calculated values have
# returned, give them back to i. Otherwise, give nothing to i
for i in persis_w[PERSIS_GEN_TAG]:
if i in persis_w['last_index']:
if np.all(H['returned'][persis_w['last_index'][i]]):
b = persis_w['last_index'].pop(i)
persis_w['advance_info'][i] = H[['x','grad','f']][b]
gen_info[i]['run_order'].append(b)

for i in persis_w['advance_info']:
comm.send(obj=persis_w['advance_info'][i], dest=i, tag=PERSIS_ADV)
persis_w['advance_info'] = {}

for i in persis_w['stop']:
comm.send(obj=None, dest=i, tag=STOP_TAG)
persis_w['stop'] = set([])


Work = {}
gen_count = 0
already_in_Work = np.zeros(H_ind,dtype=bool) # To mark points as they are included in Work, but not yet marked as 'given' in H.

# Find candidate points for starting local opt runs if a sample point has been evaluated
if len(idle_w) > 0 and np.any(~H['known_to_aposmm'][:H_ind]) and np.any(np.logical_and(~H['local_pt'][:H_ind],H['returned'][:H_ind])):
aposmm_logic.update_history_dist(H[:H_ind], gen_specs, c_flag=False)
n_s = np.sum(np.logical_and(~H['local_pt'][:H_ind], H['returned'][:H_ind])) # Number of returned sampled points
starting_inds = aposmm_logic.decide_where_to_start_localopt(H[:H_ind], n_s, gen_specs['rk_const'], lhs_divisions=0, mu=0, nu=0)
else:
starting_inds = []

for i in idle_w:
# Find candidate points for starting local opt runs if a sample point has been evaluated
if np.any(np.logical_and(~H['local_pt'][:H_ind],H['returned'][:H_ind])):
n, n_s, c_flag, _, rk_const, lhs_divisions, mu, nu = aposmm_logic.initialize_APOSMM(H[:H_ind], gen_specs)
aposmm_logic.update_history_dist(H[:H_ind], gen_specs, c_flag=False)
starting_inds = aposmm_logic.decide_where_to_start_localopt(H[:H_ind], n_s, rk_const, lhs_divisions, mu, nu)
else:
starting_inds = []

# Start up a persistent generator that is a local opt run but don't do it if all workers will be persistent generators.
if len(starting_inds) and gen_count + len(persis_w[PERSIS_GEN_TAG]) + 1 < len(idle_w) + len(active_w[EVAL_GEN_TAG]) + len(active_w[EVAL_SIM_TAG]):
# Start at the best possible starting point
ind = starting_inds[np.argmin(H['f'][starting_inds])]
ind = starting_inds.pop(np.argmin(H['f'][starting_inds]))

Work[i] = {'gen_info':gen_info[i],
'H_fields': ['x','grad','f'],
Expand Down
7 changes: 4 additions & 3 deletions code/examples/gen_funcs/aposmm_logic.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ def update_history_dist(H, gen_specs, c_flag):

p = np.logical_and.reduce((H['returned'],H['obj_component']==0,~np.isnan(H['f'])))
else:
p = np.logical_and.reduce((H['returned'],~np.isnan(H['f'])))
p = np.logical_and(H['returned'],~np.isnan(H['f']))


for new_ind in new_inds:
Expand All @@ -219,6 +219,7 @@ def update_history_dist(H, gen_specs, c_flag):

dist_to_all = sp.spatial.distance.cdist(np.atleast_2d(H['x_on_cube'][new_ind]), H['x_on_cube'][p], 'euclidean').flatten()
new_better_than = H['f'][new_ind] < H['f'][p]
new_worse_than = H['f'][new_ind] > H['f'][p]

# Update any other points if new_ind is closer and better
if H['local_pt'][new_ind]:
Expand All @@ -236,8 +237,8 @@ def update_history_dist(H, gen_specs, c_flag):
# Since we allow equality when deciding better_than_new_l and
# better_than_new_s, we have to prevent new_ind from being its own
# better point.
better_than_new_l = np.logical_and.reduce((~new_better_than, H['local_pt'][p], H['sim_id'][p] != new_ind))
better_than_new_s = np.logical_and.reduce((~new_better_than, ~H['local_pt'][p], H['sim_id'][p] != new_ind))
better_than_new_l = np.logical_and(new_worse_than, H['local_pt'][p])
better_than_new_s = np.logical_and(new_worse_than, ~H['local_pt'][p])

# Who is closest to ind and better
if np.any(better_than_new_l):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from mpi4py import MPI # for libE communicator
import sys, os # for adding to path
import numpy as np
from math import gamma, sqrt, pi

# Import libEnsemble main
sys.path.append('../../src')
Expand All @@ -33,10 +34,11 @@

script_name = os.path.splitext(os.path.basename(__file__))[0]

n = 2
#State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {'sim_f': [six_hump_camel], # This is the function whose output is being minimized
'in': ['x'], # These keys will be given to the above function
'out': [('f',float), ('grad',float,2) # This is the output from the function being minimized
'out': [('f',float), ('grad',float,n) # This is the output from the function being minimized
],
}

Expand All @@ -45,8 +47,8 @@
'in': [],
'localopt_method':'LD_MMA',
'xtol_rel':1e-4,
'out': [('x_on_cube',float,2),
('x',float,2),
'out': [('x_on_cube',float,n),
('x',float,n),
('dist_to_unit_bounds',float),
('dist_to_better_l',float),
('dist_to_better_s',float),
Expand All @@ -61,10 +63,11 @@
'gen_batch_size': 2,
'batch_mode': True,
'num_inst':1,
'rk_const': ((gamma(1+(n/2.0))*5.0)**(1.0/n))/sqrt(pi),
}

gen_out = [('x',float,2),
('x_on_cube',float,2),
gen_out = [('x',float,n),
('x_on_cube',float,n),
('sim_id',int),
('priority',float),
('local_pt',bool),
Expand Down

0 comments on commit 65adace

Please sign in to comment.