Skip to content

Commit

Permalink
Merge pull request #23 from lacava/lexicase_survival
Browse files Browse the repository at this point in the history
adds lex_size flag to do size-mediated parent selection in epsilon lexicase survival.
  • Loading branch information
lacava committed Aug 2, 2017
2 parents bee67c4 + 7df082a commit ee6da54
Show file tree
Hide file tree
Showing 6 changed files with 67 additions and 29 deletions.
2 changes: 1 addition & 1 deletion few/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@
"""

__version__ = '0.0.44'
__version__ = '0.0.45'
8 changes: 6 additions & 2 deletions few/few.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def __init__(self, population_size=50, generations=100,
scoring_function=None, disable_update_check=False,
elitism=True, boolean = False,classification=False,clean=False,
track_diversity=False,mdr=False,otype='f',c=True,
weight_parents=True,operators=None):
weight_parents=True,operators=None, lex_size=False):
# sets up GP.

# Save params to be recalled later by get_params()
Expand Down Expand Up @@ -95,6 +95,7 @@ def __init__(self, population_size=50, generations=100,
self.op_weight = op_weight
self.max_stall = max_stall
self.weight_parents = weight_parents
self.lex_size = lex_size
self.seed_with_ml = seed_with_ml
self.erc = erc
self.random_state = check_random_state(random_state)
Expand Down Expand Up @@ -726,6 +727,9 @@ def main():
dest='WEIGHT_PARENTS',default=True,
help='Feature importance weights parent selection.')

parser.add_argument('--lex_size', action='store_true',dest='LEX_SIZE',default=False,
help='Size mediated parent selection for lexicase survival.')

parser.add_argument('-sel', action='store', dest='SEL',
default='epsilon_lexicase',
choices = ['tournament','lexicase','epsilon_lexicase',
Expand Down Expand Up @@ -857,7 +861,7 @@ def main():
fit_choice = args.FIT_CHOICE,boolean=args.BOOLEAN,
classification=args.CLASSIFICATION,clean = args.CLEAN,
track_diversity=args.TRACK_DIVERSITY,mdr=args.MDR,
otype=args.OTYPE,c=args.c,
otype=args.OTYPE,c=args.c, lex_size = args.LEX_SIZE,
weight_parents = args.WEIGHT_PARENTS,operators=args.OPS)

learner.fit(training_features, training_labels)
Expand Down
29 changes: 21 additions & 8 deletions few/lib/epsilon_lexicase.h
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ static random_device rd;
static mt19937 gen(rd());
//extern "C"
void epsilon_lexicase(const ExtMat & F, int n, int d,
int num_selections, ExtVec& locs)
int num_selections, ExtVec& locs, bool lex_size, ExtVec& sizes)
{
// training cases
// ExtMat T (F, n, d);
Expand All @@ -96,15 +96,28 @@ void epsilon_lexicase(const ExtMat & F, int n, int d,
for (int i = 0; i<epsilon.size(); ++i)
epsilon(i) = mad(F.col(i));



// individual locations
vector<int> ind_locs(n);
iota(ind_locs.begin(),ind_locs.end(),0);
vector<int> ind_locs;
if(lex_size){
//randomly select a size from sizes
int max_index = sizes.size();
int random_index = rand() % max_index;

// individual locations
int j=0;
for(int i=0;i<max_index;i++){
if(sizes[i]<=sizes[random_index])
ind_locs.push_back(i);
}

}
else{
// individual locations
ind_locs.resize(n);
iota(ind_locs.begin(),ind_locs.end(),0);
}

// temporary winner pool
vector<int> winner;

for (int i = 0; i<num_selections; ++i){
//cout << "selection " << i << "\n";
// perform selection
Expand All @@ -121,7 +134,7 @@ void epsilon_lexicase(const ExtMat & F, int n, int d,
winner.resize(0);
// minimum error on case
double minfit;
for (int j = 0; j< can_locs.size(); ++j){
for (int j = 0; j<can_locs.size(); ++j){
if (j==0 || F(can_locs[j],cases.back())<minfit )
minfit = F(can_locs[j],cases.back());
}
Expand Down
20 changes: 6 additions & 14 deletions few/lib/few_lib.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -2,24 +2,16 @@
from eigency.core cimport *
cimport numpy as np
from libcpp.vector cimport vector
from libcpp cimport bool

cdef extern from "epsilon_lexicase.h":
    # C++ epsilon-lexicase selection. Writes the chosen individual indices
    # into `locs`; when `lex_size` is true, `sizes` (program sizes) mediates
    # the candidate pool before selection.
    cdef void _epsilon_lexicase "epsilon_lexicase"(Map[ArrayXXd] & F, int n,
                                                   int d, int num_selections,
                                                   Map[ArrayXi] & locs, bool lex_size,
                                                   Map[ArrayXi] & sizes)

# This will be exposed to Python
def ep_lex(np.ndarray F, int n, int d, int num_selections, np.ndarray locs, bool lex_size,
           np.ndarray sizes):
    """Run epsilon-lexicase selection on fitness matrix F (n x d).

    Selected indices are written into `locs` (int32, length num_selections).
    `sizes` is only consulted when `lex_size` is True; callers may pass an
    empty int array otherwise.
    """
    return _epsilon_lexicase(Map[ArrayXXd](F), n, d, num_selections,
                             Map[ArrayXi](locs), lex_size, Map[ArrayXi](sizes))
16 changes: 12 additions & 4 deletions few/selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,13 @@ def survival(self,parents,offspring,elite=None,elite_index=None,X=None,F=None,F_
survivors, survivor_index = self.lexicase(parents + offspring, num_selections = len(parents), survival = True)
elif self.sel == 'epsilon_lexicase':
# survivors, survivor_index = self.epsilon_lexicase(parents + offspring, num_selections = len(parents), survival = True)
survivor_index = self.epsilon_lexicase(np.vstack((F,F_offspring)), num_selections = F.shape[0], survival = True)
survivors = [(parents+ offspring)[s] for s in survivor_index]
if self.lex_size:
sizes = [len(i.stack) for i in (parents + offspring)]
survivor_index = self.epsilon_lexicase(np.vstack((F,F_offspring)), sizes, num_selections = F.shape[0], survival = True)
survivors = [(parents+ offspring)[s] for s in survivor_index]
else:
survivor_index = self.epsilon_lexicase(np.vstack((F,F_offspring)), [], num_selections = F.shape[0], survival = True)
survivors = [(parents+ offspring)[s] for s in survivor_index]
elif self.sel == 'deterministic_crowding':
survivors, survivor_index = self.deterministic_crowding(parents,offspring,X,X_offspring)
elif self.sel == 'random':
Expand Down Expand Up @@ -107,14 +112,17 @@ def lexicase(self,individuals, num_selections=None, epsilon = False, survival =

return winners, locs

def epsilon_lexicase(self, F, num_selections=None, survival = False):
def epsilon_lexicase(self, F, sizes, num_selections=None, survival = False):
"""conducts epsilon lexicase selection for de-aggregated fitness vectors"""
# pdb.set_trace()
if self.c: # use c library
# define c types
locs = np.empty(num_selections,dtype='int32',order='F')
# self.lib.epsilon_lexicase(F,F.shape[0],F.shape[1],num_selections,locs)
ep_lex(F,F.shape[0],F.shape[1],num_selections,locs)
if self.lex_size:
ep_lex(F,F.shape[0],F.shape[1],num_selections,locs,self.lex_size,np.array(sizes))
else:
ep_lex(F,F.shape[0],F.shape[1],num_selections,locs,self.lex_size,np.array([]))
return locs
else: # use python version
if num_selections is None:
Expand Down
21 changes: 21 additions & 0 deletions few/tests/test_selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,3 +103,24 @@ def test_lexicase_survival_shapes():
i.fitness_vec = np.random.rand(10,1)
offspring,locs = few.lexicase(pop.individuals,num_selections=1,survival=True)
assert len(offspring) == 1;

def test_lex_size():
    """test_selection.py: lex_size flag on/off"""
    # With lex_size enabled, integer program sizes participate in selection.
    learner = FEW(seed_with_ml=False, population_size=257, lex_size=True)
    fitness = np.random.rand(257, 10)
    program_sizes = np.random.randint(1, 100, size=257)
    selected = learner.epsilon_lexicase(fitness, program_sizes,
                                        num_selections=100, survival=True)
    assert len(selected) == 100

    # With lex_size disabled, the sizes argument is accepted but ignored.
    learner = FEW(seed_with_ml=False, population_size=257, lex_size=False)
    fitness = np.random.rand(257, 10)
    program_sizes = np.random.rand(257, 1)
    selected = learner.epsilon_lexicase(fitness, program_sizes,
                                        num_selections=100, survival=True)
    assert len(selected) == 100

0 comments on commit ee6da54

Please sign in to comment.