Commit: PEP8
mghasemi committed Feb 22, 2019
1 parent 94ce38a commit 683e154
Showing 5 changed files with 114 additions and 85 deletions.
27 changes: 16 additions & 11 deletions SKSurrogate/NpyProximation.py
@@ -30,7 +30,7 @@ class Measure(object):
of tuples defining the domain's box. If None is given, it will be set to :math:`[-1, 1]^n`
"""

- def __init__(self, density=None, domain=None, **kwargs):
+ def __init__(self, density=None, domain=None):
from types import FunctionType
# set the density
if density is None:
@@ -144,9 +144,9 @@ def Fourier(self, n, deg, l=1.):
if (sum(o) <= deg) and (sum(o) > 0):
for ex in E:
if sum(ex) > 0:
- f_ = lambda x, o=o, ex=ex: prod(
-     [sin(o[i] * x[i] / l) ** ex[i] * cos(o[i] * x[i] / l) ** (1 - ex[i]) if o[i] > 0 else 1. for
-     i in range(n)])
+ f_ = lambda x, o_=o, ex_=ex: prod(
+     [sin(o_[i] * x[i] / l) ** ex_[i] * cos(o_[i] * x[i] / l) ** (1 - ex_[i]) if o_[i] > 0 else 1.
+     for i in range(n)])
B.append(f_)
return B
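
The `o_=o, ex_=ex` default arguments in the rewritten lambda keep the standard workaround for Python's late-binding closures while renaming the bound parameters so they no longer shadow the loop variables. A minimal standalone sketch of why the default-argument binding matters (illustrative, not code from this repository):

# Late binding: every lambda closes over the loop variable itself,
# so all of them see its final value after the loop ends.
powers_buggy = [lambda x: x ** p for p in range(3)]
print([f(2) for f in powers_buggy])   # [4, 4, 4]

# Default-argument binding captures the current value at definition time.
powers_fixed = [lambda x, p_=p: x ** p_ for p in range(3)]
print([f(2) for f in powers_fixed])   # [1, 2, 4]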

@@ -174,7 +174,7 @@ def __init__(self, dim=1, measure=None, basis=None):
from numpy import array
B = [lambda x: 1.]
for i in range(self.dim):
- B.append(lambda x, i=i: x[i] if type(x) is array else x)
+ B.append(lambda x, i_=i: x[i_] if type(x) is array else x)
self.base = B
else:
self.base = basis
@@ -256,7 +256,7 @@ def FormBasis(self):
D.append(cf)
self.OrthBase = []
for i in range(len(D)):
- fn = lambda x, i=i: sum([D[i][j] * self.base[j](x) for j in range(len(D[i]))])
+ fn = lambda x, i_=i: sum([D[i_][_j] * self.base[_j](x) for _j in range(len(D[i_]))])
self.OrthBase.append(fn)

def Series(self, f):
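
For context, each `fn` built in `FormBasis` evaluates one orthonormal function as a linear combination of the raw basis with a precomputed coefficient row `D[i]`. A sketch of how such rows can be produced by Gram-Schmidt over an inner product; the `inner` callable here is a hypothetical stand-in for integration against the measure, not this module's API:

import numpy as np

def gram_schmidt_rows(base, inner):
    # Returns rows D with orth_i(x) = sum_j D[i][j] * base[j](x),
    # orthonormal with respect to inner(f, g).
    D = []
    for i, f in enumerate(base):
        row = np.zeros(len(base))
        row[i] = 1.0
        for prev in D:
            e = lambda x, r=prev: sum(c * b(x) for c, b in zip(r, base))
            row = row - inner(f, e) * prev   # remove projection onto e
        g = lambda x, r=row: sum(c * b(x) for c, b in zip(r, base))
        D.append(row / np.sqrt(inner(g, g)))  # normalize
    return D

# Discrete inner product on sample points of [-1, 1]:
pts = np.linspace(-1.0, 1.0, 201)
inner = lambda f, g: np.mean([f(x) * g(x) for x in pts])
D = gram_schmidt_rows([lambda x: 1.0, lambda x: x, lambda x: x * x], inner)
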
@@ -284,9 +284,10 @@ class Regression(object):
+ if at initiation the parameter `deg=n` is set, then ``R.fit()`` returns the polynomial regression of
degree `n`.
+ if a basis of functions is provided by means of an `OrthSystem` object (``R.SetOrthSys(orth)``) then
- calling ``R.fit()`` returns the best approximation that can be found using the basic functions of the `orth` object.
+ calling ``R.fit()`` returns the best approximation that can be found using the basic functions of
+ the `orth` object.
- :param point: a list of points to be fitted or a callable to be approximated
+ :param points: a list of points to be fitted or a callable to be approximated
:param dim: dimension of the domain
"""

@@ -300,7 +301,7 @@ def __init__(self, points, dim=None):
for p in points:
supp[tuple(p[:-1])] = 1.
self.meas = Measure(supp)
- self.f = lambda x: sum([p[-1] * (1 * (abs(x - array(p[:-1])) < 1.e-4)).min() for p in points])
+ self.f = lambda x: sum([p_[-1] * (1 * (abs(x - array(p_[:-1])) < 1.e-4)).min() for p_ in points])
elif callable(points):
if dim is None:
raise Error("The dimension can not be determined")
@@ -350,7 +351,7 @@ def fit(self):

try:
from sklearn.base import BaseEstimator, RegressorMixin
- except:
+ except ModuleNotFoundError:
BaseEstimator = type('BaseEstimator', (object,), dict())
RegressorMixin = type('RegressorMixin', (object,), dict())
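
Catching `ModuleNotFoundError` (a `ImportError` subclass added in Python 3.6) instead of a bare `except:` narrows the guard to the missing-dependency case; the `type(...)` calls then manufacture empty stand-in classes so the subclass definitions below still import. The pattern in isolation, as a sketch:

try:
    from sklearn.base import BaseEstimator, RegressorMixin
except ModuleNotFoundError:
    # Empty placeholders: subclassing still works, sklearn behavior is absent.
    BaseEstimator = type('BaseEstimator', (object,), dict())
    RegressorMixin = type('RegressorMixin', (object,), dict())

class MyRegressor(BaseEstimator, RegressorMixin):
    pass   # importable with or without scikit-learn installed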

@@ -364,7 +365,8 @@ class HilbertRegressor(BaseEstimator, RegressorMixin):
:param base: list, default = None
a list of function to form an orthogonal function basis
:param meas: NpyProximation.Measure, default = None
- the measure to form the :math:`L_2(\mu)` space. If `None` a discrete measure will be constructed based on `fit` inputs
+ the measure to form the :math:`L_2(\mu)` space. If `None` a discrete measure will be constructed based
+ on `fit` inputs
:param fspace: NpyProximation.FunctionBasis, default = None
the function subspace of :math:`L_2(\mu)`, if `None` it will be initiated according to `self.meas`
"""
@@ -374,6 +376,9 @@ def __init__(self, deg=3, base=None, meas=None, fspace=None):
self.meas = meas
self.base = base
self.fspace = fspace
+ self.Regressor = None
+ self.dim = 0
+ self.apprx = None

def fit(self, X, y):
"""
15 changes: 9 additions & 6 deletions SKSurrogate/eoa.py
@@ -10,14 +10,16 @@ class EOA(object):
evolutionary optimization algorithm.
:param population: The whole possible population as a list
- :param fitness: The fitness evaluation. Accepts an OrderedDict of individuals with their corresponding fitness and updates their fitness
+ :param fitness: The fitness evaluation. Accepts an OrderedDict of individuals with their corresponding fitness and
+ updates their fitness
:param init_pop: default=`UniformRand`; The python class that initiates the initial population
:param recomb: default=`UniformCrossover`; The python class that defines how to combine parents to produce children
:param mutation: default=`Mutation`; The python class that performs mutation on offspring population
:param termination: default=`MaxGenTermination`; The python class that determines the termination criterion
:param elitism: default=`Elites`; The python class that decides how to handle elitism
:param num_parents: The size of initial parents population
- :param parents_porp: default=0.1; The size of initial parents population given as a portion of whole population (only used if `num_parents` is not given)
+ :param parents_porp: default=0.1; The size of initial parents population given as a portion of whole population
+ (only used if `num_parents` is not given)
:param elits_porp: default=0.2; The proportion of offspring to be replaced by elite parents
:param mutation_prob: The probability that a component will be mutated (default: 0.05)
:param kwargs:
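
A sketch of wiring the documented parameters together; the toy population and objective are illustrative, and any keyword beyond those listed in the docstring should be treated as an assumption:

from itertools import product
from SKSurrogate.eoa import EOA

# Whole population: all binary 4-tuples.
population = [tuple(b) for b in product((0, 1), repeat=4)]

def fitness(evals):
    # Receives an OrderedDict of individual -> fitness; fills missing
    # entries in place, as the docstring describes.
    for ind, scr in evals.items():
        if scr is None:
            evals[ind] = float(sum(ind))   # toy objective: number of ones

eoa = EOA(population, fitness, mutation_prob=0.05, parents_porp=0.1)
eoa()   # run the evolutionary loop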
@@ -44,7 +46,7 @@ def __init__(self, population, fitness, **kwargs):
self.genes = kwargs.pop('genes', [])
self.init_genes = kwargs.pop('init_genes', [])
self.term_genes = kwargs.pop('term_genes', [])
- if self.genes == []:
+ if not self.genes:
self.find_genes()
self.evals = OrderedDict([(_, None) for _ in self.population])
self.parents = OrderedDict()
@@ -55,9 +57,9 @@ def find_genes(self):
for e in ind:
if e not in self.genes:
self.genes.append(e)
- if self.init_genes == []:
+ if not self.init_genes:
self.init_genes = self.genes
- if self.term_genes == []:
+ if not self.term_genes:
self.term_genes = self.genes

def __call__(self, *args, **kwargs):
@@ -72,7 +74,7 @@ def __call__(self, *args, **kwargs):
except NameError:
from tqdm import tqdm
except ImportError:
- pass
+ tqdm = None
if tqdm is not None:
pbar = tqdm(total=self.max_generations)
self.parents = self.init_pop(self)
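
Replacing `pass` with `tqdm = None` makes the optional import explicit: the name is always bound, so the later `if tqdm is not None` guard cannot raise `NameError`. The same pattern reduced to essentials, as a sketch:

try:
    from tqdm import tqdm
except ImportError:
    tqdm = None   # optional dependency; the progress bar is skipped below

pbar = tqdm(total=50) if tqdm is not None else None
for _ in range(50):
    if pbar is not None:
        pbar.update(1)
if pbar is not None:
    pbar.close()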
@@ -133,6 +135,7 @@ def __init__(self, **kwargs):
self.fitnesses = []
self.fmin = 0.
self.fmax = 0.
+ self.kwargs = kwargs

def scale(self, scrs):
self.fmin = min(scrs)
36 changes: 19 additions & 17 deletions SKSurrogate/sensapprx.py
@@ -8,7 +8,7 @@

try:
from sklearn.base import BaseEstimator, TransformerMixin
- except:
+ except ModuleNotFoundError:
BaseEstimator = type('BaseEstimator', (object,), dict())
TransformerMixin = type('TransformerMixin', (object,), dict())

@@ -25,7 +25,8 @@ class SensAprx(BaseEstimator, TransformerMixin):
:param num_levels: number of levels for morris analysis, default: 6
:param grid_jump: grid jump for morris analysis, default: 1
:param num_resmpl: number of resamples for moment independent analysis, default: 10
- :param reduce: whether to reduce the data points to uniques and calculate the averages of the target or not, default: False
+ :param reduce: whether to reduce the data points to uniques and calculate the averages of the target or not,
+ default: False
:param domain: pre-calculated unique points, if none, and reduce is `True` then unique points will be found
:param probs: pre-calculated values associated to `domain` points
"""
@@ -44,15 +45,16 @@ def __init__(self, n_features_to_select=10, regressor=None, method='sobol', marg
self.domain = domain
self.probs = probs
self.weights_ = None
+ self.top_features_ = []

def _avg_fucn(self, X, y):
from numpy import unique, concatenate, array
if self.reduce:
- X_ = unique(X, axis=0)
+ x_ = unique(X, axis=0)
self.domain = []
self.probs = []
data_space = concatenate((X, y.reshape(y.shape[0], 1)), axis=1)
- for row in X_:
+ for row in x_:
X_temp = data_space
for idx in range(row.shape[0]):
X_temp = X_temp[X_temp[:, idx] == row[idx]]
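
The loop above collects, for every unique row of `X`, the matching rows of the joined data and averages the target. With NumPy's `return_inverse` the same reduction can be written without the inner filtering loop; a standalone sketch:

import numpy as np

X = np.array([[0., 1.], [0., 1.], [2., 3.]])
y = np.array([1., 3., 5.])

# Unique rows plus, for each original row, its index into the unique set.
uniq, inv = np.unique(X, axis=0, return_inverse=True)
avg = np.array([y[inv == k].mean() for k in range(len(uniq))])
# uniq plays the role of self.domain, avg the role of self.probs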
@@ -82,31 +84,31 @@ def fit(self, X, y):
self.regressor = SVR()
self.regressor.fit(self.domain, self.probs)
bounds = [[min(self.domain[:, idx]) - self.margin, max(self.domain[:, idx]) + self.margin] for idx in range(N)]
- problem = dict(num_vars=N, names=['x%d' % (idx) for idx in range(N)], bounds=bounds)
- Res = []
+ problem = dict(num_vars=N, names=['x%d' % idx for idx in range(N)], bounds=bounds)
+ res = []
if self.method == 'sobol':
from SALib.sample import saltelli
from SALib.analyze import sobol
param_values = saltelli.sample(problem, self.num_smpl)
- Y = self.regressor.predict(param_values)
- Res = sobol.analyze(problem, Y)['ST']
- self.weights_ = Res
+ y_ = self.regressor.predict(param_values)
+ res = sobol.analyze(problem, y_)['ST']
+ self.weights_ = res
elif self.method == 'morris':
from SALib.sample import morris as mrs
from SALib.analyze import morris
- param_values = mrs.sample(problem, self.num_smpl, num_levels=self.num_levels, grid_jump=self.grid_jump)
- Y = self.regressor.predict(param_values)
- Res = morris.analyze(problem, param_values, Y, num_levels=self.num_levels, grid_jump=self.grid_jump)[
+ param_values = mrs.sample(problem, self.num_smpl, num_levels=self.num_levels)
+ y_ = self.regressor.predict(param_values)
+ res = morris.analyze(problem, param_values, y_, num_levels=self.num_levels)[
'mu_star']
- self.weights_ = Res
+ self.weights_ = res
elif self.method == 'delta-mmnt':
from SALib.sample import latin
from SALib.analyze import delta
param_values = latin.sample(problem, self.num_smpl)
- Y = self.regressor.predict(param_values)
- Res = delta.analyze(problem, param_values, Y, num_resamples=self.num_resmpl)['delta']
- self.weights_ = Res
- self.top_features_ = argpartition(Res, -self.n_features_to_select)[-self.n_features_to_select:]
+ y_ = self.regressor.predict(param_values)
+ res = delta.analyze(problem, param_values, y_, num_resamples=self.num_resmpl)['delta']
+ self.weights_ = res
+ self.top_features_ = argpartition(res, -self.n_features_to_select)[-self.n_features_to_select:]
return self
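
All three branches follow one surrogate pattern: sample the box `problem`, predict with the fitted regressor, and hand the predictions to a SALib analyzer. The Sobol branch in isolation, with a plain function standing in for `self.regressor.predict`:

import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

problem = dict(num_vars=2, names=['x0', 'x1'], bounds=[[0.0, 1.0], [0.0, 1.0]])

def predict(P):
    # stand-in surrogate; the second variable matters far more
    return P[:, 0] + 5.0 * P[:, 1] ** 2

param_values = saltelli.sample(problem, 512)
Y = predict(param_values)
ST = sobol.analyze(problem, Y)['ST']   # total-order index per variable
print(ST)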

def transform(self, X):
