Skip to content

Commit

Permalink
Merge branch 'release-023'
Browse files Browse the repository at this point in the history
  • Loading branch information
Gabriel-p committed Sep 23, 2017
2 parents f482b2d + 85e56cd commit e5f7c55
Show file tree
Hide file tree
Showing 39 changed files with 714 additions and 566 deletions.
19 changes: 19 additions & 0 deletions CHANGELOG.md
@@ -1,6 +1,19 @@
# Change Log


## [[v0.2.3]][194] - 2017-09-23

### Changed

* Improved performance of synthetic cluster generation ([#227][193]). Code is
now ~4X faster.
* Fix excessive use of memory by Rbf interpolation ([#350][192])
* Use equal bin widths in LF and completeness function ([#300][191])
* Faster star separation by errors ([#351][190])
* Generalize Bayesian DA to N-dimensions, fix statistical issues, improve
performance ([#352][189])


## [[v0.2.2]][188] - 2017-08-29

### Changed
Expand Down Expand Up @@ -590,3 +603,9 @@ ________________________________________________________________________________
[186]: https://github.com/asteca/ASteCA/commit/65d1f89bd0992120c8401c80ef976ba3c3803c38
[187]: https://github.com/asteca/ASteCA/issues/216
[188]: https://github.com/asteca/asteca/releases/tag/v0.2.2
[189]: https://github.com/asteca/ASteCA/issues/352
[190]: https://github.com/asteca/ASteCA/issues/351
[191]: https://github.com/asteca/ASteCA/issues/300
[192]: https://github.com/asteca/ASteCA/issues/350
[193]: https://github.com/asteca/ASteCA/issues/227
[194]: https://github.com/asteca/asteca/releases/tag/v0.2.3
2 changes: 1 addition & 1 deletion packages/_version.py
@@ -1 +1 @@
__version__ = "v0.2.2"
__version__ = "v0.2.3"
59 changes: 12 additions & 47 deletions packages/best_fit/best_fit_synth_cl.py
@@ -1,6 +1,6 @@

import numpy as np
import copy
import max_mag_cut
import obs_clust_prepare
import genetic_algorithm
import brute_force_algor
Expand All @@ -9,41 +9,6 @@
from ..synth_clust import imf


def max_mag_cut(cl_reg_fit, max_mag):
    '''
    Reject stars beyond the given magnitude limit.

    cl_reg_fit : list of stars, where each star stores its main (first)
        magnitude in star[3][0].
    max_mag : either the string 'max' (apply no cut) or a float magnitude
        limit.

    Returns the (possibly reduced) list of stars, and the maximum magnitude
    value to use when generating synthetic clusters.
    '''
    # Maximum observed (main) magnitude. Direct iteration replaces the old
    # double zip-transpose expression, which subscripted 'zip' objects and
    # therefore failed on Python 3 (zip returns an iterator there).
    max_mag_obs = max(star[3][0] for star in cl_reg_fit)

    if max_mag == 'max':
        # No magnitude cut applied.
        cl_max_mag, max_mag_syn = copy.deepcopy(cl_reg_fit), max_mag_obs
    else:
        # Keep stars brighter than (or equal to) the magnitude limit,
        # checking each star's main magnitude value.
        star_lst = [star for star in cl_reg_fit if star[3][0] <= max_mag]

        # Check number of stars left.
        if len(star_lst) > 10:
            # For the synthetic clusters, use the minimum value between the
            # selected 'max_mag' value and the maximum observed magnitude.
            # This prevents large 'max_mag' values from generating synthetic
            # clusters with low mass stars in the not-observed region.
            cl_max_mag, max_mag_syn = star_lst, min(max_mag, max_mag_obs)
            print("Maximum magnitude cut at {:.1f} mag applied".format(
                max_mag_syn))
        else:
            # Too few stars would remain; fall back to the full region.
            cl_max_mag, max_mag_syn = copy.deepcopy(cl_reg_fit), max_mag_obs
            print("  WARNING: less than 10 stars left after removing\n"
                  "  stars by magnitude limit. No removal applied.")

    return cl_max_mag, max_mag_syn


def params_errors(
lkl_method, e_max, bin_mr, err_lst, completeness, fundam_params,
cl_max_mag, max_mag_syn, theor_tracks, R_V, ext_coefs, st_dist_mass,
Expand Down Expand Up @@ -96,13 +61,11 @@ def main(clp, bf_flag, best_fit_algor, lkl_method, lkl_binning, lkl_weight,
'''
# Check if algorithm should run.
if bf_flag:
print('Searching for optimal parameters.')

err_lst, cl_reg_fit, completeness, e_max = clp['err_lst'],\
clp['cl_reg_fit'], clp['completeness'], clp['err_max']

# Remove stars beyond the maximum magnitude limit, if it was set.
cl_max_mag, max_mag_syn = max_mag_cut(cl_reg_fit, max_mag)
cl_max_mag, max_mag_syn = max_mag_cut.main(cl_reg_fit, max_mag)

# Process observed cluster. This list contains data used by the
# likelihoods, and for plotting.
Expand All @@ -129,11 +92,13 @@ def main(clp, bf_flag, best_fit_algor, lkl_method, lkl_binning, lkl_weight,

# Obtain mass distribution using the selected IMF. We run it once
# because the array only depends on the IMF selected.
st_dist_mass = imf.main(IMF_name, m_high)
st_dist_mass = imf.main(IMF_name, m_high, fundam_params[4])

# Store the number of defined filters and colors.
N_fc = [len(filters), len(colors)]

print('Searching for optimal parameters.')

# Call algorithm to calculate the likelihoods for the set of
# isochrones and return the best fitting parameters.
if best_fit_algor == 'brute':
Expand Down Expand Up @@ -174,13 +139,13 @@ def main(clp, bf_flag, best_fit_algor, lkl_method, lkl_binning, lkl_weight,
else:
# Pass empty lists to make_plots.
print('Skip parameters fitting process.')
cl_max_mag, max_mag_syn, isoch_fit_params, isoch_fit_errors,\
st_dist_mass, N_fc, ext_coefs = [], -1.,\
cl_max_mag, max_mag_syn, ext_coefs, st_dist_mass, N_fc,\
isoch_fit_params, isoch_fit_errors = [], -1., [], {}, [],\
[[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]],\
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [], [], []
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]

clp['cl_max_mag'], clp['max_mag_syn'], clp['isoch_fit_params'],\
clp['isoch_fit_errors'], clp['ext_coefs'], clp['st_dist_mass'],\
clp['N_fc'] = cl_max_mag, max_mag_syn, isoch_fit_params,\
isoch_fit_errors, ext_coefs, st_dist_mass, N_fc
clp['cl_max_mag'], clp['max_mag_syn'], clp['ext_coefs'],\
clp['st_dist_mass'], clp['N_fc'], clp['isoch_fit_params'],\
clp['isoch_fit_errors'] = cl_max_mag, max_mag_syn, ext_coefs,\
st_dist_mass, N_fc, isoch_fit_params, isoch_fit_errors
return clp
60 changes: 31 additions & 29 deletions packages/best_fit/brute_force_algor.py
Expand Up @@ -5,18 +5,20 @@

def main(lkl_method, e_max, bin_mr, err_lst, completeness, max_mag_syn,
fundam_params, obs_clust, theor_tracks, R_V, ext_coefs, st_dist_mass,
N_fc):
N_fc, N_bf=1):
"""
Brute force algorithm that computes the likelihoods for *all* the defined
isochrones.
# TODO what's stated below is addressed by issue #347
It is possible that this algorithm returns a *larger* likelihood than the
GA. This is counter-intuitive, but it is because the GA samples the IMF
*each time* a mass value is checked, and the same mass value can be checked
several times. The BF algorithm on the other hand does this only *once*
per mass value.
"""
# Unpack parameters values.

m_lst, a_lst, e_lst, d_lst, mass_lst, bin_lst = fundam_params

# Initiate list that will hold the likelihood values telling us how well
Expand All @@ -25,13 +27,16 @@ def main(lkl_method, e_max, bin_mr, err_lst, completeness, max_mag_syn,

# Count the number of total models/solutions to explore.
tot_sols, i = np.prod([len(_) for _ in fundam_params]), 0
milestones = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
percs = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]

# Iterate through all metallicities.
for m_i, m in enumerate(m_lst):
for m_i, met in enumerate(m_lst):

# Iterate through all ages.
for a_i, a in enumerate(a_lst):
for a_i, age in enumerate(a_lst):

# Call likelihood function with m,a,e,d values.
isochrone = theor_tracks[m_i][a_i]

# Iterate through all extinction values.
for e in e_lst:
Expand All @@ -43,39 +48,36 @@ def main(lkl_method, e_max, bin_mr, err_lst, completeness, max_mag_syn,
for mass in mass_lst:

# Iterate through all binary fractions.
for bin_frac in bin_lst:
model = [m, a, e, d, mass, bin_frac]

# Call likelihood function with m,a,e,d values.
isochrone = theor_tracks[m_i][a_i]
# Call likelihood function with m,a,e,d values.
lkl = likelihood.main(
lkl_method, e_max, bin_mr, err_lst, obs_clust,
completeness, max_mag_syn, st_dist_mass,
isochrone, R_V, ext_coefs, N_fc, model)
# Store the likelihood for each synthetic
# cluster.
model_done[0].append(model)
model_done[1].append(lkl)
for bf in bin_lst:

# In place for #347
for _ in range(N_bf):
model = [met, age, e, d, mass, bf]
# Call likelihood function for this model.
lkl = likelihood.main(
lkl_method, e_max, bin_mr, err_lst,
obs_clust, completeness, max_mag_syn,
st_dist_mass, isochrone, R_V, ext_coefs,
N_fc, model)
# Store likelihood and synthetic cluster.
model_done[0].append(model)
model_done[1].append(lkl)

# Print percentage done.
i += 1
percentage_complete = (100.0 * (i + 1) /
tot_sols)
while len(milestones) > 0 and \
percentage_complete >= milestones[0]:
p_comp = (100.0 * (i + 1) / tot_sols)
while len(percs) > 0 and p_comp >= percs[0]:
best_fit_indx = np.argmin(model_done[1])
print (" {:>3}% L={:.1f} ({:g}, {:g}, {:g},"
" {:g}, {:g}, {:g})".format(
milestones[0],
percs[0],
model_done[1][best_fit_indx],
*model_done[0][best_fit_indx]))
# Remove that milestone from the list.
milestones = milestones[1:]
# Remove that percentage value from the list.
percs = percs[1:]

# Find index of function with smallest likelihood value.
# This index thus points to the isochrone that best fits the observed
# group of stars.
# Index of function with smallest likelihood value.
# This index points to the model that best fits the observed cluster.
best_fit_indx = np.argmin(model_done[1])

isoch_fit_params = [model_done[0][best_fit_indx], model_done]
Expand Down
37 changes: 37 additions & 0 deletions packages/best_fit/max_mag_cut.py
@@ -0,0 +1,37 @@

import copy


def main(cl_reg_fit, max_mag):
    '''
    Reject stars beyond the given magnitude limit.

    cl_reg_fit : list of stars, where each star stores its main (first)
        magnitude in star[3][0].
    max_mag : either the string 'max' (apply no cut) or a float magnitude
        limit.

    Returns the (possibly reduced) list of stars, and the maximum magnitude
    value to use when generating synthetic clusters.
    '''
    # Maximum observed (main) magnitude. Direct iteration replaces the old
    # double zip-transpose expression, which subscripted 'zip' objects and
    # therefore failed on Python 3 (zip returns an iterator there).
    max_mag_obs = max(star[3][0] for star in cl_reg_fit)

    if max_mag == 'max':
        # No magnitude cut applied.
        cl_max_mag, max_mag_syn = copy.deepcopy(cl_reg_fit), max_mag_obs
    else:
        # Keep stars brighter than (or equal to) the magnitude limit,
        # checking each star's main magnitude value.
        star_lst = [star for star in cl_reg_fit if star[3][0] <= max_mag]

        # Check number of stars left.
        if len(star_lst) > 10:
            # For the synthetic clusters, use the minimum value between the
            # selected 'max_mag' value and the maximum observed magnitude.
            # This prevents large 'max_mag' values from generating synthetic
            # clusters with low mass stars in the not-observed region.
            cl_max_mag, max_mag_syn = star_lst, min(max_mag, max_mag_obs)
            print("Maximum magnitude cut applied ({:.1f} mag)".format(
                max_mag_syn))
        else:
            # Too few stars would remain; fall back to the full region.
            cl_max_mag, max_mag_syn = copy.deepcopy(cl_reg_fit), max_mag_obs
            print("  WARNING: less than 10 stars left after removing\n"
                  "  stars by magnitude limit. No removal applied.")

    return cl_max_mag, max_mag_syn
11 changes: 10 additions & 1 deletion packages/check/params_match.py
Expand Up @@ -85,7 +85,7 @@ def check(bf_flag, best_fit_algor, lkl_method, lkl_binning, N_bootstrap,
sys.exit("ERROR: the selected isochrones set ('{}') does\n"
"not match a valid input.".format(evol_track))

# Check IMF defined.
# Check maximum magnitude limit defined.
if isinstance(max_mag, str):
if max_mag != 'max':
sys.exit("ERROR: Maximum magnitude value selected ({}) is"
Expand Down Expand Up @@ -121,6 +121,15 @@ def check(bf_flag, best_fit_algor, lkl_method, lkl_binning, N_bootstrap,
sys.exit("ERROR: Range defined for '{}' parameter is"
" empty.".format(p_names[i][0]))

# Check mass range.
if mass_rs[0] == 'r':
if len(np.arange(mass_rs[1][0], mass_rs[1][1],
mass_rs[1][2])) > 100 and mass_rs[1][1] > 1e5:
print(" WARNING: the number of masses defined is > 100 and\n"
" the max mass is large ({:.0f}). This could cause\n"
" memory issues when sampling the IMF.\n".format(
mass_rs[1][1]))

# Check binarity parameters.
# See if it is a list of values or a range.
if par_ranges[-1][0] == 'r':
Expand Down
6 changes: 6 additions & 0 deletions packages/checker.py
Expand Up @@ -64,6 +64,12 @@ def check_all(mypath, file_end):
# Check the best synthetic cluster match parameters.
params_match.check(**pd)

# Filters and colors names.
fs = ', '.join(_[1] for _ in pd['filters'])
cs = ', '.join('(' + _[1].replace(',', '-') + ')' for _ in pd['colors'])
print("Filter: {}".format(fs))
print("Color: {}\n".format(cs))

# Check and store metallicity files.
pd = read_met_files.check_get(pd)

Expand Down

0 comments on commit e5f7c55

Please sign in to comment.