Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Figs final: #225

Merged
merged 5 commits into from
Oct 13, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
78 changes: 61 additions & 17 deletions descqa/BiasVersusRedshift.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,14 @@
import scipy.optimize as op
import re


from GCR import GCRQuery
import pyccl as ccl

from .base import TestResult
from .CorrelationsTwoPoint import CorrelationsAngularTwoPoint
from .plotting import plt

from .stats import chisq

__all__ = ['BiasValidation']

Expand Down Expand Up @@ -44,8 +45,14 @@ class BiasValidation(CorrelationsAngularTwoPoint):
'label': 'Nicola et al. \n($i<25.3$)',
'colnames': ('z', 'bias'),
'skip':2,
},
}
},
'nicola_25.3_errors':{
'filename_template': 'galaxy_bias/bias_nicola_mlim25.3_with_errors.txt',
'label': 'Nicola et al. \n($i<25.3$)',
'colnames': ('z', 'b_lo', 'bias', 'b_hi'),
'skip':1,
},
}


def __init__(self, **kwargs): #pylint: disable=W0231
Expand All @@ -59,7 +66,8 @@ def __init__(self, **kwargs): #pylint: disable=W0231
self.test_name = kwargs['test_name']
self.fit_range = kwargs['fit_range']
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 10)
self.legend_size = kwargs.get('legend_size', 12)
self.title_fontsize = kwargs.get('title_fontsize', 14)
self.ell_max = kwargs['ell_max'] if 'ell_max' in kwargs.keys() else 20000
validation_filepath = os.path.join(self.data_dir, kwargs['data_filename'])
self.validation_data = np.loadtxt(validation_filepath, skiprows=2)
Expand All @@ -85,7 +93,8 @@ def get_validation_data(self, observations):
return validation_data


def plot_bias_results(self, corr_data, corr_theory, bias, z, catalog_name, output_dir, err=None):
def plot_bias_results(self, corr_data, corr_theory, bias, z, catalog_name, output_dir,
err=None, chisq=None, mag_label=''):
fig, ax = plt.subplots(1, 2, gridspec_kw={'width_ratios': [5, 3]})
colors = plt.cm.plasma_r(np.linspace(0.1, 1, len(self.test_samples))) # pylint: disable=no-member

Expand All @@ -105,31 +114,39 @@ def plot_bias_results(self, corr_data, corr_theory, bias, z, catalog_name, outpu
alpha=0.07, color='grey')

if self.title_in_legend:
lgnd_title = catalog_name
lgnd_title = '{}\n$({})$'.format(catalog_name, mag_label)
title = self.data_label
else:
lgnd_title = None
lgnd_title = '({})'.format(mag_label)
title = '{} vs. {}'.format(catalog_name, self.data_label)
ax[0].legend(loc='lower left', title=lgnd_title, framealpha=0.5, fontsize=self.legend_size)
ax[0].legend(loc='lower left', title=lgnd_title, framealpha=0.5,
fontsize=self.legend_size, title_fontsize=self.title_fontsize)
ax[0].set_xlabel(self.fig_xlabel, size=self.font_size)
ax[0].set_ylim(*self.fig_ylim)
ax[0].set_ylabel(self.fig_ylabel, size=self.font_size)
ax[0].set_title(title, fontsize='medium')

ax[1].errorbar(z, bias, err, marker='o', ls='', label=catalog_name)
ax[1].errorbar(z, bias, err, marker='o', ls='', label='{}\n$({})$'.format(catalog_name, mag_label))
if not self.observations:
ax[1].plot(z, bias)
ax[1].plot(z, bias) #plot curve through points
#add validation data
for v in self.validation_data.values():
colz = v['colnames'][0]
colb = v['colnames'][1]
colb = 'bias'
zmask = (v[colz] < np.max(z)*1.25)
ax[1].plot(v[colz][zmask], v[colb][zmask], label=v['label'])

print(v['colnames'])
if 'b_lo'in v['colnames'] and 'b_hi' in v['colnames']:
print('band', v[colz][zmask], v['b_lo'][zmask], v['b_hi'][zmask])
ax[1].fill_between(v[colz][zmask], v['b_lo'][zmask], v['b_hi'][zmask], alpha=.3, color='grey')
ax[1].set_title('Bias vs redshift', fontsize='medium')
ax[1].set_xlabel('$z$', size=self.font_size)
ax[1].set_ylabel('$b(z)$', size=self.font_size)
ax[1].legend(loc='upper right', framealpha=0.5, frameon=True, fontsize=self.legend_size)
ax[1].legend(loc='upper right', framealpha=0.5, frameon=True, fontsize=self.legend_size-2)
if chisq:
ax[1].text(0.95, 0.05, r'$\chi^2/\rm{{d.o.f}}={}$'.format(', '.join(['{:.2g}'.format(c) for c in chisq])),
horizontalalignment='right', verticalalignment='bottom',
transform=ax[1].transAxes)
plt.subplots_adjust(wspace=.05)
fig.tight_layout()
fig.savefig(os.path.join(output_dir, '{:s}.png'.format(self.test_name)), bbox_inches='tight')
Expand Down Expand Up @@ -208,19 +225,46 @@ def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
#best_bias_obj = result.hess_inv*np.identity(1)[0] #unknown relative normalization
best_bias_err = np.sqrt(cfit[1][0][0])
correlation_theory[sample_name] = best_bias**2*w_th
best_fit_bias.append(best_bias)
best_fit_bias.append(best_bias[0])
best_fit_err.append(best_bias_err)

print(sample_name, best_fit_bias, w_th[angles], xi[angles], xi_sig[angles])

z_mean = np.array(z_mean)
best_fit_bias = np.array(best_fit_bias)
best_fit_err = np.array(best_fit_err)
chi_2 = []
# compute chi*2 between best_fit bias and validation data
for v in self.validation_data.values():
colz = v['colnames'][0]
colb = 'bias'
validation_data = np.interp(z_mean, v[colz], v[colb])
if 'b_lo'in v['colnames'] and 'b_hi' in v['colnames']:
val_err_lo = np.abs(np.interp(z_mean, v[colz], v['b_lo']) - validation_data)
val_err_hi = np.abs(np.interp(z_mean, v[colz], v['b_hi']) - validation_data)
print(val_err_lo, val_err_hi)
val_err = (val_err_lo + val_err_hi)/2 # mean of upper and lower errors
error_sq = best_fit_err**2 + val_err**2 # sum in quadrature
print(val_err, best_fit_err, error_sq)
else:
error_sq = best_fit_err**2
chi__2 = np.sum((best_fit_bias - validation_data)**2/error_sq/len(best_fit_bias))
print('\nchi**2(linear bias - bias data)={:.3g}'.format(chi__2))
chi_2.append(chi__2)

# get mag_cut for plot
mag_label = ''
if 'mag' in self.requested_columns.keys():
filt = self.requested_columns['mag'][0].split('_')[1]
mag_vals = self.test_samples[list(self.test_samples.keys())[0]]['mag'] # assume all cuts the same
#mag_label = '{:.2g} < {}'.format(mag_vals['min'], filt) if 'min' in mag_vals.keys() else filt
mag_label = filt + ' < {:.3g}'.format(mag_vals['max'])
self.plot_bias_results(corr_data=correlation_data,
catalog_name=catalog_name,
corr_theory=correlation_theory,
bias=best_fit_bias,
z=z_mean,
z=z_mean, mag_label=mag_label,
output_dir=output_dir,
err=best_fit_err)
err=best_fit_err, chisq=chi_2)

passed = np.all((best_fit_bias[1:]-best_fit_bias[:-1]) > 0)
score = np.count_nonzero((best_fit_bias[:-1]-best_fit_bias[1:])>0)*1.0/(len(best_fit_bias)-1.0)
Expand Down
2 changes: 1 addition & 1 deletion descqa/DeltaSigmaTest.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
cosmo = WMAP7

# Create interpolation tables for efficient computation of sigma crit
z = np.linspace(0, self.zmax, self.zmax*100)
z = np.linspace(0, self.zmax, int(self.zmax)*100)
d1 = cosmo.angular_diameter_distance(z) # in Mpc
angular_diameter_distance = interp1d(z, d1, kind='quadratic')
d2 = cosmo.comoving_transverse_distance(z) # in Mpc
Expand Down
25 changes: 14 additions & 11 deletions descqa/EllipticityDistribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ class EllipticityDistribution(BaseValidationTest):
'disk':{'B/T_min':0., 'B/T_max':0.2, 'mag_lo':24., 'Mag_hi':-21., 'Mag_lo':-17.},
'late':{'B/T_min':0.4, 'B/T_max':0.7, 'mag_lo':24., 'Mag_hi':-21., 'Mag_lo':-17.},
'irregular':{'B/T_min':0.0, 'B/T_max':1.0},
'ancillary_quantities':['bulge_to_total_ratio_i'],
'ancillary_quantities':['bulge_to_total_ratio_i', 'bulge_to_total_ratio_stellar'],
'ancillary_keys':['B/T'],
},
},
Expand All @@ -60,7 +60,7 @@ def e_default(e):
@staticmethod
def e_squared(a, b):
q = b/a
return np.sqrt((1-q**2)/(1+q**2))
return (1-q**2)/(1+q**2)

#plotting constants
lw2 = 2
Expand Down Expand Up @@ -110,7 +110,7 @@ def __init__(self, z='redshift_true', zlo=0., zhi=2., N_ebins=40, observation=''
},
'e_squared':{'possible_quantities':[['size', 'size_true'], ['size_minor', 'size_minor_true']],
'function':self.e_squared,
'xaxis_label': r'$e = \sqrt{(1-q^2)/(1+q^2)}$',
'xaxis_label': r'$e = (1-q^2)/(1+q^2)$',
'file_label':'e2',
},
}
Expand Down Expand Up @@ -160,7 +160,7 @@ def __init__(self, z='redshift_true', zlo=0., zhi=2., N_ebins=40, observation=''
[possible_native_luminosities[band] for band in possible_native_luminosities if band in self.band_Mag]))

#check for ancillary quantities
self.ancillary_quantities = self.validation_data.get('cuts', {}).get('ancillary_quantities', None)
self.possible_ancillary_quantities = self.validation_data.get('cuts', {}).get('ancillary_quantities', None)

#setup subplot configuration
self.init_plots()
Expand Down Expand Up @@ -235,8 +235,11 @@ def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
required_quantities.append(found_quantity)
if not catalog_instance.has_quantities(required_quantities + self.filter_quantities):
return TestResult(skipped=True, summary='Missing some required quantities: {}'.format(', '.join(required_quantities)))
if self.ancillary_quantities is not None and not catalog_instance.has_quantities(self.ancillary_quantities):
return TestResult(skipped=True, summary='Missing some ancillary quantities: {}'.format(', '.join(self.ancillary_quantities)))
ancillary_quantity = None
if self.possible_ancillary_quantities is not None:
ancillary_quantity = catalog_instance.first_available(*self.possible_ancillary_quantities)
if ancillary_quantity is None:
return TestResult(skipped=True, summary='Missing some ancillary quantities: {}'.format(', '.join(self.possible_ancillary_quantities)))

mag_field = catalog_instance.first_available(*self.possible_mag_fields)
if not mag_field:
Expand All @@ -245,8 +248,8 @@ def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
if not Mag_field:
return TestResult(skipped=True, summary='Missing needed quantities to make magnitude cuts')
all_quantities = required_quantities +[mag_field, Mag_field] + self.filter_quantities
if self.ancillary_quantities is not None:
all_quantities = all_quantities + self.ancillary_quantities
if ancillary_quantity is not None:
all_quantities = all_quantities + [ancillary_quantity]
print('Fetching quantities', all_quantities)

mag_filtername = str(mag_field.split('_')[-2])
Expand Down Expand Up @@ -283,8 +286,8 @@ def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
if morphology is not None:
mask = (catalog_data[mag_field] < self.mag_lo.get(morphology))
mask &= (self.Mag_hi.get(morphology) < catalog_data[Mag_field]) & (catalog_data[Mag_field] < self.Mag_lo.get(morphology))
if self.ancillary_quantities is not None:
for aq, key in zip_longest(self.ancillary_quantities, self.validation_data['cuts'].get('ancillary_keys')):
if ancillary_quantity is not None:
for aq, key in zip_longest([ancillary_quantity], self.validation_data['cuts'].get('ancillary_keys')):
mask &= (self.validation_data['cuts'][morphology].get(key+'_min') < catalog_data[aq]) &\
(catalog_data[aq] < self.validation_data['cuts'][morphology].get(key+'_max'))

Expand Down Expand Up @@ -330,7 +333,7 @@ def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
cutlabel = re.sub('-inf < ', '' , cutlabel) #truncate label with inf

ancillary_label = []
if self.ancillary_quantities is not None:
if ancillary_quantity is not None:
for key in self.validation_data['cuts'].get('ancillary_keys'):
ancillary_label.append('${}<{}<{}$'.format(str(self.validation_data['cuts'][morphology].get(key+'_min')),\
key, str(self.validation_data['cuts'][morphology].get(key+'_max'))))
Expand Down
18 changes: 15 additions & 3 deletions descqa/configs/bias_test_mock_fig.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,31 +9,42 @@ requested_columns:
- redshift
halo_id:
- halo_id
mag:
- mag_i_lsst
test_samples:
z_02_04:
redshift: {min: 0.2, max: 0.4}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 0, max: 25.3}
z_04_06:
redshift: {min: 0.4, max: 0.6}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 0, max: 25.3}
z_06_08:
redshift: {min: 0.6, max: 0.8}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 0, max: 25.3}
z_08_10:
redshift: {min: 0.8, max: 1.0}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 0, max: 25.3}
z_10_12:
redshift: {min: 1.0, max: 1.2}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 0, max: 25.3}

test_sample_labels:
z_02_04: '$0.2 < z < 0.4$'
z_04_06: '$0.4 < z < 0.6$'
z_06_08: '$0.6 < z < 0.8$'
z_08_10: '$0.8 < r < 1.0$'

z_08_10: '$0.8 < z < 1.0$'
z_10_12: '$1.0 < z < 1.2$'
test_data:
z_02_04: {data_col: 3, data_err_col: 4}
z_04_06: {data_col: 5, data_err_col: 6}
z_06_08: {data_col: 7, data_err_col: 8}
z_08_10: {data_col: 9, data_err_col: 10}
z_10_12: {data_col: 11, data_err_col: 12}

output_filename_template: 'w_theta_{}.dat'
label_template: '${} < z < {}$'
Expand All @@ -45,13 +56,14 @@ fit_range:
z_04_06: {min_theta: 0.01, max_theta: 0.4}
z_06_08: {min_theta: 0.01, max_theta: 0.3}
z_08_10: {min_theta: 0.01, max_theta: 0.2}
z_10_12: {min_theta: 0.01, max_theta: 0.2}

need_distance: false
fig_xlabel: '$\theta\quad[{\rm deg}]$'
fig_ylabel: '$w(\theta)$'
fig_ylim: [0.0001, 2.0]
truncate_cat_name: true
observations: ['nicola_25.3']
observations: ['nicola_25.3_errors']

#Treecorr parameters
min_sep: 0.001
Expand Down
71 changes: 71 additions & 0 deletions descqa/configs/bias_test_mock_fig_4bins.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
subclass_name: BiasVersusRedshift.BiasValidation

requested_columns:
ra:
- ra
dec:
- dec
redshift:
- redshift
halo_id:
- halo_id
mag:
- mag_i_lsst
test_samples:
z_02_04:
redshift: {min: 0.2, max: 0.4}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 16, max: 25.3}
z_04_06:
redshift: {min: 0.4, max: 0.6}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 16, max: 25.3}
z_06_08:
redshift: {min: 0.6, max: 0.8}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 16, max: 25.3}
z_08_10:
redshift: {min: 0.8, max: 1.0}
halo_id: {min: 0, max: 999999999999999999}
mag: {min: 16, max: 25.3}

test_sample_labels:
z_02_04: '$0.2 < z < 0.4$'
z_04_06: '$0.4 < z < 0.6$'
z_06_08: '$0.6 < z < 0.8$'
z_08_10: '$0.8 < z < 1.0$'

test_data:
z_02_04: {data_col: 3, data_err_col: 4}
z_04_06: {data_col: 5, data_err_col: 6}
z_06_08: {data_col: 7, data_err_col: 8}
z_08_10: {data_col: 9, data_err_col: 10}

output_filename_template: 'w_theta_{}.dat'
label_template: '${} < z < {}$'

data_filename: 'tpcf/Wang_2013_MNRAS_stt450_Table2.txt'
data_label: 'Best fit bias'
fit_range:
z_02_04: {min_theta: 0.01, max_theta: 0.6}
z_04_06: {min_theta: 0.01, max_theta: 0.4}
z_06_08: {min_theta: 0.01, max_theta: 0.3}
z_08_10: {min_theta: 0.01, max_theta: 0.2}

need_distance: false
fig_xlabel: '$\theta\quad[{\rm deg}]$'
fig_ylabel: '$w(\theta)$'
fig_ylim: [0.0001, 2.0]
truncate_cat_name: true
observations: ['nicola_25.3_errors']

#Treecorr parameters
min_sep: 0.001
max_sep: 10
bin_size: 0.5
var_method: jackknife
#Patches for covariance estimate
npatch: 80

description: |
Compare predicted and computed correlation function
Loading