diff --git a/ComputePosteriors.sh b/ComputePosteriors.sh new file mode 100755 index 0000000..efbee40 --- /dev/null +++ b/ComputePosteriors.sh @@ -0,0 +1,22 @@ +#!/bin/bash + + +echo " Recomputing Gravitational Wave posteriors on f_PBH (may take ~ 1 hour...)" +echo " " + +cd src +for NOBS in 1 80 +do + for PRIOR in J LF + do + python tabulate_posteriors_O3.py -N_obs $NOBS -prior $PRIOR -M_PBH 0.5 + done +done + +for NOBS in 1 24000 +do + for PRIOR in J LF + do + python tabulate_posteriors_ET.py -N_obs $NOBS -prior $PRIOR + done +done \ No newline at end of file diff --git a/README.md b/README.md index fda35fc..4ca1006 100644 --- a/README.md +++ b/README.md @@ -18,11 +18,10 @@ The figures can be remade with the following commands: `python plot_frequentist_limits.py -plot_ps_diff` -Computing the point-source limits requires making tables containing `p_gamma`, the probability that an individual PBH is detectable by Fermi-LAT. The tables used for the analysis in the paper are contained in the directory `data/p_gammas/`. These may take a few hours to recompute. They can be regenerated with +Computing the point-source limits requires making tables containing `p_gamma`, the probability that an individual PBH is detectable by Fermi-LAT. The tables used for the analysis in the paper are contained in the directory [`data/p_gammas/`](data/p_gammas/). These may take a few hours to recompute. They can be regenerated with `python generate_p_gamma_tables.py -test`. With the `-test` flag, the `p_gamma` tables will be written to [`data/p_gammas/test/`](data/p_gammas/test/) instead of overwriting the precomputed tables. The script can also be used to generate `p_gamma` tables over different `(m_dm, sv)` grids. - `python generate_p_gamma_tables.py -test` +All limit calculations require tabulated posteriors of `f_PBH`. These are provided in [`data/posteriors_f`](data/posteriors_f/) but they can be recomputed by running the script [`ComputePosteriors.sh`](ComputePosteriors.sh). This script calls a number of scripts in [`src/`](src/) which calculate the posteriors from gravitational wave observations. Run `python src/tabulate_posteriors_O3.py -h` and `python src/tabulate_posteriors_ET.py -h` for more detailed usage of these scripts. Scripts for calculating posteriors from SKA will be added shortly. -With the `-test` flag, the `p_gamma` tables will be written to `data/p_gammas/test/` instead of overwriting the precomputed tables. The script can also be used to generate `p_gamma` tables over different `(m_dm, sv)` grids. 
## `bayesian` branch diff --git a/data/ET_redshift.txt b/data/ET_redshift.txt new file mode 100644 index 0000000..606cabe --- /dev/null +++ b/data/ET_redshift.txt @@ -0,0 +1,5 @@ +# Total source frame mass 20 M_sun +# Columns: Redshift, Fraction detectable +7.678380919212519 0.5 +28.52205602981411 0.1 +95.87373195587614 0 \ No newline at end of file diff --git a/data/SubSolar_Horizon.txt b/data/SubSolar_Horizon.txt new file mode 100644 index 0000000..cda87e1 --- /dev/null +++ b/data/SubSolar_Horizon.txt @@ -0,0 +1,11 @@ +# Figure 1 (dashed green line) of https://arxiv.org/pdf/1808.04771.pdf +# Columns: Component Mass [M_sun], Horizon [Mpc] +0.1981205951448708 27.286681430014227 +0.3008613938919342 38.37156747012553 +0.4004698512137823 48.75339548973085 +0.5000783085356305 58.784038259116684 +0.6003132341425216 68.46363328229641 +0.7005481597494128 77.70424674270178 +0.800156617071261 86.8569263865389 +0.9003915426781521 95.83415090927964 +1.0025058731401724 104.8117879440606 diff --git a/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt b/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt index 66d606a..c958aa0 100644 --- a/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt +++ b/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt @@ -1,4 +1,4 @@ -# Posterior distribution for f, given N = 1 merger events in ET. PBH Mass: 10.0 M_sunPrior: jeffreys +# Posterior distribution for f, given N = 1 merger events in ET. PBH Mass: 10.0 M_sunPrior: J # Columns: f, P(f|N) 9.999999999999999547e-07 7.246589174428889635e+01 1.071891319205128528e-06 8.324571817517015404e+01 diff --git a/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt b/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt index 83b177e..4c29ba4 100644 --- a/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt +++ b/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt @@ -1,4 +1,4 @@ -# Posterior distribution for f, given N = 1 merger events at LIGO O3. PBH Mass: 0.5 M_sunPrior: jeffreys +# Posterior distribution for f, given N = 1 merger events at LIGO O3. PBH Mass: 0.5 M_sunPrior: J # Columns: f, P(f|N) 9.999999999999999547e-07 8.416139272344972731e-06 1.071891319205128528e-06 9.669730117647979165e-06 diff --git a/figures/Constraints_fPBH.pdf b/figures/Constraints_fPBH.pdf index 07b7ea5..8f41e46 100644 Binary files a/figures/Constraints_fPBH.pdf and b/figures/Constraints_fPBH.pdf differ diff --git a/requirements.txt b/requirements.txt index 593022e..f52cf47 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,5 @@ matplotlib==3.0.3 scipy==1.2.1 numpy==1.16.2 +tqdm==4.19.9 + diff --git a/src/Cosmo.py b/src/Cosmo.py new file mode 100644 index 0000000..4cc6b17 --- /dev/null +++ b/src/Cosmo.py @@ -0,0 +1,75 @@ +""" Cosmo.py + +Functions/constants for calculating cosmological quantities +such as the Hubble rate and various distances. 
+ +""" + +import numpy as np + +from scipy.integrate import cumtrapz, quad, dblquad +from scipy.interpolate import interp1d + +#--- Some constants ------------ +#------------------------------- + +G_N = 4.302e-3 #(pc/solar mass) (km/s)^2 +G_N_Mpc = 1e-6*4.302e-3 #(Mpc/solar mass) (km/s)^2 + +h = 0.678 +Omega_DM = 0.1186/(h**2) +H0 = 100.0*h #(km/s) Mpc^-1 +H0_peryr = 67.8*(3.24e-20)*(60*60*24*365) +ageUniverse = 13.799e9 #y +Omega_L = 0.692 +Omega_m = 0.308 +Omega_r = 9.3e-5 + +D_H = (3000/h) #Mpc + +c = 3e5 #km/s + + +#------------------------------- + +#--- Useful cosmological functions --- +#------------------------------------- + + +rho_critical = 3.0*H0**2/(8.0*np.pi*G_N_Mpc) #Solar masses per Mpc^3 + +def Hubble(z): + return H0_peryr*np.sqrt(Omega_L + Omega_m*(1+z)**3 + Omega_r*(1+z)**4) + +def Hubble2(z): + return H0*np.sqrt(Omega_L + Omega_m*(1+z)**3 + Omega_r*(1+z)**4) + +def HubbleLaw(age): + return H0_peryr*age + +def rho_z(z): + return 3.0*Hubble2(z)**2/(8*np.pi*G_N) + +def t_univ(z): + integ = lambda x: 1.0/((1+x)*Hubble(x)) + return quad(integ, z, np.inf)[0] + +def Omega_PBH(f): + return f*Omega_DM + +#Luminosity distance (Mpc) +def calcdL(z): + c = 3.06594845e-7 + return c*(1+z)*quad(lambda x: Hubble(x)**-1, 0, z)[0] + +#https://arxiv.org/pdf/astro-ph/9905116.pdf +#Comoving distance (in a flat universe, in Mpc) +def D_C(z): + return c*quad(lambda z: 1/Hubble2(z), 0, z)[0] + +#Comoving volume out to redshift z (in Mpc^3) +def V_C(z): + return (4*np.pi/3)*(D_C(z))**3 + +def dVdz(z): + return 4*np.pi*D_C(z)**2*(c/Hubble2(z)) \ No newline at end of file diff --git a/src/MergerRate.py b/src/MergerRate.py new file mode 100644 index 0000000..90a3c7b --- /dev/null +++ b/src/MergerRate.py @@ -0,0 +1,293 @@ +"""MergerRate.py + +Code for calculating the PBH merger rate as a function +of PBH mass and fraction (along with Remapping.py). +Note: some functions are replicated between the two files. + +Adapted from https://github.com/bradkav/BlackHolesDarkDress. + +See https://arxiv.org/abs/1805.09034 for more details. + +Throughout withHalo=True (withHalo=False) performs the calculation +with (without) including Dark Matter halos formed around the PBHs. + +""" + +import numpy as np + +from scipy.integrate import cumtrapz, quad, dblquad +from scipy.interpolate import interp1d, UnivariateSpline + +import Cosmo + +import Remapping as Remap + +from matplotlib import pyplot as plt + +#--- Some constants ------------ +#------------------------------- + +G_N = 4.302e-3 #(pc/solar mass) (km/s)^2 +G_N_Mpc = 1e-6*4.302e-3 #(Mpc/solar mass) (km/s)^2 + + +z_eq = 3375.0 +rho_eq = 1512.0 #Solar masses per pc^3 +sigma_eq = 0.005 #Variance of DM density perturbations at equality +lambda_max = 3.0 #Maximum value of lambda = 3.0*z_dec/z_eq (i.e. 
binaries decouple all the way up to z_dec = z_eq) + +alpha = 0.1 + + +#------------------------ + + +#Mean interPBH separation +def xbar(f, M_PBH): + return (3.0*M_PBH/(4*np.pi*rho_eq*(0.85*f)))**(1.0/3.0) + + +#Semi-major axis as a function of decoupling redshift +def semimajoraxis(z_pair, f, M_PBH): + Mtot = M_PBH + X = 3.0*z_eq*0.85*f/z_pair + return alpha*xbar(f, M_PBH)*(f*0.85)**(1.0/3.0)*((X/(0.85*f))**(4.0/3.0)) + + +def bigX(x, f, M_PBH): + return (x/(xbar(f,M_PBH)))**3.0 + +#Calculate x (comoving PBH separation) as a function of a +def x_of_a(a, f, M_PBH, withHalo = False): + + xb = xbar(f, M_PBH) + + if (not withHalo): + return ((a * (0.85*f) * xb**3)/alpha)**(1.0/4.0) + + elif (withHalo): + xb_rescaled = xb * ((M_PBH + Remap.M_halo(z_decoupling(a, f, M_PBH), M_PBH))/M_PBH )**(1./3.) + return ((a * (0.85*f) * xb_rescaled**3)/alpha)**(1.0/4.0) + +#Calculate a (semi-major axis) in terms of x +def a_of_x(x, f, M_PBH): + + xb = xbar(f, M_PBH) + return (alpha/(0.85*f))*x**4/xb**3 + +#Maximum semi-major axis +def a_max(f, M_PBH, withHalo = False): + Mtot = 1.0*M_PBH + if (withHalo): + Mtot += M_halo(z_eq, M_PBH) + return alpha*xbar(f, Mtot)*(f*0.85)**(1.0/3.0)*((lambda_max)**(4.0/3.0)) + +def z_decoupling(a, f, mass): + return (1. + z_eq)/(1./3 * bigX(x_of_a(a, f, mass), f, mass)/(0.85*f)) - 1. + + +def n_PBH(f, M_PBH): + return (1e3)**3*Cosmo.rho_critical*Cosmo.Omega_PBH(f)/M_PBH #PBH per Gpc^3 + + + +#------ Probability distributions ---- +#------------------------------------- + +def j_X(x, f, M_PBH): + return bigX(x, f, M_PBH)*0.5*(1+sigma_eq**2/(0.85*f)**2)**0.5 + +def P_j(j, x, f, M_PBH): + y = j/j_X(x, f, M_PBH) + return (y**2/(1+y**2)**(3.0/2.0))/j + +def P_a_j(a, j, f, M_PBH): + xval = x_of_a(a, f, M_PBH) + X = bigX(xval, f, M_PBH) + xb = xbar(f, M_PBH) + measure = (3.0/4.0)*(a**-0.25)*(0.85*f/(alpha*xb))**0.75 + return P_j(j, xval, f, M_PBH)*np.exp(-X)*measure + +def P_a_j_withHalo(a, j, f, M_PBH): + + xval = x_of_a(a, f, M_PBH, withHalo = True) + X = bigX(xval, f, M_PBH) + xb = xbar(f, M_PBH) + + measure = (3.0/4.0)*(a**-0.25)*(0.85*f/(alpha*xb))**0.75 + measure *= ((M_PBH + Remap.M_halo(z_decoupling(a, f, M_PBH), M_PBH))/M_PBH )**(3./4.) + + return P_j(j, xval, f, M_PBH)*np.exp(-X)*measure + +def P_la_lj(la,lj, f, M_PBH): + j = 10.**lj + a = 10.**la + return P_a_j(a, j, f, M_PBH)*a*j*(np.log(10)**2) #/Norm1 + +def P_la_lj_withHalo(la,lj, f, M_PBH): + j = 10.**lj + a = 10.**la + return P_a_j_withHalo(a, j, f, M_PBH)*a*j*(np.log(10)**2) #/Norm1 + +def t_coal(a, e, M_PBH): + Q = (3.0/170.0)*(G_N*M_PBH)**(-3) # s^6 pc^-3 km^-6 + tc = Q*a**4*(1-e**2)**(7.0/2.0) #s^6 pc km^-6 + tc *= 3.086e+13 #s^6 km^-5 + tc *= (3e5)**5 #s + return tc/(60*60*24*365) #in years + +def j_coal(a, t, M_PBH): + Q = (3.0/170.0)*(G_N*M_PBH)**-3 # s^6 pc^-3 km^-6 + tc = t*(60*60*24*365) + tc /= (3e5)**5 + tc /= 3.086e+13 + return (tc/(Q*a**4))**(1.0/7.0) + +def P_binary(f, M_PBH): + amin = 0 + amax = a_max(f, M_PBH) + + P1 = lambda y,x,f,M_PBH: P_a_j(x, y, f, M_PBH) + Norm1 = dblquad(P1, amin, amax, lambda x: 0, lambda x: 1, args=(f, M_PBH), epsrel=1e-20)[0] + return Norm1 + + +#---- Merger Time Distribution --- +#--------------------------------- + +def P_t_integ(a, t, f, M_PBH, withHalo=False): + + c = 3.e5 #km/s + Q = (c**6)*(3.0/170.0)*(G_N*M_PBH)**-3 # pc^-3 + t_pc = t*(60*60*24*365)*c*3.24078e-14 #Time in years -> Time in parsec + ecc_sq = 1-(t_pc*1.0/(Q*a**4))**(2.0/7.0) + if (ecc_sq < 0): + return 0 + ecc = np.sqrt(ecc_sq) + j_ecc = np.sqrt(1. - ecc**2.) + + P1 = 1. 
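+    # Added commentary (not part of the original code): for a fixed semi-major axis a,
+    # the coalescence time scales as t ~ a^4 (1-e^2)^(7/2) = a^4 j^7 (see t_coal above),
+    # so the angular momentum giving coalescence at time t scales as j ~ t^(1/7) and the
+    # change of variables from j to t contributes the Jacobian dj/dt = j/(7t) applied below.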
+ if (withHalo == False): + P1 = P_a_j(a, j_ecc, f, M_PBH) + else: + P1 = P_a_j_withHalo(a, j_ecc, f, M_PBH) + + djdt = j_ecc/(7*t) + return P1*djdt + +#Time in years +def P_of_t_analytical(t, f, M_PBH, withHalo=False): + + amin = 1e-6 + amax = a_max(f, M_PBH) + + + avals = np.logspace(np.log10(amin), np.log10(amax), 200) #pc + test = np.asarray([P_t_integ(a, t, f, M_PBH, withHalo) for a in avals]) + + integr = np.trapz(test, avals) + + + return integr + + +def P_of_t_withHalo(t, f, M_PBH): + + + do_plots = False + #do_plots = True + + N_a = 500 + + amin = 1e-6 + amax = Remap.semimajoraxis_full(z_eq, f, M_PBH) + + a_i = np.logspace(np.log10(amin), np.log10(amax), N_a) + a_f = np.vectorize(Remap.calc_af)(a_i, M_PBH) + + + ind_cut = np.argmax(a_f) + a_cut = a_i[ind_cut] + + a_f_interp = UnivariateSpline(a_i, a_f, k=3, s=0) + daf_dai = a_f_interp.derivative(n=1) + + Jac = np.sqrt(a_f/a_i)*(1/daf_dai(a_i) - 0.0*a_i/a_f) + + P = a_f*0.0 + + c = 3.e5 #km/s + Q = (c**6)*(3.0/170.0)*(G_N*M_PBH)**-3 # pc^-3 + t_pc = t*(60*60*24*365)*c*3.24078e-14 #Time in years -> Time in parsec + ecc_sq = 1-(t_pc*1.0/(Q*a_f**4))**(2.0/7.0) + + for i in range(N_a): + if (ecc_sq[i] < 0): + P[i] = 0 + else: + ecc = np.sqrt(ecc_sq[i]) + j_ecc = np.sqrt(1. - ecc**2.) + j_i = np.sqrt(a_f[i]/a_i[i])*j_ecc + + P[i] = P_a_j_withHalo(a_i[i], j_i, f, M_PBH) + + djdt = j_ecc/(7*t) + P[i] *= djdt + + + if (do_plots): + plt.figure() + + plt.loglog(a_i[:ind_cut],a_f[:ind_cut]) + plt.loglog(a_i[ind_cut:],a_f[ind_cut:]) + + plt.xlabel(r"Initial semi-major axis, $a_i\,\,[\mathrm{pc}]$") + plt.ylabel(r"Final semi-major axis, $a_f \,\,[\mathrm{pc}]$") + + plt.loglog([1e-4, 4e-1], [1e-4, 4e-1], linestyle='--', color='k') + + plt.xlim(1e-6, 4e-1) + plt.ylim(1e-6, 4e-1) + + plt.gca().set_aspect('equal') + ax2 = plt.gca().twinx() + ax2.loglog(a_i, P) + + plt.figure() + plt.loglog(a_i, np.abs(np.sqrt(a_f/a_i)*(1/daf_dai(a_i)))) + plt.show() + + P *= Jac + + if (do_plots): + plt.figure() + plt.plot(a_f[:ind_cut],P[:ind_cut]) + plt.plot(a_f[ind_cut:][::-1],-P[ind_cut:][::-1]) + plt.show() + + return np.trapz(P[:ind_cut], a_f[:ind_cut]) - np.trapz(P[ind_cut:][::-1],a_f[ind_cut:][::-1]) + + +#---- Merger Rates --- +#--------------------------------- + +def MergerRate_test(z, f, M_PBH): + return 0.5*n_PBH(f, M_PBH)*P_of_t_analytical(Cosmo.t_univ(z), f, M_PBH, withHalo=True) + + +def MergerRate(z, f, M_PBH, withHalo=False): + P_of_t_fun = P_of_t_analytical + if (withHalo): + P_of_t_fun = P_of_t_withHalo + + return 0.5*n_PBH(f, M_PBH)*P_of_t_fun(Cosmo.t_univ(z), f, M_PBH) + + +def AverageMergerRate( f, M_PBH, z1, z2, withHalo=False): + z_list = np.linspace(z1, z2, 20) + Merge_list = np.array([MergerRate(z, f, M_PBH, withHalo) for z in z_list]) + return np.trapz(Merge_list, z_list)/(z2-z1) + + + + diff --git a/src/Remapping.py b/src/Remapping.py new file mode 100644 index 0000000..ca8732f --- /dev/null +++ b/src/Remapping.py @@ -0,0 +1,250 @@ +"""Remapping.py + +Code for calculating the PBH merger rate as a function +of PBH mass and fraction (along with MergerRate.py). +Note: some functions are replicated between the two files. + +Adapted from https://github.com/bradkav/BlackHolesDarkDress. + +See https://arxiv.org/abs/1805.09034 for more details. + +Throughout withHalo=True (withHalo=False) performs the calculation +with (without) including Dark Matter halos formed around the PBHs. 
+ +""" + + +import numpy as np + +from scipy.interpolate import interp1d +from scipy.integrate import quad + +#------------------ + +alpha = 0.1 +z_eq = 3375.0 +rho_eq = 1512.0 #Solar masses per pc^3 +lambda_max = 3.0 #Decouple at a maximum redshift of z_dec = z_eq (lambda = 3.0*z_dec) + +G_N = 4.302e-3 #Units of (pc/solar mass) (km/s)^2 + +rtr_interp = None +Ubind_interp = None +Jac_interp = None +current_MPBH = -10.0 + +#-------------------- + + +# Eq. 11 and 12, https://arxiv.org/abs/0706.0864 +#M_PBH in solar masses +def r_trunc(z, M_PBH): + return 58.0 / (1 + z) * M_halo(z, M_PBH)**(1/3) + #r0 = 6.3e-3 #1300 AU in pc + #return r0*(M_PBH)**(1.0/3.0)*(1.+z_eq)/(1.+z) + +#Truncation radiation at equality +def r_eq(M_PBH): + return r_trunc(z_eq, M_PBH) + +#Halo mass +def M_halo(z, M_PBH): + return (1+z_eq) / (1. + z) * M_PBH + #return M_PBH*(r_trunc(z, M_PBH)/r_eq(M_PBH))**1.5 + + +#--------------------- + +def xbar(f, M_PBH): + return (3.0*M_PBH/(4*np.pi*rho_eq*(0.85*f)))**(1.0/3.0) + +def semimajoraxis(z_pair, f, M_PBH): + #Mtot = M_PBH + M_halo(z_pair, M_PBH) + Mtot = M_PBH + X = 3.0*z_eq*0.85*f/z_pair + return alpha*xbar(f, M_PBH)*(f*0.85)**(1.0/3.0)*((X/(0.85*f))**(4.0/3.0)) + +#Semi-major axis taking into account DM halo mass +def semimajoraxis_full(z_pair, f, M_PBH): + Mtot = M_PBH + M_halo(z_pair, M_PBH) + #Mtot = M_PBH + X = 3.0*z_eq*0.85*f/z_pair + return alpha*xbar(f, Mtot)*(f*0.85)**(1.0/3.0)*((X/(0.85*f))**(4.0/3.0)) + + +def bigX(x, f, M_PBH): + return (x/(xbar(f,M_PBH)))**3.0 + +def x_of_a(a, f, M_PBH): + xb = xbar(f, M_PBH) + return ((a * (0.85*f) * xb**3.)/alpha)**(1./4.) + + +#Maximum semi-major axis +def a_max(f, M_PBH, withHalo = False): + Mtot = 1.0*M_PBH + if (withHalo): + Mtot += M_halo(z_eq, M_PBH) + return alpha*xbar(f, Mtot)*(f*0.85)**(1.0/3.0)*((lambda_max)**(4.0/3.0)) + + +def z_decoupling(a, f, mass): + return (1. + z_eq)/(1./3 * bigX(x_of_a(a, f, mass), f, mass)/(0.85*f)) - 1. + +#-------------------------------------- +def GetRtrInterp(M_PBH): + global rtr_interp + + #NB: We set f = 0.01 in here, because actually f cancels everywhere in some parts of the calculation... + + am = a_max(0.01, M_PBH, withHalo=True) + a_list = np.logspace(-8, np.log10(am*1.1), 101) + + z_decoupling_0 = z_decoupling(a_list, 0.01, M_PBH) + M_halo_0 = M_halo(z_decoupling_0, M_PBH) + + #print(z_decoupling_0) + #print(M_halo_0) + + z_decoupling_1 = np.zeros(len(a_list)) + M_halo_1 = np.zeros(len(a_list)) + for i in range(len(a_list)): + z_decoupling_1[i] = z_decoupling(a_list[i], 0.01, (M_halo_0[i]+M_PBH)) + M_halo_1 = M_halo(z_decoupling_1, (M_PBH)) + + z_decoupling_2 = np.zeros(len(a_list)) + M_halo_2 = np.zeros(len(a_list)) + for i in range(len(a_list)): + z_decoupling_2[i] = z_decoupling(a_list[i], 0.01, (M_halo_1[i]+M_PBH)) + M_halo_2 = M_halo(z_decoupling_2, (M_PBH)) + + z_decoupling_3 = np.zeros(len(a_list)) + z_decoupling_check = np.zeros(len(a_list)) + M_halo_3 = np.zeros(len(a_list)) + for i in range(len(a_list)): + z_decoupling_3[i] = z_decoupling(a_list[i], 0.01, (M_halo_2[i]+M_PBH)) + M_halo_3 = M_halo(z_decoupling_3, (M_PBH)) + # + z_decoupling_check[i] = (1. + z_eq) / (1./3 * bigX(x_of_a(a_list[i], 0.01, (M_halo_3[i]+M_PBH)), 0.01, (M_halo_3[i]+M_PBH))/(0.85*0.01)) - 1. 
+ # + + #print M_halo_3 + + r_list = r_trunc(z_decoupling_3, M_PBH) + rtr_interp = interp1d(a_list, r_list) + return rtr_interp + + +def CalcTruncRadius(ai, M_PBH): + + + z_decoupling_0 = z_decoupling(ai, 0.01, M_PBH) + M_halo_0 = M_halo(z_decoupling_0, M_PBH) + + #print(z_decoupling_0) + #print(M_halo_0) + + + z_decoupling_1 = z_decoupling(ai, 0.01, (M_halo_0+M_PBH)) + M_halo_1 = M_halo(z_decoupling_1, (M_PBH)) + + + z_decoupling_2 = z_decoupling(ai, 0.01, (M_halo_1+M_PBH)) + M_halo_2 = M_halo(z_decoupling_2, (M_PBH)) + + z_decoupling_3 = z_decoupling(ai, 0.01, (M_halo_2+M_PBH)) + M_halo_3 = M_halo(z_decoupling_3, (M_PBH)) + + r_list = r_trunc(z_decoupling_3, M_PBH) + return r_list + + +#----------------------------------- +#print "Edit rho, Menc to fix this..." +def rho(r, r_tr, M_PBH, gamma=9.0/4.0): + x = r/r_tr + A = (3-gamma)*M_PBH/(4*np.pi*(r_tr**gamma)*(r_eq(M_PBH)**(3-gamma))) + if (x <= 1): + return A*x**(-gamma) + else: + return 0 + +def Menc(r, r_tr, M_PBH, gamma=9.0/4.0): + x = r/r_tr + if (x <= 1): + return M_PBH*(1+(r/r_eq(M_PBH))**(3-gamma)) + else: + return M_PBH*(1+(r_tr/r_eq(M_PBH))**(3-gamma)) + + +def calcBindingEnergy(r_tr, M_PBH): + integ = lambda r: Menc(r, r_tr, M_PBH)*rho(r, r_tr, M_PBH)*r + return -G_N*4*np.pi*quad(integ,1e-8, 1.0*r_tr, epsrel=1e-3)[0] + +def getBindingEnergy(r_tr, M_PBH): + global current_MPBH, Ubind_interp, rtr_interp + if ((M_PBH - current_MPBH)**2 >1e-3 or Ubind_interp == None): + current_MPBH = M_PBH + print(" Tabulating binding energy and truncation radius (M_PBH = " + str(M_PBH) +")...") + rtr_vals = np.logspace(np.log10(1e-8), np.log10(1.0*r_eq(M_PBH)),500) + Ubind_vals = np.asarray([calcBindingEnergy(r1, M_PBH) for r1 in rtr_vals]) + Ubind_interp = interp1d(rtr_vals, Ubind_vals) + + rtr_interp = GetRtrInterp(M_PBH) + + return Ubind_interp(r_tr) + + +def calc_af(ai, M_PBH): + global current_MPBH, rtr_interp, Ubind_interp + + if ((M_PBH - current_MPBH)**2 > 1e-3 or rtr_interp == None): + current_MPBH = M_PBH + print(" Tabulating binding energy and truncation radius (M_PBH = " + str(M_PBH) +")...") + rtr_vals = np.logspace(np.log10(1e-8), np.log10(1.0*r_eq(M_PBH)),500) + Ubind_vals = np.asarray([calcBindingEnergy(r1, M_PBH) for r1 in rtr_vals]) + Ubind_interp = interp1d(rtr_vals, Ubind_vals) + + rtr_interp = GetRtrInterp(M_PBH) + + #r_tr = CalcTruncRadius(ai, M_PBH) + r_tr = rtr_interp(ai) + + Mtot = Menc(r_tr, r_tr, M_PBH) + #print Mtot + U_orb_before = -G_N*(Mtot**2)/(2.0*ai) + + if (r_tr > r_eq(M_PBH)): + Ubind = getBindingEnergy(r_eq(M_PBH), M_PBH) + else: + #print r_tr, r_eq(M_PBH) + Ubind = getBindingEnergy(r_tr, M_PBH) + return -G_N*M_PBH**2*0.5/(U_orb_before + 2.0*Ubind) + +def calc_jf(ji, ai, M_PBH): + af = calc_af(ai, M_PBH) + return ji*np.sqrt(ai/af) + +def calc_Tf(Ti, ai, M_PBH): + af = calc_af(ai, M_PBH) + return Ti*np.sqrt(af/ai) + +def getJacobian(): + global Jac_interp + + if ((M_PBH - current_MPBH)**2 >1e-3 or Jac_interp == None): + amin = 1e-6 #pc + amax = RM.semimajoraxis_full(RM.z_eq, 1.0, M_PBH) + a_i = np.logspace(np.log10(amin), np.log10(amax), 200) + a_f = np.vectorize(calc_af)(a_i, M_PBH) + + a_f_interp = UnivariateSpline(a_i, a_f, k=4, s=0) + daf_dai = a_f_interp.derivative(n=1) + + Jac = np.sqrt(a_f/a_i)*(1/daf_dai(a_i) - a_i/a_f) + + Jac_interp = interp1d(a_f, Jac, bounds_error=False, fill_value = 0.0) + + return Jac_interp + + \ No newline at end of file diff --git a/src/tabulate_posteriors_ET.py b/src/tabulate_posteriors_ET.py new file mode 100644 index 0000000..b281d60 --- /dev/null +++ 
b/src/tabulate_posteriors_ET.py @@ -0,0 +1,173 @@ +""" tabulate_posteriors_ET.py + +Calculate and tabulate posteriors on f_PBH +given N_obs detections with the Einstein Telescope. + +Outputs a .txt file to the ../data/posteriors_f folder. + +""" + +import numpy as np +import matplotlib.pylab as plt + +from scipy.special import gamma, loggamma +from scipy.integrate import cumtrapz, quad +from scipy.interpolate import interp1d, UnivariateSpline +from scipy.stats import poisson +from scipy.misc import comb + +from tqdm import tqdm +import Cosmo +import MergerRate as MR + +#--------------------------- + +import argparse + +#Parse the arguments! +parser = argparse.ArgumentParser(description='Calculate and tabulate posteriors on f_PBH given N_obs detections with the Einstein Telescope. Outputs a .txt file to the ../data/posteriors_f folder.') +#parser.add_argument('-M_PBH','--M_PBH', help='PBH mass in solar masses', type=float, default=10.0) +parser.add_argument('-N_obs','--N_obs', help='Number of observed mergers', type=int, default=1) +parser.add_argument('-prior','--prior', help="Merger rate prior, either 'J' for Jeffrey's prior or 'LF' for log-flat prior", type=str, default="J") + +args = parser.parse_args() +#M_PBH = args.M_PBH +N_obs = args.N_obs +prior_type = args.prior + +#We fix M_PBH = 10.0 solar masses in this case +M_PBH = 10.0 + +if (prior_type not in ["J", "LF"]): + raise ValueError('Prior must be one of : "J", "LF"') + +#Prior scales as R^-alpha +if (prior_type == "J"): + alpha_prior = 0.5 +elif (prior_type == "LF"): + alpha_prior = 1.0 + +print("Calculating ET posteriors for M_PBH = " + str(M_PBH) + " M_sun; N = " + str(N_obs) + " observed events...") +print(" Prior: ", prior_type) + +#-------------------------- + +z_ET, frac_ET = np.loadtxt("../data/ET_redshift.txt", unpack=True) + +# Selection function f(z) for ET +# Depends on luminosity distance d_L +# and matches the reported f(z) at z > 40 +def selection_ET(z): + z_hor = z_ET[-1] + DL_hor = Cosmo.calcdL(z_hor) + DL = Cosmo.calcdL(z) + return np.clip((1/DL - 1/DL_hor)*4.5e4, 0, 1) + +def calcdVTdz(z, T=0.67): + dVdz = Cosmo.dVdz(z)*1e-9 #Mpc^3 -> Gpc^3 + return (T*dVdz/(1+z))*selection_ET(z) + +#print("Fraction detectable at z = 40:", selection_ET(40.0)) + +#-------------------- + +z_start = 40.0 +z_end = 100.0 + + +# Calculate the expected number of detected events +# by integrating over redshift +def CalcLambda(f, M_PBH): + z_list = np.logspace(np.log10(z_start), np.log10(z_end), 20) + + integ = np.array([calcdVTdz(z, T = 0.67)*MR.MergerRate(z, f, M_PBH, withHalo=True) for z in z_list]) + L1 = np.trapz(integ, z_list) + return L1 + + +#NB: the Bayesian treatment below has been cross-checked independently; +#see the note on the closed-form posterior just before Posterior().
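+# Note on the form of the posterior used below (added commentary only): with a Poisson
+# likelihood P(N|lam) = lam^N exp(-lam)/N! for lam = VT*R_eff, and a prior on the rate
+# proportional to R_eff^(-alpha_prior), the normalised posterior is
+#   P(R_eff|N) = VT * (VT*R_eff)^(N - alpha_prior) * exp(-VT*R_eff) / Gamma(N + 1 - alpha_prior).
+# Posterior() evaluates exactly this expression, in log form (via loggamma) for numerical stability.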
+#P(R_eff): posterior on the effective merger rate R_eff for N observed events +def Posterior(R_eff, N, VT=1.0): + lam = VT*R_eff + + prior = lam**-alpha_prior + + p_poiss = poisson.pmf(N, lam) + lgamma = loggamma(N + 1 - alpha_prior) + A = (N-alpha_prior)*np.log(lam) - lam - lgamma + + return VT*np.exp(A) + + +def calcCredible(R, P): + cum_dist = cumtrapz(P, R, initial=0) + inv_cum = interp1d(cum_dist, R) + min90 = inv_cum(0.05) + med = inv_cum(0.5) + max90 = inv_cum(0.95) + upper90 = inv_cum(0.90) + return min90, med, max90, upper90 + +def round_to(x, nearest=10): + return int(round(x / nearest) * nearest) + +def P_sensitivity(VT, VT_0): + sig = 0.30 + lVT = np.log(VT) + lVT_0 = np.log(VT_0) + P = (1/VT)*(2*np.pi*sig**2)**-0.5*np.exp(-(lVT-lVT_0)**2/(2*sig**2)) + return np.nan_to_num(P) + +def P_marginal(R_eff, N): + + integ = lambda x: Posterior(R_eff, N, x)*P_sensitivity(x, VT_0=1.0) + return quad(integ, 0, 5.0, points = np.logspace(-5,1.1,20))[0] + +#NB: not called below (and f_of_R is not defined in this script) +def get_f_interval(N): + + R_list = np.logspace(-1, 8, 1000) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + minR, medR, maxR, upper90 = calcCredible(R_list, P_marg_list) + + return f_of_R(minR), f_of_R(medR), f_of_R(maxR), f_of_R(upper90) + +#------------------------------- +N_f = 200 +f_list = np.logspace(-6, 0, N_f) + +def get_posterior(N, M_PBH): + + R_list = np.zeros(N_f) + for i in tqdm(range(N_f)): + R_list[i] = CalcLambda(f_list[i], M_PBH) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + + R_of_f = UnivariateSpline(f_list, R_list, k=4, s=0) + dR_df = R_of_f.derivative() + P_of_f = P_marg_list*dR_df(f_list) + + return np.nan_to_num(P_of_f) + + +P = get_posterior(N_obs, M_PBH) + +print("Posterior is normalised to:", np.trapz(P, f_list)) + +f_upper = calcCredible(f_list, P)[2] +print("95% Upper limit on f:", f_upper) + +mstr = str(round(M_PBH, 1)) +Nstr = str(N_obs) + +htxt = "Posterior distribution for f, given N = " + Nstr + " merger events in ET. PBH Mass: " + mstr + " M_sun. " +htxt += "Prior: " + prior_type +htxt += "\nColumns: f, P(f|N)" + +np.savetxt("../data/posteriors_f/Posterior_f_ET_Prior_" + prior_type + "_M=" + mstr+"_N=" + Nstr + ".txt", list(zip(f_list, P)), header=htxt) \ No newline at end of file
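Added usage note (not part of the patch): the scripts above and below read and write through paths relative to `src/`, so a single posterior can be regenerated with, e.g., `cd src && python tabulate_posteriors_ET.py -N_obs 1 -prior J`, which rewrites `data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt` (the ET case fixes M_PBH = 10 M_sun). A minimal sketch of how such a tabulated posterior can be read back and converted into a 95% credible upper limit on `f_PBH`, mirroring `calcCredible()` in the scripts:

```python
import numpy as np
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d

# Read one of the tabulated posteriors (two columns: f, P(f|N)); run from the repo root
f, P = np.loadtxt("data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt", unpack=True)

# Cumulative distribution in f, renormalised to absorb small integration errors
cdf = cumtrapz(P, f, initial=0)
cdf /= cdf[-1]

# 95% credible upper limit on f_PBH = 95th percentile of the posterior
f_upper_95 = interp1d(cdf, f)(0.95)
print("95% upper limit on f_PBH:", f_upper_95)
```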
diff --git a/src/tabulate_posteriors_O3.py new file mode 100644 index 0000000..8b9c5fc --- /dev/null +++ b/src/tabulate_posteriors_O3.py @@ -0,0 +1,151 @@ +""" tabulate_posteriors_O3.py + +Calculate and tabulate posteriors on f_PBH +given N_obs detections with LIGO/Virgo O3. + +Outputs a .txt file to the ../data/posteriors_f folder. + +""" + + +import numpy as np +import matplotlib.pylab as plt + +from scipy.special import gamma, loggamma +from scipy.integrate import cumtrapz, quad +from scipy.interpolate import interp1d, UnivariateSpline + +from tqdm import tqdm +import Cosmo +import MergerRate as MR + +from scipy.stats import poisson +#--------------------------- + +import argparse + +#Parse the arguments! +parser = argparse.ArgumentParser(description='Calculate and tabulate posteriors on f_PBH given N_obs detections with LIGO O3. Outputs a .txt file to the ../data/posteriors_f folder.') +parser.add_argument('-M_PBH','--M_PBH', help='PBH mass in solar masses (0.2 -> 1.0)', type=float, default=0.5) +parser.add_argument('-N_obs','--N_obs', help='Number of observed mergers', type=int, default=1) +parser.add_argument('-prior','--prior', help="Merger rate prior, either 'J' for Jeffrey's prior or 'LF' for log-flat prior", type=str, default="J") + + +args = parser.parse_args() +M_PBH = args.M_PBH +N_obs = args.N_obs +prior_type = args.prior + +if ((M_PBH < 0.2) or (M_PBH > 1.0)): + raise ValueError('M_PBH must be between 0.2 and 1.0 (Solar Masses)') + +if (prior_type not in ["J", "LF"]): + raise ValueError('Prior must be one of : "J", "LF"') + +#Prior scales as R^-alpha +if (prior_type == "J"): + alpha_prior = 0.5 +elif (prior_type == "LF"): + alpha_prior = 1.0 + +print(" Calculating LIGO posteriors for M_PBH = " + str(M_PBH) + " M_sun; N = " + str(N_obs) + " observed events...") +print(" Prior: ", prior_type) + +#-------------------------- + +#LIGO O3 sensitive time-volume +VT_O3 = 1.8e-4 #Gpc^3 yr + +#Load in horizon distances and increase by 50% for O3 +m_list, horizon_list = np.loadtxt("../data/SubSolar_Horizon.txt", unpack=True) +horizon_interp = interp1d(m_list, 1.5*horizon_list, bounds_error=True) + +#Calculate maximum redshift of observations +z_list = np.linspace(0, 1, 20) +dL_list = np.asarray([Cosmo.calcdL(z) for z in z_list]) +z_of_dL = interp1d(dL_list, z_list) + +#Horizon redshift +z_max = z_of_dL(horizon_interp(M_PBH)) +#-------------------- + + + +#P(R_eff): posterior on the effective merger rate R_eff for N observed events +def Posterior(R_eff, N, VT=VT_O3): + lam = VT*R_eff + #print(lam) + prior = lam**-alpha_prior + + p_poiss = poisson.pmf(N, lam) + lgamma = loggamma(N + 1 - alpha_prior) + A = (N-alpha_prior)*np.log(lam) - lam - lgamma + + return VT*np.exp(A) + + +def calcCredible(x, y): + cum_dist = cumtrapz(y, x, initial=0) + inv_cum = interp1d(cum_dist, x) + min90 = inv_cum(0.05) + med = inv_cum(0.5) + max90 = inv_cum(0.95) + upper90 = inv_cum(0.90) + return min90, med, max90, upper90 + +def round_to(x, nearest=10): + return int(round(x / nearest) * nearest) + +def P_sensitivity(VT, VT_0): + sig = 0.30 + lVT = np.log(VT) + lVT_0 = np.log(VT_0) + return (1/VT)*(2*np.pi*sig**2)**-0.5*np.exp(-(lVT-lVT_0)**2/(2*sig**2)) + +def P_marginal(R_eff, N): + integ = lambda x: Posterior(R_eff, N, x)*P_sensitivity(x, VT_0=VT_O3) + return quad(integ, 0, 5.0, points = np.logspace(-3,1.1,10))[0] + +#NB: not called below (and f_of_R is not defined in this script) +def get_f_interval(N): + + R_list = np.logspace(-1, 8, 1000) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + minR, medR, maxR, upper90 = calcCredible(R_list, P_marg_list) + + return f_of_R(minR), f_of_R(medR), f_of_R(maxR), f_of_R(upper90) + +#------------------------------- +N_f = 200 +f_list = np.logspace(-6, 0, N_f) + +def get_posterior(N, M_PBH): + + R_list = np.zeros(N_f) + for i in tqdm(range(N_f)): + R_list[i] = MR.AverageMergerRate( f_list[i], M_PBH, z1 = 0, z2 = z_max, withHalo=True) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + + R_of_f = UnivariateSpline(f_list, R_list, k=4, s=0) + dR_df = R_of_f.derivative() + P_of_f = P_marg_list*dR_df(f_list) + + return np.nan_to_num(P_of_f) + + +P = get_posterior(N_obs, M_PBH) + +print("Posterior is normalised to:", np.trapz(P, f_list)) + +f_upper = calcCredible(f_list, P)[2] +print("95% Upper limit on f:", f_upper) + +mstr = str(round(M_PBH, 1)) +Nstr = str(N_obs) + +htxt = "Posterior distribution for f, given N = " + Nstr + " merger events at LIGO O3. PBH Mass: " + mstr + " M_sun. 
" +htxt += "Prior: " + prior_type +htxt += "\nColumns: f, P(f|N)" + +np.savetxt("../data/posteriors_f/Posterior_f_O3_Prior_" + prior_type + "_M=" + mstr+"_N=" + Nstr + ".txt", list(zip(f_list, P)), header=htxt)