From ef3fb1ea0ccdcb96ebd719c22971ae6da2b4729b Mon Sep 17 00:00:00 2001 From: bradkav Date: Fri, 3 May 2019 11:49:14 +0200 Subject: [PATCH 1/5] Added comments to plot_PBHfraction.py --- figures/Constraints_fPBH.pdf | Bin 294728 -> 294730 bytes plot_PBHfraction.py | 57 +++++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/figures/Constraints_fPBH.pdf b/figures/Constraints_fPBH.pdf index 07b7ea59947f95408f97ccc2e242c7bf93dd7bf6..2b4f517dea5700433780167e0c352f84d7086a38 100644 GIT binary patch delta 3981 zcmZWo2|Sd07rvH{l#CLIEH_&lX5X^45ehNcOO}$Y$XG5j-WZ_>QMtD)m6U`~V~vSH zO|q2igPO6AZ7wlsm~VX7)z|d?e(!JIbN|+w4$x*eh65RVRgRbXGfG`|;C})UA3=m8x1BR=;gobgaz|WPyrdXp*CPDb-OBa$1GHv*T>Qh~j}DD9L|XO>`Vnii`IYDD_qvD# znBVm1?+YC@qc6LYjQW`6simSa;yz+qIy-0ascoX$ft>6EpCyaiPN&%>XdA?q*U6eQ zic)nqHzhN2UPM;jX`|Xv?G$_#6E5RxA$qy{<%zcgYiG^!eS@PpT{x-Cfbox9{Tarb_x=AZYHy^A}5A`ebDWOTFcec+!HyXd+W@DYyr{$+FI1?QW@uT48%QNmk*(<_4*Z+c$Cst3$30YFLmDp&M z389DF>3pYPrhkGJ&;cvZC$vp6wa7I@ZuK=G=Q7}!m%fn*T|KSstWe|fUB=E-!=!vs zQ?PMq5-5|%eL3Jl6Pq3Dd}wlhG}$}^Q4ucmMc&U>{(S)XfqKlJAy$m;Sj#*@h!gmh z4h7g*`qv^UcSyO;??Yd`sS8lbV$GZfsv0;J;fxojEeSjkyjTNwuhYti&L9|K^8<3BQ)G0YXm zf88V}_th~UspfE`OHKCG>$(7G)Z50uT>I~2X7QlOXl(Iz*KK{*no|3X5Ir|ZvmO%F zQy!b79C5s3{~@-X9q9Myr_mNpQ%#(PeZ})0A5f?#?5LTF4o42ePu?k+k_!)kEdWnE z_4fV&85Uw;Y{qppn>huZ8Oeh&nSaN|;3t(@u<7ZDew(<>96XOS6& z#Ts^DNZ#~$r6&%alI}Paz41sf^QsY|j&0~WN%3CfjLi-;4hlDylxH$4v;Uikemz&@ z6c=XzF{EAx_T`K6V!T&Q#!5s)0U7{X^Wpt6hglGgog`MB+fL8Z9P_HeX{uAx}q!0;?7(*(*H)dr%^{vWa(CFZq}O49qPC}1Q0ub z9XFju7ph@4ANpY4S)%c-;%Gt>J+Wbtfa`wc>w zqtBB5L!0Qaw4C0lIa!sqT{j|AURSxb-;ly%(|I4Lqe&EiJKNFF1e)GIsN7;$n_uT_ zKIFK5HDJTT=b22?s3*pVa+P=EdgKmxn|>UPU5t@O{&`fg;=ub5u*jWxW8XO&Yv zEk~|MuodasJ#Y~cIVS>;IyIExFKicsF;-M)6Dw+Ps&|iKy?KiSwfv=5%>a1My=^aD z#d0O7AE{t7ZfT6EK8C=HG3`D_RL4(Wm`wfA_%*S(d!7%#anl_ zbmm2ciy%;@GG*IT=rqpcJ?ADN?~5s#7xw@LXF^!IK0Z9<`;7DBwFUL!AutUuel()# z#kj*@>JMM3Hvhi(n=Hy4bWy{gyq&GB^JP%v6J7lT-J0M}ohewDS!>nu{8qN)sTF;r z<`W<5wHr)IdVKVz_)war>HkWry-x8P{i9;A{Ci*C34RFt+c=))m(*UuJFxi6_Q zSaz!3T$2{hqr=7|swUOC+6^+pRf_d<{TOsYN!*jDH@s)c80{V|fJ z6i2z6uEkRstGE37z3u@k*`eccsXP!qFX~t(=ae`Dq}f2@iLZ68&sr*z?OhKU8i77P zN~HnB2g?=1wrg2Kf4KxpyM)bm_1xlt)>ugt%AsjZo=WbB=N|yWPASe%?OFyGxG!T! zgHa}pxXs;q-*`rA(@k86jSl>q7ikd$r^!$zjJQHP#E(->_MV886b9}*h-yHbz`QgdkmJRzJ+|3%k<`Sg35~m)*uLRahZ65EsvnE(|<$5&! 
zN8A2-dz&Y2ciP4^V}@2sBQ7y;ZCn5KA;~RdueZb+XV;**1AHJT{wV3|vc|%#K-M6N zDeqfxM1^^I#drBFmm2_tk_{?8Bqe9Q3gwJ-ECPo5loy1mNj*bsL9Ujhe9_Sp_h5Ul z;U(b!dCTuz=Kf>0#o1L~g~_60$B~#fxcU6?MwP5-AT79UFsGkO>I{;wx>pO`@V753 z;E?{)_no~fpQRErmPUyN-L|Rw(u{fET~r921p^c>AGw&a+N;BpFJ^1cv-Q}GZE31x zIHPV<_WigQso&l02sFIB05Kqc);P>FO>sTXP5-u@mg1c@)o&x37W|J&Zxve0z64m>gT1}YmIZDM` z?ew9q&)rGd6+k-IY3_7L#@w#E%q97OJIyhdX53IKTf=yPRe8|5(n9=KnE#5DsY{CndP^X{FbD*W zhJ(}Yhl%E$L#TpRAJvPh8&`j!7$_W#{3VA3|B{2jkbJV$=SM9JhJpQ(gG1nH&%;G6 z0H^>S4CWs}f5wAhXao!_kORY!=wEXfFiHRiLnDy_{9qImjreCqU|lE}hJk_w#lSFd zsKAi?Vhmb<1Oi5)(QpA`6bu0s90UbJ!GGcaz+ea%1%?QaKwzj}CgI}{YpT~p^K&Sa z00{(!<{S327zh%LK?u0O$01OFz*X~;D2!l3BnB)v9p5S_6eCE2K|+4bpZb4yu%|Gu90OltV5W$8> zC{)lc{+|Gb_>}~O5!eMN5(XB$cYZAjE^sAK3=EDzq%B5>nr^^s)X+F;X}s~j9suZq delta 3819 zcmZWn2{e@Z8~)1}`%)qbBUvIfo0&0;rF)Z$tfeGn%O!i3?7o;RA$3Vp$dWZ%mTaXY z%UEtv_BD(|gt27COf&z{c2CoH&hLEZ_q^}>JkR@lUngT{J7Z@#8U(m0f{z_qu`xEW zB7{a2NazleQ{fL@Po(me_T5sz-TvEo$Fy9BAZXy_h(oHYwsKQd3BS*(vMg?j+p;Cy zNNCjm8Z_Qm<23#Xv&KB&y{erCSnRZ9oMUBd-9hUk_nqhy1e`{fv3yh&h+ldc@um8os(yocVvh0M`^&srlhhYy zjG7U-f%EQJWeV#n{VOSLBU(99=e-4lRrrtFzPEuj>~f3qnI0MzRlHj?1f(mMUD=f1 z)4{=Q;jtjWfY4TNU=qgK-8PCGoPDC0w)oXf97DspL%K6{rMlq=tU6o)?;>i4TD*Tc3S|3H- zv=1>MK$H2!y|2kU`Y(8c7&BML!OqY1fk=_^)b^fRGLveLcgBoSNzM1F0`9CqditSO zR(-A&BQoH*;M~YPR_a18!S@1%#M~OM;6Bsl=&%~O1P4uzpvrxPikhp!8LHW}_uZTJ z$9F<5Wi1bPJxwdo#121qqqr2Of70#u)5qV1HrL+w?rsT*7`LbycUbmpW2`6W17gs8 zoSl}l3avF)vT>tC)MvD(_e;;MxM$60W0I@#HTfzc1gu`ALQyR;FDa$Y&rXk`YGw?( zXWosMkkl3u>=QciN?JX3mj)ZUqxekkP3ex#hN;+tn9;5bb23kOqyn=r>MoEKmuaXp z>ug2#(o30qXZA|R71m^0J_DTqj_qDU^D`n;7;j`LlBekpy)@xQG=WoVUV=%PcCEH2 zYzYSk1F}*9w_9}3Ax3nZ3PDPKo4oYss;DYe|3*8V(~_QZbOotOeK@i^Wd3Rj_{hJl z7dq<(DpJZUh+tEV8u2{5cb;Qk(;vjE)roK9{9$GO?t0aI6-~*BrGVbNtj)rZXLW9( zV1}RjVMFdIYi^%_Jtcg6kxJ6%;@=*vdpmWXS6#x=PNc*?K|JNINSU9kn;x+Aj2mO0OG&{Kan#3zKjMdX$a?c3x@h_;${|)4mgF^H(3*1%&7`f*7}VtzDJ3~ zt3O;tIP!OR41}Y8n)vb&QF7BTD)Fa?3wUC3JsdS@@J+*SGC)u9kxjs#)zPI#Rb;K! z_S}9^0n86ctmN9enXB*&YFIaV3u&OixcMRL+d?n%5*hh_@+cG!$y&{4iCadoTpknk zZC63_{BTJO9D(`?SS@4?a$XZ^OA*Ta=StoK2mDV*97M-b^H%WJ6);BE+@uF>{fzgT zjG1>7AL?Lk6(Qwnfq&t3>F2HDj&@{NG+*Bv@;;CfjHRvXbT6qsx2rAf>B$^|I=>q~ zyQDHV%~dY41J`{9!k^S%m694! 
z8|+%1AGi!Wq&3q;r!jTV_CoqD!?bC4n^2&+nba8CGss20Gsx9AT7427OGp=uzI>@S z^qIW{TG^#pz)UhaFUI~h#X~ST*oVRpOfJI+BwoyXY!5CJXnxxaGo;r>scXs!aUo?Y00) zz%>91>CYd_3^bKN3c~K1c}U+;S|}8%tuYbG|H2nSo{y2xG)+e@#KxQ-%#}#rtjK2T!GwF!3hOj7hz)~-t?4`b@@aQ2PK8;@CpHhcsIZ2JZnTDq;T-r~<) z7hd|?@Ga~1%J^-|;Z~w%Cr1e3^D>8u=N*|b!%*)6(mtD)jogLx2B~#D zMuvaZmc7g_#VGkrD+SFGqzjMFKiYCFQ1UA>|0{!H$-Nc)znHAvMAO1q=b-5`(y0FEH>c3D;5deiXW$5%=&z!kudnHSy{ho*t6Eb&->mMAsSf~YZA}*OJ{GB;g zXJc9A@y}}u|LlOt&rw;1>w`=>kR__qfoxoqeZD&Ps7jd$`K}5EfUq%RhK|{*?$wq@ z8h{9Je3vGbsjC#8(0S5buW>Cg=T+E=fw^{Sg#ShTDHtvi?c> zcJX^I-EFNMOl}QwGH$I)2;aCKGs`l3YOQM9KECmRSw`HMKTtYdqKzMQ={pUUzD)vO zBkj&Vp{JVt&9E+AjCcM&+9%g5@=aP*vwMvNI=+rhv^>mG_>lvob?H>!uJ4+U8VxF4 z8WzOk!3#8_Mdc3zjUH|AA)o8oZSU#BTM<+$iLv4u=92^2uN6dIc~iU$1cw98)GNb5 z;eOWUrV=o__*AsBh2Q6kn+(?jf0Pj>drBSj0~WYkd3)};`y@V`JKK9h2RF`C{FK#b z@RUj{S=g0@#yg433=fd51zX%Jt^U#^Ev30(N((J7RXlU#24k~Pn^JP_8PGkTD&U3Z z?;3WV!oQjhn!b?@J?dJ&@u&|-tf3I|&UQscHrzgyV_Yf&8A9>b_bu-xJUy7)Q`;Jw z@RtO62DBAY?G2)L(ewgZSD1Ye&(BbmG&i^`5Np)vGA3Og?_GA0vu5P(5q$>n{6;zN z>|`;2MXbJFK_QJQV(|%T#^sb8d?2mG+Cyt1g~8}7Eu)pw!OVcjX2Abhz_T_g<6dMl z1t+H}E}CTY(oI`qP@6=k@_Jo{3}s3TY%C``J|xUim-B)~RAmsC(v zbgpWS!8$uwhqG2u1mx?=tA;?sQAi9DOL!0rDiTA= z!nPk-Pg%b0Hv)@LQ&%I{hk!)2)YwZ%BntaGhK2uzslgG1iV%>=4mg&;5z3;1{n`RU zs-qC%`^fAM<6ilv{*F!cT640wgGDviDHW#frO*sY$34vu(0ph3jO?BUImV6bd~BhW|~0!Dbl cq7Y*X7lB2pVUYxCILLqp!>6F2XL_9P|5!rFAOHXW diff --git a/plot_PBHfraction.py b/plot_PBHfraction.py index 848a6c5..3f2179e 100644 --- a/plot_PBHfraction.py +++ b/plot_PBHfraction.py @@ -1,3 +1,12 @@ +""" plot_PBHfraction.py + +To run this script and reproduce Fig. 1, run: + +python plot_PBHfraction.py + + +""" + import numpy as np import matplotlib.pylab as plt @@ -6,6 +15,27 @@ from scipy.interpolate import interp1d, UnivariateSpline +#Parameters for the three detection scenarios +mstrings = { + "O3": "0.5", + "ET": "10.0", + "SKA": "100.0" +} + +Nlists = { + "O3": np.array([1, 2, 3, 4, 5, 10, 30, 80]), + "ET": np.array([1, 2, 3, 4, 5, 10, 1000, 10000, 20000, 24000]), + "SKA": np.array([1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) +} + +lims = { + "O3": 0.1128, #Figure 3 of https://arxiv.org/pdf/1808.04771.pdf + "ET": 4.13e-03, #Figure 11 of https://arxiv.org/pdf/1805.09034.pdf + "SKA": 3.2e-4 #2 sigma radio from https://arxiv.org/pdf/1812.07967.pdf +} + + +#Calculate credible intervals def calcCredible(x, y): cum_dist = cumtrapz(y, x, initial=0) inv_cum = interp1d(cum_dist/cum_dist[-1], x) @@ -18,7 +48,8 @@ def calcCredible(x, y): def round_to(x, nearest=10): return int(round(x / nearest) * nearest) - +#Get a polygon which shows the credible intervals +#cut off at YMAX def getPolygon(X, Y1, Y2, YMAX): if (Y1[-1] < YMAX): Y1 = np.append(Y1, YMAX*1.001) @@ -39,6 +70,7 @@ def getPolygon(X, Y1, Y2, YMAX): Y_B_LIST = np.append(Y2[X < X_B], YMAX) return np.append(X_A_LIST, X_B_LIST[::-1]), np.append(Y_A_LIST, Y_B_LIST[::-1]) +#Calculate cut-off in credible intervals def getCutOff(X, Y, YMAX): interp_inv = interp1d(np.log10(Y), np.log10(X)) X_new = 10**interp_inv(np.log10(YMAX)) @@ -48,6 +80,7 @@ def getCutOff(X, Y, YMAX): Y_out = np.append(Y_out, YMAX) return X_out, Y_out +#Get credible intervals as a function of number of observations def get_f_intervals(f_list, P_list): N = len(P_list) f_med = np.zeros(N) @@ -59,24 +92,8 @@ def get_f_intervals(f_list, P_list): return f_min, f_med, f_max -mstrings = { - "O3": "0.5", - "ET": "10.0", - "SKA": "100.0" -} - -Nlists = { - "O3": np.array([1, 2, 3, 4, 5, 10, 30, 81]), - "ET": np.array([1, 2, 3, 4, 5, 10, 1000, 10000, 20000, 24000]), - "SKA": 
np.array([1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) -} - -lims = { - "O3": 0.1128, #Figure 3 of https://arxiv.org/pdf/1808.04771.pdf - "ET": 4.13e-03, #Figure 11 of https://arxiv.org/pdf/1805.09034.pdf - "SKA": 3.2e-4 #2 sigma radio from https://arxiv.org/pdf/1812.07967.pdf -} +#Add a posterior to the plot def addToPlot(exp, prior, color, cut=True): Nlist = Nlists[exp] Mstr = mstrings[exp] @@ -108,12 +125,12 @@ def addToPlot(exp, prior, color, cut=True): return N_new[-1] +#--------------------------------- + plt.figure(figsize=(3.5,3)) ax = plt.gca() - - N1 = addToPlot("O3", "J", color='C0') plt.plot([N1*0.4, N1*2.1],[1.02*lims["O3"],1.02*lims["O3"]],linestyle='-', color='dimgray', lw=0.8, zorder=2.0) plt.fill_between([N1*0.4, N1*2.1], y1 = 1.02*lims["O3"], y2 = 1.5*lims["O3"], facecolor='lightgrey', edgecolor='None', hatch='//////', lw=0.8, zorder=2.0) From d891d267e4a30cdbd6712db8a2e06ac6ead87525 Mon Sep 17 00:00:00 2001 From: bradkav Date: Fri, 3 May 2019 15:15:19 +0200 Subject: [PATCH 2/5] Added Merger rate codes/scripts --- data/ET_redshift.txt | 5 + data/SubSolar_Horizon.txt | 11 + .../Posterior_f_ET_Prior_J_M=10.0_N=1.txt | 2 +- .../Posterior_f_O3_Prior_J_M=0.5_N=1.txt | 2 +- figures/Constraints_fPBH.pdf | Bin 294730 -> 294730 bytes requirements.txt | 1 + src/Cosmo.py | 75 +++++ src/MergerRate.py | 293 ++++++++++++++++++ src/Remapping.py | 250 +++++++++++++++ src/tabulate_posteriors_ET.py | 173 +++++++++++ src/tabulate_posteriors_O3.py | 151 +++++++++ 11 files changed, 961 insertions(+), 2 deletions(-) create mode 100644 data/ET_redshift.txt create mode 100644 data/SubSolar_Horizon.txt create mode 100644 src/Cosmo.py create mode 100644 src/MergerRate.py create mode 100644 src/Remapping.py create mode 100644 src/tabulate_posteriors_ET.py create mode 100644 src/tabulate_posteriors_O3.py diff --git a/data/ET_redshift.txt b/data/ET_redshift.txt new file mode 100644 index 0000000..606cabe --- /dev/null +++ b/data/ET_redshift.txt @@ -0,0 +1,5 @@ +# Total source frame mass 20 M_sun +# Columns: Redshift, Fraction detectable +7.678380919212519 0.5 +28.52205602981411 0.1 +95.87373195587614 0 \ No newline at end of file diff --git a/data/SubSolar_Horizon.txt b/data/SubSolar_Horizon.txt new file mode 100644 index 0000000..cda87e1 --- /dev/null +++ b/data/SubSolar_Horizon.txt @@ -0,0 +1,11 @@ +# Figure 1 (dashed green line) of https://arxiv.org/pdf/1808.04771.pdf +# Columns: Component Mass [M_sun], Horizon [Mpc] +0.1981205951448708 27.286681430014227 +0.3008613938919342 38.37156747012553 +0.4004698512137823 48.75339548973085 +0.5000783085356305 58.784038259116684 +0.6003132341425216 68.46363328229641 +0.7005481597494128 77.70424674270178 +0.800156617071261 86.8569263865389 +0.9003915426781521 95.83415090927964 +1.0025058731401724 104.8117879440606 diff --git a/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt b/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt index 66d606a..c958aa0 100644 --- a/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt +++ b/data/posteriors_f/Posterior_f_ET_Prior_J_M=10.0_N=1.txt @@ -1,4 +1,4 @@ -# Posterior distribution for f, given N = 1 merger events in ET. PBH Mass: 10.0 M_sunPrior: jeffreys +# Posterior distribution for f, given N = 1 merger events in ET. 
PBH Mass: 10.0 M_sunPrior: J # Columns: f, P(f|N) 9.999999999999999547e-07 7.246589174428889635e+01 1.071891319205128528e-06 8.324571817517015404e+01 diff --git a/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt b/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt index 83b177e..4c29ba4 100644 --- a/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt +++ b/data/posteriors_f/Posterior_f_O3_Prior_J_M=0.5_N=1.txt @@ -1,4 +1,4 @@ -# Posterior distribution for f, given N = 1 merger events at LIGO O3. PBH Mass: 0.5 M_sunPrior: jeffreys +# Posterior distribution for f, given N = 1 merger events at LIGO O3. PBH Mass: 0.5 M_sunPrior: J # Columns: f, P(f|N) 9.999999999999999547e-07 8.416139272344972731e-06 1.071891319205128528e-06 9.669730117647979165e-06 diff --git a/figures/Constraints_fPBH.pdf b/figures/Constraints_fPBH.pdf index 2b4f517dea5700433780167e0c352f84d7086a38..8f41e46e178e81f4f1ac91283ea831526737e121 100644 GIT binary patch delta 29 lcmX^0Pw><~!G;#b7N!>FEi7}ZSWOL$jSaUis$yBe0sy>$3mE_a delta 29 lcmX^0Pw><~!G;#b7N!>FEi7}ZSPe}qOboU!s$yBe0sy?B3mpIe diff --git a/requirements.txt b/requirements.txt index c447cb6..715dff3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ matplotlib>=3.0.3 scipy>=1.2.1 numpy>=1.16.2 +tqdm>=4.19.9 diff --git a/src/Cosmo.py b/src/Cosmo.py new file mode 100644 index 0000000..4cc6b17 --- /dev/null +++ b/src/Cosmo.py @@ -0,0 +1,75 @@ +""" Cosmo.py + +Functions/constants for calculating cosmological quantities +such as the Hubble rate and various distances. + +""" + +import numpy as np + +from scipy.integrate import cumtrapz, quad, dblquad +from scipy.interpolate import interp1d + +#--- Some constants ------------ +#------------------------------- + +G_N = 4.302e-3 #(pc/solar mass) (km/s)^2 +G_N_Mpc = 1e-6*4.302e-3 #(Mpc/solar mass) (km/s)^2 + +h = 0.678 +Omega_DM = 0.1186/(h**2) +H0 = 100.0*h #(km/s) Mpc^-1 +H0_peryr = 67.8*(3.24e-20)*(60*60*24*365) +ageUniverse = 13.799e9 #y +Omega_L = 0.692 +Omega_m = 0.308 +Omega_r = 9.3e-5 + +D_H = (3000/h) #Mpc + +c = 3e5 #km/s + + +#------------------------------- + +#--- Useful cosmological functions --- +#------------------------------------- + + +rho_critical = 3.0*H0**2/(8.0*np.pi*G_N_Mpc) #Solar masses per Mpc^3 + +def Hubble(z): + return H0_peryr*np.sqrt(Omega_L + Omega_m*(1+z)**3 + Omega_r*(1+z)**4) + +def Hubble2(z): + return H0*np.sqrt(Omega_L + Omega_m*(1+z)**3 + Omega_r*(1+z)**4) + +def HubbleLaw(age): + return H0_peryr*age + +def rho_z(z): + return 3.0*Hubble2(z)**2/(8*np.pi*G_N) + +def t_univ(z): + integ = lambda x: 1.0/((1+x)*Hubble(x)) + return quad(integ, z, np.inf)[0] + +def Omega_PBH(f): + return f*Omega_DM + +#Luminosity distance (Mpc) +def calcdL(z): + c = 3.06594845e-7 + return c*(1+z)*quad(lambda x: Hubble(x)**-1, 0, z)[0] + +#https://arxiv.org/pdf/astro-ph/9905116.pdf +#Comoving distance (in a flat universe, in Mpc) +def D_C(z): + return c*quad(lambda z: 1/Hubble2(z), 0, z)[0] + +#Comoving volume out to redshift z (in Mpc^3) +def V_C(z): + return (4*np.pi/3)*(D_C(z))**3 + +def dVdz(z): + return 4*np.pi*D_C(z)**2*(c/Hubble2(z)) \ No newline at end of file diff --git a/src/MergerRate.py b/src/MergerRate.py new file mode 100644 index 0000000..90a3c7b --- /dev/null +++ b/src/MergerRate.py @@ -0,0 +1,293 @@ +"""MergerRate.py + +Code for calculating the PBH merger rate as a function +of PBH mass and fraction (along with Remapping.py). +Note: some functions are replicated between the two files. + +Adapted from https://github.com/bradkav/BlackHolesDarkDress. 
+ +See https://arxiv.org/abs/1805.09034 for more details. + +Throughout withHalo=True (withHalo=False) performs the calculation +with (without) including Dark Matter halos formed around the PBHs. + +""" + +import numpy as np + +from scipy.integrate import cumtrapz, quad, dblquad +from scipy.interpolate import interp1d, UnivariateSpline + +import Cosmo + +import Remapping as Remap + +from matplotlib import pyplot as plt + +#--- Some constants ------------ +#------------------------------- + +G_N = 4.302e-3 #(pc/solar mass) (km/s)^2 +G_N_Mpc = 1e-6*4.302e-3 #(Mpc/solar mass) (km/s)^2 + + +z_eq = 3375.0 +rho_eq = 1512.0 #Solar masses per pc^3 +sigma_eq = 0.005 #Variance of DM density perturbations at equality +lambda_max = 3.0 #Maximum value of lambda = 3.0*z_dec/z_eq (i.e. binaries decouple all the way up to z_dec = z_eq) + +alpha = 0.1 + + +#------------------------ + + +#Mean interPBH separation +def xbar(f, M_PBH): + return (3.0*M_PBH/(4*np.pi*rho_eq*(0.85*f)))**(1.0/3.0) + + +#Semi-major axis as a function of decoupling redshift +def semimajoraxis(z_pair, f, M_PBH): + Mtot = M_PBH + X = 3.0*z_eq*0.85*f/z_pair + return alpha*xbar(f, M_PBH)*(f*0.85)**(1.0/3.0)*((X/(0.85*f))**(4.0/3.0)) + + +def bigX(x, f, M_PBH): + return (x/(xbar(f,M_PBH)))**3.0 + +#Calculate x (comoving PBH separation) as a function of a +def x_of_a(a, f, M_PBH, withHalo = False): + + xb = xbar(f, M_PBH) + + if (not withHalo): + return ((a * (0.85*f) * xb**3)/alpha)**(1.0/4.0) + + elif (withHalo): + xb_rescaled = xb * ((M_PBH + Remap.M_halo(z_decoupling(a, f, M_PBH), M_PBH))/M_PBH )**(1./3.) + return ((a * (0.85*f) * xb_rescaled**3)/alpha)**(1.0/4.0) + +#Calculate a (semi-major axis) in terms of x +def a_of_x(x, f, M_PBH): + + xb = xbar(f, M_PBH) + return (alpha/(0.85*f))*x**4/xb**3 + +#Maximum semi-major axis +def a_max(f, M_PBH, withHalo = False): + Mtot = 1.0*M_PBH + if (withHalo): + Mtot += M_halo(z_eq, M_PBH) + return alpha*xbar(f, Mtot)*(f*0.85)**(1.0/3.0)*((lambda_max)**(4.0/3.0)) + +def z_decoupling(a, f, mass): + return (1. + z_eq)/(1./3 * bigX(x_of_a(a, f, mass), f, mass)/(0.85*f)) - 1. + + +def n_PBH(f, M_PBH): + return (1e3)**3*Cosmo.rho_critical*Cosmo.Omega_PBH(f)/M_PBH #PBH per Gpc^3 + + + +#------ Probability distributions ---- +#------------------------------------- + +def j_X(x, f, M_PBH): + return bigX(x, f, M_PBH)*0.5*(1+sigma_eq**2/(0.85*f)**2)**0.5 + +def P_j(j, x, f, M_PBH): + y = j/j_X(x, f, M_PBH) + return (y**2/(1+y**2)**(3.0/2.0))/j + +def P_a_j(a, j, f, M_PBH): + xval = x_of_a(a, f, M_PBH) + X = bigX(xval, f, M_PBH) + xb = xbar(f, M_PBH) + measure = (3.0/4.0)*(a**-0.25)*(0.85*f/(alpha*xb))**0.75 + return P_j(j, xval, f, M_PBH)*np.exp(-X)*measure + +def P_a_j_withHalo(a, j, f, M_PBH): + + xval = x_of_a(a, f, M_PBH, withHalo = True) + X = bigX(xval, f, M_PBH) + xb = xbar(f, M_PBH) + + measure = (3.0/4.0)*(a**-0.25)*(0.85*f/(alpha*xb))**0.75 + measure *= ((M_PBH + Remap.M_halo(z_decoupling(a, f, M_PBH), M_PBH))/M_PBH )**(3./4.) 
+ + return P_j(j, xval, f, M_PBH)*np.exp(-X)*measure + +def P_la_lj(la,lj, f, M_PBH): + j = 10.**lj + a = 10.**la + return P_a_j(a, j, f, M_PBH)*a*j*(np.log(10)**2) #/Norm1 + +def P_la_lj_withHalo(la,lj, f, M_PBH): + j = 10.**lj + a = 10.**la + return P_a_j_withHalo(a, j, f, M_PBH)*a*j*(np.log(10)**2) #/Norm1 + +def t_coal(a, e, M_PBH): + Q = (3.0/170.0)*(G_N*M_PBH)**(-3) # s^6 pc^-3 km^-6 + tc = Q*a**4*(1-e**2)**(7.0/2.0) #s^6 pc km^-6 + tc *= 3.086e+13 #s^6 km^-5 + tc *= (3e5)**5 #s + return tc/(60*60*24*365) #in years + +def j_coal(a, t, M_PBH): + Q = (3.0/170.0)*(G_N*M_PBH)**-3 # s^6 pc^-3 km^-6 + tc = t*(60*60*24*365) + tc /= (3e5)**5 + tc /= 3.086e+13 + return (tc/(Q*a**4))**(1.0/7.0) + +def P_binary(f, M_PBH): + amin = 0 + amax = a_max(f, M_PBH) + + P1 = lambda y,x,f,M_PBH: P_a_j(x, y, f, M_PBH) + Norm1 = dblquad(P1, amin, amax, lambda x: 0, lambda x: 1, args=(f, M_PBH), epsrel=1e-20)[0] + return Norm1 + + +#---- Merger Time Distribution --- +#--------------------------------- + +def P_t_integ(a, t, f, M_PBH, withHalo=False): + + c = 3.e5 #km/s + Q = (c**6)*(3.0/170.0)*(G_N*M_PBH)**-3 # pc^-3 + t_pc = t*(60*60*24*365)*c*3.24078e-14 #Time in years -> Time in parsec + ecc_sq = 1-(t_pc*1.0/(Q*a**4))**(2.0/7.0) + if (ecc_sq < 0): + return 0 + ecc = np.sqrt(ecc_sq) + j_ecc = np.sqrt(1. - ecc**2.) + + P1 = 1. + if (withHalo == False): + P1 = P_a_j(a, j_ecc, f, M_PBH) + else: + P1 = P_a_j_withHalo(a, j_ecc, f, M_PBH) + + djdt = j_ecc/(7*t) + return P1*djdt + +#Time in years +def P_of_t_analytical(t, f, M_PBH, withHalo=False): + + amin = 1e-6 + amax = a_max(f, M_PBH) + + + avals = np.logspace(np.log10(amin), np.log10(amax), 200) #pc + test = np.asarray([P_t_integ(a, t, f, M_PBH, withHalo) for a in avals]) + + integr = np.trapz(test, avals) + + + return integr + + +def P_of_t_withHalo(t, f, M_PBH): + + + do_plots = False + #do_plots = True + + N_a = 500 + + amin = 1e-6 + amax = Remap.semimajoraxis_full(z_eq, f, M_PBH) + + a_i = np.logspace(np.log10(amin), np.log10(amax), N_a) + a_f = np.vectorize(Remap.calc_af)(a_i, M_PBH) + + + ind_cut = np.argmax(a_f) + a_cut = a_i[ind_cut] + + a_f_interp = UnivariateSpline(a_i, a_f, k=3, s=0) + daf_dai = a_f_interp.derivative(n=1) + + Jac = np.sqrt(a_f/a_i)*(1/daf_dai(a_i) - 0.0*a_i/a_f) + + P = a_f*0.0 + + c = 3.e5 #km/s + Q = (c**6)*(3.0/170.0)*(G_N*M_PBH)**-3 # pc^-3 + t_pc = t*(60*60*24*365)*c*3.24078e-14 #Time in years -> Time in parsec + ecc_sq = 1-(t_pc*1.0/(Q*a_f**4))**(2.0/7.0) + + for i in range(N_a): + if (ecc_sq[i] < 0): + P[i] = 0 + else: + ecc = np.sqrt(ecc_sq[i]) + j_ecc = np.sqrt(1. - ecc**2.) 
+ j_i = np.sqrt(a_f[i]/a_i[i])*j_ecc + + P[i] = P_a_j_withHalo(a_i[i], j_i, f, M_PBH) + + djdt = j_ecc/(7*t) + P[i] *= djdt + + + if (do_plots): + plt.figure() + + plt.loglog(a_i[:ind_cut],a_f[:ind_cut]) + plt.loglog(a_i[ind_cut:],a_f[ind_cut:]) + + plt.xlabel(r"Initial semi-major axis, $a_i\,\,[\mathrm{pc}]$") + plt.ylabel(r"Final semi-major axis, $a_f \,\,[\mathrm{pc}]$") + + plt.loglog([1e-4, 4e-1], [1e-4, 4e-1], linestyle='--', color='k') + + plt.xlim(1e-6, 4e-1) + plt.ylim(1e-6, 4e-1) + + plt.gca().set_aspect('equal') + ax2 = plt.gca().twinx() + ax2.loglog(a_i, P) + + plt.figure() + plt.loglog(a_i, np.abs(np.sqrt(a_f/a_i)*(1/daf_dai(a_i)))) + plt.show() + + P *= Jac + + if (do_plots): + plt.figure() + plt.plot(a_f[:ind_cut],P[:ind_cut]) + plt.plot(a_f[ind_cut:][::-1],-P[ind_cut:][::-1]) + plt.show() + + return np.trapz(P[:ind_cut], a_f[:ind_cut]) - np.trapz(P[ind_cut:][::-1],a_f[ind_cut:][::-1]) + + +#---- Merger Rates --- +#--------------------------------- + +def MergerRate_test(z, f, M_PBH): + return 0.5*n_PBH(f, M_PBH)*P_of_t_analytical(Cosmo.t_univ(z), f, M_PBH, withHalo=True) + + +def MergerRate(z, f, M_PBH, withHalo=False): + P_of_t_fun = P_of_t_analytical + if (withHalo): + P_of_t_fun = P_of_t_withHalo + + return 0.5*n_PBH(f, M_PBH)*P_of_t_fun(Cosmo.t_univ(z), f, M_PBH) + + +def AverageMergerRate( f, M_PBH, z1, z2, withHalo=False): + z_list = np.linspace(z1, z2, 20) + Merge_list = np.array([MergerRate(z, f, M_PBH, withHalo) for z in z_list]) + return np.trapz(Merge_list, z_list)/(z2-z1) + + + + diff --git a/src/Remapping.py b/src/Remapping.py new file mode 100644 index 0000000..ca8732f --- /dev/null +++ b/src/Remapping.py @@ -0,0 +1,250 @@ +"""Remapping.py + +Code for calculating the PBH merger rate as a function +of PBH mass and fraction (along with MergerRate.py). +Note: some functions are replicated between the two files. + +Adapted from https://github.com/bradkav/BlackHolesDarkDress. + +See https://arxiv.org/abs/1805.09034 for more details. + +Throughout withHalo=True (withHalo=False) performs the calculation +with (without) including Dark Matter halos formed around the PBHs. + +""" + + +import numpy as np + +from scipy.interpolate import interp1d +from scipy.integrate import quad + +#------------------ + +alpha = 0.1 +z_eq = 3375.0 +rho_eq = 1512.0 #Solar masses per pc^3 +lambda_max = 3.0 #Decouple at a maximum redshift of z_dec = z_eq (lambda = 3.0*z_dec) + +G_N = 4.302e-3 #Units of (pc/solar mass) (km/s)^2 + +rtr_interp = None +Ubind_interp = None +Jac_interp = None +current_MPBH = -10.0 + +#-------------------- + + +# Eq. 11 and 12, https://arxiv.org/abs/0706.0864 +#M_PBH in solar masses +def r_trunc(z, M_PBH): + return 58.0 / (1 + z) * M_halo(z, M_PBH)**(1/3) + #r0 = 6.3e-3 #1300 AU in pc + #return r0*(M_PBH)**(1.0/3.0)*(1.+z_eq)/(1.+z) + +#Truncation radiation at equality +def r_eq(M_PBH): + return r_trunc(z_eq, M_PBH) + +#Halo mass +def M_halo(z, M_PBH): + return (1+z_eq) / (1. 
+ z) * M_PBH + #return M_PBH*(r_trunc(z, M_PBH)/r_eq(M_PBH))**1.5 + + +#--------------------- + +def xbar(f, M_PBH): + return (3.0*M_PBH/(4*np.pi*rho_eq*(0.85*f)))**(1.0/3.0) + +def semimajoraxis(z_pair, f, M_PBH): + #Mtot = M_PBH + M_halo(z_pair, M_PBH) + Mtot = M_PBH + X = 3.0*z_eq*0.85*f/z_pair + return alpha*xbar(f, M_PBH)*(f*0.85)**(1.0/3.0)*((X/(0.85*f))**(4.0/3.0)) + +#Semi-major axis taking into account DM halo mass +def semimajoraxis_full(z_pair, f, M_PBH): + Mtot = M_PBH + M_halo(z_pair, M_PBH) + #Mtot = M_PBH + X = 3.0*z_eq*0.85*f/z_pair + return alpha*xbar(f, Mtot)*(f*0.85)**(1.0/3.0)*((X/(0.85*f))**(4.0/3.0)) + + +def bigX(x, f, M_PBH): + return (x/(xbar(f,M_PBH)))**3.0 + +def x_of_a(a, f, M_PBH): + xb = xbar(f, M_PBH) + return ((a * (0.85*f) * xb**3.)/alpha)**(1./4.) + + +#Maximum semi-major axis +def a_max(f, M_PBH, withHalo = False): + Mtot = 1.0*M_PBH + if (withHalo): + Mtot += M_halo(z_eq, M_PBH) + return alpha*xbar(f, Mtot)*(f*0.85)**(1.0/3.0)*((lambda_max)**(4.0/3.0)) + + +def z_decoupling(a, f, mass): + return (1. + z_eq)/(1./3 * bigX(x_of_a(a, f, mass), f, mass)/(0.85*f)) - 1. + +#-------------------------------------- +def GetRtrInterp(M_PBH): + global rtr_interp + + #NB: We set f = 0.01 in here, because actually f cancels everywhere in some parts of the calculation... + + am = a_max(0.01, M_PBH, withHalo=True) + a_list = np.logspace(-8, np.log10(am*1.1), 101) + + z_decoupling_0 = z_decoupling(a_list, 0.01, M_PBH) + M_halo_0 = M_halo(z_decoupling_0, M_PBH) + + #print(z_decoupling_0) + #print(M_halo_0) + + z_decoupling_1 = np.zeros(len(a_list)) + M_halo_1 = np.zeros(len(a_list)) + for i in range(len(a_list)): + z_decoupling_1[i] = z_decoupling(a_list[i], 0.01, (M_halo_0[i]+M_PBH)) + M_halo_1 = M_halo(z_decoupling_1, (M_PBH)) + + z_decoupling_2 = np.zeros(len(a_list)) + M_halo_2 = np.zeros(len(a_list)) + for i in range(len(a_list)): + z_decoupling_2[i] = z_decoupling(a_list[i], 0.01, (M_halo_1[i]+M_PBH)) + M_halo_2 = M_halo(z_decoupling_2, (M_PBH)) + + z_decoupling_3 = np.zeros(len(a_list)) + z_decoupling_check = np.zeros(len(a_list)) + M_halo_3 = np.zeros(len(a_list)) + for i in range(len(a_list)): + z_decoupling_3[i] = z_decoupling(a_list[i], 0.01, (M_halo_2[i]+M_PBH)) + M_halo_3 = M_halo(z_decoupling_3, (M_PBH)) + # + z_decoupling_check[i] = (1. + z_eq) / (1./3 * bigX(x_of_a(a_list[i], 0.01, (M_halo_3[i]+M_PBH)), 0.01, (M_halo_3[i]+M_PBH))/(0.85*0.01)) - 1. + # + + #print M_halo_3 + + r_list = r_trunc(z_decoupling_3, M_PBH) + rtr_interp = interp1d(a_list, r_list) + return rtr_interp + + +def CalcTruncRadius(ai, M_PBH): + + + z_decoupling_0 = z_decoupling(ai, 0.01, M_PBH) + M_halo_0 = M_halo(z_decoupling_0, M_PBH) + + #print(z_decoupling_0) + #print(M_halo_0) + + + z_decoupling_1 = z_decoupling(ai, 0.01, (M_halo_0+M_PBH)) + M_halo_1 = M_halo(z_decoupling_1, (M_PBH)) + + + z_decoupling_2 = z_decoupling(ai, 0.01, (M_halo_1+M_PBH)) + M_halo_2 = M_halo(z_decoupling_2, (M_PBH)) + + z_decoupling_3 = z_decoupling(ai, 0.01, (M_halo_2+M_PBH)) + M_halo_3 = M_halo(z_decoupling_3, (M_PBH)) + + r_list = r_trunc(z_decoupling_3, M_PBH) + return r_list + + +#----------------------------------- +#print "Edit rho, Menc to fix this..." 
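+#Sketch of the truncated power-law halo profile implemented by rho() and Menc()
+#below (default gamma = 9/4):
+#    rho(r) = A*(r/r_tr)**(-gamma)   for r <= r_tr, and 0 otherwise,
+#with A = (3-gamma)*M_PBH/(4*pi*r_tr**gamma*r_eq**(3-gamma)),
+#so that the enclosed mass (including the central PBH) is
+#    Menc(r) = M_PBH*(1 + (r/r_eq)**(3-gamma))   for r <= r_tr,
+#saturating at its r = r_tr value outside the truncation radius.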
+def rho(r, r_tr, M_PBH, gamma=9.0/4.0): + x = r/r_tr + A = (3-gamma)*M_PBH/(4*np.pi*(r_tr**gamma)*(r_eq(M_PBH)**(3-gamma))) + if (x <= 1): + return A*x**(-gamma) + else: + return 0 + +def Menc(r, r_tr, M_PBH, gamma=9.0/4.0): + x = r/r_tr + if (x <= 1): + return M_PBH*(1+(r/r_eq(M_PBH))**(3-gamma)) + else: + return M_PBH*(1+(r_tr/r_eq(M_PBH))**(3-gamma)) + + +def calcBindingEnergy(r_tr, M_PBH): + integ = lambda r: Menc(r, r_tr, M_PBH)*rho(r, r_tr, M_PBH)*r + return -G_N*4*np.pi*quad(integ,1e-8, 1.0*r_tr, epsrel=1e-3)[0] + +def getBindingEnergy(r_tr, M_PBH): + global current_MPBH, Ubind_interp, rtr_interp + if ((M_PBH - current_MPBH)**2 >1e-3 or Ubind_interp == None): + current_MPBH = M_PBH + print(" Tabulating binding energy and truncation radius (M_PBH = " + str(M_PBH) +")...") + rtr_vals = np.logspace(np.log10(1e-8), np.log10(1.0*r_eq(M_PBH)),500) + Ubind_vals = np.asarray([calcBindingEnergy(r1, M_PBH) for r1 in rtr_vals]) + Ubind_interp = interp1d(rtr_vals, Ubind_vals) + + rtr_interp = GetRtrInterp(M_PBH) + + return Ubind_interp(r_tr) + + +def calc_af(ai, M_PBH): + global current_MPBH, rtr_interp, Ubind_interp + + if ((M_PBH - current_MPBH)**2 > 1e-3 or rtr_interp == None): + current_MPBH = M_PBH + print(" Tabulating binding energy and truncation radius (M_PBH = " + str(M_PBH) +")...") + rtr_vals = np.logspace(np.log10(1e-8), np.log10(1.0*r_eq(M_PBH)),500) + Ubind_vals = np.asarray([calcBindingEnergy(r1, M_PBH) for r1 in rtr_vals]) + Ubind_interp = interp1d(rtr_vals, Ubind_vals) + + rtr_interp = GetRtrInterp(M_PBH) + + #r_tr = CalcTruncRadius(ai, M_PBH) + r_tr = rtr_interp(ai) + + Mtot = Menc(r_tr, r_tr, M_PBH) + #print Mtot + U_orb_before = -G_N*(Mtot**2)/(2.0*ai) + + if (r_tr > r_eq(M_PBH)): + Ubind = getBindingEnergy(r_eq(M_PBH), M_PBH) + else: + #print r_tr, r_eq(M_PBH) + Ubind = getBindingEnergy(r_tr, M_PBH) + return -G_N*M_PBH**2*0.5/(U_orb_before + 2.0*Ubind) + +def calc_jf(ji, ai, M_PBH): + af = calc_af(ai, M_PBH) + return ji*np.sqrt(ai/af) + +def calc_Tf(Ti, ai, M_PBH): + af = calc_af(ai, M_PBH) + return Ti*np.sqrt(af/ai) + +def getJacobian(): + global Jac_interp + + if ((M_PBH - current_MPBH)**2 >1e-3 or Jac_interp == None): + amin = 1e-6 #pc + amax = RM.semimajoraxis_full(RM.z_eq, 1.0, M_PBH) + a_i = np.logspace(np.log10(amin), np.log10(amax), 200) + a_f = np.vectorize(calc_af)(a_i, M_PBH) + + a_f_interp = UnivariateSpline(a_i, a_f, k=4, s=0) + daf_dai = a_f_interp.derivative(n=1) + + Jac = np.sqrt(a_f/a_i)*(1/daf_dai(a_i) - a_i/a_f) + + Jac_interp = interp1d(a_f, Jac, bounds_error=False, fill_value = 0.0) + + return Jac_interp + + \ No newline at end of file diff --git a/src/tabulate_posteriors_ET.py b/src/tabulate_posteriors_ET.py new file mode 100644 index 0000000..f0660b6 --- /dev/null +++ b/src/tabulate_posteriors_ET.py @@ -0,0 +1,173 @@ +""" TabuulatePosteriors_ET.py + +Calculate and tabulate posteriors on f_PBH +given N_obs detections with Einstein telescope. + +Outputs a .txt to the 'results' folder. + +""" + +import numpy as np +import matplotlib.pylab as plt + +from scipy.special import gamma, loggamma +from scipy.integrate import cumtrapz, quad +from scipy.interpolate import interp1d, UnivariateSpline +from scipy.stats import poisson +from scipy.misc import comb + +from tqdm import tqdm +import Cosmo +import MergerRate as MR + +#--------------------------- + +import argparse + +#Parse the arguments! 
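+#Example invocation (one of the calls made by ComputePosteriors.sh, run from the repository root):
+#    python src/tabulate_posteriors_ET.py -N_obs 24000 -prior LF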
+parser = argparse.ArgumentParser(description='...') +#parser.add_argument('-M_PBH','--M_PBH', help='PBH mass in solar masses', type=float, default=10.0) +parser.add_argument('-N_obs','--N_obs', help='Number of observed mergers', type=int, default=1) +parser.add_argument('-prior','--prior', help="Merger rate prior, either 'J' for Jeffrey's prior or 'LF' for log-flat prior", type=str, default="J") + +args = parser.parse_args() +#M_PBH = args.M_PBH +N_obs = args.N_obs +prior_type = args.prior + +#We fix M_PBH = 10.0 solar mass in this case +M_PBH = 10.0 + +if (prior_type not in ["J", "LF"]): + raise ValueError('Prior must be one of : "J", "LF"') + +#Prior scales as R^-alpha +if (prior_type == "J"): + alpha_prior = 0.5 +elif (prior_type == "LF"): + alpha_prior = 1.0 + +print("Calculating ET posteriors for M_PBH = " + str(M_PBH) + " M_sun; N = " + str(N_obs) + " observed events...") +print(" Prior: ", prior_type) + +#-------------------------- + +z_ET, frac_ET = np.loadtxt("../data/ET_redshift.txt", unpack=True) + +# Selection function f(z) for ET +# Depends on luminosity distance d_L +# and matches reported f(z) at z > 40 +def selection_ET(z): + z_hor = z_ET[-1] + DL_hor = Cosmo.calcdL(z_hor) + DL = Cosmo.calcdL(z) + return np.clip((1/DL - 1/DL_hor)*4.5e4, 0, 1) + +def calcdVTdz(z, T=0.67): + dVdz = Cosmo.dVdz(z)*1e-9 #Mpc^3 -> Gpc^3 + return (T*dVdz/(1+z))*selection_ET(z) + +#print("Fraction detectable at z = 40:", selection_ET(40.0)) + +#-------------------- + +z_start = 40.0 +z_end = 100.0 + + +# Calculate number of detected events +# integrating over redshift +def CalcLambda(f, M_PBH): + z_list = np.logspace(np.log10(z_start), np.log10(z_end), 20) + + integ = np.array([calcdVTdz(z, T = 0.67)*MR.MergerRate(z, f, M_PBH, withHalo=True) for z in z_list]) + L1 = np.trapz(integ, z_list) + return L1 + + +#Adam and I verified this - we spent roughly 1 hour worrying about whether this was +#Bayes enough. It was sufficiently Bayes. Do not worry in future. 
+# || +# || +# || +# VV +#P(R_eff) +def Posterior(R_eff, N, VT=1.0): + lam = VT*R_eff + + prior = lam**-alpha_prior + + p_poiss = poisson.pmf(N, lam) + lgamma = loggamma(N + 1 - alpha_prior) + A = (N-alpha_prior)*np.log(lam) - lam - lgamma + + return VT*np.exp(A) + + +def calcCredible(R, P): + cum_dist = cumtrapz(P, R, initial=0) + inv_cum = interp1d(cum_dist, R) + min90 = inv_cum(0.05) + med = inv_cum(0.5) + max90 = inv_cum(0.95) + upper90 = inv_cum(0.90) + return min90, med, max90, upper90 + +def round_to(x, nearest=10): + return int(round(x / nearest) * nearest) + +def P_sensitivity(VT, VT_0): + sig = 0.30 + lVT = np.log(VT) + lVT_0 = np.log(VT_0) + P = (1/VT)*(2*np.pi*sig**2)**-0.5*np.exp(-(lVT-lVT_0)**2/(2*sig**2)) + return np.nan_to_num(P) + +def P_marginal(R_eff, N): + + integ = lambda x: Posterior(R_eff, N, x)*P_sensitivity(x, VT_0=1.0) + return quad(integ, 0, 5.0, points = np.logspace(-5,1.1,20))[0] + +def get_f_interval(N): + + R_list = np.logspace(-1, 8, 1000) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + minR, medR, maxR, upper90 = calcCredible(R_list, P_marg_list) + + return f_of_R(minR), f_of_R(medR), f_of_R(maxR), f_of_R(upper90) + +#------------------------------- +N_f = 200 +f_list = np.logspace(-6, 0, N_f) + +def get_posterior(N, M_PBH): + + R_list = np.zeros(N_f) + for i in tqdm(range(N_f)): + R_list[i] = CalcLambda(f_list[i], M_PBH) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + + R_of_f = UnivariateSpline(f_list, R_list, k=4, s=0) + dR_df = R_of_f.derivative() + P_of_f = P_marg_list*dR_df(f_list) + + return np.nan_to_num(P_of_f) + + +P = get_posterior(N_obs, M_PBH) + +print("Posterior is normalised to:", np.trapz(P, f_list)) + +f_lower = calcCredible(f_list, P)[0] +print("95% Upper limit on f:", f_lower) + +mstr = str(round(M_PBH, 1)) +Nstr = str(N_obs) + +htxt = "Posterior distribution for f, given N = " + Nstr + " merger events in ET. PBH Mass: " + mstr + " M_sun" +htxt += "Prior: " + prior_type +htxt += "\nColumns: f, P(f|N)" + +np.savetxt("../data/posteriors_f/Posterior_f_ET_Prior_" + prior_type + "_M=" + mstr+"_N=" + Nstr + ".txt", list(zip(f_list, P)), header=htxt) \ No newline at end of file diff --git a/src/tabulate_posteriors_O3.py b/src/tabulate_posteriors_O3.py new file mode 100644 index 0000000..db6be6f --- /dev/null +++ b/src/tabulate_posteriors_O3.py @@ -0,0 +1,151 @@ +""" TabuulatePosteriors_O3.py + +Calculate and tabulate posteriors on f_PBH +given N_obs detections with LIGO/Virgo O3. + +Outputs a .txt to the 'results' folder. + +""" + + +import numpy as np +import matplotlib.pylab as plt + +from scipy.special import gamma, loggamma +from scipy.integrate import cumtrapz, quad +from scipy.interpolate import interp1d, UnivariateSpline + +from tqdm import tqdm +import Cosmo +import MergerRate as MR + +from scipy.stats import poisson +#--------------------------- + +import argparse + +#Parse the arguments! 
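+#Example invocation (one of the calls made by ComputePosteriors.sh, run from the repository root):
+#    python src/tabulate_posteriors_O3.py -N_obs 80 -prior J -M_PBH 0.5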
+parser = argparse.ArgumentParser(description='...') +parser.add_argument('-M_PBH','--M_PBH', help='PBH mass in solar masses (0.2 -> 1.0)', type=float, default=0.5) +parser.add_argument('-N_obs','--N_obs', help='Number of observed mergers', type=int, default=1) +parser.add_argument('-prior','--prior', help="Merger rate prior, either 'J' for Jeffrey's prior or 'LF' for log-flat prior", type=str, default="J") + + +args = parser.parse_args() +M_PBH = args.M_PBH +N_obs = args.N_obs +prior_type = args.prior + +if ((M_PBH < 0.2) or (M_PBH > 1.0)): + raise ValueError('M_PBH must be between 0.2 and 1.0 (Solar Masses)') + +if (prior_type not in ["J", "LF"]): + raise ValueError('Prior must be one of : "J", "LF"') + +#Prior scales as R^-alpha +if (prior_type == "J"): + alpha_prior = 0.5 +elif (prior_type == "LF"): + alpha_prior = 1.0 + +print(" Calculating LIGO posteriors for M_PBH = " + str(M_PBH) + " M_sun; N = " + str(N_obs) + " observed events...") +print(" Prior: ", prior_type) + +#-------------------------- + +#LIGO O3 sensitive time-volume +VT_O3 = 1.8e-4 #Gpc^3 yr + +#Load in horizon distances and increase by 50% for O3 +m_list, horizon_list = np.loadtxt("../data/SubSolar_Horizon.txt", unpack=True) +horizon_interp = interp1d(m_list, 1.5*horizon_list, bounds_error=True) + +#Calculate maximum redshift of observations +z_list = np.linspace(0, 1, 20) +dL_list = np.asarray([Cosmo.calcdL(z) for z in z_list]) +z_of_dL = interp1d(dL_list, z_list) + +#Horizon redshift +z_max = z_of_dL(horizon_interp(M_PBH)) +#-------------------- + + + +#P(R_eff) +def Posterior(R_eff, N, VT=VT_O3): + lam = VT*R_eff + #print(lam) + prior = lam**-alpha_prior + + p_poiss = poisson.pmf(N, lam) + lgamma = loggamma(N + 1 - alpha_prior) + A = (N-alpha_prior)*np.log(lam) - lam - lgamma + + return VT*np.exp(A) + + +def calcCredible(x, y): + cum_dist = cumtrapz(y, x, initial=0) + inv_cum = interp1d(cum_dist, x) + min90 = inv_cum(0.05) + med = inv_cum(0.5) + max90 = inv_cum(0.95) + upper90 = inv_cum(0.90) + return min90, med, max90, upper90 + +def round_to(x, nearest=10): + return int(round(x / nearest) * nearest) + +def P_sensitivity(VT, VT_0): + sig = 0.30 + lVT = np.log(VT) + lVT_0 = np.log(VT_0) + return (1/VT)*(2*np.pi*sig**2)**-0.5*np.exp(-(lVT-lVT_0)**2/(2*sig**2)) + +def P_marginal(R_eff, N): + integ = lambda x: Posterior(R_eff, N, x)*P_sensitivity(x, VT_0=VT_O3) + return quad(integ, 0, 5.0, points = np.logspace(-3,1.1,10))[0] + +def get_f_interval(N): + + R_list = np.logspace(-1, 8, 1000) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + minR, medR, maxR, upper90 = calcCredible(R_list, P_marg_list) + + return f_of_R(minR), f_of_R(medR), f_of_R(maxR), f_of_R(upper90) + +#------------------------------- +N_f = 200 +f_list = np.logspace(-6, 0, N_f) + +def get_posterior(N, M_PBH): + + R_list = np.zeros(N_f) + for i in tqdm(range(N_f)): + R_list[i] = MR.AverageMergerRate( f_list[i], M_PBH, z1 = 0, z2 = z_max, withHalo=True) + P_marg_list = np.array([P_marginal(R, N) for R in R_list]) + + + R_of_f = UnivariateSpline(f_list, R_list, k=4, s=0) + dR_df = R_of_f.derivative() + P_of_f = P_marg_list*dR_df(f_list) + + return np.nan_to_num(P_of_f) + + +P = get_posterior(N_obs, M_PBH) + +print("Posterior is normalised to:", np.trapz(P, f_list)) + +f_lower = calcCredible(f_list, P)[0] +print("95% Upper limit on f:", f_lower) + +mstr = str(round(M_PBH, 1)) +Nstr = str(N_obs) + +htxt = "Posterior distribution for f, given N = " + Nstr + " merger events at LIGO O3. 
PBH Mass: " + mstr + " M_sun" +htxt += "Prior: " + prior_type +htxt += "\nColumns: f, P(f|N)" + +np.savetxt("../data/posteriors_f/Posterior_f_O3_Prior_" + prior_type + "_M=" + mstr+"_N=" + Nstr + ".txt", list(zip(f_list, P)), header=htxt) From accb5a4d27fb74c94d43d894787ff88a6a44e9ef Mon Sep 17 00:00:00 2001 From: bradkav Date: Fri, 3 May 2019 15:35:38 +0200 Subject: [PATCH 3/5] Added script for recomputing posteriors --- ComputePosteriors.sh | 21 +++++++++++++++++++++ README.md | 5 ++++- src/tabulate_posteriors_ET.py | 2 +- src/tabulate_posteriors_O3.py | 2 +- 4 files changed, 27 insertions(+), 3 deletions(-) create mode 100755 ComputePosteriors.sh diff --git a/ComputePosteriors.sh b/ComputePosteriors.sh new file mode 100755 index 0000000..efbee40 --- /dev/null +++ b/ComputePosteriors.sh @@ -0,0 +1,21 @@ +#!/bin/bash + + +echo " Recomputing Gravitational Wave posteriors on f_PBH (may take ~ 1 hour...)" +echo " " + +for NOBS in 1 80 +do + for PRIOR in J LF + do + python src/tabulate_posteriors_O3.py -N_obs $NOBS -prior $PRIOR -M_PBH 0.5 + done +done + +for NOBS in 1 24000 +do + for PRIOR in J LF + do + python src/tabulate_posteriors_ET.py -N_obs $NOBS -prior $PRIOR + done +done \ No newline at end of file diff --git a/README.md b/README.md index f84199b..218de36 100644 --- a/README.md +++ b/README.md @@ -10,10 +10,13 @@ The figures can be remade with the following commands: * Figure 3: `python plot_frequentist_limits.py -plot_ps_diff` -Computing the point-source limits requires making tables containing `p_gamma`, the probability that an individual PBH is detectable by ermi-LAT. The tables used for the analysis in the paper are contained in the directory `data/p_gammas/`. These may take a few hours to recompute. They can be regenerated with +Computing the point-source limits requires making tables containing `p_gamma`, the probability that an individual PBH is detectable by Fermi-LAT. The tables used for the analysis in the paper are contained in the directory `data/p_gammas/`. These may take a few hours to recompute. They can be regenerated with `python generate_p_gamma_tables.py -test` With the `-test` flag, the `p_gamma` tables will be written to `data/p_gammas/test/` instead of overwriting the precomputed tables. The script can also be used to generate `p_gamma` tables over different `(m_dm, sv)` grids. +All limit calculations require tabulated posteriors on f_PBH. These are provided in [`data/posteriors_f`](data/posteriors_f/) but they can be recomputed by running the script [`ComputePosteriors.sh`](ComputerPosteriors.sh). This script calls a number of scripts in [`src/`](src/) which calculate the posteriors from gravitational wave observations. Run `python src/tabulate_posteriors_O3.py -h` and `python src/tabulate_posteriors_ET.py -h` for more detailed usage of these scripts. Scripts for calculating posteriors from SKA will be added shortly. + + ## `bayesian` branch The Bayesian branch contains code for performing a Bayesian analysis of the cross section limits. * The notebook `plot_bayesian_limits.ipynb` generates Bayesian versions of figures 2 and 3 assuming a uniform prior on the cross section. diff --git a/src/tabulate_posteriors_ET.py b/src/tabulate_posteriors_ET.py index f0660b6..52ca1e0 100644 --- a/src/tabulate_posteriors_ET.py +++ b/src/tabulate_posteriors_ET.py @@ -25,7 +25,7 @@ import argparse #Parse the arguments! 
-parser = argparse.ArgumentParser(description='...') +parser = argparse.ArgumentParser(description='Calculate and tabulate posteriors on f_PBH given N_obs detections with Einstein telescope. Outputs a .txt file to the ../data/posteriors_f folder.') #parser.add_argument('-M_PBH','--M_PBH', help='PBH mass in solar masses', type=float, default=10.0) parser.add_argument('-N_obs','--N_obs', help='Number of observed mergers', type=int, default=1) parser.add_argument('-prior','--prior', help="Merger rate prior, either 'J' for Jeffrey's prior or 'LF' for log-flat prior", type=str, default="J") diff --git a/src/tabulate_posteriors_O3.py b/src/tabulate_posteriors_O3.py index db6be6f..8cd5902 100644 --- a/src/tabulate_posteriors_O3.py +++ b/src/tabulate_posteriors_O3.py @@ -25,7 +25,7 @@ import argparse #Parse the arguments! -parser = argparse.ArgumentParser(description='...') +parser = argparse.ArgumentParser(description='Calculate and tabulate posteriors on f_PBH given N_obs detections with LIGO O3. Outputs a .txt file to the ../data/posteriors_f folder.') parser.add_argument('-M_PBH','--M_PBH', help='PBH mass in solar masses (0.2 -> 1.0)', type=float, default=0.5) parser.add_argument('-N_obs','--N_obs', help='Number of observed mergers', type=int, default=1) parser.add_argument('-prior','--prior', help="Merger rate prior, either 'J' for Jeffrey's prior or 'LF' for log-flat prior", type=str, default="J") From f8638ba7b64e30213e9b9168b02ea648aba056a6 Mon Sep 17 00:00:00 2001 From: bradkav Date: Fri, 3 May 2019 15:37:02 +0200 Subject: [PATCH 4/5] Fixed minor typo in a file header --- src/tabulate_posteriors_ET.py | 2 +- src/tabulate_posteriors_O3.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tabulate_posteriors_ET.py b/src/tabulate_posteriors_ET.py index 52ca1e0..b281d60 100644 --- a/src/tabulate_posteriors_ET.py +++ b/src/tabulate_posteriors_ET.py @@ -166,7 +166,7 @@ def get_posterior(N, M_PBH): mstr = str(round(M_PBH, 1)) Nstr = str(N_obs) -htxt = "Posterior distribution for f, given N = " + Nstr + " merger events in ET. PBH Mass: " + mstr + " M_sun" +htxt = "Posterior distribution for f, given N = " + Nstr + " merger events in ET. PBH Mass: " + mstr + " M_sun. " htxt += "Prior: " + prior_type htxt += "\nColumns: f, P(f|N)" diff --git a/src/tabulate_posteriors_O3.py b/src/tabulate_posteriors_O3.py index 8cd5902..8b9c5fc 100644 --- a/src/tabulate_posteriors_O3.py +++ b/src/tabulate_posteriors_O3.py @@ -144,7 +144,7 @@ def get_posterior(N, M_PBH): mstr = str(round(M_PBH, 1)) Nstr = str(N_obs) -htxt = "Posterior distribution for f, given N = " + Nstr + " merger events at LIGO O3. PBH Mass: " + mstr + " M_sun" +htxt = "Posterior distribution for f, given N = " + Nstr + " merger events at LIGO O3. PBH Mass: " + mstr + " M_sun. " htxt += "Prior: " + prior_type htxt += "\nColumns: f, P(f|N)" From da4fe7ada5af1d05d213ea6e8601c545cdaa2013 Mon Sep 17 00:00:00 2001 From: "Bradley J. Kavanagh" Date: Fri, 3 May 2019 15:38:09 +0200 Subject: [PATCH 5/5] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 218de36..0c06c72 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Computing the point-source limits requires making tables containing `p_gamma`, t `python generate_p_gamma_tables.py -test` With the `-test` flag, the `p_gamma` tables will be written to `data/p_gammas/test/` instead of overwriting the precomputed tables. 
The script can also be used to generate `p_gamma` tables over different `(m_dm, sv)` grids. -All limit calculations require tabulated posteriors on f_PBH. These are provided in [`data/posteriors_f`](data/posteriors_f/) but they can be recomputed by running the script [`ComputePosteriors.sh`](ComputerPosteriors.sh). This script calls a number of scripts in [`src/`](src/) which calculate the posteriors from gravitational wave observations. Run `python src/tabulate_posteriors_O3.py -h` and `python src/tabulate_posteriors_ET.py -h` for more detailed usage of these scripts. Scripts for calculating posteriors from SKA will be added shortly. +All limit calculations require tabulated posteriors on f_PBH. These are provided in [`data/posteriors_f`](data/posteriors_f/) but they can be recomputed by running the script [`ComputePosteriors.sh`](ComputePosteriors.sh). This script calls a number of scripts in [`src/`](src/) which calculate the posteriors from gravitational wave observations. Run `python src/tabulate_posteriors_O3.py -h` and `python src/tabulate_posteriors_ET.py -h` for more detailed usage of these scripts. Scripts for calculating posteriors from SKA will be added shortly. ## `bayesian` branch