From 085cddcb4e3f72b98fb51008c9a97c26607e5c37 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 14 Oct 2022 16:06:00 -0700 Subject: [PATCH 01/86] Add the option to remove a dynamic from builder --- PySDM/builder.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/PySDM/builder.py b/PySDM/builder.py index 409c84fa78..c6869d2b65 100644 --- a/PySDM/builder.py +++ b/PySDM/builder.py @@ -45,6 +45,12 @@ def add_dynamic(self, dynamic): assert key not in self.particulator.dynamics self.particulator.dynamics[key] = dynamic + def remove_dynamic(self, dynamic): + assert self.particulator.environment is not None + key = inspect.getmro(type(dynamic))[-2].__name__ + assert key in self.particulator.dynamics + self.particulator.dynamics.pop(key) + def register_product(self, product, buffer): if product.name in self.particulator.products: raise Exception(f'product name "{product.name}" already registered') From 81eb1d42279435e5567efd196617427b23566357 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Wed, 8 Feb 2023 17:38:57 -0700 Subject: [PATCH 02/86] Revisions to JOSS v2 paper --- paper/{ARG_fig1.pdf => joss-ARG-fig_1.pdf} | Bin 19993 -> 22352 bytes paper/paper.bib | 38 +++++++++++++++------ paper/paper.md | 31 ++++++++++++----- 3 files changed, 51 insertions(+), 18 deletions(-) rename paper/{ARG_fig1.pdf => joss-ARG-fig_1.pdf} (63%) diff --git a/paper/ARG_fig1.pdf b/paper/joss-ARG-fig_1.pdf similarity index 63% rename from paper/ARG_fig1.pdf rename to paper/joss-ARG-fig_1.pdf index 48ec60183bc623eed4870ef06470a4c2f21c4ff7..b620bae0d6f30ae75c4fe759b3d916f8a1e632ec 100644 GIT binary patch delta 6827 zcmai32|QHozi$wl?0eQ@H?}#m&sbv!DeIdh*$u|t7)x2EvJ^=;Z} zIRFweM>w*EcG^bDlf+uSUU~J9S0z)|WIFh+95>>I+Y56>6r(R&!s}%d4AAE(>vMW$3Rt>7>!(fp&VBv%QEX-;?sS|% zMHou!u7Re`#Tsh2!Tql3!fomJgZoV|^v-AR3f@D54QbI{LA0{R8W$fFOj&(#JZ(~T zrnw=2dOx?iH(2T5O{XB{fua+;0GHwf_1G#G_7hL(Q}g-(C;t}olH1x`WtKYk_ce_x zp^R*Uy`&sXmIk5m{KC$kdbB3>k4JY57(D8=uyb)2uOn1F9k_l85*}_WY>D_s|82mY 
za{I)Pg)-;km!338t5GF|np*;(Y~q-!rPtGdnUdyu)ugtHQd5(d=fSVtf3O`3%C@v? zj)XLoHgww=DPFDB59|qOr-)~0_~#>KYmPs69go=_v5R$M>`~wB#VW}Q6uEJ4IW@x* zub#VQ*D|)%d=Vpf%#ocRnU)OrZ#Vh1FJ#Ao^PV-`n)NyZ^s_G4>~KfhB`*f_X*Jj= z9Px5Cr}tNib*nmFL@tQzr;OMrh&S3O#4KGp-anpjeqwKZ-^km2LbIOVZ$8{~WhqPX z9TzLR`5S7{koC?REZiGed9prZkm)`2LghvEIkU%4XHV8dcHIpAHIHFBHr0HeE-H)u z!Cp-LB^%>+t4A|@ogi)|YXBWQW#;A?_uxVQ+c>eGQ!@pR5{f*Tv;E?q{`z^Tw`F!I zW}tbgveSJ!1~0XaQ0OvVR`cQkW0P^{r7gLiZeuA4B8da6;|HlC&8Bah0%c<~=RWek zr#153mLYTm_%SppQbfx6EOM%IZPgF=Pr<`QU&6!97&+u;$5E{tgw6Ymfde4hPFbe- z1wBSZ>By_(`~;D4z$VAxfYW%2$YObeA&sKMY%t7XO&Prrn{|TbmVu@+>^DU9%JxjMc zKHU1g-`BoH{wW`0IQ-dp^H>$v+VpdL_}UD93-9n7pN<5z$vr>0?auQg50-v_u;99F zMftP+GYv-j1O?jCDo@9?)aVOa|9DDJdH2`Y2y4R3obS2TPntzr#r-F|Pt$sji-6!j z;!?ujlCfqr&i9Lp1nfoZ^yh_Mnz{=DHm}`7`)otFCtYhz3VIaG*)zm%yR`M+@cN4R z_7ld=P6f0uKJ*qB<=01jwtqA%i*KBZ`wm$SgZrsdyRBMe#@#(_U!A&f?MKv7Tfx;g zN|7x4sw*bRjXTg0{nJ-Y!9n3`q&6BB^iPk>d%>Gc8(xI{ePpn_BVvcCTT#(GUD$|N z^5pTe-6c;1H2Y+eHjqt;)s@}?P1BuCp{hN zUR2j0NMFv>{b%x*yWeLEq^FwBX}fy{7e6p-{{D@mNAL^OY2Q0x`T1j?CNJfFjpz{0 z1J808KH)Z);mSMIR<|oCRABeqOi9DzasDGa>~7F%XcQAFZ$SDX&2Jes#vTt$B3{2M zmzo&2XgoBrX}EWsdaL|8ZO>d@1a!(Krs8?#Q+d(6 z0tt`j7R?er2zK5P;Tt-@jLdFCU&xZh4BwMrd89wqj+8JC7u?t?wBYVGVudgBvNQU* z6eIQSYFEP36A4k*gA8^WVHJJ74T#A&(;Y?86d4}_kyjNl{ORQDKU)ph#m<7@kerXx z$XBiWQ%8-ki`k_H#tj%cS*84 zvJ_I!QD~K`@$Nhybf@~GbuGU~uVRuJ3zNS8*sT|~Q4q#F{o8>5nG_8zggoW0?s$20 z?1`;>erg>XBpapPsv7chO$f%0s|94W9=a%R){s^&UL$VX+chyMp+yx5sEsMu&=&kr z%+{to+r{OL?t1HX)hnod-6gfVA}|2r1S=D(!dI((cJUBPVoUfD(@saMi`82krCXsY zS2}mwJ%(0T;wD~4s&%&GUZQ)%`Xmuzyrlcf43mL8jq-Q=zU2-!c@^eIzah{E@tu?F z(k^_s9cX5Ewfaih<#n6q()8cpB`RFpI#VdbV_$@BALvH$rt$MEt+%3&)tX)9dnU;8 zvF{rHIAuSW^6cs(fnr;~rsUcluZX-G@*S4ex&bo6uj74RkH6x{)nt;7?*B06wGP~u zBy10at3>+>_XxhU5KuQcl(%27dv8fzxTw|SNuFt)-VUb=mjb`}V5MZw#BYw>tTwKj zv0vuw!5E)g4nV%G>1*h#3&z6t!TL>DKUR8HYnjiUsZixkJApCFX5H%;NkuVBMHemt z|I`N)WlbI(Ux!b~s>xEfhg;qciK(7eTYr})-K#z2=q;r)e7&d)J~c60L$X}zA#md< zIiWtQG%DyYRx5j!>yp~K)|0P><$ug%8=JQX)JQsc@q|imlzc0q<+CkSM(8rP{plAh 
zV2*_uvquCl5Sq)ivRJYK z-7`*{SD#Z$tf1wlI?im%K`Zk)WNiAjQ-5lwmY6fc<>ux>M$Ic{6roi;tm z`rGa0;n(?I-rBwWZRu<{tB3Z^)RCF;35hX3+@crme|^^|G4Pe@di<{NXMRPz z{KZVAS}4VC^bLu7GX15u4|mk(tCG(v$d`|`W@_i|43WvNx?Y$ie@(RROxcarn~oc5 zE9b?hs;`Sxov8}()V>~Xz{R~GXFPV#8Wabu zc9+Q3Ykvt$GJ3Ml`mj~!#8NF1$Ynx{8;)iQeQ_+^b@O$4bf4l5bh?Ysp`(7U5)_a1 zN3po*)&Y07wvp*>qoSdd^6fS%A?(4ohc@6IkZmc|T&>%Ly4-bh2u_b(1nGQ=b&}1Q z(P>O|EB}J6Q{9=z(gSrA1M|wrJEBFWr(*PH8W9QelNGyYtg;C7y@$8H@AMqmwX14c z^*{>W9ja-gaR`67IaT(E(Z<6?E{3jod!<g+4`TGS4 z_w2$!Ywlt$V(0ai$HPe31>6~ zPIp=9VCWaO)LF(2Wx@x_ta;@&)Ne5~-afUe{EwVp*4 zrK8)Ia4%;!1@#HLk`h^hPZ^ybzUaMw@$P9VI^DyX-bViGYrf%?l$+0EoUeUC?tUp- z-RSU-ri<{^bCxLC_uQ`BeE5)>#?wEHURF)w#V0FohW1-$2Mdl4?uq}f81MP*y2lbZ zVpiGu>a25TR|U9V7-5p;rhCVmH0^hd)l|ibODZ|+^X1pE!#}{VBeDKBt}IoWIQ2$% zpCT;o?Aqo#GZAGFA$!s=VAIxyyoW9iTtal0zJ{j*C&p%-)N%N|l#U5RuJ@#o1V$;H^DPZMKLo*-k(N85i*UiJPZ zdcAq)(3J4B-a^a!B|cKv)hTGJpaK_>1knX8Sy&1Pn3y1=nGBk5I5PeeA+IZ3)l9&0 zR-wb*XWy8Mt&uU9SpDhdLd^>k{&Kgmk@J|V9&_7?3=eNFFmn*WwNbvx*s--Pq5PVKNw*(FuenFno$A8hf@JMuTfd#lXsyK-gx#L?}pH_-fFnrROcZpO?cm$LnwG@^szraq^rZXhUn-1Ix@hGD@9sS9ot=+CrISePhT6s(?j(zny~5bmBft-DCrw(T+}RG%IpKJWoDccCgan!)4*Yy-lr{-p4fX z!k1kol@AxTcGM$P(}r%cZY3poDVgT13(SP1gbX2ygs`n~R-@pp&HOykl_jaK&jI%s zQSVK^D5r|Y)(OjJZ0zVA6%p``7j+=EfRnsLb5~P~C1<;Zb*M6W|CU0#-Y@Yfb=xYY zzVhWHOJ_ns!iFpik+{MX#yPe^+!%i9S4!GZ5pX=Z=t8Cb*{>r50p$^XeBMb)h4ZpkopX)svA}rtVPS{@AGIIC3lcn(9!nx9Hj{~8% z8&Y0YSG?~!F5Nox?NFMke>dfq-Kb1*2p3^6_2_=Q^5^V;q|G6`Nmj~Fbsye!9Mw+J z=IW9FY+Yl{oN{2-exEU0TDUSAQ9QqdfYy^Wppzm-EHY-b;$qy&{1#t}&Kw0yAAV9h z*6qjm6)vk8So)I(DHnz^NdLqj1h^ViNTzF=^ei3-8Vd>eX_bdOk>q?Zs{^+uQ(P?5 z3c>*IpM^}H+l_4%XwuaP^r_0cVxfQDfosZfE;z94Rmj-`ElKSzC%#BgrTHznG<1V> zmWsI_PfIBXNF9Giy2mz}M?Pli-cLy1BW$Uf%sa6&!iRP_r8r%o@M}Y`)~|8R1gZG? 
zxP&A`Kq6Hfx@mygWJPs#vvAR|poRFl#5ns#MC;l6h0%A%(4g-IN_qg=f*u%1gWtC~ zFW4Rcg#w`c0Srh0&~yNU!2@Upd?Es9Cj4R!DHx$e;V*hP0tSFB3&Mg#GURDwtAa$^ z`G(T~^#92QK&?jF2qg5*$Re0RM54DuMlj9)v;*`o;F?J}Z2*qQ|1&ERZNu~pqx+jh z1clMy?f`nbZ@eo#fEf&6a0HN@YaGnQMF62iQ+os%vfXTiAVG&VZ-)POZ9c3_MiOx# zK*Zqw*AIn4fMU!NQ2qdx)31OV5p=jq@#l&GV3(T{aF+u28#6J1PD@~*D!~NkgrqjR zTzCTl>XJf110wBEkc>9eE*b)Tl|ivx#iS8Xv@8lL5Z?n;%c0 znhdF-w4rArLQsWTJtU&dp%Q`KYe+z6)u|9x2L*{~ltDf4d04{^TCay<@6c31a1n6O z9<55KT^|K)L$yJsXcR=&UW8Df7UZE54c)?MvFCL}5ReTH1)=pCpfdO@q~8EV;Ze{# zeJ4neh=PLAw;)FnitPwSARucpiv1792LZhxYeNhi2-Q$f&^_E)1Q8ll(qb3lnFvT! zLW@l#$|GP?nxVCz1X3@80^ikyc9TP3lkUcAK|GYpTqF!6s*QqPQocYD;)2kyfhHH0 z1o0Ujibz^SK%`#?62`w9^>AVF>`r4j z9u}4a-{;^)MgZCj!2WJU0ca|KrTlRT3jp?|-)^!6z%9Fp6@XiIlkEWRkDH+F0NigX zB-#PM5&n#D0&v7XSIz*ALj*gU3xHeZ@P&J2tUsV1fL|u@2k^g1kZAwN@Ni!Mzf2SW z;FpPL0RGRK&@_5bFcZKplLgXa;akgmfpBLH!2hO1qJshaG9?|rFE1+uAaEF2uts43 z9QD6-3kL|xbP)hynIsY*{3bx6BO_=4VR`;2fUvxtC{9l=j26fQ2+JhD5yo%b2+M3y zGzL8~0Duz<%!7ne5Ak2dXn?p(8BLD|h|6n?j`H=V0mNk$m;iBkjm%&M4aR>ffkZRo zA_3yE8ZiKInLZXEF0<|dNXxsrm&S+$VE-AVDk8DVOW1;imZ@M;oRPfnDrB*Bp>Yvk}CCm>eJ;jwT@Ux{JJAZEqlSnz!e2{zGcK@yhZ zTPr5SP(Y5oS70C!3pcY@V>lA*bF1WtiM2}fCr5!UV&i3F}u zkVN4mf|bgUv78|19IXF#K9Ef0#1-fG9~c>2vl0r1ycQ#@aY&Ft#;h?NTw_;a@cv^! 
zEGMU|AcmhuNUOICg8?}SZlxRs#|ipX7=e?8S79Vh@UOxs7@W~+5R1XA_G%0k%SqQO z2V)6vaY1YsXRVnQOM!pXSS5|au2mC<=R94koE1mnl#Nvwg_B%YVtCk*|6uE z*9?XOZS7zRr(~?0kbqk&N5HSmCm435mG18%`v`kA16%i6l;LU#Sd{ zvL+2;K_aKz{42*~_|n5@47fN$L)-ZXztb@1m!rr?CID|Lr=B{~_tF3o?$0y5FM}D+ Spamj-XJe$gx~2UV ze?wT-hd?E1P$*O!o&@1>SPdKrN2QP;M+8uUpZ_t|-EB^MT4MdwvWl~Nv1d%Wso7%z!B<;CnRe2jn;s1KK77ILpBMM1kqJ8-(B&?} z^H9@7X;i4cRg8jRZw^!CZtlUb?H%dSE*RD@Zjxi62SxB74tjWF~LCe;5?%z)lN`VsbmVLhBd8l%Q(l1{?$w6BoVejPZ#Yb%(61$vjYvlU1bw7h2dslFBh+!b&JhSdw5URNd0i(YmK&aQRb*=kutx3yR_67ZcN5xLS^`ifU8^LG!I@oD@k>}auHwb@akSUUr(mnOD#EvqZ#nX6vbt0^^9mq{!*cyeX6!DkVn(neNlnR8lxK1x#fz)a1-4SSS}P{}22M3Z!zC(CkL*un z7K?_2SY41Pn3FXVx{#1Jmrf5Ba{iZ1TC^9M=NNO!IL-c0ZpHbXDMt&9R|h9oxWaKg zP4(vFwr!6iuC+ZS^t&1wWhR<8Ux>W3!=mLaSIE}r2kG&`Ns*1qHqPQ(4u8l89P}|> zU35gVnZ+Jh3VYWnm13&)h;&lLDJ5=`3})=)tn63?|658!AwugEx06hwLAr~(6^F4} z-rxFyjXSx!3a?GPu}hWQZeeWfU8_>O^~UH_V5Wgcd0Mfc)El;zeg_45Vt;zo?vcDi zGgIR&Ey9uNhfJOn$2Soqdg-2{BsJ$~iv-WshI?>+E=GHNvcUjs(&H?~>cA>Y?AoVN5Iot9Q*A2+PTc({#Rw6o0ZRd%(eaYc5* z()kyK?cU0M!&36+_G7qXzt85ov+tg{DgNMqywpI_Ti9Hrg~721g8MBgA!WHqrhIh* z5xvdPE>b{PP!N@(YP*H=XpY+HZF1>~C-l9IUEUlx+G(0<`dDLz;9LB{Xp6F(1|nNN za7J5%p1#agDy;22Lbc}>Nt{O;CApu3&_U%kYM1UPr-^W>8%yUIm~D|h8&x(HXLC%T zp+)f~DNsBsHY@q+nY5xJIKS-Kbmo!EvkJ-bT5x`<(iwx35j(lk9~8bgZvW2AcW%y8 z9EuUs8fALS$1G=DKPheN(Wh2_-NuVkF=&6;zjll{7%FI~U}L<}Xg?7K6cC~$8^ye+18 z=2o1pUdm5OSU`l)w>hpS^QN!c;8I5$=0!NKSJ>fhl)qq`w3amcyNK}7i>T6;gDB0c74Yy=?!lQO zd!>KA(2htcT0C;aNwRal%2Y_-J?+=JwIipBj^J+(3H3jT>c8{tQ;l{mv;TUb$LAZf zgF{u#nX@HTT~EII6h4x%a(XmUi^50>(3%@^A%6m zm1EO)Ar4#;Vo0_{wjXml@Xupx(2E!7&TJ_ODe0D=8Hu-LLww`ZyXSlj_TH96mV`sy zTUvz=_Ez5|gE0%wc6=b;8&f8MdV9n-^hWOzXO9 zCtca~6AZh-YCO_vo}7OKE*@2Th_QD^kIZ(S<{rn-5+Yi2mIW(Am@Okuz>4uyC7~?Q3j5pQ`VB`oSp~15)OGQl8Wca#-dL z$3{cNoF7FpC1Mg&MUG;(-10x%;bnF!b3O}Jz1i_mX+A8CmTlSI`(d#4>5{8_z^5v+ z+piuGX0gpStGe+ANER!|>E45}D_^e*e3*MXNnbtZI()XK^smzG(W%yMyFV8^q(9R@ zC{-kQn;`IgQSVI>lkj6~v&`Cv>gwyGx%JrtWfnJUug)Q=o_WMdh5A&F6UOJ5F0UE& zRw$WK?BRMlz1Ks`d)l=QI`^^Kb;fspem$>;3b0RI9UIW!Ge0|iwoYZp{`-vap>`qg 
zn4jM07xf}_B=56Jwsy&(j^mDUyuM@cBYuHi30E6q{BM?VQkG_kk#aet{Wwt9F&mM2 zNc4F{`{jUWDLK05Lo-*yEdJUICJ*Mh=)P~>`+5>|lbx2yY7?$C4Uea&Sl+$ry>P<)%5f=j@!r?iy_JhiL1%ODSJOK_Y6ROC_vTE|{0?TvWZasw zD-@cZ=$stn@q9IVow*z=f^1Un?$Y~7jf!5P1mB6QUi8r@)BW_}9Wr`hNWscI8s-|sublH2Lv>a^hiBzNpWz^MSpdz%lZlY7E+RN2h~eUg zh{(~gt4ZI2<87}(oNG8}r&CvR*m)rr)u5w=!725n_0QX7D%37Z_YBm;YkeInblKeV>iwzRA3!p2J5rpxVufnjM(C7*&Y3>=riRPI#df&oz!21F^j0q*UpmE6i4Fz^?I0hX$Ql_qFz z7&!r{zO2 z^>Czm;f?b^BB1*7`2~}+e(8MlE_EJ*KHrQ*G5m4PqvDw+q3^s4TL!=RmVH}c>+~g> zfo^%7N^4bd7?4v$gH*K`uuBOIOe6&XUHvu~X4!HYo`4^X0TmjTfDHx%Vl=IQf~rd8 zH%(a>JCOn;aK(UET@`%BHG)NT40wrO1}8PpV3`mM>^0Gq=ZJzZ@I(_0I!RSP9LEc$ zC^DdwdlmBvtfkKiB5Yonic(Mxc)h+}K z3c8Y}6AA+(ETdOf8V1^^XdtUzQPsC;YOEAw2PyWdgC@-Pcc8LiqLE z2u&*pPxsy{A4J^H21MG(9|(~)k_Wm3G5mcQ9w88E9R#7d z?g?6FN!maQWCVE!xI?53#2|>o+<***NE-@+y(1vfx<-U%aG;AD3v7b|EA$Qv|5A@YXCFo?WS)gusV-L6+Jk`crT{RPR|zYKqO5Nd%0z`_dfTB2YC zplW%PHU5=FmRq;5lgXgm2E~uZlCVT9jYP)d)UgyWXcNXxrhq-R8s0c6VU6_K1NlRU zbpuBs&{zci@DbN$=MNu=y!kAN$`bxZ92rXmQ?|+?e`dfEh(sJf+bJ>E2KdiIw9P)+ zTBCoDqvB|++Wzh%QE{8%NM!6LVkGQZmsk(;KkCU;@}Ge$o@>qd4FpRdZ>k8EK-(Ng z#FJSq|NShHg#E2@%(btH_6g2cq(foem_fMIXC&JYxnf`IF=5f zO^V52b{CRKrcgHNreLX?(y}IIQ(6jXGZclocJ+UUr{dPG#2-HL+Ad931$k;2jw$fzw fib=Doy8i3aB`72!h~a_2QArd6LQ#=!ZH)LI(!9GK diff --git a/paper/paper.bib b/paper/paper.bib index e985372802..74d5c0bfaa 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -153,11 +153,12 @@ @article{Richter_et_al_2021 year = {2021} } -@misc{Bartman_et_al_2021, +@misc{Bartman_et_al_2022_MP, author = {Bartman, P. and Banaśkiewicz, J. and Drenda, S. and Manna, M. and Olesik, M. and Rozwoda, P. and Sadowski, M. and Arabas, S.}, - year = {2021}, + year = {2022}, title = {PyMPDATA v1: Numba-accelerated implementation of MPDATA with examples in {P}ython, {J}ulia and {M}atlab}, - journal = {J. Open Source Soft. (subm.)}, + journal = {J. 
Open Source Soft.}, + doi = {10.21105/joss.03896}, url = {https://github.com/atmos-cloud-sim-uj/PyMPDATA} } @@ -211,10 +212,10 @@ @article{Bartman_et_al_2022_JOSS @article{Bieli_et_al_2022, title = {An efficient Bayesian approach to learning droplet collision kernels: Proof of concept using "Cloudy", a new n-moment bulk microphysics scheme}, - author = {Bieli, M. and Dunbar, O.R.A. and De Jong, E.K. and Jaruga, A. and Schneider, T. and Bischoff, T.}, - doi = {10.1002/essoar.10510248.1}, + author = {Bieli, M. and Dunbar, O.R.A. and de Jong, E.K. and Jaruga, A. and Schneider, T. and Bischoff, T.}, + doi = {10.1029/2022MS002994}, journal = {J. Adv. Model. Earth Syst.}, - year = {in review} + year = {2022} } @article{Alpert_and_Knopf_2016, @@ -242,10 +243,11 @@ @article{Abdul_Razzak_and_Ghan_2000 } @article{DeJong_et_al_2022, - title = {Breakups are Complicated: Representing Collisional Droplet Breakup in the Superdroplet Method}, - author = {De Jong, E. and Mackay, J. B. and Jaruga, A.}, - journal = {J. Adv. Model. Earth Syst.}, - year = {in preparation} + title = {Breakups are Complicated: An Efficient Representation of Collisional Breakup in the Superdroplet Method}, + author = {de Jong, E. and Mackay, J. B. and Jaruga, A. and Arabas, S.}, + doi = {10.5194/egusphere-2022-1243}, + journal = {Geosci. Model Dev.}, + year = {in review} } @article{Ovadnevaite_et_al_2017, @@ -304,6 +306,14 @@ @inproceedings{Arabas_et_al_2022 year = {2022} } +@inproceedings{Bartman_et_al_2023, + title = {TODO: adaptivity}, + author = {TODO}, + booktitle = {103nd American Meteorological Society Annual Meeting}, + url = {TODO}, + year = {2023} +} + @article{Knopf_and_Alpert_2013, title = {A water activity based model of heterogeneous ice nucleation kinetics for freezing of water and aqueous solution droplets}, author = {Knopf, D.A. 
and Alpert, P.A.}, @@ -311,3 +321,11 @@ @article{Knopf_and_Alpert_2013 year = {2013}, doi = {10.1039/c3fd00035d} } + +@article{Hill_et_al_2023, + title = {Toward a Numerical Benchmark for Warm Rain Processes}, + author = {Hill, A.A. and Lebo, Z.J. and Andrejczuk, M. and Arabas, S. and Dziekan, P. and Field, P. and Gettelman, A. and Hoffmann, F. and Pawlowska, H. and Onishi, R. and Vie, B.}, + journal = {J. Atmos. Sci}, + year = {2023}, + doi = {10.1175/JAS-D-21-0275.1} +} diff --git a/paper/paper.md b/paper/paper.md index d4d4a32a91..a239213ebf 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -18,6 +18,9 @@ authors: - name: Sajjad Azimi affiliation: "2" orcid: 0000-0002-6329-7775 + - name: Oleksii Bulenok + orcid: 0000-0003-2272-8548 + affiliation: "3" - name: Piotr Bartman orcid: 0000-0003-0265-6428 affiliation: "3" @@ -141,18 +144,22 @@ from PySDM import Builder from PySDM.backends import CPU from PySDM.environments import Box from PySDM.physics import si +from PySDM.formulae import Formulae +TRIVIA = Formulae().trivia builder = Builder(backend=CPU(), n_sd=100) builder.set_environment(Box(dv=1 * si.m**3, dt=1 * si.s)) +frag_scale = TRIVIA.volume(radius=100 * si.micrometres) builder.add_dynamic(Collision( collision_kernel=Golovin(b=1.5e3 / si.s), coalescence_efficiency=ConstEc(Ec=0.9), breakup_efficiency=ConstEb(Eb=1.0), - fragmentation_function=ExponFrag(scale=100*si.um**3) + fragmentation_function=ExponFrag(scale=frag_scale), + adaptive=True, )) ``` -In `PySDM-examples`, we introduced a set of notebooks reproducing figures from two forthcoming publications. +In `PySDM-examples`, we introduced a set of notebooks reproducing figures from two publications. In @Bieli_et_al_2022, `PySDM` results from collisional coalescence and breakup were used as a calibration tool for learning microphysical rate parameters. 
In @DeJong_et_al_2022, the physics of and algorithm for superdroplet breakup are described, @@ -161,9 +168,8 @@ In @DeJong_et_al_2022, the physics of and algorithm for superdroplet breakup are ## Immersion Freezing -This release of `PySDM` introduces representation of immersion freezing, - i.e. freezing contingent on the presence of insoluble ice nuclei immersed - in supercooled water droplets. +This release of `PySDM` introduces representation of immersion freezing and melting, + i.e. liquid-solid phase change contingent on the presence of insoluble ice nuclei immersed in supercooled water droplets. There are two alternative models implemented: the singular approach presented in @Shima_et_al_2020, and the time-dependent approach of @Alpert_and_Knopf_2016. For the time-dependent model, the water Activity Based Immersion Freezing Model (ABIFM) @@ -229,7 +235,7 @@ The new aerosol initialization framework is used in several examples in `PySDM-e simulations against data retrieved from the publication as shown in \autoref{fig:ARG}). -![Activated aerosol fraction in Mode 1 as a function of aerosol number concentration in Mode 2, reproducing results from @Abdul_Razzak_and_Ghan_2000. The figure shows the results from `PySDM` in color with two definitions of activated fraction based on the critical supersaturation threshold (Scrit) or the critical volume threshold (Vcrit). For comparison, we include the parameterization developed in @Abdul_Razzak_and_Ghan_2000 as formulated in their paper (solid line) and as implemented in a new Julia model (`CloudMicrophysics.jl`, dashed line), as well as the results from simulations reported in @Abdul_Razzak_and_Ghan_2000 (black dots).](ARG_fig1.pdf){#fig:ARG width="100%"} +![Activated aerosol fraction in Mode 1 as a function of aerosol number concentration in Mode 2, reproducing results from @Abdul_Razzak_and_Ghan_2000. 
The figure shows the results from `PySDM` in color with two definitions of activated fraction based on the critical supersaturation threshold (Scrit) or the critical volume threshold (Vcrit). For comparison, we include the parameterization developed in @Abdul_Razzak_and_Ghan_2000 as formulated in their paper (solid line) and as implemented in a new Julia model (`CloudMicrophysics.jl`, dashed line), as well as the results from simulations reported in @Abdul_Razzak_and_Ghan_2000 (black dots).](joss-ARG-fig_1.pdf){#fig:ARG width="100%"} ## Surface-partitioning of organics to modify surface tension of droplets @@ -260,7 +266,7 @@ In `PySDM` "v2", the `Condensation`, `Collision`, and `Displacement` "dynamics" Adaptivity is enabled by default and can be disabled by passing `False` as the value of optional `adaptive` keyword to the given dynamic. This adaptive time-stepping applies separately in each grid box of a multidimensional environment, - and includes a load-balancing logic. + and includes a load-balancing logic as described in @Bartman_et_al_2023. In the case of collisions, the time-step adaptivity is aimed at eliminating errors associated with multiple coalescence events within a timestep. In the case of condensation, the time-step adaptivity is aimed at reducing computational @@ -271,6 +277,15 @@ In the case of displacement, the time-step adaptivity is aimed at obeying a give in integration of the super-particle trajectories, and the error measure is constructed by comparing implicit- and explicit-Euler solutions. +# Relevant recent open-source developments +`PySDM` supports a `PyMPDATA`-based [@Bartman_et_al_2022_MP] reimplementation of the 1D kinematically-driven test framework in a recently-published intercomparison of microphysics methods [@Hill_et_al_2023]. +The authors are unaware of recent SDM algorithm implementations in open-source packages beyond those mentioned in [@Bartman_et_al_2022_JOSS] + and the related list of links in the `PySDM` README file. 
+Furthermore, none of these implementations include superdroplet-count-conserving collisional breakup, + organic surface partitioning, immersion freezing/melting, or adaptive time-stepping of coagulation or displacement. +The aerosol initialization method described + in `PySDM` v2 is similar to that of `pyrcel` [@Rothenberg_and_Wang_2017], but differs in its application to superdroplets rather than a moving-section representation. + # Author contributions EdJ led the formulation and implementation of the collisional breakup scheme with contributions from JBM. @@ -281,7 +296,7 @@ PB led the formulation and worked with SAr on implementation of the adaptive tim KD contributed to setting up continuous integration workflows for the GPU backend. ID, CES, and AJ contributed to the aerosol activation examples. The immersion freezing representation code was developed by SAr. -Maintenance of the project have been carried out by SAr, CS, and EdJ. +Maintenance of the project have been carried out by SAr, CES, and EdJ. # Acknowledgments From 75a33cf9de66e135406c75f7c456e14a5a3a99e4 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 9 Feb 2023 11:30:29 +0100 Subject: [PATCH 03/86] updated references in JOSS v2 paper --- paper/paper.bib | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 74d5c0bfaa..65548c671d 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -299,21 +299,29 @@ @book{Jacobson_2005 } @inproceedings{Arabas_et_al_2022, - title = {On Particle-Based Modeling of Immersion Freezing}, - author = {Arabas, S. and Curtis, J.H. and Silber, I. and Fridlind, A. and Knopf, D.A. and Riemer, N.}, - booktitle = {102nd American Meteorological Society Annual Meeting}, - url = {https://ams.confex.com/ams/102ANNUAL/meetingapp.cgi/Paper/396046}, - year = {2022} + title = {On Probabilistic Particle-Based Modeling of Immersion Freezing}, + author = {Arabas, S. and Curtis, J.H. and Silber, I. 
and Fridlind, A. and Knopf, D.A. and West, M. and Riemer, N.}, + booktitle = {103rd American Meteorological Society Annual Meeting}, + url = {https://ams.confex.com/ams/103ANNUAL/meetingapp.cgi/Paper/420160}, + year = {2023} } @inproceedings{Bartman_et_al_2023, - title = {TODO: adaptivity}, - author = {TODO}, + title = {Adaptive Time-Stepping for Particle-Based Cloud Microphysics: Super-Droplet Transport, Collisions and Condensational Growth}, + author = {Bartman, P. and Arabas, S.}, booktitle = {103nd American Meteorological Society Annual Meeting}, - url = {TODO}, + url = {https://ams.confex.com/ams/103ANNUAL/meetingapp.cgi/Paper/419078}, year = {2023} } +@inproceedings{PyPartMC_AMS2023, + title = {PyPartMC: a Pythonic Interface to a Particle-Resolved Monte-Carlo Aerosol Simulation Framework}, + author = {D'Aquino, Z. and Arabas, S. and Curtis, J.H. and Vaishnav, A. and Choi, J. and Riemer, N. and West, M.}, + booktitle = {103nd American Meteorological Society Annual Meeting}, + year = {2023}, + url = {https://ams.confex.com/ams/103ANNUAL/meetingapp.cgi/Paper/421645} +} + @article{Knopf_and_Alpert_2013, title = {A water activity based model of heterogeneous ice nucleation kinetics for freezing of water and aqueous solution droplets}, author = {Knopf, D.A. 
and Alpert, P.A.}, From 4bbb108fd7ef6e5b523345ab0ea52876d652235d Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 9 Feb 2023 12:00:31 +0100 Subject: [PATCH 04/86] grant number, mention of PyPartMC, remove melting, OB contribs, breakup code updates --- paper/paper.md | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index a239213ebf..c0fee795f2 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -146,10 +146,10 @@ from PySDM.environments import Box from PySDM.physics import si from PySDM.formulae import Formulae -TRIVIA = Formulae().trivia -builder = Builder(backend=CPU(), n_sd=100) +formulae = Formulae(fragmentation_function="ExponFrag") +builder = Builder(backend=CPU(formulae), n_sd=100) builder.set_environment(Box(dv=1 * si.m**3, dt=1 * si.s)) -frag_scale = TRIVIA.volume(radius=100 * si.micrometres) +frag_scale = formulae.trivia.volume(radius=100 * si.micrometres) builder.add_dynamic(Collision( collision_kernel=Golovin(b=1.5e3 / si.s), coalescence_efficiency=ConstEc(Ec=0.9), @@ -168,7 +168,7 @@ In @DeJong_et_al_2022, the physics of and algorithm for superdroplet breakup are ## Immersion Freezing -This release of `PySDM` introduces representation of immersion freezing and melting, +This release of `PySDM` introduces representation of immersion freezing, i.e. liquid-solid phase change contingent on the presence of insoluble ice nuclei immersed in supercooled water droplets. There are two alternative models implemented: the singular approach presented in @Shima_et_al_2020, and the time-dependent approach of @Alpert_and_Knopf_2016. @@ -282,9 +282,11 @@ In the case of displacement, the time-step adaptivity is aimed at obeying a give The authors are unaware of recent SDM algorithm implementations in open-source packages beyond those mentioned in [@Bartman_et_al_2022_JOSS] and the related list of links in the `PySDM` README file. 
Furthermore, none of these implementations include superdroplet-count-conserving collisional breakup, - organic surface partitioning, immersion freezing/melting, or adaptive time-stepping of coagulation or displacement. + organic surface partitioning or adaptive time-stepping for coagulation. The aerosol initialization method described - in `PySDM` v2 is similar to that of `pyrcel` [@Rothenberg_and_Wang_2017], but differs in its application to superdroplets rather than a moving-section representation. + in `PySDM` v2 is similar to that of `pyrcel` [@Rothenberg_and_Wang_2017]. +Leveraging the availability of `PyPartMC` - a new Python interface to the `PartMC` particle-resolved Monte-Carlo aerosol simulation code [@PyPartMC_AMS], + `PySDM` test suite has been extended with automated checks against `PartMC`. # Author contributions @@ -294,6 +296,7 @@ CES contributed the new surface tension models and relevant examples, in consult SAz contributed to extensions and enhancement of the one-dimensional kinematic framework environment. PB led the formulation and worked with SAr on implementation of the adaptive time-stepping schemes. KD contributed to setting up continuous integration workflows for the GPU backend. +OB implemented breakup handling within the GPU backend and contributed code refactors and new tests for both CPU and GPU backends. ID, CES, and AJ contributed to the aerosol activation examples. The immersion freezing representation code was developed by SAr. Maintenance of the project have been carried out by SAr, CES, and EdJ. @@ -306,6 +309,7 @@ Development of ice-phase microphysics representation has been supported through grant no. DE-SC0021034 by the Atmospheric System Research Program and Atmospheric Radiation Measurement Program sponsored by the U.S. Department of Energy (DOE). EdJ's contributions were made possible by support from the Department of Energy Computational Sciences Graduate Research Fellowship. 
+SAr acknowledges support from the Polish National Science Centre (grant no. 2020/39/D/ST10/01220). # References From aa2e76a0b3d5f5f494b2afdc23ee676f0b3bc119 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 9 Feb 2023 12:03:38 +0100 Subject: [PATCH 05/86] fix PyPartMC_AMS ref --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index c0fee795f2..08f8292d36 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -285,7 +285,7 @@ Furthermore, none of these implementations include superdroplet-count-conserving organic surface partitioning or adaptive time-stepping for coagulation. The aerosol initialization method described in `PySDM` v2 is similar to that of `pyrcel` [@Rothenberg_and_Wang_2017]. -Leveraging the availability of `PyPartMC` - a new Python interface to the `PartMC` particle-resolved Monte-Carlo aerosol simulation code [@PyPartMC_AMS], +Leveraging the availability of `PyPartMC` - a new Python interface to the `PartMC` particle-resolved Monte-Carlo aerosol simulation code [@PyPartMC_AMS2023], `PySDM` test suite has been extended with automated checks against `PartMC`. 
# Author contributions From 5eee3494d5fbde11bfb0289b33ae240103a8bfa1 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 9 Feb 2023 19:20:11 +0100 Subject: [PATCH 06/86] make math usable within Formulae (test to be added) --- PySDM/formulae.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/PySDM/formulae.py b/PySDM/formulae.py index 916a433691..d6d88ecbe0 100644 --- a/PySDM/formulae.py +++ b/PySDM/formulae.py @@ -2,6 +2,7 @@ Logic for enabling common CPU/GPU physics formulae code """ import inspect +import math import numbers import re import warnings @@ -131,7 +132,7 @@ def _formula(func, constants, dimensional_analysis, **kw): extras = func.__extras if hasattr(func, "__extras") else {} exec( # pylint:disable=exec-used - source, {"const": constants, "np": np, **extras}, loc + source, {"const": constants, "np": np, "math": math, **extras}, loc ) n_params = len(parameters_keys) - (1 if parameters_keys[0] in special_params else 0) From bedaf6f7210546cef46b0b73d5ff59edc5194fdd Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 9 Feb 2023 20:07:16 +0100 Subject: [PATCH 07/86] make math available in cpp2python --- PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py | 1 + 1 file changed, 1 insertion(+) diff --git a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py index 4556280215..52384e93cb 100644 --- a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py +++ b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py @@ -189,6 +189,7 @@ def to_numba(name, args, iter_var, body): result = ( f""" def make(self): + import math import numpy as np from numpy import floor, ceil, exp, log, power, sqrt import numba From 08758485e40b9827159fed6d67392872a610e5f1 Mon Sep 17 00:00:00 2001 From: Oleksii Bulenok <63914185+abulenok@users.noreply.github.com> Date: Fri, 10 Feb 2023 18:22:23 +0100 Subject: [PATCH 08/86] Update collisions_methods.py --- 
.../backends/impl_numba/methods/collisions_methods.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index e2ff9420f7..8268d76504 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -96,13 +96,13 @@ def breakup_fun1( attributes[a, k] *= multiplicity[k] attributes[a, k] += take_from_j * attributes[a, j] attributes[a, k] /= new_mult_k - - if multiplicity[j] > take_from_j: - nj = multiplicity[j] - take_from_j - nk = new_mult_k - else: + + if multiplicity[j] == take_from_j: nj = new_mult_k / 2 nk = nj + else: + nj = multiplicity[j] - take_from_j + nk = new_mult_k return nj, nk From f4372bc67ff265257cc658cc09511ed0171bcf4d Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:02:03 -0700 Subject: [PATCH 09/86] Switch from tanh to erf in gaussian CDF --- PySDM/physics/fragmentation_function/gaussian.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index da7b87c777..dbb38ac8b4 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -3,6 +3,8 @@ CDF = 1/2(1 + erf(x/sqrt(2))); approximate as erf(x) ~ tanh(ax) with a = sqrt(pi)log(2) as in Vedder 1987 """ +import math + import numpy as np @@ -12,6 +14,7 @@ def __init__(self, _): @staticmethod def frag_size(const, mu, sigma, rand): - return mu - sigma / const.sqrt_two / const.sqrt_pi / np.log(2) * np.log( - (0.5 + rand) / (1.5 - rand) - ) + return mu + sigma / 2 * (1 + math.erf(rand / const.sqrt_two)) + # return mu - sigma / const.sqrt_two / const.sqrt_pi / np.log(2) * np.log( + # (0.5 + rand) / (1.5 - rand) + # ) From 8c1611ffb6a13a496718c985215e007b76bf4e0d Mon Sep 17 00:00:00 2001 From: 
edejong-caltech Date: Fri, 10 Feb 2023 12:07:36 -0700 Subject: [PATCH 10/86] Rename breakup subfunctions in CPU backend --- .../impl_numba/methods/collisions_methods.py | 31 +++++++++++++------ 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 8268d76504..f43498800b 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -59,7 +59,7 @@ def coalesce( # pylint: disable=too-many-arguments @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) -def breakup_fun0( +def breakup0_compute_mult_transfer( gamma, j, k, multiplicity, volume, nfi, fragment_size_i, max_multiplicity ): # pylint: disable=too-many-arguments overflow_flag = False @@ -89,14 +89,14 @@ def breakup_fun0( @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) -def breakup_fun1( +def breakup1_update_mult_attributes( j, k, attributes, multiplicity, take_from_j, new_mult_k ): # pylint: disable=too-many-arguments for a in range(len(attributes)): attributes[a, k] *= multiplicity[k] attributes[a, k] += take_from_j * attributes[a, j] attributes[a, k] /= new_mult_k - + if multiplicity[j] == take_from_j: nj = new_mult_k / 2 nk = nj @@ -107,7 +107,7 @@ def breakup_fun1( @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) -def breakup_fun2( +def breakup2_round_mults_to_ints( j, k, nj, nk, attributes, multiplicity, take_from_j ): # pylint: disable=too-many-arguments if multiplicity[j] <= take_from_j: @@ -140,7 +140,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals warn_overflows, volume, ): - take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup_fun0( + take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup0_compute_mult_transfer( gamma[i], j, k, @@ -152,7 +152,9 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals ) gamma_deficit = gamma[i] - 
gamma_j_k - nj, nk = breakup_fun1(j, k, attributes, multiplicity, take_from_j, new_mult_k) + nj, nk = breakup1_update_mult_attributes( + j, k, attributes, multiplicity, take_from_j, new_mult_k + ) if multiplicity[j] <= take_from_j and round(nj) == 0: atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) @@ -161,7 +163,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) - breakup_fun2(j, k, nj, nk, attributes, multiplicity, take_from_j) + breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity, take_from_j) if overflow_flag and warn_overflows: warn("overflow", __file__) @@ -201,7 +203,12 @@ def break_up_while( else: if multiplicity[k] > multiplicity[j]: j, k = k, j - take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup_fun0( + ( + take_from_j, + new_mult_k, + gamma_j_k, + overflow_flag, + ) = breakup0_compute_mult_transfer( gamma_deficit, j, k, @@ -212,7 +219,9 @@ def break_up_while( max_multiplicity, ) - nj, nk = breakup_fun1(j, k, attributes, multiplicity, take_from_j, new_mult_k) + nj, nk = breakup1_update_mult_attributes( + j, k, attributes, multiplicity, take_from_j, new_mult_k + ) if multiplicity[j] <= take_from_j and round(nj) == 0: atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) @@ -220,7 +229,9 @@ def break_up_while( atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) gamma_deficit -= gamma_j_k - breakup_fun2(j, k, nj, nk, attributes, multiplicity, take_from_j) + breakup2_round_mults_to_ints( + j, k, nj, nk, attributes, multiplicity, take_from_j + ) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) From 48ba0d32a31a7716d969a52fd849d6b8dc0d47fb Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:34:34 -0700 Subject: [PATCH 11/86] reassign attributes[a,j] in case of new_nj == 0 (breakup1) --- 
.../impl_numba/methods/collisions_methods.py | 31 +++++++++++-------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index f43498800b..4e826b90fb 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -78,7 +78,7 @@ def breakup0_compute_mult_transfer( overflow_flag = True break - # check for new_n > 0 + # check for new_n >= 0 if take_from_j_test > multiplicity[j]: break @@ -100,7 +100,9 @@ def breakup1_update_mult_attributes( if multiplicity[j] == take_from_j: nj = new_mult_k / 2 nk = nj - else: + for a in range(len(attributes)): + attributes[a, j] = attributes[a, k] + else: # take_from_j < multiplicity[j] nj = multiplicity[j] - take_from_j nk = new_mult_k return nj, nk @@ -110,9 +112,10 @@ def breakup1_update_mult_attributes( def breakup2_round_mults_to_ints( j, k, nj, nk, attributes, multiplicity, take_from_j ): # pylint: disable=too-many-arguments - if multiplicity[j] <= take_from_j: - for a in range(len(attributes)): - attributes[a, j] = attributes[a, k] + # REDUNDANT + # if multiplicity[j] <= take_from_j: + # for a in range(len(attributes)): + # attributes[a, j] = attributes[a, k] multiplicity[j] = max(round(nj), 1) multiplicity[k] = max(round(nk), 1) @@ -139,7 +142,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals breakup_rate_deficit, warn_overflows, volume, -): +): # breakup0 guarantees take_from_j <= multiplicity[j] take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup0_compute_mult_transfer( gamma[i], j, @@ -150,21 +153,23 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals fragment_size[i], max_multiplicity, ) + print(take_from_j, new_mult_k, gamma_j_k) gamma_deficit = gamma[i] - gamma_j_k - + # breakup1 handles new_n[j] == 0 case via splitting nj, nk = breakup1_update_mult_attributes( j, k, 
attributes, multiplicity, take_from_j, new_mult_k ) - - if multiplicity[j] <= take_from_j and round(nj) == 0: - atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) - return + print(nj, nk, attributes, multiplicity) + # REDUNDANT + # if multiplicity[j] <= take_from_j and round(nj) == 0: + # atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) + # return atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) - + # breakup2 guarantees that no multiplicities are set to 0 breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity, take_from_j) - + print(nj, nk, attributes, multiplicity) if overflow_flag and warn_overflows: warn("overflow", __file__) From 0341a0761e9d6537f1bdbcdd026f0c012e1d5fd9 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:38:51 -0700 Subject: [PATCH 12/86] Remove redundant checks, add logic comments, clean up breakup CPU backend --- .../impl_numba/methods/collisions_methods.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 4e826b90fb..b9e7bbee4c 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -112,11 +112,6 @@ def breakup1_update_mult_attributes( def breakup2_round_mults_to_ints( j, k, nj, nk, attributes, multiplicity, take_from_j ): # pylint: disable=too-many-arguments - # REDUNDANT - # if multiplicity[j] <= take_from_j: - # for a in range(len(attributes)): - # attributes[a, j] = attributes[a, k] - multiplicity[j] = max(round(nj), 1) multiplicity[k] = max(round(nk), 1) factor_j = nj / multiplicity[j] @@ -153,23 +148,18 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals fragment_size[i], max_multiplicity, ) - print(take_from_j, new_mult_k, gamma_j_k) 
gamma_deficit = gamma[i] - gamma_j_k - # breakup1 handles new_n[j] == 0 case via splitting + + # breakup1 also handles new_n[j] == 0 case via splitting nj, nk = breakup1_update_mult_attributes( j, k, attributes, multiplicity, take_from_j, new_mult_k ) - print(nj, nk, attributes, multiplicity) - # REDUNDANT - # if multiplicity[j] <= take_from_j and round(nj) == 0: - # atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) - # return atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) - # breakup2 guarantees that no multiplicities are set to 0 + + # breakup2 also guarantees that no multiplicities are set to 0 breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity, take_from_j) - print(nj, nk, attributes, multiplicity) if overflow_flag and warn_overflows: warn("overflow", __file__) From d9fc399c2bd85ae3169843cdc6299f480acd2807 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:48:43 -0700 Subject: [PATCH 13/86] Add Ben's Scripps affiliation --- paper/paper.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 08f8292d36..264e9b99b2 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -32,14 +32,14 @@ authors: affiliation: "2" orcid: 0000-0003-3194-6440 - name: J. Ben Mackay - affiliation: "2" + affiliation: "2,4" orcid: 0000-0001-8677-3562 - name: Ryan X. 
Ward affiliation: "2" orcid: 0000-0003-2317-3310 - name: Sylwester Arabas orcid: 0000-0003-2361-0082 - affiliation: "4,3" + affiliation: "5,3" affiliations: - name: Department of Mechanical and Civil Engineering, California Institute of Technology, Pasadena, CA, USA index: 1 @@ -47,8 +47,10 @@ affiliations: index: 2 - name: Faculty of Mathematics and Computer Science, Jagiellonian University, Kraków, Poland index: 3 - - name: Department of Atmospheric Sciences, University of Illinois at Urbana-Champaign, Urbana, IL, USA + - name: Scripps Institution of Oceanography, San Diego, CA, USA index: 4 + - name: Department of Atmospheric Sciences, University of Illinois at Urbana-Champaign, Urbana, IL, USA + index: 5 bibliography: paper.bib --- From 9989efe95056e67664c1fd71127085c13f0626e5 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 13 Feb 2023 11:51:09 -0700 Subject: [PATCH 14/86] Linting --- .../impl_numba/methods/collisions_methods.py | 13 ++++++++----- PySDM/physics/fragmentation_function/gaussian.py | 5 ----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index b9e7bbee4c..4b8a5c28d1 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -110,7 +110,12 @@ def breakup1_update_mult_attributes( @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) def breakup2_round_mults_to_ints( - j, k, nj, nk, attributes, multiplicity, take_from_j + j, + k, + nj, + nk, + attributes, + multiplicity, ): # pylint: disable=too-many-arguments multiplicity[j] = max(round(nj), 1) multiplicity[k] = max(round(nk), 1) @@ -159,7 +164,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) # breakup2 also guarantees that no multiplicities are set to 0 - breakup2_round_mults_to_ints(j, k, 
nj, nk, attributes, multiplicity, take_from_j) + breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity) if overflow_flag and warn_overflows: warn("overflow", __file__) @@ -224,9 +229,7 @@ def break_up_while( atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) gamma_deficit -= gamma_j_k - breakup2_round_mults_to_ints( - j, k, nj, nk, attributes, multiplicity, take_from_j - ) + breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index dbb38ac8b4..3e4d2a0d5d 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -5,8 +5,6 @@ """ import math -import numpy as np - class Gaussian: # pylint: disable=too-few-public-methods def __init__(self, _): @@ -15,6 +13,3 @@ def __init__(self, _): @staticmethod def frag_size(const, mu, sigma, rand): return mu + sigma / 2 * (1 + math.erf(rand / const.sqrt_two)) - # return mu - sigma / const.sqrt_two / const.sqrt_pi / np.log(2) * np.log( - # (0.5 + rand) / (1.5 - rand) - # ) From 6f622e71250bb0d99b236d91920f31920f07f740 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 14 Feb 2023 10:06:12 +0100 Subject: [PATCH 15/86] removing spurious bib entry --- paper/paper.bib | 8 -------- 1 file changed, 8 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 4967597d49..ac69adb775 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -322,14 +322,6 @@ @inproceedings{PyPartMC_AMS2023 url = {https://ams.confex.com/ams/103ANNUAL/meetingapp.cgi/Paper/421645} } -@inproceedings{Bartman_et_al_2023, - title = {TODO: adaptivity}, - author = {TODO}, - booktitle = {103nd American Meteorological Society Annual Meeting}, - url = {TODO}, - year = {2023} -} - @article{Knopf_and_Alpert_2013, title = {A water activity based model of heterogeneous ice nucleation 
kinetics for freezing of water and aqueous solution droplets}, author = {Knopf, D.A. and Alpert, P.A.}, From 5937fe75a598e58e7b5f175d7448d6c0352932d6 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 14 Feb 2023 10:14:56 +0100 Subject: [PATCH 16/86] rename codecov workflow to highlight that it uses nojit --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 623cc0ebdd..b664a9ef5a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -30,7 +30,7 @@ jobs: pre-commit autoupdate pre-commit run --all-files - codecov: + nojit_and_codecov: runs-on: ubuntu-latest env: NUMBA_DISABLE_JIT: 1 @@ -106,7 +106,7 @@ jobs: prop_path: 'creators' build: - needs: [pylint, pdoc, codecov, precommit, zenodo_json] + needs: [pylint, pdoc, nojit_and_codecov, precommit, zenodo_json] strategy: matrix: platform: [ubuntu-latest, macos-12, windows-latest] From fa9e5da22f86cb4b1e4c0c81c83cdf6fabbde1a5 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Wed, 8 Feb 2023 17:38:57 -0700 Subject: [PATCH 17/86] Revisions to JOSS v2 paper --- paper/paper.bib | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/paper/paper.bib b/paper/paper.bib index 925c822e76..e4ed369af9 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -321,6 +321,14 @@ @inproceedings{PyPartMC_AMS2023 url = {https://ams.confex.com/ams/103ANNUAL/meetingapp.cgi/Paper/421645} } +@inproceedings{Bartman_et_al_2023, + title = {TODO: adaptivity}, + author = {TODO}, + booktitle = {103nd American Meteorological Society Annual Meeting}, + url = {TODO}, + year = {2023} +} + @article{Knopf_and_Alpert_2013, title = {A water activity based model of heterogeneous ice nucleation kinetics for freezing of water and aqueous solution droplets}, author = {Knopf, D.A. 
and Alpert, P.A.}, From fd0f63a53d399c84c29dd77d90a3af18c3107022 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 9 Feb 2023 19:20:11 +0100 Subject: [PATCH 18/86] make math usable within Formulae (test to be added) --- PySDM/formulae.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/PySDM/formulae.py b/PySDM/formulae.py index 916a433691..d6d88ecbe0 100644 --- a/PySDM/formulae.py +++ b/PySDM/formulae.py @@ -2,6 +2,7 @@ Logic for enabling common CPU/GPU physics formulae code """ import inspect +import math import numbers import re import warnings @@ -131,7 +132,7 @@ def _formula(func, constants, dimensional_analysis, **kw): extras = func.__extras if hasattr(func, "__extras") else {} exec( # pylint:disable=exec-used - source, {"const": constants, "np": np, **extras}, loc + source, {"const": constants, "np": np, "math": math, **extras}, loc ) n_params = len(parameters_keys) - (1 if parameters_keys[0] in special_params else 0) From 3ef73af9fa7748648dd1f6dfdb80846fc2702f38 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 9 Feb 2023 20:07:16 +0100 Subject: [PATCH 19/86] make math available in cpp2python --- PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py | 1 + 1 file changed, 1 insertion(+) diff --git a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py index 4556280215..52384e93cb 100644 --- a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py +++ b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py @@ -189,6 +189,7 @@ def to_numba(name, args, iter_var, body): result = ( f""" def make(self): + import math import numpy as np from numpy import floor, ceil, exp, log, power, sqrt import numba From 71a7d4dcd9344aa04a19a5c7dbb5a5210bb1625d Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:02:03 -0700 Subject: [PATCH 20/86] Switch from tanh to erf in gaussian CDF --- PySDM/physics/fragmentation_function/gaussian.py | 9 
++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index da7b87c777..dbb38ac8b4 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -3,6 +3,8 @@ CDF = 1/2(1 + erf(x/sqrt(2))); approximate as erf(x) ~ tanh(ax) with a = sqrt(pi)log(2) as in Vedder 1987 """ +import math + import numpy as np @@ -12,6 +14,7 @@ def __init__(self, _): @staticmethod def frag_size(const, mu, sigma, rand): - return mu - sigma / const.sqrt_two / const.sqrt_pi / np.log(2) * np.log( - (0.5 + rand) / (1.5 - rand) - ) + return mu + sigma / 2 * (1 + math.erf(rand / const.sqrt_two)) + # return mu - sigma / const.sqrt_two / const.sqrt_pi / np.log(2) * np.log( + # (0.5 + rand) / (1.5 - rand) + # ) From bf981844c6d0497b799cfb2816c833a503184cb6 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:07:36 -0700 Subject: [PATCH 21/86] Rename breakup subfunctions in CPU backend --- .../impl_numba/methods/collisions_methods.py | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 62c8271e2c..f43498800b 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -59,7 +59,7 @@ def coalesce( # pylint: disable=too-many-arguments @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) -def breakup_fun0( +def breakup0_compute_mult_transfer( gamma, j, k, multiplicity, volume, nfi, fragment_size_i, max_multiplicity ): # pylint: disable=too-many-arguments overflow_flag = False @@ -89,7 +89,7 @@ def breakup_fun0( @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) -def breakup_fun1( +def breakup1_update_mult_attributes( j, k, attributes, multiplicity, take_from_j, new_mult_k ): # 
pylint: disable=too-many-arguments for a in range(len(attributes)): @@ -107,7 +107,7 @@ def breakup_fun1( @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) -def breakup_fun2( +def breakup2_round_mults_to_ints( j, k, nj, nk, attributes, multiplicity, take_from_j ): # pylint: disable=too-many-arguments if multiplicity[j] <= take_from_j: @@ -140,7 +140,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals warn_overflows, volume, ): - take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup_fun0( + take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup0_compute_mult_transfer( gamma[i], j, k, @@ -152,7 +152,9 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals ) gamma_deficit = gamma[i] - gamma_j_k - nj, nk = breakup_fun1(j, k, attributes, multiplicity, take_from_j, new_mult_k) + nj, nk = breakup1_update_mult_attributes( + j, k, attributes, multiplicity, take_from_j, new_mult_k + ) if multiplicity[j] <= take_from_j and round(nj) == 0: atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) @@ -161,7 +163,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) - breakup_fun2(j, k, nj, nk, attributes, multiplicity, take_from_j) + breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity, take_from_j) if overflow_flag and warn_overflows: warn("overflow", __file__) @@ -201,7 +203,12 @@ def break_up_while( else: if multiplicity[k] > multiplicity[j]: j, k = k, j - take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup_fun0( + ( + take_from_j, + new_mult_k, + gamma_j_k, + overflow_flag, + ) = breakup0_compute_mult_transfer( gamma_deficit, j, k, @@ -212,7 +219,9 @@ def break_up_while( max_multiplicity, ) - nj, nk = breakup_fun1(j, k, attributes, multiplicity, take_from_j, new_mult_k) + nj, nk = breakup1_update_mult_attributes( + j, k, attributes, 
multiplicity, take_from_j, new_mult_k + ) if multiplicity[j] <= take_from_j and round(nj) == 0: atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) @@ -220,7 +229,9 @@ def break_up_while( atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) gamma_deficit -= gamma_j_k - breakup_fun2(j, k, nj, nk, attributes, multiplicity, take_from_j) + breakup2_round_mults_to_ints( + j, k, nj, nk, attributes, multiplicity, take_from_j + ) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) From f631f4a823b82bd87f2ff67a1e7c0b8276609db6 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:34:34 -0700 Subject: [PATCH 22/86] reassign attributes[a,j] in case of new_nj == 0 (breakup1) --- .../impl_numba/methods/collisions_methods.py | 31 +++++++++++-------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index f43498800b..4e826b90fb 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -78,7 +78,7 @@ def breakup0_compute_mult_transfer( overflow_flag = True break - # check for new_n > 0 + # check for new_n >= 0 if take_from_j_test > multiplicity[j]: break @@ -100,7 +100,9 @@ def breakup1_update_mult_attributes( if multiplicity[j] == take_from_j: nj = new_mult_k / 2 nk = nj - else: + for a in range(len(attributes)): + attributes[a, j] = attributes[a, k] + else: # take_from_j < multiplicity[j] nj = multiplicity[j] - take_from_j nk = new_mult_k return nj, nk @@ -110,9 +112,10 @@ def breakup1_update_mult_attributes( def breakup2_round_mults_to_ints( j, k, nj, nk, attributes, multiplicity, take_from_j ): # pylint: disable=too-many-arguments - if multiplicity[j] <= take_from_j: - for a in range(len(attributes)): - attributes[a, j] = attributes[a, k] + # REDUNDANT + # if multiplicity[j] <= take_from_j: + # for a in 
range(len(attributes)): + # attributes[a, j] = attributes[a, k] multiplicity[j] = max(round(nj), 1) multiplicity[k] = max(round(nk), 1) @@ -139,7 +142,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals breakup_rate_deficit, warn_overflows, volume, -): +): # breakup0 guarantees take_from_j <= multiplicity[j] take_from_j, new_mult_k, gamma_j_k, overflow_flag = breakup0_compute_mult_transfer( gamma[i], j, @@ -150,21 +153,23 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals fragment_size[i], max_multiplicity, ) + print(take_from_j, new_mult_k, gamma_j_k) gamma_deficit = gamma[i] - gamma_j_k - + # breakup1 handles new_n[j] == 0 case via splitting nj, nk = breakup1_update_mult_attributes( j, k, attributes, multiplicity, take_from_j, new_mult_k ) - - if multiplicity[j] <= take_from_j and round(nj) == 0: - atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) - return + print(nj, nk, attributes, multiplicity) + # REDUNDANT + # if multiplicity[j] <= take_from_j and round(nj) == 0: + # atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) + # return atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) - + # breakup2 guarantees that no multiplicities are set to 0 breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity, take_from_j) - + print(nj, nk, attributes, multiplicity) if overflow_flag and warn_overflows: warn("overflow", __file__) From 97b8ac76501ef7ff0c19e0f61cb03409f828c97c Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 10 Feb 2023 12:38:51 -0700 Subject: [PATCH 23/86] Remove redundant checks, add logic comments, clean up breakup CPU backend --- .../impl_numba/methods/collisions_methods.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 
4e826b90fb..b9e7bbee4c 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -112,11 +112,6 @@ def breakup1_update_mult_attributes( def breakup2_round_mults_to_ints( j, k, nj, nk, attributes, multiplicity, take_from_j ): # pylint: disable=too-many-arguments - # REDUNDANT - # if multiplicity[j] <= take_from_j: - # for a in range(len(attributes)): - # attributes[a, j] = attributes[a, k] - multiplicity[j] = max(round(nj), 1) multiplicity[k] = max(round(nk), 1) factor_j = nj / multiplicity[j] @@ -153,23 +148,18 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals fragment_size[i], max_multiplicity, ) - print(take_from_j, new_mult_k, gamma_j_k) gamma_deficit = gamma[i] - gamma_j_k - # breakup1 handles new_n[j] == 0 case via splitting + + # breakup1 also handles new_n[j] == 0 case via splitting nj, nk = breakup1_update_mult_attributes( j, k, attributes, multiplicity, take_from_j, new_mult_k ) - print(nj, nk, attributes, multiplicity) - # REDUNDANT - # if multiplicity[j] <= take_from_j and round(nj) == 0: - # atomic_add(breakup_rate_deficit, cid, gamma[i] * multiplicity[k]) - # return atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) - # breakup2 guarantees that no multiplicities are set to 0 + + # breakup2 also guarantees that no multiplicities are set to 0 breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity, take_from_j) - print(nj, nk, attributes, multiplicity) if overflow_flag and warn_overflows: warn("overflow", __file__) From a152424de90fcdf9d1f203a000fe03fda1a9270f Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 13 Feb 2023 11:51:09 -0700 Subject: [PATCH 24/86] Linting --- .../impl_numba/methods/collisions_methods.py | 13 ++++++++----- PySDM/physics/fragmentation_function/gaussian.py | 5 ----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git 
a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index b9e7bbee4c..4b8a5c28d1 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -110,7 +110,12 @@ def breakup1_update_mult_attributes( @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) def breakup2_round_mults_to_ints( - j, k, nj, nk, attributes, multiplicity, take_from_j + j, + k, + nj, + nk, + attributes, + multiplicity, ): # pylint: disable=too-many-arguments multiplicity[j] = max(round(nj), 1) multiplicity[k] = max(round(nk), 1) @@ -159,7 +164,7 @@ def break_up( # pylint: disable=too-many-arguments,c,too-many-locals atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) # breakup2 also guarantees that no multiplicities are set to 0 - breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity, take_from_j) + breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity) if overflow_flag and warn_overflows: warn("overflow", __file__) @@ -224,9 +229,7 @@ def break_up_while( atomic_add(breakup_rate, cid, gamma_j_k * multiplicity[k]) gamma_deficit -= gamma_j_k - breakup2_round_mults_to_ints( - j, k, nj, nk, attributes, multiplicity, take_from_j - ) + breakup2_round_mults_to_ints(j, k, nj, nk, attributes, multiplicity) atomic_add(breakup_rate_deficit, cid, gamma_deficit * multiplicity[k]) diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index dbb38ac8b4..3e4d2a0d5d 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -5,8 +5,6 @@ """ import math -import numpy as np - class Gaussian: # pylint: disable=too-few-public-methods def __init__(self, _): @@ -15,6 +13,3 @@ def __init__(self, _): @staticmethod def frag_size(const, mu, sigma, rand): return mu + sigma / 2 * (1 + math.erf(rand / const.sqrt_two)) - # 
return mu - sigma / const.sqrt_two / const.sqrt_pi / np.log(2) * np.log( - # (0.5 + rand) / (1.5 - rand) - # ) From 49fb9786a61d83337eda28fa792f3e8201cfb73d Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 14 Feb 2023 10:06:12 +0100 Subject: [PATCH 25/86] removing spurious bib entry --- paper/paper.bib | 8 -------- 1 file changed, 8 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index e4ed369af9..925c822e76 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -321,14 +321,6 @@ @inproceedings{PyPartMC_AMS2023 url = {https://ams.confex.com/ams/103ANNUAL/meetingapp.cgi/Paper/421645} } -@inproceedings{Bartman_et_al_2023, - title = {TODO: adaptivity}, - author = {TODO}, - booktitle = {103nd American Meteorological Society Annual Meeting}, - url = {TODO}, - year = {2023} -} - @article{Knopf_and_Alpert_2013, title = {A water activity based model of heterogeneous ice nucleation kinetics for freezing of water and aqueous solution droplets}, author = {Knopf, D.A. and Alpert, P.A.}, From 7925a9e243cdb6e75a0ce9b70fb0d3dfe172ea4c Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 14 Feb 2023 10:14:56 +0100 Subject: [PATCH 26/86] rename codecov workflow to highlight that it uses nojit --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 623cc0ebdd..b664a9ef5a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -30,7 +30,7 @@ jobs: pre-commit autoupdate pre-commit run --all-files - codecov: + nojit_and_codecov: runs-on: ubuntu-latest env: NUMBA_DISABLE_JIT: 1 @@ -106,7 +106,7 @@ jobs: prop_path: 'creators' build: - needs: [pylint, pdoc, codecov, precommit, zenodo_json] + needs: [pylint, pdoc, nojit_and_codecov, precommit, zenodo_json] strategy: matrix: platform: [ubuntu-latest, macos-12, windows-latest] From e0c43cda80311709ab481ae0026281657f5be29d Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Sat, 18 Feb 2023 
17:38:15 -0700 Subject: [PATCH 27/86] Remove old docstring --- PySDM/physics/fragmentation_function/gaussian.py | 1 - 1 file changed, 1 deletion(-) diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index 3e4d2a0d5d..09ea86148f 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -1,7 +1,6 @@ """ Gaussian PDF CDF = 1/2(1 + erf(x/sqrt(2))); -approximate as erf(x) ~ tanh(ax) with a = sqrt(pi)log(2) as in Vedder 1987 """ import math From 7ceb588f84fe0a7c5e41964f67f574cff1f909a2 Mon Sep 17 00:00:00 2001 From: de Jong Date: Mon, 20 Feb 2023 17:19:24 -0700 Subject: [PATCH 28/86] WIP on LL82 fragmentation function --- .../impl_numba/methods/collisions_methods.py | 96 +++++++++++++++ .../breakup_fragmentations/lowlist82.py | 113 ++++++++++++++++++ 2 files changed, 209 insertions(+) create mode 100644 PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 62c8271e2c..fad27469ab 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -250,6 +250,25 @@ def straub_Nr( # pylint: disable=too-many-arguments,unused-argument Nr4[i] = 1.0 Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i] +@numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) +def ll82_Nr( # pylint: disable=too-many-arguments,unused-argument + i, + Rf, + Nr2, + Nr3, + Nr4, + Nrt, + CKE, + CW, + gam, +): # pylint: disable=too-many-branches` + if CKE >= 89.3: + Rf[i] = 1.11e-4 * CKE[i]**(-0.654) + else: + Rf[i] = 1.0 + + + + class CollisionsMethods(BackendMethods): def __init__(self): @@ -355,6 +374,41 @@ def __straub_fragmentation_body( CW[i], ds[i], v_max[i], Nr1[i], Nr2[i], Nr3[i] ) + 
self.formulae.fragmentation_function.__name__ == "LowList1982Nf": + straub_p1 = self.formulae.fragmentation_function.p1 + straub_p2 = self.formulae.fragmentation_function.p2 + straub_p3 = self.formulae.fragmentation_function.p3 + straub_p4 = self.formulae.fragmentation_function.p4 + straub_sigma1 = self.formulae.fragmentation_function.sigma1 + + @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) + def __ll82_fragmentation_body( + *, CW, gam, ds, v_max, frag_size, rand, Nr1, Nr2, Nr3, Nr4, Nrt + ): + for i in numba.prange( # pylint: disable=not-an-iterable + len(frag_size) + ): + ll82_Nr(i, Nr1, Nr2, Nr3, Nr4, Nrt, CW, gam) + if rand[i] < Nr1[i] / Nrt[i]: + frag_size[i] = straub_p1( + rand[i] * Nrt[i] / Nr1[i], straub_sigma1(CW[i]) + ) + elif rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]: + frag_size[i] = straub_p2( + CW[i], (rand[i] * Nrt[i] - Nr1[i]) / (Nr2[i] - Nr1[i]) + ) + elif rand[i] < (Nr3[i] + Nr2[i] + Nr1[i]) / Nrt[i]: + frag_size[i] = straub_p3( + CW[i], + ds[i], + (rand[i] * Nrt[i] - Nr2[i]) / (Nr3[i] - Nr2[i]), + ) + else: + frag_size[i] = straub_p4( + CW[i], ds[i], v_max[i], Nr1[i], Nr2[i], Nr3[i] + ) + self.__straub_fragmentation_body = __straub_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": gaussian_frag_size = self.formulae.fragmentation_function.frag_size @@ -750,6 +804,48 @@ def straub_fragmentation( nfmax=nfmax, ) + def ll82_fragmentation( + # pylint: disable=too-many-arguments,too-many-locals + self, + *, + n_fragment, + CW, + gam, + ds, + frag_size, + v_max, + x_plus_y, + rand, + vmin, + nfmax, + Nr1, + Nr2, + Nr3, + Nr4, + Nrt, + ): + self.__ll82_fragmentation_body( + CW=CW.data, + gam=gam.data, + ds=ds.data, + frag_size=frag_size.data, + v_max=v_max.data, + rand=rand.data, + Nr1=Nr1.data, + Nr2=Nr2.data, + Nr3=Nr3.data, + Nr4=Nr4.data, + Nrt=Nrt.data, + ) + self.__fragmentation_limiters( + n_fragment=n_fragment.data, + frag_size=frag_size.data, + v_max=v_max.data, + x_plus_y=x_plus_y.data, + 
vmin=vmin, + nfmax=nfmax, + ) + @staticmethod @numba.njit(**conf.JIT_FLAGS) # pylint: disable=too-many-arguments,too-many-locals diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py new file mode 100644 index 0000000000..a9a229c34e --- /dev/null +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -0,0 +1,113 @@ +""" +See Low & List 1982 +""" +from PySDM.physics.constants import si + + +class LowList1982Nf: + # pylint: disable=too-many-instance-attributes + def __init__(self, vmin=0.0, nfmax=None): + self.particulator = None + self.vmin = vmin + self.nfmax = nfmax + self.arrays = {} + self.straub_tmp = {} + self.max_size = None + self.sum_of_volumes = None + self.const = None + + def register(self, builder): + self.particulator = builder.particulator + self.max_size = self.particulator.PairwiseStorage.empty( + self.particulator.n_sd // 2, dtype=float + ) + self.sum_of_volumes = self.particulator.PairwiseStorage.empty( + self.particulator.n_sd // 2, dtype=float + ) + self.const = self.particulator.formulae.constants + builder.request_attribute("radius") + builder.request_attribute("volume") + builder.request_attribute("terminal velocity") + for key in ("Sc", "tmp", "tmp2", "CKE", "We", "W2", "gam", "CW", "ds"): + self.arrays[key] = self.particulator.PairwiseStorage.empty( + self.particulator.n_sd // 2, dtype=float + ) + for key in ("Nr1", "Nr2", "Nr3", "Nr4", "Nrt"): + self.straub_tmp[key] = self.particulator.PairwiseStorage.empty( + self.particulator.n_sd // 2, dtype=float + ) + + def __call__(self, nf, frag_size, u01, is_first_in_pair): + self.max_size.max(self.particulator.attributes["volume"], is_first_in_pair) + self.sum_of_volumes.sum( + self.particulator.attributes["volume"], is_first_in_pair + ) + self.arrays["ds"].min(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["ds"] *= 2 + + # compute the dimensionless numbers and CW=CKE * We 
+ self.arrays["tmp"].sum(self.particulator.attributes["volume"], is_first_in_pair) + self.arrays["Sc"][:] = self.arrays["tmp"][:] # TODO #976 + self.arrays["Sc"] **= 2 / 3 + self.arrays["Sc"] *= ( + self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) + ) + + self.arrays["W2"].max(self.particulator.attributes["volume"], is_first_in_pair) + self.arrays["tmp2"].min(self.particulator.attributes["volume"], is_first_in_pair) + self.arrays["W2"] **= 2/3 + self.arrays["tmp2"] ** 2/3 + self.arrays["W2"] *= ( + self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) + ) + self.arrays["tmp2"] *= ( + self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) + ) + self.arrays["W2"] += self.arrays["tmp2"] + self.arrays["W2"] **= (-1) + + + self.arrays["tmp"] *= 2 + self.arrays["tmp2"].distance( + self.particulator.attributes["terminal velocity"], is_first_in_pair + ) + self.arrays["tmp2"] **= 2 + self.arrays["CKE"].multiply( + self.particulator.attributes["volume"], is_first_in_pair + ) + self.arrays["CKE"].divide_if_not_zero(self.arrays["tmp"]) + self.arrays["CKE"] *= self.arrays["tmp2"] + self.arrays["CKE"] *= self.const.rho_w + + self.arrays["We"][:] = self.arrays["CKE"][:] # TODO #976 + self.arrays["We"].divide_if_not_zero(self.arrays["Sc"]) + self.arrays["W2"].multiply(self.arrays["CKE"]) + + self.arrays["CW"][:] = self.arrays["We"][:] # TODO #976 + self.arrays["CW"] *= self.arrays["CKE"] + self.arrays["CW"] /= si.joule * 1e-6 # convert to µJ + + self.arrays["gam"].max(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["tmp"].min(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["gam"].divide_if_not_zero(self.arrays["tmp"]) + + for key in ("Nr1", "Nr2", "Nr3", "Nr4", "Nrt"): + self.straub_tmp[key] *= 0.0 + + self.particulator.backend.ll82_fragmentation( + n_fragment=nf, + CW=self.arrays["CW"], + gam=self.arrays["gam"], + ds=self.arrays["ds"], + frag_size=frag_size, + v_max=self.max_size, + 
x_plus_y=self.sum_of_volumes, + rand=u01, + vmin=self.vmin, + nfmax=self.nfmax, + Nr1=self.straub_tmp["Nr1"], + Nr2=self.straub_tmp["Nr2"], + Nr3=self.straub_tmp["Nr3"], + Nr4=self.straub_tmp["Nr4"], + Nrt=self.straub_tmp["Nrt"], + ) From 07c90b0d5cb286cc56a7403833b18d8a20991f69 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 21 Feb 2023 18:58:05 +0100 Subject: [PATCH 29/86] include math pkg name replacement in formula c_inline --- PySDM/formulae.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/PySDM/formulae.py b/PySDM/formulae.py index d6d88ecbe0..c6f60672a4 100644 --- a/PySDM/formulae.py +++ b/PySDM/formulae.py @@ -204,7 +204,8 @@ def _c_inline(fun, return_type=None, constants=None, **args): stripped += " " source += stripped source = source.replace("np.power(", "np.pow(") - source = source.replace("np.", "") + for pkg in ('np', 'math'): + source = source.replace(f"{pkg}.", "") source = source.replace(", )", ")") source = re.sub("^return ", "", source) for arg in inspect.signature(fun).parameters: From cb563943c9ca01995b6475de4a31b6dcd614a259 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 21 Feb 2023 19:13:26 +0100 Subject: [PATCH 30/86] sorting out CUDA problem with log(int) --- PySDM/physics/constants.py | 1 + PySDM/physics/fragmentation_function/straub2010nf.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/PySDM/physics/constants.py b/PySDM/physics/constants.py index fd463ecc9b..1c4178e824 100644 --- a/PySDM/physics/constants.py +++ b/PySDM/physics/constants.py @@ -26,6 +26,7 @@ def convert_to(value, unit): sqrt_pi = np.sqrt(sci.pi) PI = sci.pi PI_4_3 = PI * 4 / 3 +LN_2 = np.log(2) TWO = 2 THREE = 3 FOUR = 4 diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 63e3261599..7e8c2f1218 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -27,7 
+27,7 @@ def p1(const, rand, sigma1): - sigma1 / const.sqrt_two / const.sqrt_pi - / np.log(2) + / const.LN_2 * np.log((0.5 + rand) / (1.5 - rand)) ) ** 3 @@ -43,7 +43,7 @@ def p2(const, CW, rand): - ((0.007 * (CW - 21.0)) ** 2 / 12) / const.sqrt_two / const.sqrt_pi - / np.log(2) + / const.LN_2 * np.log((0.5 + rand) / (1.5 - rand)) ) ** 3 @@ -59,7 +59,7 @@ def p3(const, CW, ds, rand): - ((0.01 * (0.76 * CW**0.5 + 1.0)) ** 2 / 12) / const.sqrt_two / const.sqrt_pi - / np.log(2) + / const.LN_2 * np.log((0.5 + rand) / (1.5 - rand)) ) ** 3 From 2067f36bdf0e09f61ffc646f794fc945475d6f54 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 21 Feb 2023 23:43:46 +0100 Subject: [PATCH 31/86] make Straub formulae CUDA-compatible --- .../fragmentation_function/straub2010nf.py | 71 ++++++++++++------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 7e8c2f1218..5d4584bab4 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -13,7 +13,12 @@ def __init__(self, _): @staticmethod def sigma1(const, CW): return np.sqrt( - np.log((0.0125 * CW**0.5) ** 2 / 12 / const.STRAUB_E_D1**2 + 1) + np.log( + np.power((np.sqrt(CW) / 8) / 10, 2) + / 12 + / np.power(const.STRAUB_E_D1, const.TWO) + + 1 + ) ) @staticmethod @@ -21,16 +26,18 @@ def p1(const, rand, sigma1): return ( const.PI / 6 - * np.exp( - np.log(const.STRAUB_E_D1) - - sigma1**2 / 2 - - sigma1 - / const.sqrt_two - / const.sqrt_pi - / const.LN_2 - * np.log((0.5 + rand) / (1.5 - rand)) + * np.power( + np.exp( + np.log(const.STRAUB_E_D1) + - np.power(sigma1, const.TWO) / 2 + - sigma1 + / const.sqrt_two + / const.sqrt_pi + / const.LN_2 + * np.log((1 / const.TWO + rand) / (const.THREE / const.TWO - rand)) + ), + const.THREE, ) - ** 3 ) @staticmethod @@ -38,15 +45,15 @@ def p2(const, CW, rand): return ( const.PI / 6 - * ( + * np.power( 
const.STRAUB_MU2 - - ((0.007 * (CW - 21.0)) ** 2 / 12) + - (np.power(7 * (CW - 21) / 1000, const.TWO) / 12) / const.sqrt_two / const.sqrt_pi / const.LN_2 - * np.log((0.5 + rand) / (1.5 - rand)) + * np.log((1 / const.TWO + rand) / (const.THREE / const.TWO - rand)), + const.THREE, ) - ** 3 ) @staticmethod @@ -54,15 +61,15 @@ def p3(const, CW, ds, rand): return ( const.PI / 6 - * ( - (0.9 * ds) - - ((0.01 * (0.76 * CW**0.5 + 1.0)) ** 2 / 12) + * np.power( + (9 * ds / 10) + - (np.power((76 * np.sqrt(CW) / 100 + 1) / 100, const.TWO) / 12) / const.sqrt_two / const.sqrt_pi / const.LN_2 - * np.log((0.5 + rand) / (1.5 - rand)) + * np.log((1 / const.TWO + rand) / (const.THREE / const.TWO - rand)), + const.THREE, ) - ** 3 ) @staticmethod @@ -72,25 +79,39 @@ def p4(const, CW, ds, v_max, Nr1, Nr2, Nr3): # pylint: disable=too-many-argumen / 6 * ( v_max / const.PI_4_3 * 8 - + ds**3 + + np.power(ds, const.THREE) - Nr1 * np.exp( 3 * np.log(const.STRAUB_E_D1) + 6 * np.log( - (0.0125 * CW**0.5) ** 2 / 12 / const.STRAUB_E_D1**2 + 1 + np.power((np.sqrt(CW) / 8) / 10, const.TWO) + / 12 + / np.power(const.STRAUB_E_D1, const.TWO) + + 1 ) / 2 ) - Nr2 * ( - const.STRAUB_MU2**3 - + 3 * const.STRAUB_MU2 * ((0.007 * (CW - 21.0)) ** 2 / 12) ** 2 + np.power(const.STRAUB_MU2, const.THREE) + + 3 + * const.STRAUB_MU2 + * np.power( + np.power(7 * (CW - 21) / 1000, const.TWO) / 12, const.TWO + ) ) - Nr3 * ( - (0.9 * ds) ** 3 - + 3 * 0.9 * ds * ((0.01 * (0.76 * CW**0.5 + 1.0)) ** 2 / 12) ** 2 + np.power(9 * ds / 10, const.THREE) + + 3 + * 9 + * ds + / 10 + * np.power( + np.power((76 * np.sqrt(CW) / 100 + 1) / 100, const.TWO) / 12, + const.TWO, + ) ) ) ) From db98cd1a3783897d9bba63fe32fbb878d432d901 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 21 Feb 2023 23:47:30 +0100 Subject: [PATCH 32/86] pre-commit cleanup --- PySDM/formulae.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PySDM/formulae.py b/PySDM/formulae.py index c6f60672a4..1848d63833 100644 --- 
a/PySDM/formulae.py +++ b/PySDM/formulae.py @@ -204,7 +204,7 @@ def _c_inline(fun, return_type=None, constants=None, **args): stripped += " " source += stripped source = source.replace("np.power(", "np.pow(") - for pkg in ('np', 'math'): + for pkg in ("np", "math"): source = source.replace(f"{pkg}.", "") source = source.replace(", )", ")") source = re.sub("^return ", "", source) From 2bc2ce493112d69e6e1387236681ac6dc7f750da Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 21 Feb 2023 23:53:39 +0100 Subject: [PATCH 33/86] fix erf support in FakeThrust --- PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py index 52384e93cb..5222f10071 100644 --- a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py +++ b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py @@ -189,7 +189,7 @@ def to_numba(name, args, iter_var, body): result = ( f""" def make(self): - import math + from math import erf import numpy as np from numpy import floor, ceil, exp, log, power, sqrt import numba From febbb3031fc9c7b6379a86f4670161f2494a9c50 Mon Sep 17 00:00:00 2001 From: de Jong Date: Wed, 22 Feb 2023 15:09:56 -0700 Subject: [PATCH 34/86] WIP on LL82 fragmentation function --- .../impl_numba/methods/collisions_methods.py | 96 ++++---- .../breakup_fragmentations/lowlist82.py | 71 +++--- .../fragmentation_function/lowlist82.py | 233 ++++++++++++++++++ 3 files changed, 309 insertions(+), 91 deletions(-) create mode 100644 PySDM/physics/fragmentation_function/lowlist82.py diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index fad27469ab..6b9b0bbb17 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -254,20 +254,24 @@ def straub_Nr( 
# pylint: disable=too-many-arguments,unused-argument def ll82_Nr( # pylint: disable=too-many-arguments,unused-argument i, Rf, - Nr2, - Nr3, - Nr4, - Nrt, + Rs, + Rd, CKE, - CW, - gam, + W, + W2, ): # pylint: disable=too-many-branches` - if CKE >= 89.3: + if CKE[i] >= 89.3: Rf[i] = 1.11e-4 * CKE[i]**(-0.654) else: Rf[i] = 1.0 - - + if W[i] >= 0.86: + Rs[i] = 0.685 * (1 - np.exp(-1.63 * (W2[i] - 0.86))) + else: + Rs[i] = 0.0 + if (Rs[i] + Rf[i]) > 1.0: + Rd[i] = 0.0 + else: + Rd[i] = 1.0 - Rs[i] - Rf[i] class CollisionsMethods(BackendMethods): @@ -376,40 +380,25 @@ def __straub_fragmentation_body( self.__straub_fragmentation_body = __straub_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "LowList1982Nf": - straub_p1 = self.formulae.fragmentation_function.p1 - straub_p2 = self.formulae.fragmentation_function.p2 - straub_p3 = self.formulae.fragmentation_function.p3 - straub_p4 = self.formulae.fragmentation_function.p4 - straub_sigma1 = self.formulae.fragmentation_function.sigma1 - @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __ll82_fragmentation_body( - *, CW, gam, ds, v_max, frag_size, rand, Nr1, Nr2, Nr3, Nr4, Nrt + *, CKE, W, W2, St, ds, dl, dcoal, frag_size, rand, Rf, Rs, Rd ): + ll82_pf = self.formulae.fragmentation_function.pf + ll82_ps = self.formulae.fragmentation_function.ps + ll82_pd = self.formulae.fragmentation_function.pd for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): - ll82_Nr(i, Nr1, Nr2, Nr3, Nr4, Nrt, CW, gam) - if rand[i] < Nr1[i] / Nrt[i]: - frag_size[i] = straub_p1( - rand[i] * Nrt[i] / Nr1[i], straub_sigma1(CW[i]) - ) - elif rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]: - frag_size[i] = straub_p2( - CW[i], (rand[i] * Nrt[i] - Nr1[i]) / (Nr2[i] - Nr1[i]) - ) - elif rand[i] < (Nr3[i] + Nr2[i] + Nr1[i]) / Nrt[i]: - frag_size[i] = straub_p3( - CW[i], - ds[i], - (rand[i] * Nrt[i] - Nr2[i]) / (Nr3[i] - Nr2[i]), - ) - else: - frag_size[i] = straub_p4( - CW[i], ds[i], v_max[i], 
Nr1[i], Nr2[i], Nr3[i] - ) - - self.__straub_fragmentation_body = __straub_fragmentation_body + ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) + if rand[i] < Rf[i]: # filament breakup + frag_size[i] = ll82_pf(rand[i]/Rf[i], ds[i], dl[i], dcoal[i]) + elif rand[i] < Rf[i] + Rs[i]: # sheet breakup + frag_size[i] = ll82_ps((rand[i] - Rf[i]) / Rs[i], ds[i], dl[i], dcoal[i], St[i]) + else: # disk breakup + frag_size[i] = ll82_pd((rand[i] - Rf[i] - Rs[i]) / Rd[i], ds[i], dl[i], dcoal[i], CKE[i], W1[i]) + + self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": gaussian_frag_size = self.formulae.fragmentation_function.frag_size @@ -809,33 +798,36 @@ def ll82_fragmentation( self, *, n_fragment, - CW, - gam, + CKE, + W, + W2, + St, ds, + dl, + dcoal, frag_size, v_max, x_plus_y, rand, vmin, nfmax, - Nr1, - Nr2, - Nr3, - Nr4, - Nrt, + Rf, + Rs, + Rd, ): self.__ll82_fragmentation_body( - CW=CW.data, - gam=gam.data, + CKE=CKE.data, + W=W.data, + W2=W2.data, + St=St.data, ds=ds.data, + dl=dl.data, + dcoal=dcoal.data, frag_size=frag_size.data, - v_max=v_max.data, rand=rand.data, - Nr1=Nr1.data, - Nr2=Nr2.data, - Nr3=Nr3.data, - Nr4=Nr4.data, - Nrt=Nrt.data, + Rf=Rf.data, + Rs=Rs.data, + Rd=Rd.data, ) self.__fragmentation_limiters( n_fragment=n_fragment.data, diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index a9a229c34e..5bcaf80fa7 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -11,7 +11,7 @@ def __init__(self, vmin=0.0, nfmax=None): self.vmin = vmin self.nfmax = nfmax self.arrays = {} - self.straub_tmp = {} + self.ll82_tmp = {} self.max_size = None self.sum_of_volumes = None self.const = None @@ -28,12 +28,12 @@ def register(self, builder): builder.request_attribute("radius") builder.request_attribute("volume") 
builder.request_attribute("terminal velocity") - for key in ("Sc", "tmp", "tmp2", "CKE", "We", "W2", "gam", "CW", "ds"): + for key in ("Sc", "tmp", "tmp2", "CKE", "We", "W2", "ds", "dl", "dcoal"): self.arrays[key] = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) - for key in ("Nr1", "Nr2", "Nr3", "Nr4", "Nrt"): - self.straub_tmp[key] = self.particulator.PairwiseStorage.empty( + for key in ("Rf", "Rs", "Rd"): + self.ll82_tmp[key] = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) @@ -44,28 +44,26 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): ) self.arrays["ds"].min(self.particulator.attributes["radius"], is_first_in_pair) self.arrays["ds"] *= 2 + self.arrays["dl"].max(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["dl"] *= 2 + self.arrays["dcoal"].sum(self.particulator.attributes["volume"], is_first_in_pair) + self.arrays["dcoal"] /= (self.const.PI / 6) + self.arrays["dcoal"] **= (1/3) - # compute the dimensionless numbers and CW=CKE * We - self.arrays["tmp"].sum(self.particulator.attributes["volume"], is_first_in_pair) - self.arrays["Sc"][:] = self.arrays["tmp"][:] # TODO #976 + # compute the surface energy, CKE, & dimensionless numbers + self.arrays["Sc"].sum(self.particulator.attributes["volume"], is_first_in_pair) self.arrays["Sc"] **= 2 / 3 self.arrays["Sc"] *= ( self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) ) - - self.arrays["W2"].max(self.particulator.attributes["volume"], is_first_in_pair) - self.arrays["tmp2"].min(self.particulator.attributes["volume"], is_first_in_pair) - self.arrays["W2"] **= 2/3 - self.arrays["tmp2"] ** 2/3 - self.arrays["W2"] *= ( - self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) - ) - self.arrays["tmp2"] *= ( - self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) - ) - self.arrays["W2"] += self.arrays["tmp2"] - self.arrays["W2"] **= (-1) - + 
self.arrays["St"].min(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["St"] *= 2 + self.arrays["St"] **= 2 + self.arrays["tmp"].max(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["tmp"] *= 2 + self.arrays["tmp"] **= 2 + self.arrays["St"] += self.arrays["tmp"] + self.arrays["St"] *= (self.const.PI * self.const.sgm_w) self.arrays["tmp"] *= 2 self.arrays["tmp2"].distance( @@ -80,34 +78,29 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): self.arrays["CKE"] *= self.const.rho_w self.arrays["We"][:] = self.arrays["CKE"][:] # TODO #976 + self.arrays["W2"][:] = self.arrays["CKE"][:] self.arrays["We"].divide_if_not_zero(self.arrays["Sc"]) - self.arrays["W2"].multiply(self.arrays["CKE"]) - - self.arrays["CW"][:] = self.arrays["We"][:] # TODO #976 - self.arrays["CW"] *= self.arrays["CKE"] - self.arrays["CW"] /= si.joule * 1e-6 # convert to µJ - - self.arrays["gam"].max(self.particulator.attributes["radius"], is_first_in_pair) - self.arrays["tmp"].min(self.particulator.attributes["radius"], is_first_in_pair) - self.arrays["gam"].divide_if_not_zero(self.arrays["tmp"]) + self.arrays["W2"].divide_if_not_zero(self.arrays["St"]) - for key in ("Nr1", "Nr2", "Nr3", "Nr4", "Nrt"): - self.straub_tmp[key] *= 0.0 + for key in ("Rf","Rs","Rd"): + self.ll82_tmp[key] *= 0.0 self.particulator.backend.ll82_fragmentation( n_fragment=nf, - CW=self.arrays["CW"], - gam=self.arrays["gam"], + CKE=self.arrays["CKE"], + W=self.arrays["We"], + W2=self.arrays["W2"], + St=self.arrays["St"], ds=self.arrays["ds"], + dl=self.arrays["dl"], + dcoal=self.arrays["dcoal"], frag_size=frag_size, v_max=self.max_size, x_plus_y=self.sum_of_volumes, rand=u01, vmin=self.vmin, nfmax=self.nfmax, - Nr1=self.straub_tmp["Nr1"], - Nr2=self.straub_tmp["Nr2"], - Nr3=self.straub_tmp["Nr3"], - Nr4=self.straub_tmp["Nr4"], - Nrt=self.straub_tmp["Nrt"], + Rf=self.straub_tmp["Rf"], + Rs=self.straub_tmp["Rs"], + Rd=self.straub_tmp["Rd"], ) diff --git 
a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py new file mode 100644 index 0000000000..8f1938682a --- /dev/null +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -0,0 +1,233 @@ +""" +Formulae supporting `PySDM.dynamics.collisions.breakup_fragmentations.lowlist82` +""" + + +import numpy as np +import math +from PySDM.physics.constants import si + + +class LowList1982Nf: # pylint: disable=too-few-public-methods + def __init__(self, _): + pass + + @staticmethod + def params_f1(dl, dcoal): + dcoalCM = dcoal / si.cm + dlCM = dl / si.cm + Hf1 = 50.8 ((dlCM)**(-0.718)) + mu = dlCM + sigma = (1 / Hf1) + for i in range(10): + sigma = 1/Hf1 * np.sqrt(2/np.pi) / (1 + math.erf((dcoalCM-dlCM)/(np.sqrt(2)*sigma))) + return (Hf1, mu, sigma) # in cm + + @staticmethod + def params_f2(ds): + dsCM = ds / si.cm + Hf2 = 4.18 * ((dsCM)**(-1.17)) + mu = dsCM + sigma = 1/(np.sqrt(2*np.pi) * Hf2) + return (Hf2, mu, sigma) + + @staticmethod + def params_f3(ds, dl): + dsCM = ds / si.cm + dlCM = dl / si.cm + # eq (3.3), (3.4) + Ff1 = (-2.25e4 * (dlCM - 0.403 )**2 - 37.9)*(dsCM)**(2.5) + 9.67*(dlCM - 0.170)**2 + 4.95 + Ff2 = 1.02e4 * dsCM*(2.83) + 2 + Ff = Ff1 + Ff2 + # eq (3.5) + ds0 = (Ff1 / 2.83)**(1/1.02e4) + + Dff3 = 0.241*(dsCM) + 0.0129 # (4.14) + # eq (4.18) - (4.21) + Pf301 = 1.68e5 * dsCM**(2.33) + Pf302 = (43.4*(dlCM + 1.81)**2 - 159.0)/dsCM - 3870*(dlCM - 0.285)**2 - 58.1 + alpha = (dsCM - ds0) / (0.2 * ds0) + Pf303 = alpha*Pf301 + (1 - alpha)*Pf302 + if dsCM < ds0: + Pf0 = Pf301 + elif dsCM > 1.2 * ds0: + Pf0 = Pf302 + else: + Pf0 = Pf303 + + # eq (4.22), (4.16), (4.17) (4.23) + sigmaf3 = 10 * Dff3 + muf3 = np.log(Dff3) + sigmaf3**2 + Hf3 = Pf0 * Dff3 / np.exp(-0.5 * sigmaf3**2) + for i in range(10): + sigmaf3 = np.sqrt(2/np.pi) * (Ff - 2) / Hf3 /(1 - math.erf((np.log(0.01)-muf3)/np.sqrt(2)/sigmaf3)) + muf3 = np.log(Dff3) + sigmaf3**2 + Hf3 = Pf0 * Dff3 / np.exp(-0.5 * sigmaf3**2) + + return (Hf3, muf3, sigmaf3) + + 
@staticmethod + def params_s1(dl, ds, dcoal): + dsCM = ds / si.cm + dlCM = dl / si.cm + dcoalCM = dcoal / si.cm + Hs1 = 100 * np.exp(-3.25 * dsCM) + mus1 = dlCM + sigmas1 = 1 / Hs1 + for i in range(10): + sigmas1 = 1/Hs1 * np.sqrt(2/np.pi) / (1 + math.erf((dcoalCM-dlCM)/(np.sqrt(2)*sigmas1))) + return (Hs1, mus1, sigmas1) # in cm + + @staticmethod + def params_s2(dl, ds, St): + dsCM = ds / si.cm + dlCM = dl / si.cm + Dss2 = 0.254 * (ds**(0.413)) * np.exp(3.53 * dsCM**(-2.51)*(dlCM - dsCM)) #(4.27) + bstar = 14.2 * np.exp(-17.2 * dsCM) + Ps20 = 0.23 * dsCM**(-3.93) * dlCM**(bstar) # (4.29) + sigmas2 = 10 * Dss2 # as in (4.22) + mus2 = np.log(Dss2) + sigmas2**22 #(4.32) + Hs2 = Ps20 * Dss2 / np.exp(-0.5*sigmas2**2) #(4.28) + + Fs = 5 * math.erf((St - 2.52e-6)/(1.85e-6)) + 6 #(3.7) + + for i in range(10): + sigmas2 = np.sqrt(2/np.pi) * (Fs - 1) / Hs2 /(1 - math.erf((np.log(0.01)-mus2)/np.sqrt(2)/sigmas2)) + mus2 = np.log(Dss2) + sigmas2*2 #(4.32) + Hs2 = Ps20 * Dss2 / np.exp(-0.5*sigmas2**2) #(4.28) + + return (Hs2, mus2, sigmas2) + + @staticmethod + def params_d1(W1, dl, CKE, dcoal): + dlCM = dl / si.cm + dcoalCM = dlCM / si.cm + mud1 = dlCM * (1 - np.exp(-3.70*(3.10 - W1))) + Hd1 = 1.58e-5 * CKE**(-1.22) + sigmad1 = (1 / Hd1) + for i in range(10): + sigmad1 = 1/Hd1 * np.sqrt(2/np.pi) / (1 + math.erf((dcoalCM-mud1)/(np.sqrt(2)*sigmad1))) + return (Hd1, mud1, sigmad1) # in cm + + @staticmethod + def params_d2(ds, dl, CKE): + dsCM = ds / si.cm + dlCM = dl / si.cm + Ddd2 = np.exp(-17.4 * dsCM - 0.671*(dlCM - dsCM))*dsCM #(4.37) + bstar = 0.007*dsCM**(-2.54) #(4.39) + Pd20 = 0.0884*dsCM**(-2.52)*(dlCM - dsCM)**(bstar) #(4.38) + sigmad2 = 10 * Ddd2 + mud2 = np.log(Ddd2) + sigmad2**2 + Hd2 = Pd20*Ddd2/np.exp(-0.5*sigmad2**2) + + Fd = 297.5 + 23.7 * np.log(CKE) # (3.9) + + for i in range(10): + sigmad2 = np.sqrt(2/np.pi) * (Fd - 1) / Hd2 /(1 - math.erf((np.log(0.01)-mud2)/np.sqrt(2)/sigmad2)) + mud2 = np.log(Ddd2) + sigmad2**2 + Hd2 = Pd20*Ddd2/np.exp(-0.5*sigmad2**2) + + 
return (Hd2, mud2, sigmad2) + + @staticmethod + def pf(rand, ds, dl, dcoal): + (H1, mu1, sigma1) = LowList1982Nf.params_f1(dl, dcoal) + (H2, mu2, sigma2) = LowList1982Nf.params_f2(ds) + (H3, mu3, sigma3) = LowList1982Nf.params_f3(ds, dl) + + @staticmethod + def ps(rand, ds, dl, dcoal, St): + (H1, mu1, sigma1) = LowList1982Nf.params_s1(dl, ds, dcoal) + (H2, mu2, sigma2) = LowList1982Nf.params_s2(dl, ds, St) + + @staticmethod + def pd(rand, ds, dl, dcoal, CKE, W1): + (H1, mu1, sigma1) = LowList1982Nf.params_d1(W1, dl, CKE, dcoal) + (H2, mu2, sigma2) = LowList1982Nf.params_d2(ds, dl, CKE) + + + + # @staticmethod + # def sigma1(const, CW): + # return np.sqrt( + # np.log((0.0125 * CW**0.5) ** 2 / 12 / const.STRAUB_E_D1**2 + 1) + # ) + + # @staticmethod + # def p1(const, rand, sigma1): + # return ( + # const.PI + # / 6 + # * np.exp( + # np.log(const.STRAUB_E_D1) + # - sigma1**2 / 2 + # - sigma1 + # / const.sqrt_two + # / const.sqrt_pi + # / np.log(2) + # * np.log((0.5 + rand) / (1.5 - rand)) + # ) + # ** 3 + # ) + + # @staticmethod + # def p2(const, CW, rand): + # return ( + # const.PI + # / 6 + # * ( + # const.STRAUB_MU2 + # - ((0.007 * (CW - 21.0)) ** 2 / 12) + # / const.sqrt_two + # / const.sqrt_pi + # / np.log(2) + # * np.log((0.5 + rand) / (1.5 - rand)) + # ) + # ** 3 + # ) + + # @staticmethod + # def p3(const, CW, ds, rand): + # return ( + # const.PI + # / 6 + # * ( + # (0.9 * ds) + # - ((0.01 * (0.76 * CW**0.5 + 1.0)) ** 2 / 12) + # / const.sqrt_two + # / const.sqrt_pi + # / np.log(2) + # * np.log((0.5 + rand) / (1.5 - rand)) + # ) + # ** 3 + # ) + + # @staticmethod + # def p4(const, CW, ds, v_max, Nr1, Nr2, Nr3): # pylint: disable=too-many-arguments + # return ( + # const.PI + # / 6 + # * ( + # v_max / const.PI_4_3 * 8 + # + ds**3 + # - Nr1 + # * np.exp( + # 3 * np.log(const.STRAUB_E_D1) + # + 6 + # * np.log( + # (0.0125 * CW**0.5) ** 2 / 12 / const.STRAUB_E_D1**2 + 1 + # ) + # / 2 + # ) + # - Nr2 + # * ( + # const.STRAUB_MU2**3 + # + 3 * const.STRAUB_MU2 * 
((0.007 * (CW - 21.0)) ** 2 / 12) ** 2 + # ) + # - Nr3 + # * ( + # (0.9 * ds) ** 3 + # + 3 * 0.9 * ds * ((0.01 * (0.76 * CW**0.5 + 1.0)) ** 2 / 12) ** 2 + # ) + # ) + # ) From 33c96f8e66435043753f840d0ff65dc3260659a7 Mon Sep 17 00:00:00 2001 From: de Jong Date: Wed, 22 Feb 2023 15:41:53 -0700 Subject: [PATCH 35/86] Attempt at CDF sampling for ll82 --- .../fragmentation_function/lowlist82.py | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 8f1938682a..a5f79db882 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -5,6 +5,8 @@ import numpy as np import math +import scipy.special as sps +from scipy.interpolate import interp1d from PySDM.physics.constants import si @@ -134,17 +136,85 @@ def pf(rand, ds, dl, dcoal): (H1, mu1, sigma1) = LowList1982Nf.params_f1(dl, dcoal) (H2, mu2, sigma2) = LowList1982Nf.params_f2(ds) (H3, mu3, sigma3) = LowList1982Nf.params_f3(ds, dl) + grid = default_interp_grid() + percentiles = [gaussian_inv_cdf(grid, mu1, sigma1), + gaussian_inv_cdf(grid, mu2, sigma2), + lognormal_inv_cdf(grid, mu3, sigma3) + ] + cdf_arg = np.zeros(len(grid) * 3 + 1) + cdf_arg[1:] = np.concatenate(percentiles) + cdf = ( + H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + + H2 * gaussian_cdf(cdf_arg, mu2, sigma2) + + H3 * lognormal_cdf(cdf_arg, mu3, sigma3) + ) / (H1 + H2 + H3) + inverse_cdf = interp1d(cdf, cdf_arg) + + return inverse_cdf(rand) + @staticmethod def ps(rand, ds, dl, dcoal, St): (H1, mu1, sigma1) = LowList1982Nf.params_s1(dl, ds, dcoal) (H2, mu2, sigma2) = LowList1982Nf.params_s2(dl, ds, St) + grid = default_interp_grid() + percentiles = [gaussian_inv_cdf(grid, mu1, sigma1), + lognormal_inv_cdf(grid, mu2, sigma2) + ] + cdf_arg = np.zeros(len(grid) * 2 + 1) + cdf_arg[1:] = np.concatenate(percentiles) + cdf = ( + H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + + H2 * 
lognormal_cdf(cdf_arg, mu2, sigma2) + ) / (H1 + H2) + inverse_cdf = interp1d(cdf, cdf_arg) + + return inverse_cdf(rand) @staticmethod def pd(rand, ds, dl, dcoal, CKE, W1): (H1, mu1, sigma1) = LowList1982Nf.params_d1(W1, dl, CKE, dcoal) (H2, mu2, sigma2) = LowList1982Nf.params_d2(ds, dl, CKE) + grid = default_interp_grid() + percentiles = [gaussian_inv_cdf(grid, mu1, sigma1), + lognormal_inv_cdf(grid, mu2, sigma2) + ] + cdf_arg = np.zeros(len(grid) * 2 + 1) + cdf_arg[1:] = np.concatenate(percentiles) + cdf = ( + H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) + ) / (H1 + H2) + inverse_cdf = interp1d(cdf, cdf_arg) + + return inverse_cdf(rand) + +@staticmethod +def default_interp_grid(diam_basis=True): + if diam_basis == True: + dmin = 1 * si.um + dmax = 5 * si.cm + return np.logspace(np.log(dmin), np.log(dmax), 100) + else: + pass + +@staticmethod +def gaussian_cdf(arg, mu, sigma): + return (0.5 * (1 + math.erf((arg - mu)/np.sqrt(2)/sigma))) + +@staticmethod +def lognormal_cdf(arg, mu, sigma): + return (0.5 * (1 + math.erf((np.log(arg) - mu)/np.sqrt(2)/sigma))) + +@staticmethod +def gaussian_inv_cdf(X, mu, sigma): + return (mu + np.sqrt(2) * sigma * sps.erfinv(2*X - 1)) + +@staticmethod +def lognormal_inv_cdf(X, mu, sigma): + lnarg = LowList1982Nf.gaussian_cdf(X, mu, sigma) + return np.exp(lnarg) # @staticmethod From d8fffc04ac39626eb4f34fb88b76578b294c3d4f Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 27 Feb 2023 14:57:42 -0700 Subject: [PATCH 36/86] WIP on testing ll82 --- .../impl_numba/methods/collisions_methods.py | 31 +- .../breakup_fragmentations/__init__.py | 1 + .../breakup_fragmentations/lowlist82.py | 20 +- PySDM/physics/constants_defaults.py | 2 + .../fragmentation_function/__init__.py | 1 + .../fragmentation_function/lowlist82.py | 305 ++++++++---------- tests/backends_fixture.py | 2 +- .../collisions/test_fragmentations.py | 298 ++++++++--------- .../physics/test_fragmentation_functions.py | 102 ++++-- 9 
files changed, 387 insertions(+), 375 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 6b9b0bbb17..9770020f4e 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -250,6 +250,7 @@ def straub_Nr( # pylint: disable=too-many-arguments,unused-argument Nr4[i] = 1.0 Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i] + @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) def ll82_Nr( # pylint: disable=too-many-arguments,unused-argument i, @@ -261,7 +262,7 @@ def ll82_Nr( # pylint: disable=too-many-arguments,unused-argument W2, ): # pylint: disable=too-many-branches` if CKE[i] >= 89.3: - Rf[i] = 1.11e-4 * CKE[i]**(-0.654) + Rf[i] = 1.11e-4 * CKE[i] ** (-0.654) else: Rf[i] = 1.0 if W[i] >= 0.86: @@ -380,23 +381,33 @@ def __straub_fragmentation_body( self.__straub_fragmentation_body = __straub_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "LowList1982Nf": + ll82_pf = self.formulae.fragmentation_function.pf + ll82_ps = self.formulae.fragmentation_function.ps + ll82_pd = self.formulae.fragmentation_function.pd + @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __ll82_fragmentation_body( *, CKE, W, W2, St, ds, dl, dcoal, frag_size, rand, Rf, Rs, Rd ): - ll82_pf = self.formulae.fragmentation_function.pf - ll82_ps = self.formulae.fragmentation_function.ps - ll82_pd = self.formulae.fragmentation_function.pd for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) - if rand[i] < Rf[i]: # filament breakup - frag_size[i] = ll82_pf(rand[i]/Rf[i], ds[i], dl[i], dcoal[i]) - elif rand[i] < Rf[i] + Rs[i]: # sheet breakup - frag_size[i] = ll82_ps((rand[i] - Rf[i]) / Rs[i], ds[i], dl[i], dcoal[i], St[i]) - else: # disk breakup - frag_size[i] = ll82_pd((rand[i] - Rf[i] - Rs[i]) / Rd[i], ds[i], dl[i], dcoal[i], CKE[i], 
W1[i]) + if rand[i] < Rf[i]: # filament breakup + frag_size[i] = ll82_pf(rand[i] / Rf[i], ds[i], dl[i], dcoal[i]) + elif rand[i] < Rf[i] + Rs[i]: # sheet breakup + frag_size[i] = ll82_ps( + (rand[i] - Rf[i]) / Rs[i], ds[i], dl[i], dcoal[i], St[i] + ) + else: # disk breakup + frag_size[i] = ll82_pd( + (rand[i] - Rf[i] - Rs[i]) / Rd[i], + ds[i], + dl[i], + dcoal[i], + CKE[i], + W1[i], + ) self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/__init__.py b/PySDM/dynamics/collisions/breakup_fragmentations/__init__.py index 8b5c18866b..11b95e1cc7 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/__init__.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/__init__.py @@ -5,5 +5,6 @@ from .exponential import ExponFrag from .feingold1988 import Feingold1988Frag from .gaussian import Gaussian +from .lowlist82 import LowList1982Nf from .slams import SLAMS from .straub2010 import Straub2010Nf diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index 5bcaf80fa7..d2be6a7ab7 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -28,7 +28,7 @@ def register(self, builder): builder.request_attribute("radius") builder.request_attribute("volume") builder.request_attribute("terminal velocity") - for key in ("Sc", "tmp", "tmp2", "CKE", "We", "W2", "ds", "dl", "dcoal"): + for key in ("Sc", "St", "tmp", "tmp2", "CKE", "We", "W2", "ds", "dl", "dcoal"): self.arrays[key] = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) @@ -46,9 +46,11 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): self.arrays["ds"] *= 2 self.arrays["dl"].max(self.particulator.attributes["radius"], is_first_in_pair) self.arrays["dl"] *= 2 - 
self.arrays["dcoal"].sum(self.particulator.attributes["volume"], is_first_in_pair) - self.arrays["dcoal"] /= (self.const.PI / 6) - self.arrays["dcoal"] **= (1/3) + self.arrays["dcoal"].sum( + self.particulator.attributes["volume"], is_first_in_pair + ) + self.arrays["dcoal"] /= self.const.PI / 6 + self.arrays["dcoal"] **= 1 / 3 # compute the surface energy, CKE, & dimensionless numbers self.arrays["Sc"].sum(self.particulator.attributes["volume"], is_first_in_pair) @@ -63,7 +65,7 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): self.arrays["tmp"] *= 2 self.arrays["tmp"] **= 2 self.arrays["St"] += self.arrays["tmp"] - self.arrays["St"] *= (self.const.PI * self.const.sgm_w) + self.arrays["St"] *= self.const.PI * self.const.sgm_w self.arrays["tmp"] *= 2 self.arrays["tmp2"].distance( @@ -82,7 +84,7 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): self.arrays["We"].divide_if_not_zero(self.arrays["Sc"]) self.arrays["W2"].divide_if_not_zero(self.arrays["St"]) - for key in ("Rf","Rs","Rd"): + for key in ("Rf", "Rs", "Rd"): self.ll82_tmp[key] *= 0.0 self.particulator.backend.ll82_fragmentation( @@ -100,7 +102,7 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): rand=u01, vmin=self.vmin, nfmax=self.nfmax, - Rf=self.straub_tmp["Rf"], - Rs=self.straub_tmp["Rs"], - Rd=self.straub_tmp["Rd"], + Rf=self.ll82_tmp["Rf"], + Rs=self.ll82_tmp["Rs"], + Rd=self.ll82_tmp["Rd"], ) diff --git a/PySDM/physics/constants_defaults.py b/PySDM/physics/constants_defaults.py index 15efacd9f8..430d760991 100644 --- a/PySDM/physics/constants_defaults.py +++ b/PySDM/physics/constants_defaults.py @@ -163,3 +163,5 @@ STRAUB_E_D1 = 0.04 * si.cm STRAUB_MU2 = 0.095 * si.cm + +CM = 1 * si.cm diff --git a/PySDM/physics/fragmentation_function/__init__.py b/PySDM/physics/fragmentation_function/__init__.py index 3aa998f1bc..38b195ba22 100644 --- a/PySDM/physics/fragmentation_function/__init__.py +++ b/PySDM/physics/fragmentation_function/__init__.py @@ -5,5 +5,6 @@ from 
.expon_frag import ExponFrag from .feingold1988frag import Feingold1988Frag from .gaussian import Gaussian +from .lowlist82 import LowList1982Nf from .slams import SLAMS from .straub2010nf import Straub2010Nf diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index a5f79db882..16794668ab 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -2,12 +2,11 @@ Formulae supporting `PySDM.dynamics.collisions.breakup_fragmentations.lowlist82` """ +import math import numpy as np -import math import scipy.special as sps from scipy.interpolate import interp1d -from PySDM.physics.constants import si class LowList1982Nf: # pylint: disable=too-few-public-methods @@ -15,41 +14,55 @@ def __init__(self, _): pass @staticmethod - def params_f1(dl, dcoal): - dcoalCM = dcoal / si.cm - dlCM = dl / si.cm - Hf1 = 50.8 ((dlCM)**(-0.718)) + def params_f1(const, dl, dcoal): + print("hello") + dcoalCM = dcoal / const.cm + dlCM = dl / const.cm + Hf1 = 50.8 * ((dlCM) ** (-0.718)) mu = dlCM - sigma = (1 / Hf1) + sigma = 1 / Hf1 for i in range(10): - sigma = 1/Hf1 * np.sqrt(2/np.pi) / (1 + math.erf((dcoalCM-dlCM)/(np.sqrt(2)*sigma))) - return (Hf1, mu, sigma) # in cm + sigma = ( + 1 + / Hf1 + * np.sqrt(2 / np.pi) + / (1 + math.erf((dcoalCM - dlCM) / (np.sqrt(2) * sigma))) + ) + return (Hf1, mu, sigma) # in cm @staticmethod - def params_f2(ds): - dsCM = ds / si.cm - Hf2 = 4.18 * ((dsCM)**(-1.17)) + def params_f2(const, ds): + dsCM = ds / const.cm + Hf2 = 4.18 * ((dsCM) ** (-1.17)) mu = dsCM - sigma = 1/(np.sqrt(2*np.pi) * Hf2) + sigma = 1 / (np.sqrt(2 * np.pi) * Hf2) return (Hf2, mu, sigma) @staticmethod - def params_f3(ds, dl): - dsCM = ds / si.cm - dlCM = dl / si.cm + def params_f3(const, ds, dl): + dsCM = ds / const.cm + dlCM = dl / const.cm # eq (3.3), (3.4) - Ff1 = (-2.25e4 * (dlCM - 0.403 )**2 - 37.9)*(dsCM)**(2.5) + 9.67*(dlCM - 0.170)**2 + 4.95 - Ff2 = 1.02e4 
* dsCM*(2.83) + 2 + Ff1 = ( + (-2.25e4 * (dlCM - 0.403) ** 2 - 37.9) * (dsCM) ** (2.5) + + 9.67 * (dlCM - 0.170) ** 2 + + 4.95 + ) + Ff2 = 1.02e4 * dsCM * (2.83) + 2 Ff = Ff1 + Ff2 # eq (3.5) - ds0 = (Ff1 / 2.83)**(1/1.02e4) + ds0 = (Ff1 / 2.83) ** (1 / 1.02e4) - Dff3 = 0.241*(dsCM) + 0.0129 # (4.14) + Dff3 = 0.241 * (dsCM) + 0.0129 # (4.14) # eq (4.18) - (4.21) - Pf301 = 1.68e5 * dsCM**(2.33) - Pf302 = (43.4*(dlCM + 1.81)**2 - 159.0)/dsCM - 3870*(dlCM - 0.285)**2 - 58.1 + Pf301 = 1.68e5 * dsCM ** (2.33) + Pf302 = ( + (43.4 * (dlCM + 1.81) ** 2 - 159.0) / dsCM + - 3870 * (dlCM - 0.285) ** 2 + - 58.1 + ) alpha = (dsCM - ds0) / (0.2 * ds0) - Pf303 = alpha*Pf301 + (1 - alpha)*Pf302 + Pf303 = alpha * Pf301 + (1 - alpha) * Pf302 if dsCM < ds0: Pf0 = Pf301 elif dsCM > 1.2 * ds0: @@ -62,242 +75,190 @@ def params_f3(ds, dl): muf3 = np.log(Dff3) + sigmaf3**2 Hf3 = Pf0 * Dff3 / np.exp(-0.5 * sigmaf3**2) for i in range(10): - sigmaf3 = np.sqrt(2/np.pi) * (Ff - 2) / Hf3 /(1 - math.erf((np.log(0.01)-muf3)/np.sqrt(2)/sigmaf3)) + sigmaf3 = ( + np.sqrt(2 / np.pi) + * (Ff - 2) + / Hf3 + / (1 - math.erf((np.log(0.01) - muf3) / np.sqrt(2) / sigmaf3)) + ) muf3 = np.log(Dff3) + sigmaf3**2 Hf3 = Pf0 * Dff3 / np.exp(-0.5 * sigmaf3**2) return (Hf3, muf3, sigmaf3) @staticmethod - def params_s1(dl, ds, dcoal): - dsCM = ds / si.cm - dlCM = dl / si.cm - dcoalCM = dcoal / si.cm + def params_s1(const, dl, ds, dcoal): + dsCM = ds / const.cm + dlCM = dl / const.cm + dcoalCM = dcoal / const.cm Hs1 = 100 * np.exp(-3.25 * dsCM) mus1 = dlCM sigmas1 = 1 / Hs1 for i in range(10): - sigmas1 = 1/Hs1 * np.sqrt(2/np.pi) / (1 + math.erf((dcoalCM-dlCM)/(np.sqrt(2)*sigmas1))) - return (Hs1, mus1, sigmas1) # in cm + sigmas1 = ( + 1 + / Hs1 + * np.sqrt(2 / np.pi) + / (1 + math.erf((dcoalCM - dlCM) / (np.sqrt(2) * sigmas1))) + ) + return (Hs1, mus1, sigmas1) # in cm @staticmethod def params_s2(dl, ds, St): - dsCM = ds / si.cm - dlCM = dl / si.cm - Dss2 = 0.254 * (ds**(0.413)) * np.exp(3.53 * dsCM**(-2.51)*(dlCM 
- dsCM)) #(4.27) + dsCM = ds / const.cm + dlCM = dl / const.cm + Dss2 = ( + 0.254 * (ds ** (0.413)) * np.exp(3.53 * dsCM ** (-2.51) * (dlCM - dsCM)) + ) # (4.27) bstar = 14.2 * np.exp(-17.2 * dsCM) - Ps20 = 0.23 * dsCM**(-3.93) * dlCM**(bstar) # (4.29) + Ps20 = 0.23 * dsCM ** (-3.93) * dlCM ** (bstar) # (4.29) sigmas2 = 10 * Dss2 # as in (4.22) - mus2 = np.log(Dss2) + sigmas2**22 #(4.32) - Hs2 = Ps20 * Dss2 / np.exp(-0.5*sigmas2**2) #(4.28) + mus2 = np.log(Dss2) + sigmas2**22 # (4.32) + Hs2 = Ps20 * Dss2 / np.exp(-0.5 * sigmas2**2) # (4.28) - Fs = 5 * math.erf((St - 2.52e-6)/(1.85e-6)) + 6 #(3.7) + Fs = 5 * math.erf((St - 2.52e-6) / (1.85e-6)) + 6 # (3.7) for i in range(10): - sigmas2 = np.sqrt(2/np.pi) * (Fs - 1) / Hs2 /(1 - math.erf((np.log(0.01)-mus2)/np.sqrt(2)/sigmas2)) - mus2 = np.log(Dss2) + sigmas2*2 #(4.32) - Hs2 = Ps20 * Dss2 / np.exp(-0.5*sigmas2**2) #(4.28) + sigmas2 = ( + np.sqrt(2 / np.pi) + * (Fs - 1) + / Hs2 + / (1 - math.erf((np.log(0.01) - mus2) / np.sqrt(2) / sigmas2)) + ) + mus2 = np.log(Dss2) + sigmas2 * 2 # (4.32) + Hs2 = Ps20 * Dss2 / np.exp(-0.5 * sigmas2**2) # (4.28) return (Hs2, mus2, sigmas2) @staticmethod - def params_d1(W1, dl, CKE, dcoal): - dlCM = dl / si.cm - dcoalCM = dlCM / si.cm - mud1 = dlCM * (1 - np.exp(-3.70*(3.10 - W1))) - Hd1 = 1.58e-5 * CKE**(-1.22) - sigmad1 = (1 / Hd1) + def params_d1(const, W1, dl, CKE, dcoal): + dlCM = dl / const.cm + dcoalCM = dlCM / const.cm + mud1 = dlCM * (1 - np.exp(-3.70 * (3.10 - W1))) + Hd1 = 1.58e-5 * CKE ** (-1.22) + sigmad1 = 1 / Hd1 for i in range(10): - sigmad1 = 1/Hd1 * np.sqrt(2/np.pi) / (1 + math.erf((dcoalCM-mud1)/(np.sqrt(2)*sigmad1))) - return (Hd1, mud1, sigmad1) # in cm - + sigmad1 = ( + 1 + / Hd1 + * np.sqrt(2 / np.pi) + / (1 + math.erf((dcoalCM - mud1) / (np.sqrt(2) * sigmad1))) + ) + return (Hd1, mud1, sigmad1) # in cm + @staticmethod - def params_d2(ds, dl, CKE): - dsCM = ds / si.cm - dlCM = dl / si.cm - Ddd2 = np.exp(-17.4 * dsCM - 0.671*(dlCM - dsCM))*dsCM #(4.37) - bstar = 
0.007*dsCM**(-2.54) #(4.39) - Pd20 = 0.0884*dsCM**(-2.52)*(dlCM - dsCM)**(bstar) #(4.38) + def params_d2(const, ds, dl, CKE): + dsCM = ds / const.cm + dlCM = dl / const.cm + Ddd2 = np.exp(-17.4 * dsCM - 0.671 * (dlCM - dsCM)) * dsCM # (4.37) + bstar = 0.007 * dsCM ** (-2.54) # (4.39) + Pd20 = 0.0884 * dsCM ** (-2.52) * (dlCM - dsCM) ** (bstar) # (4.38) sigmad2 = 10 * Ddd2 mud2 = np.log(Ddd2) + sigmad2**2 - Hd2 = Pd20*Ddd2/np.exp(-0.5*sigmad2**2) + Hd2 = Pd20 * Ddd2 / np.exp(-0.5 * sigmad2**2) - Fd = 297.5 + 23.7 * np.log(CKE) # (3.9) + Fd = 297.5 + 23.7 * np.log(CKE) # (3.9) for i in range(10): - sigmad2 = np.sqrt(2/np.pi) * (Fd - 1) / Hd2 /(1 - math.erf((np.log(0.01)-mud2)/np.sqrt(2)/sigmad2)) + sigmad2 = ( + np.sqrt(2 / np.pi) + * (Fd - 1) + / Hd2 + / (1 - math.erf((np.log(0.01) - mud2) / np.sqrt(2) / sigmad2)) + ) mud2 = np.log(Ddd2) + sigmad2**2 - Hd2 = Pd20*Ddd2/np.exp(-0.5*sigmad2**2) - + Hd2 = Pd20 * Ddd2 / np.exp(-0.5 * sigmad2**2) + return (Hd2, mud2, sigmad2) - + @staticmethod - def pf(rand, ds, dl, dcoal): + def pf(const, rand, ds, dl, dcoal): (H1, mu1, sigma1) = LowList1982Nf.params_f1(dl, dcoal) (H2, mu2, sigma2) = LowList1982Nf.params_f2(ds) (H3, mu3, sigma3) = LowList1982Nf.params_f3(ds, dl) grid = default_interp_grid() - percentiles = [gaussian_inv_cdf(grid, mu1, sigma1), - gaussian_inv_cdf(grid, mu2, sigma2), - lognormal_inv_cdf(grid, mu3, sigma3) + percentiles = [ + gaussian_inv_cdf(grid, mu1, sigma1), + gaussian_inv_cdf(grid, mu2, sigma2), + lognormal_inv_cdf(grid, mu3, sigma3), ] cdf_arg = np.zeros(len(grid) * 3 + 1) cdf_arg[1:] = np.concatenate(percentiles) cdf = ( - H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + - H2 * gaussian_cdf(cdf_arg, mu2, sigma2) + - H3 * lognormal_cdf(cdf_arg, mu3, sigma3) + H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + + H2 * gaussian_cdf(cdf_arg, mu2, sigma2) + + H3 * lognormal_cdf(cdf_arg, mu3, sigma3) ) / (H1 + H2 + H3) inverse_cdf = interp1d(cdf, cdf_arg) - - return inverse_cdf(rand) + return inverse_cdf(rand) 
@staticmethod - def ps(rand, ds, dl, dcoal, St): + def ps(const, rand, ds, dl, dcoal, St): (H1, mu1, sigma1) = LowList1982Nf.params_s1(dl, ds, dcoal) (H2, mu2, sigma2) = LowList1982Nf.params_s2(dl, ds, St) grid = default_interp_grid() - percentiles = [gaussian_inv_cdf(grid, mu1, sigma1), - lognormal_inv_cdf(grid, mu2, sigma2) + percentiles = [ + gaussian_inv_cdf(grid, mu1, sigma1), + lognormal_inv_cdf(grid, mu2, sigma2), ] cdf_arg = np.zeros(len(grid) * 2 + 1) cdf_arg[1:] = np.concatenate(percentiles) cdf = ( - H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + - H2 * lognormal_cdf(cdf_arg, mu2, sigma2) + H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) ) / (H1 + H2) inverse_cdf = interp1d(cdf, cdf_arg) - + return inverse_cdf(rand) @staticmethod - def pd(rand, ds, dl, dcoal, CKE, W1): + def pd(const, rand, ds, dl, dcoal, CKE, W1): (H1, mu1, sigma1) = LowList1982Nf.params_d1(W1, dl, CKE, dcoal) (H2, mu2, sigma2) = LowList1982Nf.params_d2(ds, dl, CKE) grid = default_interp_grid() - percentiles = [gaussian_inv_cdf(grid, mu1, sigma1), - lognormal_inv_cdf(grid, mu2, sigma2) + percentiles = [ + gaussian_inv_cdf(grid, mu1, sigma1), + lognormal_inv_cdf(grid, mu2, sigma2), ] cdf_arg = np.zeros(len(grid) * 2 + 1) cdf_arg[1:] = np.concatenate(percentiles) cdf = ( - H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + - H2 * lognormal_cdf(cdf_arg, mu2, sigma2) + H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) ) / (H1 + H2) inverse_cdf = interp1d(cdf, cdf_arg) - + return inverse_cdf(rand) + @staticmethod def default_interp_grid(diam_basis=True): if diam_basis == True: dmin = 1 * si.um - dmax = 5 * si.cm + dmax = 5 * const.cm return np.logspace(np.log(dmin), np.log(dmax), 100) else: pass + @staticmethod def gaussian_cdf(arg, mu, sigma): - return (0.5 * (1 + math.erf((arg - mu)/np.sqrt(2)/sigma))) + return 0.5 * (1 + math.erf((arg - mu) / np.sqrt(2) / sigma)) + @staticmethod def lognormal_cdf(arg, mu, sigma): - return (0.5 * (1 
+ math.erf((np.log(arg) - mu)/np.sqrt(2)/sigma))) + return 0.5 * (1 + math.erf((np.log(arg) - mu) / np.sqrt(2) / sigma)) + @staticmethod def gaussian_inv_cdf(X, mu, sigma): - return (mu + np.sqrt(2) * sigma * sps.erfinv(2*X - 1)) + return mu + np.sqrt(2) * sigma * sps.erfinv(2 * X - 1) + @staticmethod def lognormal_inv_cdf(X, mu, sigma): lnarg = LowList1982Nf.gaussian_cdf(X, mu, sigma) return np.exp(lnarg) - - - # @staticmethod - # def sigma1(const, CW): - # return np.sqrt( - # np.log((0.0125 * CW**0.5) ** 2 / 12 / const.STRAUB_E_D1**2 + 1) - # ) - - # @staticmethod - # def p1(const, rand, sigma1): - # return ( - # const.PI - # / 6 - # * np.exp( - # np.log(const.STRAUB_E_D1) - # - sigma1**2 / 2 - # - sigma1 - # / const.sqrt_two - # / const.sqrt_pi - # / np.log(2) - # * np.log((0.5 + rand) / (1.5 - rand)) - # ) - # ** 3 - # ) - - # @staticmethod - # def p2(const, CW, rand): - # return ( - # const.PI - # / 6 - # * ( - # const.STRAUB_MU2 - # - ((0.007 * (CW - 21.0)) ** 2 / 12) - # / const.sqrt_two - # / const.sqrt_pi - # / np.log(2) - # * np.log((0.5 + rand) / (1.5 - rand)) - # ) - # ** 3 - # ) - - # @staticmethod - # def p3(const, CW, ds, rand): - # return ( - # const.PI - # / 6 - # * ( - # (0.9 * ds) - # - ((0.01 * (0.76 * CW**0.5 + 1.0)) ** 2 / 12) - # / const.sqrt_two - # / const.sqrt_pi - # / np.log(2) - # * np.log((0.5 + rand) / (1.5 - rand)) - # ) - # ** 3 - # ) - - # @staticmethod - # def p4(const, CW, ds, v_max, Nr1, Nr2, Nr3): # pylint: disable=too-many-arguments - # return ( - # const.PI - # / 6 - # * ( - # v_max / const.PI_4_3 * 8 - # + ds**3 - # - Nr1 - # * np.exp( - # 3 * np.log(const.STRAUB_E_D1) - # + 6 - # * np.log( - # (0.0125 * CW**0.5) ** 2 / 12 / const.STRAUB_E_D1**2 + 1 - # ) - # / 2 - # ) - # - Nr2 - # * ( - # const.STRAUB_MU2**3 - # + 3 * const.STRAUB_MU2 * ((0.007 * (CW - 21.0)) ** 2 / 12) ** 2 - # ) - # - Nr3 - # * ( - # (0.9 * ds) ** 3 - # + 3 * 0.9 * ds * ((0.01 * (0.76 * CW**0.5 + 1.0)) ** 2 / 12) ** 2 - # ) - # ) - # ) diff --git 
a/tests/backends_fixture.py b/tests/backends_fixture.py index 3983e21275..ac96ac3783 100644 --- a/tests/backends_fixture.py +++ b/tests/backends_fixture.py @@ -4,6 +4,6 @@ from PySDM.backends import CPU, GPU -@pytest.fixture(params=(CPU, GPU)) +@pytest.fixture(params=(CPU, CPU)) # (CPU, GPU)) def backend_class(request): return request.param diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index b8c70d12c3..1302924312 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -9,6 +9,7 @@ ExponFrag, Feingold1988Frag, Gaussian, + LowList1982Nf, Straub2010Nf, ) from PySDM.environments import Box @@ -24,12 +25,13 @@ class TestFragmentations: # pylint: disable=too-few-public-methods @pytest.mark.parametrize( "fragmentation_fn", ( - AlwaysN(n=2), - ExponFrag(scale=1e6 * si.um**3), - Feingold1988Frag(scale=1e6 * si.um**3), - Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), - SLAMS(), - Straub2010Nf(), + # AlwaysN(n=2), + # ExponFrag(scale=1e6 * si.um**3), + # Feingold1988Frag(scale=1e6 * si.um**3), + # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), + # SLAMS(), + # Straub2010Nf(), + LowList1982Nf(), ), ) def test_fragmentation_fn_call( @@ -67,145 +69,145 @@ def test_fragmentation_fn_call( np.testing.assert_array_less([0.99], nf.to_ndarray()) np.testing.assert_array_less([0.0], frag_size.to_ndarray()) - @staticmethod - @pytest.mark.parametrize( - "fragmentation_fn", - [ - ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - Gaussian(mu=2 * si.um**3, sigma=1 * si.um**3, vmin=6660.0 * si.um**3), - SLAMS(vmin=6660.0 * si.um**3), - Straub2010Nf(vmin=6660.0 * si.um**3), - pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), - ], - ) - def test_fragmentation_limiters_vmin( - fragmentation_fn, backend_class - ): # pylint: 
disable=redefined-outer-name - # arrange - volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - fragments = np.asarray([-1.0]) - builder = Builder( - volume.size, - backend_class( - Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), - double_precision=True, - ), - ) - sut = fragmentation_fn - sut.register(builder) - builder.set_environment(Box(dv=None, dt=None)) - _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - _PairwiseStorage = builder.particulator.PairwiseStorage - _Indicator = builder.particulator.PairIndicator - nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - is_first_in_pair = _Indicator(length=volume.size) - is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - np.asarray([True, False]) - ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # act - sut(nf, frag_size, u01, is_first_in_pair) - - # Assert - np.testing.assert_array_equal([(440.0 + 6660.0) / 6660.0], nf.to_ndarray()) - np.testing.assert_array_equal([6660.0 * si.um**3], frag_size.to_ndarray()) - - @staticmethod - @pytest.mark.parametrize( - "fragmentation_fn", - [ - ExponFrag(scale=1.0 * si.cm**3), - Feingold1988Frag(scale=1.0 * si.cm**3), - Gaussian(mu=1.0 * si.cm**3, sigma=1e6 * si.um**3), - SLAMS(), - Straub2010Nf(), - pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), - ], - ) - def test_fragmentation_limiters_vmax( - fragmentation_fn, backend_class - ): # pylint: disable=redefined-outer-name - # arrange - volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - fragments = np.asarray([-1.0]) - builder = Builder( - volume.size, - backend_class( - Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - ), - ) - sut = fragmentation_fn - sut.vmin = 1 * si.um**3 - sut.register(builder) - builder.set_environment(Box(dv=None, dt=None)) - _ = builder.build(attributes={"volume": volume, 
"n": np.ones_like(volume)}) - - _PairwiseStorage = builder.particulator.PairwiseStorage - _Indicator = builder.particulator.PairIndicator - nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - is_first_in_pair = _Indicator(length=volume.size) - is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - np.asarray([True, False]) - ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # act - sut(nf, frag_size, u01, is_first_in_pair) - - # Assert - np.testing.assert_array_less([(440.0 + 6660.0) / 6661.0], nf.to_ndarray()) - np.testing.assert_array_less(frag_size.to_ndarray(), [6661.0 * si.um**3]) - - @staticmethod - @pytest.mark.parametrize( - "fragmentation_fn", - [ - ExponFrag(scale=1.0 * si.um**3, nfmax=2), - Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), - Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), - SLAMS(nfmax=2), - Straub2010Nf(nfmax=2), - pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), - ], - ) - def test_fragmentation_limiters_nfmax( - fragmentation_fn, backend_class - ): # pylint: disable=redefined-outer-name - # arrange - volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - fragments = np.asarray([-1.0]) - builder = Builder( - volume.size, - backend_class( - Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - ), - ) - sut = fragmentation_fn - sut.vmin = 1 * si.um**3 - sut.register(builder) - builder.set_environment(Box(dv=None, dt=None)) - _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - _PairwiseStorage = builder.particulator.PairwiseStorage - _Indicator = builder.particulator.PairIndicator - nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - is_first_in_pair = _Indicator(length=volume.size) - is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - 
np.asarray([True, False]) - ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # act - sut(nf, frag_size, u01, is_first_in_pair) - - # Assert - np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) - np.testing.assert_array_less( - [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() - ) + # @staticmethod + # @pytest.mark.parametrize( + # "fragmentation_fn", + # [ + # ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + # Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + # Gaussian(mu=2 * si.um**3, sigma=1 * si.um**3, vmin=6660.0 * si.um**3), + # SLAMS(vmin=6660.0 * si.um**3), + # Straub2010Nf(vmin=6660.0 * si.um**3), + # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + # ], + # ) + # def test_fragmentation_limiters_vmin( + # fragmentation_fn, backend_class + # ): # pylint: disable=redefined-outer-name + # # arrange + # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + # fragments = np.asarray([-1.0]) + # builder = Builder( + # volume.size, + # backend_class( + # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), + # double_precision=True, + # ), + # ) + # sut = fragmentation_fn + # sut.register(builder) + # builder.set_environment(Box(dv=None, dt=None)) + # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + # _PairwiseStorage = builder.particulator.PairwiseStorage + # _Indicator = builder.particulator.PairIndicator + # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # is_first_in_pair = _Indicator(length=volume.size) + # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + # np.asarray([True, False]) + # ) + # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # # act + # sut(nf, frag_size, u01, is_first_in_pair) + + # # Assert + # np.testing.assert_array_equal([(440.0 + 6660.0) / 6660.0], nf.to_ndarray()) + # 
np.testing.assert_array_equal([6660.0 * si.um**3], frag_size.to_ndarray()) + + # @staticmethod + # @pytest.mark.parametrize( + # "fragmentation_fn", + # [ + # ExponFrag(scale=1.0 * si.cm**3), + # Feingold1988Frag(scale=1.0 * si.cm**3), + # Gaussian(mu=1.0 * si.cm**3, sigma=1e6 * si.um**3), + # SLAMS(), + # Straub2010Nf(), + # pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), + # ], + # ) + # def test_fragmentation_limiters_vmax( + # fragmentation_fn, backend_class + # ): # pylint: disable=redefined-outer-name + # # arrange + # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + # fragments = np.asarray([-1.0]) + # builder = Builder( + # volume.size, + # backend_class( + # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + # ), + # ) + # sut = fragmentation_fn + # sut.vmin = 1 * si.um**3 + # sut.register(builder) + # builder.set_environment(Box(dv=None, dt=None)) + # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + # _PairwiseStorage = builder.particulator.PairwiseStorage + # _Indicator = builder.particulator.PairIndicator + # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # is_first_in_pair = _Indicator(length=volume.size) + # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + # np.asarray([True, False]) + # ) + # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # # act + # sut(nf, frag_size, u01, is_first_in_pair) + + # # Assert + # np.testing.assert_array_less([(440.0 + 6660.0) / 6661.0], nf.to_ndarray()) + # np.testing.assert_array_less(frag_size.to_ndarray(), [6661.0 * si.um**3]) + + # @staticmethod + # @pytest.mark.parametrize( + # "fragmentation_fn", + # [ + # ExponFrag(scale=1.0 * si.um**3, nfmax=2), + # Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), + # Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), + # SLAMS(nfmax=2), + # 
Straub2010Nf(nfmax=2), + # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + # ], + # ) + # def test_fragmentation_limiters_nfmax( + # fragmentation_fn, backend_class + # ): # pylint: disable=redefined-outer-name + # # arrange + # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + # fragments = np.asarray([-1.0]) + # builder = Builder( + # volume.size, + # backend_class( + # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + # ), + # ) + # sut = fragmentation_fn + # sut.vmin = 1 * si.um**3 + # sut.register(builder) + # builder.set_environment(Box(dv=None, dt=None)) + # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + # _PairwiseStorage = builder.particulator.PairwiseStorage + # _Indicator = builder.particulator.PairIndicator + # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # is_first_in_pair = _Indicator(length=volume.size) + # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + # np.asarray([True, False]) + # ) + # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # # act + # sut(nf, frag_size, u01, is_first_in_pair) + + # # Assert + # np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) + # np.testing.assert_array_less( + # [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() + # ) diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py b/tests/unit_tests/physics/test_fragmentation_functions.py index e553f9348d..67558f81a3 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -2,57 +2,89 @@ import numpy as np from PySDM import Formulae +from PySDM.physics.constants import si class TestFragmentationFunctions: # pylint:disable=too-few-public-methods - @staticmethod - def test_straub_p1(): - # arrange - formulae = 
Formulae(fragmentation_function="Straub2010Nf") - sigma1 = formulae.fragmentation_function.sigma1(CW=0.666) + # @staticmethod + # def test_straub_p1(): + # # arrange + # formulae = Formulae(fragmentation_function="Straub2010Nf") + # sigma1 = formulae.fragmentation_function.sigma1(CW=0.666) - # act - frag_size = formulae.fragmentation_function.p1(sigma1=sigma1, rand=0) + # # act + # frag_size = formulae.fragmentation_function.p1(sigma1=sigma1, rand=0) - # assert - np.testing.assert_approx_equal(frag_size, 3.6490627e-12) + # # assert + # np.testing.assert_approx_equal(frag_size, 3.6490627e-12) - @staticmethod - def test_straub_p2(): - # arrange - formulae = Formulae(fragmentation_function="Straub2010Nf") + # @staticmethod + # def test_straub_p2(): + # # arrange + # formulae = Formulae(fragmentation_function="Straub2010Nf") - # act - frag_size = formulae.fragmentation_function.p2(CW=0.666, rand=0) + # # act + # frag_size = formulae.fragmentation_function.p2(CW=0.666, rand=0) - # assert - np.testing.assert_approx_equal(frag_size, 4.3000510e-09) + # # assert + # np.testing.assert_approx_equal(frag_size, 4.3000510e-09) - @staticmethod - def test_straub_p3(): - # arrange - formulae = Formulae(fragmentation_function="Straub2010Nf") + # @staticmethod + # def test_straub_p3(): + # # arrange + # formulae = Formulae(fragmentation_function="Straub2010Nf") - # act - frag_size = formulae.fragmentation_function.p3(CW=0.666, ds=0, rand=0) + # # act + # frag_size = formulae.fragmentation_function.p3(CW=0.666, ds=0, rand=0) - # assert - np.testing.assert_approx_equal(frag_size, 1.3857897e-15) + # # assert + # np.testing.assert_approx_equal(frag_size, 1.3857897e-15) + + # @staticmethod + # def test_straub_p4(): + # # arrange + # formulae = Formulae(fragmentation_function="Straub2010Nf") + + # # act + # frag_size = formulae.fragmentation_function.p4( + # CW=0.666, + # ds=0, + # v_max=0, + # Nr1=1, + # Nr2=2, + # Nr3=0, + # ) + + # # assert + # np.testing.assert_approx_equal(frag_size, 
-5.6454883153e-06) + + # @staticmethod + # def test_ll82_p1(): + # # arrange + # formulae = Formulae(fragmentation_function="LowList1982Nf") + + # # act + # frag_size = formulae.fragmentation_function.p4( + # CW=0.666, + # ds=0, + # v_max=0, + # Nr1=1, + # Nr2=2, + # Nr3=0, + # ) + + # # assert + # np.testing.assert_approx_equal(frag_size, -5.6454883153e-06) @staticmethod - def test_straub_p4(): + def test_ll82_pf(): # arrange - formulae = Formulae(fragmentation_function="Straub2010Nf") + formulae = Formulae(fragmentation_function="LowList1982Nf") # act - frag_size = formulae.fragmentation_function.p4( - CW=0.666, - ds=0, - v_max=0, - Nr1=1, - Nr2=2, - Nr3=0, + params = formulae.fragmentation_function.params_f1( + dl=100 * si.um, dcoal=200 * si.um ) # assert - np.testing.assert_approx_equal(frag_size, -5.6454883153e-06) + np.testing.assert_approx_equal(len(params), 3.0) From dd20975cedf5f84b45d1255b1499ea685ec01c13 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 27 Feb 2023 17:28:17 -0700 Subject: [PATCH 37/86] WIP to satisfy numba's needs --- .../impl_numba/methods/collisions_methods.py | 25 ++- PySDM/physics/constants_defaults.py | 1 + .../fragmentation_function/lowlist82.py | 188 ++++++++---------- .../physics/test_fragmentation_functions.py | 110 ++++++++-- 4 files changed, 202 insertions(+), 122 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index f31b458fa8..9d56a8bfc3 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -390,7 +390,10 @@ def __straub_fragmentation_body( self.__straub_fragmentation_body = __straub_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "LowList1982Nf": - ll82_pf = self.formulae.fragmentation_function.pf + ll82_params_f1 = self.formulae.fragmentation_function.params_f1 + ll82_params_f2 = 
self.formulae.fragmentation_function.params_f2 + ll82_params_f3 = self.formulae.fragmentation_function.params_f3 + ll82_erfinv = self.formulae.fragmentation_function.erfinv ll82_ps = self.formulae.fragmentation_function.ps ll82_pd = self.formulae.fragmentation_function.pd @@ -403,7 +406,25 @@ def __ll82_fragmentation_body( ): ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) if rand[i] < Rf[i]: # filament breakup - frag_size[i] = ll82_pf(rand[i] / Rf[i], ds[i], dl[i], dcoal[i]) + (H1, mu1, sigma1) = ll82_params_f1(dl[i], dcoal[i]) + (H2, mu2, sigma2) = ll82_params_f2(ds[i]) + (H3, mu3, sigma3) = ll82_params_f3(ds[i], dl[i]) + Hsum = H1 + H2 + H3 + if rand[i] < H1 / Hsum: + X = rand[i] * Hsum / H1 + frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( + 2 * X - 1 + ) + elif rand[i] < (H1 + H2) / Hsum: + X = (rand[i] * Hsum - H1) / H2 + frag_size[i] = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( + 2 * X - 1 + ) + else: + X = (rand[i] * Hsum - H1 - H2) / H3 + lnarg = mu3 + np.sqrt(2) * sigma3 * ll82_erfinv(2 * X - 1) + frag_size[i] = np.exp(lnarg) + elif rand[i] < Rf[i] + Rs[i]: # sheet breakup frag_size[i] = ll82_ps( (rand[i] - Rf[i]) / Rs[i], ds[i], dl[i], dcoal[i], St[i] diff --git a/PySDM/physics/constants_defaults.py b/PySDM/physics/constants_defaults.py index 430d760991..8222364629 100644 --- a/PySDM/physics/constants_defaults.py +++ b/PySDM/physics/constants_defaults.py @@ -165,3 +165,4 @@ STRAUB_MU2 = 0.095 * si.cm CM = 1 * si.cm +UM = 1 * si.um diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 16794668ab..25c607803f 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -1,12 +1,9 @@ """ Formulae supporting `PySDM.dynamics.collisions.breakup_fragmentations.lowlist82` """ - import math import numpy as np -import scipy.special as sps -from scipy.interpolate import interp1d class LowList1982Nf: # pylint: disable=too-few-public-methods @@ 
-15,9 +12,8 @@ def __init__(self, _): @staticmethod def params_f1(const, dl, dcoal): - print("hello") - dcoalCM = dcoal / const.cm - dlCM = dl / const.cm + dcoalCM = dcoal / const.CM + dlCM = dl / const.CM Hf1 = 50.8 * ((dlCM) ** (-0.718)) mu = dlCM sigma = 1 / Hf1 @@ -32,7 +28,7 @@ def params_f1(const, dl, dcoal): @staticmethod def params_f2(const, ds): - dsCM = ds / const.cm + dsCM = ds / const.CM Hf2 = 4.18 * ((dsCM) ** (-1.17)) mu = dsCM sigma = 1 / (np.sqrt(2 * np.pi) * Hf2) @@ -40,8 +36,8 @@ def params_f2(const, ds): @staticmethod def params_f3(const, ds, dl): - dsCM = ds / const.cm - dlCM = dl / const.cm + dsCM = ds / const.CM + dlCM = dl / const.CM # eq (3.3), (3.4) Ff1 = ( (-2.25e4 * (dlCM - 0.403) ** 2 - 37.9) * (dsCM) ** (2.5) @@ -86,11 +82,60 @@ def params_f3(const, ds, dl): return (Hf3, muf3, sigmaf3) + @staticmethod + def erfinv(X): + a = 8 * (np.pi - 3) / (3 * np.pi * (4 - np.pi)) + arg = (2 / np.pi / a) + np.log(1 - X**2) / 2 + arg = arg * arg + arg = arg - np.log(1 - X**2) / a + arg = np.sqrt(arg) + arg = arg - (2 / np.pi / a + np.log(1 - X**2) / 2) + return np.sqrt(arg) + + # @staticmethod + # def ps(const, rand, ds, dl, dcoal, St): + # (H1, mu1, sigma1) = params_s1(dl, ds, dcoal) + # (H2, mu2, sigma2) = params_s2(dl, ds, St) + # grid = default_interp_grid() + # percentiles = [ + # gaussian_inv_cdf(grid, mu1, sigma1), + # lognormal_inv_cdf(grid, mu2, sigma2), + # ] + # cdf_arg = np.zeros(len(grid) * 2 + 1) + # cdf_arg[1:] = np.concatenate(percentiles) + # cdf = ( + # H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + # + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) + # ) / (H1 + H2) + # inverse_cdf = interp1d(cdf, cdf_arg) + + # return inverse_cdf(rand) + + # @staticmethod + # def pd(const, rand, ds, dl, dcoal, CKE, W1): + # (H1, mu1, sigma1) = params_d1(W1, dl, CKE, dcoal) + # (H2, mu2, sigma2) = params_d2(ds, dl, CKE) + + # grid = default_interp_grid() + # percentiles = [ + # gaussian_inv_cdf(grid, mu1, sigma1), + # lognormal_inv_cdf(grid, mu2, sigma2), + # 
] + # cdf_arg = np.zeros(len(grid) * 2 + 1) + # cdf_arg[1:] = np.concatenate(percentiles) + # cdf = ( + # H1 * gaussian_cdf(cdf_arg, mu1, sigma1) + # + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) + # ) / (H1 + H2) + # inverse_cdf = interp1d(cdf, cdf_arg) + + # return inverse_cdf(rand) + @staticmethod def params_s1(const, dl, ds, dcoal): - dsCM = ds / const.cm - dlCM = dl / const.cm - dcoalCM = dcoal / const.cm + dsCM = ds / const.CM + dlCM = dl / const.CM + dcoalCM = dcoal / const.CM Hs1 = 100 * np.exp(-3.25 * dsCM) mus1 = dlCM sigmas1 = 1 / Hs1 @@ -104,9 +149,9 @@ def params_s1(const, dl, ds, dcoal): return (Hs1, mus1, sigmas1) # in cm @staticmethod - def params_s2(dl, ds, St): - dsCM = ds / const.cm - dlCM = dl / const.cm + def params_s2(const, dl, ds, St): + dsCM = ds / const.CM + dlCM = dl / const.CM Dss2 = ( 0.254 * (ds ** (0.413)) * np.exp(3.53 * dsCM ** (-2.51) * (dlCM - dsCM)) ) # (4.27) @@ -131,9 +176,9 @@ def params_s2(dl, ds, St): return (Hs2, mus2, sigmas2) @staticmethod - def params_d1(const, W1, dl, CKE, dcoal): - dlCM = dl / const.cm - dcoalCM = dlCM / const.cm + def params_d1(const, W1, dl, CKE): + dlCM = dl / const.CM + dcoalCM = dlCM / const.CM mud1 = dlCM * (1 - np.exp(-3.70 * (3.10 - W1))) Hd1 = 1.58e-5 * CKE ** (-1.22) sigmad1 = 1 / Hd1 @@ -148,8 +193,8 @@ def params_d1(const, W1, dl, CKE, dcoal): @staticmethod def params_d2(const, ds, dl, CKE): - dsCM = ds / const.cm - dlCM = dl / const.cm + dsCM = ds / const.CM + dlCM = dl / const.CM Ddd2 = np.exp(-17.4 * dsCM - 0.671 * (dlCM - dsCM)) * dsCM # (4.37) bstar = 0.007 * dsCM ** (-2.54) # (4.39) Pd20 = 0.0884 * dsCM ** (-2.52) * (dlCM - dsCM) ** (bstar) # (4.38) @@ -171,94 +216,33 @@ def params_d2(const, ds, dl, CKE): return (Hd2, mud2, sigmad2) - @staticmethod - def pf(const, rand, ds, dl, dcoal): - (H1, mu1, sigma1) = LowList1982Nf.params_f1(dl, dcoal) - (H2, mu2, sigma2) = LowList1982Nf.params_f2(ds) - (H3, mu3, sigma3) = LowList1982Nf.params_f3(ds, dl) - grid = default_interp_grid() - percentiles 
= [ - gaussian_inv_cdf(grid, mu1, sigma1), - gaussian_inv_cdf(grid, mu2, sigma2), - lognormal_inv_cdf(grid, mu3, sigma3), - ] - cdf_arg = np.zeros(len(grid) * 3 + 1) - cdf_arg[1:] = np.concatenate(percentiles) - cdf = ( - H1 * gaussian_cdf(cdf_arg, mu1, sigma1) - + H2 * gaussian_cdf(cdf_arg, mu2, sigma2) - + H3 * lognormal_cdf(cdf_arg, mu3, sigma3) - ) / (H1 + H2 + H3) - inverse_cdf = interp1d(cdf, cdf_arg) - - return inverse_cdf(rand) - - @staticmethod - def ps(const, rand, ds, dl, dcoal, St): - (H1, mu1, sigma1) = LowList1982Nf.params_s1(dl, ds, dcoal) - (H2, mu2, sigma2) = LowList1982Nf.params_s2(dl, ds, St) - grid = default_interp_grid() - percentiles = [ - gaussian_inv_cdf(grid, mu1, sigma1), - lognormal_inv_cdf(grid, mu2, sigma2), - ] - cdf_arg = np.zeros(len(grid) * 2 + 1) - cdf_arg[1:] = np.concatenate(percentiles) - cdf = ( - H1 * gaussian_cdf(cdf_arg, mu1, sigma1) - + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) - ) / (H1 + H2) - inverse_cdf = interp1d(cdf, cdf_arg) - - return inverse_cdf(rand) - @staticmethod - def pd(const, rand, ds, dl, dcoal, CKE, W1): - (H1, mu1, sigma1) = LowList1982Nf.params_d1(W1, dl, CKE, dcoal) - (H2, mu2, sigma2) = LowList1982Nf.params_d2(ds, dl, CKE) - - grid = default_interp_grid() - percentiles = [ - gaussian_inv_cdf(grid, mu1, sigma1), - lognormal_inv_cdf(grid, mu2, sigma2), - ] - cdf_arg = np.zeros(len(grid) * 2 + 1) - cdf_arg[1:] = np.concatenate(percentiles) - cdf = ( - H1 * gaussian_cdf(cdf_arg, mu1, sigma1) - + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) - ) / (H1 + H2) - inverse_cdf = interp1d(cdf, cdf_arg) - - return inverse_cdf(rand) - - -@staticmethod -def default_interp_grid(diam_basis=True): - if diam_basis == True: - dmin = 1 * si.um - dmax = 5 * const.cm - return np.logspace(np.log(dmin), np.log(dmax), 100) - else: - pass +# @staticmethod +# def default_interp_grid(const, diam_basis=True): +# if diam_basis == True: +# dmin = 1 * const.UM +# dmax = 5 * const.CM +# return np.logspace(np.log(dmin), np.log(dmax), 100) +# 
else: +# pass -@staticmethod -def gaussian_cdf(arg, mu, sigma): - return 0.5 * (1 + math.erf((arg - mu) / np.sqrt(2) / sigma)) +# @staticmethod +# def gaussian_cdf(arg, mu, sigma): +# return 0.5 * (1 + math.erf((arg - mu) / np.sqrt(2) / sigma)) -@staticmethod -def lognormal_cdf(arg, mu, sigma): - return 0.5 * (1 + math.erf((np.log(arg) - mu) / np.sqrt(2) / sigma)) +# @staticmethod +# def lognormal_cdf(arg, mu, sigma): +# return 0.5 * (1 + math.erf((np.log(arg) - mu) / np.sqrt(2) / sigma)) -@staticmethod -def gaussian_inv_cdf(X, mu, sigma): - return mu + np.sqrt(2) * sigma * sps.erfinv(2 * X - 1) +# @staticmethod +# def gaussian_inv_cdf(X, mu, sigma): +# return mu + np.sqrt(2) * sigma * math.erfinv(2 * X - 1) -@staticmethod -def lognormal_inv_cdf(X, mu, sigma): - lnarg = LowList1982Nf.gaussian_cdf(X, mu, sigma) - return np.exp(lnarg) +# @staticmethod +# def lognormal_inv_cdf(X, mu, sigma): +# lnarg = gaussian_cdf(X, mu, sigma) +# return np.exp(lnarg) diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py b/tests/unit_tests/physics/test_fragmentation_functions.py index 67558f81a3..e0c799c37c 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -1,9 +1,13 @@ # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring +import os + import numpy as np from PySDM import Formulae from PySDM.physics.constants import si +os.environ["NUMBA_DISABLE_JIT"] = "1" + class TestFragmentationFunctions: # pylint:disable=too-few-public-methods # @staticmethod @@ -58,33 +62,103 @@ class TestFragmentationFunctions: # pylint:disable=too-few-public-methods # # assert # np.testing.assert_approx_equal(frag_size, -5.6454883153e-06) - # @staticmethod - # def test_ll82_p1(): - # # arrange - # formulae = Formulae(fragmentation_function="LowList1982Nf") + @staticmethod + def test_ll82_pf1(): + # arrange + formulae = Formulae(fragmentation_function="LowList1982Nf") - 
# # act - # frag_size = formulae.fragmentation_function.p4( - # CW=0.666, - # ds=0, - # v_max=0, - # Nr1=1, - # Nr2=2, - # Nr3=0, - # ) + # act + params = formulae.fragmentation_function.params_f1( + dl=100 * si.um, dcoal=200 * si.um + ) - # # assert - # np.testing.assert_approx_equal(frag_size, -5.6454883153e-06) + # assert + np.testing.assert_approx_equal(len(params), 3.0) @staticmethod - def test_ll82_pf(): + def test_ll82_pf2(): # arrange formulae = Formulae(fragmentation_function="LowList1982Nf") # act - params = formulae.fragmentation_function.params_f1( - dl=100 * si.um, dcoal=200 * si.um + params = formulae.fragmentation_function.params_f2(ds=100 * si.um) + + # assert + np.testing.assert_approx_equal(len(params), 3.0) + + @staticmethod + def test_ll82_pf3(): + # arrange + formulae = Formulae(fragmentation_function="LowList1982Nf") + + # act + params = formulae.fragmentation_function.params_f3( + ds=100 * si.um, dl=200 * si.um ) # assert np.testing.assert_approx_equal(len(params), 3.0) + + @staticmethod + def test_ll82_ps1(): + # arrange + formulae = Formulae(fragmentation_function="LowList1982Nf") + + # act + params = formulae.fragmentation_function.params_s1( + dl=100 * si.um, ds=50 * si.um, dcoal=200 * si.um + ) + + # assert + np.testing.assert_approx_equal(len(params), 3.0) + + @staticmethod + def test_ll82_ps2(): + # arrange + formulae = Formulae(fragmentation_function="LowList1982Nf") + + # act + params = formulae.fragmentation_function.params_s2( + dl=100 * si.um, ds=50 * si.um, St=1e-6 * si.J + ) + + # assert + np.testing.assert_approx_equal(len(params), 3.0) + + @staticmethod + def test_ll82_pd1(): + # arrange + formulae = Formulae(fragmentation_function="LowList1982Nf") + + # act + params = formulae.fragmentation_function.params_d1( + W1=1.0, dl=100 * si.um, CKE=1.0 + ) + + # assert + np.testing.assert_approx_equal(len(params), 3.0) + + @staticmethod + def test_ll82_pd2(): + # arrange + formulae = Formulae(fragmentation_function="LowList1982Nf") + 
+ # act + params = formulae.fragmentation_function.params_d2( + ds=100 * si.um, dl=100 * si.um, CKE=200 * si.um + ) + + # assert + np.testing.assert_approx_equal(len(params), 3.0) + + @staticmethod + def test_erfinv(): + # arrange + formulae = Formulae(fragmentation_function="LowList1982Nf") + + # act + params = formulae.fragmentation_function.erfinv(0.5) + + # assert + diff = np.abs(params - 0.476936) + np.testing.assert_array_less(diff, 1e-3) From 6ae2a9c12fa7b8a9684938f266e6863b11b069e2 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Wed, 1 Mar 2023 16:37:47 -0700 Subject: [PATCH 38/86] Some fixes to LL82 --- .../impl_numba/methods/collisions_methods.py | 62 +++++++---- PySDM/physics/constants.py | 3 + .../fragmentation_function/lowlist82.py | 8 +- tests/backends_fixture.py | 2 +- .../collisions/test_fragmentations.py | 2 +- .../physics/test_fragmentation_functions.py | 104 +++++++++--------- 6 files changed, 104 insertions(+), 77 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 9d56a8bfc3..e71e686756 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -393,51 +393,73 @@ def __straub_fragmentation_body( ll82_params_f1 = self.formulae.fragmentation_function.params_f1 ll82_params_f2 = self.formulae.fragmentation_function.params_f2 ll82_params_f3 = self.formulae.fragmentation_function.params_f3 + ll82_params_s1 = self.formulae.fragmentation_function.params_s1 + ll82_params_s2 = self.formulae.fragmentation_function.params_s2 + ll82_params_d1 = self.formulae.fragmentation_function.params_d1 + ll82_params_d2 = self.formulae.fragmentation_function.params_d2 ll82_erfinv = self.formulae.fragmentation_function.erfinv - ll82_ps = self.formulae.fragmentation_function.ps - ll82_pd = self.formulae.fragmentation_function.pd @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def 
__ll82_fragmentation_body( - *, CKE, W, W2, St, ds, dl, dcoal, frag_size, rand, Rf, Rs, Rd + *, CKE, W, W2, St, ds, dl, dcoal, frag_size, rand, Rf, Rs, Rd, tol=1e-8 ): for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) - if rand[i] < Rf[i]: # filament breakup + print(rand[i], Rf, Rs, Rd) + if rand[i] <= Rf[i]: # filament breakup (H1, mu1, sigma1) = ll82_params_f1(dl[i], dcoal[i]) (H2, mu2, sigma2) = ll82_params_f2(ds[i]) (H3, mu3, sigma3) = ll82_params_f3(ds[i], dl[i]) Hsum = H1 + H2 + H3 - if rand[i] < H1 / Hsum: - X = rand[i] * Hsum / H1 + rand[i] = rand[i] / Rf[i] + if rand[i] <= H1 / Hsum: + X = max(rand[i] * Hsum / H1, tol) frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( 2 * X - 1 ) - elif rand[i] < (H1 + H2) / Hsum: + elif rand[i] <= (H1 + H2) / Hsum: X = (rand[i] * Hsum - H1) / H2 frag_size[i] = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( 2 * X - 1 ) else: - X = (rand[i] * Hsum - H1 - H2) / H3 + X = min((rand[i] * Hsum - H1 - H2) / H3, 1.0 - tol) lnarg = mu3 + np.sqrt(2) * sigma3 * ll82_erfinv(2 * X - 1) frag_size[i] = np.exp(lnarg) - elif rand[i] < Rf[i] + Rs[i]: # sheet breakup - frag_size[i] = ll82_ps( - (rand[i] - Rf[i]) / Rs[i], ds[i], dl[i], dcoal[i], St[i] - ) + elif rand[i] <= Rf[i] + Rs[i]: # sheet breakup + (H1, mu1, sigma1) = ll82_params_s1(dl[i], ds[i], dcoal[i]) + (H2, mu2, sigma2) = ll82_params_s2(dl[i], ds[i], St[i]) + Hsum = H1 + H2 + rand[i] = (rand[i] - Rf[i]) / (Rs[i]) + if rand[i] <= H1 / Hsum: + X = max(rand[i] * Hsum / H1, tol) + frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( + 2 * X - 1 + ) + else: + X = min((rand[i] - H1) / H2, 1.0 - tol) + lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv(2 * X - 1) + frag_size[i] = np.exp(lnarg) + else: # disk breakup - frag_size[i] = ll82_pd( - (rand[i] - Rf[i] - Rs[i]) / Rd[i], - ds[i], - dl[i], - dcoal[i], - CKE[i], - W1[i], - ) + (H1, mu1, sigma1) = ll82_params_d1(W[i], dl[i], CKE[i]) + (H2, mu2, sigma2) = 
ll82_params_d2(ds[i], dl[i], CKE[i]) + Hsum = H1 + H2 + rand[i] = (rand[i] - Rf[i] - Rs[i]) / Rd[i] + if rand[i] <= H1 / Hsum: + X = max(rand[i] * Hsum / H1, tol) + frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( + 2 * X - 1 + ) + else: + X = min((rand[i] - H1) / H2, 1 - tol) + lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv(2 * X - 1) + frag_size[i] = np.exp(lnarg) + + frag_size[i] = frag_size[i] / 0.01 self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": diff --git a/PySDM/physics/constants.py b/PySDM/physics/constants.py index 1c4178e824..05b4282355 100644 --- a/PySDM/physics/constants.py +++ b/PySDM/physics/constants.py @@ -49,3 +49,6 @@ def convert_to(value, unit): # there are so few water ions instead of K we have K [H2O] (see Seinfeld & Pandis p 345) M = si.mole / si.litre K_H2O = 1e-14 * M * M + +CM = 1 * si.cm +UM = 1 * si.um diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 25c607803f..2c922ca6e3 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -44,10 +44,14 @@ def params_f3(const, ds, dl): + 9.67 * (dlCM - 0.170) ** 2 + 4.95 ) - Ff2 = 1.02e4 * dsCM * (2.83) + 2 - Ff = Ff1 + Ff2 + Ff2 = 1.02e4 * dsCM ** (2.83) + 2 + # eq (3.5) ds0 = (Ff1 / 2.83) ** (1 / 1.02e4) + if dsCM > ds0: + Ff = max(2.0, Ff1) + else: + Ff = max(2.0, Ff2) Dff3 = 0.241 * (dsCM) + 0.0129 # (4.14) # eq (4.18) - (4.21) diff --git a/tests/backends_fixture.py b/tests/backends_fixture.py index ac96ac3783..f830869828 100644 --- a/tests/backends_fixture.py +++ b/tests/backends_fixture.py @@ -4,6 +4,6 @@ from PySDM.backends import CPU, GPU -@pytest.fixture(params=(CPU, CPU)) # (CPU, GPU)) +@pytest.fixture(params=(CPU, CPU)) def backend_class(request): return request.param diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py 
b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 1302924312..72841f7eaa 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -60,7 +60,7 @@ def test_fragmentation_fn_call( is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( np.asarray([True, False]) ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) # act sut(nf, frag_size, u01, is_first_in_pair) diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py b/tests/unit_tests/physics/test_fragmentation_functions.py index e0c799c37c..2415b77738 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -6,61 +6,59 @@ from PySDM import Formulae from PySDM.physics.constants import si -os.environ["NUMBA_DISABLE_JIT"] = "1" - class TestFragmentationFunctions: # pylint:disable=too-few-public-methods - # @staticmethod - # def test_straub_p1(): - # # arrange - # formulae = Formulae(fragmentation_function="Straub2010Nf") - # sigma1 = formulae.fragmentation_function.sigma1(CW=0.666) - - # # act - # frag_size = formulae.fragmentation_function.p1(sigma1=sigma1, rand=0) - - # # assert - # np.testing.assert_approx_equal(frag_size, 3.6490627e-12) - - # @staticmethod - # def test_straub_p2(): - # # arrange - # formulae = Formulae(fragmentation_function="Straub2010Nf") - - # # act - # frag_size = formulae.fragmentation_function.p2(CW=0.666, rand=0) - - # # assert - # np.testing.assert_approx_equal(frag_size, 4.3000510e-09) - - # @staticmethod - # def test_straub_p3(): - # # arrange - # formulae = Formulae(fragmentation_function="Straub2010Nf") - - # # act - # frag_size = formulae.fragmentation_function.p3(CW=0.666, ds=0, rand=0) - - # # assert - # np.testing.assert_approx_equal(frag_size, 1.3857897e-15) - - # @staticmethod - # def 
test_straub_p4(): - # # arrange - # formulae = Formulae(fragmentation_function="Straub2010Nf") - - # # act - # frag_size = formulae.fragmentation_function.p4( - # CW=0.666, - # ds=0, - # v_max=0, - # Nr1=1, - # Nr2=2, - # Nr3=0, - # ) - - # # assert - # np.testing.assert_approx_equal(frag_size, -5.6454883153e-06) + @staticmethod + def test_straub_p1(): + # arrange + formulae = Formulae(fragmentation_function="Straub2010Nf") + sigma1 = formulae.fragmentation_function.sigma1(CW=0.666) + + # act + frag_size = formulae.fragmentation_function.p1(sigma1=sigma1, rand=0) + + # assert + np.testing.assert_approx_equal(frag_size, 3.6490627e-12) + + @staticmethod + def test_straub_p2(): + # arrange + formulae = Formulae(fragmentation_function="Straub2010Nf") + + # act + frag_size = formulae.fragmentation_function.p2(CW=0.666, rand=0) + + # assert + np.testing.assert_approx_equal(frag_size, 4.3000510e-09) + + @staticmethod + def test_straub_p3(): + # arrange + formulae = Formulae(fragmentation_function="Straub2010Nf") + + # act + frag_size = formulae.fragmentation_function.p3(CW=0.666, ds=0, rand=0) + + # assert + np.testing.assert_approx_equal(frag_size, 1.3857897e-15) + + @staticmethod + def test_straub_p4(): + # arrange + formulae = Formulae(fragmentation_function="Straub2010Nf") + + # act + frag_size = formulae.fragmentation_function.p4( + CW=0.666, + ds=0, + v_max=0, + Nr1=1, + Nr2=2, + Nr3=0, + ) + + # assert + np.testing.assert_approx_equal(frag_size, -5.6454883153e-06) @staticmethod def test_ll82_pf1(): From 60dd8ea611499cde71360d648a2d2a89646fd485 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Wed, 1 Mar 2023 17:29:49 -0700 Subject: [PATCH 39/86] Testing & debugging --- .../impl_numba/methods/collisions_methods.py | 14 +++- .../fragmentation_function/lowlist82.py | 72 +------------------ 2 files changed, 12 insertions(+), 74 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py 
index e71e686756..f8af2d970b 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -401,13 +401,12 @@ def __straub_fragmentation_body( @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __ll82_fragmentation_body( - *, CKE, W, W2, St, ds, dl, dcoal, frag_size, rand, Rf, Rs, Rd, tol=1e-8 + *, CKE, W, W2, St, ds, dl, dcoal, frag_size, rand, Rf, Rs, Rd, tol ): for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) - print(rand[i], Rf, Rs, Rd) if rand[i] <= Rf[i]: # filament breakup (H1, mu1, sigma1) = ll82_params_f1(dl[i], dcoal[i]) (H2, mu2, sigma2) = ll82_params_f2(ds[i]) @@ -416,6 +415,7 @@ def __ll82_fragmentation_body( rand[i] = rand[i] / Rf[i] if rand[i] <= H1 / Hsum: X = max(rand[i] * Hsum / H1, tol) + X = rand[i] frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( 2 * X - 1 ) @@ -459,7 +459,8 @@ def __ll82_fragmentation_body( lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv(2 * X - 1) frag_size[i] = np.exp(lnarg) - frag_size[i] = frag_size[i] / 0.01 + frag_size[i] = frag_size[i] * 0.01 # diameter in cm; convert to m + frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": @@ -679,14 +680,19 @@ def collision_coalescence_breakup( # pylint: disable=too-many-arguments def __fragmentation_limiters(n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y): for i in numba.prange(len(frag_size)): # pylint: disable=not-an-iterable + if np.isnan(frag_size[i]): + frag_size[i] = x_plus_y[i] frag_size[i] = min(frag_size[i], v_max[i]) frag_size[i] = max(frag_size[i], vmin) + frag_size[i] = min(frag_size[i], x_plus_y[i]) if nfmax is not None: if x_plus_y[i] / frag_size[i] > nfmax: frag_size[i] = x_plus_y[i] / nfmax if frag_size[i] == 0.0: frag_size[i] = x_plus_y[i] n_fragment[i] = x_plus_y[i] / frag_size[i] + 
# print("fragsize", frag_size) + # print("nf", n_fragment) def fragmentation_limiters( self, *, n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y @@ -877,6 +883,7 @@ def ll82_fragmentation( Rf, Rs, Rd, + tol=1e-8, ): self.__ll82_fragmentation_body( CKE=CKE.data, @@ -891,6 +898,7 @@ def ll82_fragmentation( Rf=Rf.data, Rs=Rs.data, Rd=Rd.data, + tol=tol, ) self.__fragmentation_limiters( n_fragment=n_fragment.data, diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 2c922ca6e3..6dd5f573e8 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -14,7 +14,7 @@ def __init__(self, _): def params_f1(const, dl, dcoal): dcoalCM = dcoal / const.CM dlCM = dl / const.CM - Hf1 = 50.8 * ((dlCM) ** (-0.718)) + Hf1 = 50.8 * (dlCM) ** (-0.718) mu = dlCM sigma = 1 / Hf1 for i in range(10): @@ -96,45 +96,6 @@ def erfinv(X): arg = arg - (2 / np.pi / a + np.log(1 - X**2) / 2) return np.sqrt(arg) - # @staticmethod - # def ps(const, rand, ds, dl, dcoal, St): - # (H1, mu1, sigma1) = params_s1(dl, ds, dcoal) - # (H2, mu2, sigma2) = params_s2(dl, ds, St) - # grid = default_interp_grid() - # percentiles = [ - # gaussian_inv_cdf(grid, mu1, sigma1), - # lognormal_inv_cdf(grid, mu2, sigma2), - # ] - # cdf_arg = np.zeros(len(grid) * 2 + 1) - # cdf_arg[1:] = np.concatenate(percentiles) - # cdf = ( - # H1 * gaussian_cdf(cdf_arg, mu1, sigma1) - # + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) - # ) / (H1 + H2) - # inverse_cdf = interp1d(cdf, cdf_arg) - - # return inverse_cdf(rand) - - # @staticmethod - # def pd(const, rand, ds, dl, dcoal, CKE, W1): - # (H1, mu1, sigma1) = params_d1(W1, dl, CKE, dcoal) - # (H2, mu2, sigma2) = params_d2(ds, dl, CKE) - - # grid = default_interp_grid() - # percentiles = [ - # gaussian_inv_cdf(grid, mu1, sigma1), - # lognormal_inv_cdf(grid, mu2, sigma2), - # ] - # cdf_arg = np.zeros(len(grid) * 2 + 1) - # cdf_arg[1:] = 
np.concatenate(percentiles) - # cdf = ( - # H1 * gaussian_cdf(cdf_arg, mu1, sigma1) - # + H2 * lognormal_cdf(cdf_arg, mu2, sigma2) - # ) / (H1 + H2) - # inverse_cdf = interp1d(cdf, cdf_arg) - - # return inverse_cdf(rand) - @staticmethod def params_s1(const, dl, ds, dcoal): dsCM = ds / const.CM @@ -219,34 +180,3 @@ def params_d2(const, ds, dl, CKE): Hd2 = Pd20 * Ddd2 / np.exp(-0.5 * sigmad2**2) return (Hd2, mud2, sigmad2) - - -# @staticmethod -# def default_interp_grid(const, diam_basis=True): -# if diam_basis == True: -# dmin = 1 * const.UM -# dmax = 5 * const.CM -# return np.logspace(np.log(dmin), np.log(dmax), 100) -# else: -# pass - - -# @staticmethod -# def gaussian_cdf(arg, mu, sigma): -# return 0.5 * (1 + math.erf((arg - mu) / np.sqrt(2) / sigma)) - - -# @staticmethod -# def lognormal_cdf(arg, mu, sigma): -# return 0.5 * (1 + math.erf((np.log(arg) - mu) / np.sqrt(2) / sigma)) - - -# @staticmethod -# def gaussian_inv_cdf(X, mu, sigma): -# return mu + np.sqrt(2) * sigma * math.erfinv(2 * X - 1) - - -# @staticmethod -# def lognormal_inv_cdf(X, mu, sigma): -# lnarg = gaussian_cdf(X, mu, sigma) -# return np.exp(lnarg) From 58b772329d211b8530f853190d7a796e49b5aac9 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Thu, 2 Mar 2023 20:51:33 -0700 Subject: [PATCH 40/86] Adding limiting cases to LL82 --- .../impl_numba/methods/collisions_methods.py | 132 ++++++++++-------- .../breakup_fragmentations/lowlist82.py | 1 + .../fragmentation_function/lowlist82.py | 29 ++-- 3 files changed, 90 insertions(+), 72 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index f8af2d970b..54da2827f0 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -406,61 +406,72 @@ def __ll82_fragmentation_body( for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): - ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) - if 
rand[i] <= Rf[i]: # filament breakup - (H1, mu1, sigma1) = ll82_params_f1(dl[i], dcoal[i]) - (H2, mu2, sigma2) = ll82_params_f2(ds[i]) - (H3, mu3, sigma3) = ll82_params_f3(ds[i], dl[i]) - Hsum = H1 + H2 + H3 - rand[i] = rand[i] / Rf[i] - if rand[i] <= H1 / Hsum: - X = max(rand[i] * Hsum / H1, tol) - X = rand[i] - frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( - 2 * X - 1 - ) - elif rand[i] <= (H1 + H2) / Hsum: - X = (rand[i] * Hsum - H1) / H2 - frag_size[i] = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( - 2 * X - 1 - ) - else: - X = min((rand[i] * Hsum - H1 - H2) / H3, 1.0 - tol) - lnarg = mu3 + np.sqrt(2) * sigma3 * ll82_erfinv(2 * X - 1) - frag_size[i] = np.exp(lnarg) - - elif rand[i] <= Rf[i] + Rs[i]: # sheet breakup - (H1, mu1, sigma1) = ll82_params_s1(dl[i], ds[i], dcoal[i]) - (H2, mu2, sigma2) = ll82_params_s2(dl[i], ds[i], St[i]) - Hsum = H1 + H2 - rand[i] = (rand[i] - Rf[i]) / (Rs[i]) - if rand[i] <= H1 / Hsum: - X = max(rand[i] * Hsum / H1, tol) - frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( - 2 * X - 1 - ) - else: - X = min((rand[i] - H1) / H2, 1.0 - tol) - lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv(2 * X - 1) - frag_size[i] = np.exp(lnarg) - - else: # disk breakup - (H1, mu1, sigma1) = ll82_params_d1(W[i], dl[i], CKE[i]) - (H2, mu2, sigma2) = ll82_params_d2(ds[i], dl[i], CKE[i]) - Hsum = H1 + H2 - rand[i] = (rand[i] - Rf[i] - Rs[i]) / Rd[i] - if rand[i] <= H1 / Hsum: - X = max(rand[i] * Hsum / H1, tol) - frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( - 2 * X - 1 - ) - else: - X = min((rand[i] - H1) / H2, 1 - tol) - lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv(2 * X - 1) - frag_size[i] = np.exp(lnarg) - - frag_size[i] = frag_size[i] * 0.01 # diameter in cm; convert to m - frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 + if ds[i] == 0.0 or dl[i] == 0.0: + frag_size[i] = 1e-18 + else: + ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) + if rand[i] <= Rf[i]: # filament breakup + (H1, mu1, sigma1) = ll82_params_f1(dl[i], dcoal[i]) + (H2, 
mu2, sigma2) = ll82_params_f2(ds[i]) + (H3, mu3, sigma3) = ll82_params_f3(ds[i], dl[i]) + Hsum = H1 + H2 + H3 + rand[i] = rand[i] / Rf[i] + if rand[i] <= H1 / Hsum: + X = max(rand[i] * Hsum / H1, tol) + X = rand[i] + frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( + 2 * X - 1 + ) + elif rand[i] <= (H1 + H2) / Hsum: + X = (rand[i] * Hsum - H1) / H2 + frag_size[i] = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( + 2 * X - 1 + ) + else: + X = min((rand[i] * Hsum - H1 - H2) / H3, 1.0 - tol) + lnarg = mu3 + np.sqrt(2) * sigma3 * ll82_erfinv( + 2 * X - 1 + ) + frag_size[i] = np.exp(lnarg) + + elif rand[i] <= Rf[i] + Rs[i]: # sheet breakup + (H1, mu1, sigma1) = ll82_params_s1(dl[i], ds[i], dcoal[i]) + (H2, mu2, sigma2) = ll82_params_s2(dl[i], ds[i], St[i]) + Hsum = H1 + H2 + rand[i] = (rand[i] - Rf[i]) / (Rs[i]) + if rand[i] <= H1 / Hsum: + X = max(rand[i] * Hsum / H1, tol) + frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( + 2 * X - 1 + ) + else: + X = min((rand[i] - H1) / H2, 1.0 - tol) + lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( + 2 * X - 1 + ) + frag_size[i] = np.exp(lnarg) + + else: # disk breakup + (H1, mu1, sigma1) = ll82_params_d1(W[i], dl[i], CKE[i]) + (H2, mu2, sigma2) = ll82_params_d2(ds[i], dl[i], CKE[i]) + Hsum = H1 + H2 + rand[i] = (rand[i] - Rf[i] - Rs[i]) / Rd[i] + if rand[i] <= H1 / Hsum: + X = max(rand[i] * Hsum / H1, tol) + frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( + 2 * X - 1 + ) + else: + X = min((rand[i] - H1) / H2, 1 - tol) + lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( + 2 * X - 1 + ) + frag_size[i] = np.exp(lnarg) + + frag_size[i] = ( + frag_size[i] * 0.01 + ) # diameter in cm; convert to m + frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": @@ -683,16 +694,17 @@ def __fragmentation_limiters(n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y if np.isnan(frag_size[i]): frag_size[i] = x_plus_y[i] 
frag_size[i] = min(frag_size[i], v_max[i]) - frag_size[i] = max(frag_size[i], vmin) frag_size[i] = min(frag_size[i], x_plus_y[i]) + frag_size[i] = max(frag_size[i], vmin) if nfmax is not None: - if x_plus_y[i] / frag_size[i] > nfmax: - frag_size[i] = x_plus_y[i] / nfmax + try: + if x_plus_y[i] / frag_size[i] > nfmax: + frag_size[i] = x_plus_y[i] / nfmax + except RuntimeWarning: + print(x_plus_y[i], frag_size[i], nfmax) if frag_size[i] == 0.0: frag_size[i] = x_plus_y[i] n_fragment[i] = x_plus_y[i] / frag_size[i] - # print("fragsize", frag_size) - # print("nf", n_fragment) def fragmentation_limiters( self, *, n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index d2be6a7ab7..62f7537974 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -49,6 +49,7 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): self.arrays["dcoal"].sum( self.particulator.attributes["volume"], is_first_in_pair ) + self.arrays["dcoal"] /= self.const.PI / 6 self.arrays["dcoal"] **= 1 / 3 diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 6dd5f573e8..6a6449f832 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -39,27 +39,31 @@ def params_f3(const, ds, dl): dsCM = ds / const.CM dlCM = dl / const.CM # eq (3.3), (3.4) - Ff1 = ( - (-2.25e4 * (dlCM - 0.403) ** 2 - 37.9) * (dsCM) ** (2.5) - + 9.67 * (dlCM - 0.170) ** 2 - + 4.95 + Ff1 = max( + 0, + ( + (-2.25e4 * (dlCM - 0.403) ** 2 - 37.9) * (dsCM) ** (2.5) + + 9.67 * (dlCM - 0.170) ** 2 + + 4.95 + ), ) Ff2 = 1.02e4 * dsCM ** (2.83) + 2 - # eq (3.5) - ds0 = (Ff1 / 2.83) ** (1 / 1.02e4) + ds0 = max(0.04, (Ff1 / 2.83) ** (1 / 1.02e4)) if dsCM > ds0: Ff = max(2.0, Ff1) else: Ff 
= max(2.0, Ff2) - Dff3 = 0.241 * (dsCM) + 0.0129 # (4.14) # eq (4.18) - (4.21) Pf301 = 1.68e5 * dsCM ** (2.33) - Pf302 = ( - (43.4 * (dlCM + 1.81) ** 2 - 159.0) / dsCM - - 3870 * (dlCM - 0.285) ** 2 - - 58.1 + Pf302 = max( + 0, + ( + (43.4 * (dlCM + 1.81) ** 2 - 159.0) / dsCM + - 3870 * (dlCM - 0.285) ** 2 + - 58.1 + ), ) alpha = (dsCM - ds0) / (0.2 * ds0) Pf303 = alpha * Pf301 + (1 - alpha) * Pf302 @@ -69,12 +73,13 @@ def params_f3(const, ds, dl): Pf0 = Pf302 else: Pf0 = Pf303 - # eq (4.22), (4.16), (4.17) (4.23) sigmaf3 = 10 * Dff3 muf3 = np.log(Dff3) + sigmaf3**2 Hf3 = Pf0 * Dff3 / np.exp(-0.5 * sigmaf3**2) for i in range(10): + if sigmaf3 == 0.0 or Hf3 == 0: + return (0.0, np.log(ds0), np.log(ds0)) sigmaf3 = ( np.sqrt(2 / np.pi) * (Ff - 2) From 0f6afc9c681634ac5998153e57ba1eb7e3af9514 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 3 Mar 2023 13:07:19 -0700 Subject: [PATCH 41/86] Update fragmentation unit tests --- .../collisions/test_fragmentations.py | 299 +++++++++--------- 1 file changed, 151 insertions(+), 148 deletions(-) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 72841f7eaa..e417884c28 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -25,12 +25,12 @@ class TestFragmentations: # pylint: disable=too-few-public-methods @pytest.mark.parametrize( "fragmentation_fn", ( - # AlwaysN(n=2), - # ExponFrag(scale=1e6 * si.um**3), - # Feingold1988Frag(scale=1e6 * si.um**3), - # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), - # SLAMS(), - # Straub2010Nf(), + AlwaysN(n=2), + ExponFrag(scale=1e6 * si.um**3), + Feingold1988Frag(scale=1e6 * si.um**3), + Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), + SLAMS(), + Straub2010Nf(), LowList1982Nf(), ), ) @@ -69,145 +69,148 @@ def test_fragmentation_fn_call( np.testing.assert_array_less([0.99], nf.to_ndarray()) 
np.testing.assert_array_less([0.0], frag_size.to_ndarray()) - # @staticmethod - # @pytest.mark.parametrize( - # "fragmentation_fn", - # [ - # ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - # Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - # Gaussian(mu=2 * si.um**3, sigma=1 * si.um**3, vmin=6660.0 * si.um**3), - # SLAMS(vmin=6660.0 * si.um**3), - # Straub2010Nf(vmin=6660.0 * si.um**3), - # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), - # ], - # ) - # def test_fragmentation_limiters_vmin( - # fragmentation_fn, backend_class - # ): # pylint: disable=redefined-outer-name - # # arrange - # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - # fragments = np.asarray([-1.0]) - # builder = Builder( - # volume.size, - # backend_class( - # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), - # double_precision=True, - # ), - # ) - # sut = fragmentation_fn - # sut.register(builder) - # builder.set_environment(Box(dv=None, dt=None)) - # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - # _PairwiseStorage = builder.particulator.PairwiseStorage - # _Indicator = builder.particulator.PairIndicator - # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # is_first_in_pair = _Indicator(length=volume.size) - # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - # np.asarray([True, False]) - # ) - # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # # act - # sut(nf, frag_size, u01, is_first_in_pair) - - # # Assert - # np.testing.assert_array_equal([(440.0 + 6660.0) / 6660.0], nf.to_ndarray()) - # np.testing.assert_array_equal([6660.0 * si.um**3], frag_size.to_ndarray()) - - # @staticmethod - # @pytest.mark.parametrize( - # "fragmentation_fn", - # [ - # ExponFrag(scale=1.0 * si.cm**3), - # Feingold1988Frag(scale=1.0 * si.cm**3), - # Gaussian(mu=1.0 * si.cm**3, 
sigma=1e6 * si.um**3), - # SLAMS(), - # Straub2010Nf(), - # pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), - # ], - # ) - # def test_fragmentation_limiters_vmax( - # fragmentation_fn, backend_class - # ): # pylint: disable=redefined-outer-name - # # arrange - # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - # fragments = np.asarray([-1.0]) - # builder = Builder( - # volume.size, - # backend_class( - # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - # ), - # ) - # sut = fragmentation_fn - # sut.vmin = 1 * si.um**3 - # sut.register(builder) - # builder.set_environment(Box(dv=None, dt=None)) - # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - # _PairwiseStorage = builder.particulator.PairwiseStorage - # _Indicator = builder.particulator.PairIndicator - # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # is_first_in_pair = _Indicator(length=volume.size) - # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - # np.asarray([True, False]) - # ) - # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # # act - # sut(nf, frag_size, u01, is_first_in_pair) - - # # Assert - # np.testing.assert_array_less([(440.0 + 6660.0) / 6661.0], nf.to_ndarray()) - # np.testing.assert_array_less(frag_size.to_ndarray(), [6661.0 * si.um**3]) - - # @staticmethod - # @pytest.mark.parametrize( - # "fragmentation_fn", - # [ - # ExponFrag(scale=1.0 * si.um**3, nfmax=2), - # Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), - # Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), - # SLAMS(nfmax=2), - # Straub2010Nf(nfmax=2), - # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), - # ], - # ) - # def test_fragmentation_limiters_nfmax( - # fragmentation_fn, backend_class - # ): # pylint: disable=redefined-outer-name - # # arrange - # volume = np.asarray([440.0 * 
si.um**3, 6660.0 * si.um**3]) - # fragments = np.asarray([-1.0]) - # builder = Builder( - # volume.size, - # backend_class( - # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - # ), - # ) - # sut = fragmentation_fn - # sut.vmin = 1 * si.um**3 - # sut.register(builder) - # builder.set_environment(Box(dv=None, dt=None)) - # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - # _PairwiseStorage = builder.particulator.PairwiseStorage - # _Indicator = builder.particulator.PairIndicator - # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # is_first_in_pair = _Indicator(length=volume.size) - # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - # np.asarray([True, False]) - # ) - # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # # act - # sut(nf, frag_size, u01, is_first_in_pair) - - # # Assert - # np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) - # np.testing.assert_array_less( - # [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() - # ) + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + [ + ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + Gaussian(mu=2 * si.um**3, sigma=1 * si.um**3, vmin=6660.0 * si.um**3), + SLAMS(vmin=6660.0 * si.um**3), + Straub2010Nf(vmin=6660.0 * si.um**3), + LowList1982Nf(vmin=6660.0 * si.um**3), + pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + ], + ) + def test_fragmentation_limiters_vmin( + fragmentation_fn, backend_class + ): # pylint: disable=redefined-outer-name + # arrange + volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + fragments = np.asarray([-1.0]) + builder = Builder( + volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), + double_precision=True, + ), + ) + sut = 
fragmentation_fn + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_equal([(440.0 + 6660.0) / 6660.0], nf.to_ndarray()) + np.testing.assert_array_equal([6660.0 * si.um**3], frag_size.to_ndarray()) + + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + [ + ExponFrag(scale=1.0 * si.cm**3), + Feingold1988Frag(scale=1.0 * si.cm**3), + Gaussian(mu=1.0 * si.cm**3, sigma=1e6 * si.um**3), + SLAMS(), + Straub2010Nf(), + LowList1982Nf(), + pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), + ], + ) + def test_fragmentation_limiters_vmax( + fragmentation_fn, backend_class + ): # pylint: disable=redefined-outer-name + # arrange + volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + fragments = np.asarray([-1.0]) + builder = Builder( + volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + ), + ) + sut = fragmentation_fn + sut.vmin = 1 * si.um**3 + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + 
is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_less([(440.0 + 6660.0) / 6661.0], nf.to_ndarray()) + np.testing.assert_array_less(frag_size.to_ndarray(), [6661.0 * si.um**3]) + + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + [ + ExponFrag(scale=1.0 * si.um**3, nfmax=2), + Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), + Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), + SLAMS(nfmax=2), + Straub2010Nf(nfmax=2), + LowList1982Nf(nfmax=2), + pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + ], + ) + def test_fragmentation_limiters_nfmax( + fragmentation_fn, backend_class + ): # pylint: disable=redefined-outer-name + # arrange + volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + fragments = np.asarray([-1.0]) + builder = Builder( + volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + ), + ) + sut = fragmentation_fn + sut.vmin = 1 * si.um**3 + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) + np.testing.assert_array_less( 
+ [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() + ) From 9866cec58a64b6e1202dde1fb0fd3f6205cc3f4e Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Fri, 3 Mar 2023 13:31:49 -0700 Subject: [PATCH 42/86] Implement LL82 coalescence efficiency --- .../coalescence_efficiencies/__init__.py | 1 + .../coalescence_efficiencies/lowlist1982.py | 93 +++++++++++++++++++ .../dynamics/collisions/test_efficiencies.py | 3 + 3 files changed, 97 insertions(+) create mode 100644 PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/__init__.py b/PySDM/dynamics/collisions/coalescence_efficiencies/__init__.py index 223470d5b7..58571cc782 100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/__init__.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/__init__.py @@ -3,5 +3,6 @@ """ from .berry1967 import Berry1967 from .constEc import ConstEc +from .lowlist1982 import LowList1982Ec from .specified_eff import SpecifiedEff from .straub2010 import Straub2010Ec diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py new file mode 100644 index 0000000000..93e675c491 --- /dev/null +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py @@ -0,0 +1,93 @@ +""" +See Low & List 1982 +""" +import numpy as np + +from PySDM.physics.constants import si + + +class LowList1982Ec: + # pylint: disable=too-many-instance-attributes + def __init__(self, vmin=0.0, nfmax=None): + self.particulator = None + self.vmin = vmin + self.nfmax = nfmax + self.arrays = {} + self.ll82_tmp = {} + self.max_size = None + self.sum_of_volumes = None + self.const = None + + def register(self, builder): + self.particulator = builder.particulator + self.max_size = self.particulator.PairwiseStorage.empty( + self.particulator.n_sd // 2, dtype=float + ) + self.sum_of_volumes = 
self.particulator.PairwiseStorage.empty( + self.particulator.n_sd // 2, dtype=float + ) + self.const = self.particulator.formulae.constants + builder.request_attribute("radius") + builder.request_attribute("volume") + builder.request_attribute("terminal velocity") + for key in ("Sc", "St", "dS", "tmp", "tmp2", "CKE", "Et", "ds", "dl"): + self.arrays[key] = self.particulator.PairwiseStorage.empty( + self.particulator.n_sd // 2, dtype=float + ) + + def __call__(self, output, is_first_in_pair): + self.max_size.max(self.particulator.attributes["volume"], is_first_in_pair) + self.sum_of_volumes.sum( + self.particulator.attributes["volume"], is_first_in_pair + ) + self.arrays["ds"].min(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["ds"] *= 2 + self.arrays["dl"].max(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["dl"] *= 2 + + # compute the surface energy, CKE + self.arrays["Sc"].sum(self.particulator.attributes["volume"], is_first_in_pair) + self.arrays["Sc"] **= 2 / 3 + self.arrays["Sc"] *= ( + self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) + ) + self.arrays["St"].min(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["St"] *= 2 + self.arrays["St"] **= 2 + self.arrays["tmp"].max(self.particulator.attributes["radius"], is_first_in_pair) + self.arrays["tmp"] *= 2 + self.arrays["tmp"] **= 2 + self.arrays["St"] += self.arrays["tmp"] + self.arrays["St"] *= self.const.PI * self.const.sgm_w + self.arrays["dS"][:] = self.arrays["St"][:] + self.arrays["dS"] -= self.arrays["Sc"] + + self.arrays["tmp"] *= 2 + self.arrays["tmp2"].distance( + self.particulator.attributes["terminal velocity"], is_first_in_pair + ) + self.arrays["tmp2"] **= 2 + self.arrays["CKE"].multiply( + self.particulator.attributes["volume"], is_first_in_pair + ) + self.arrays["CKE"].divide_if_not_zero(self.arrays["tmp"]) + self.arrays["CKE"] *= self.arrays["tmp2"] + self.arrays["CKE"] *= self.const.rho_w + + 
self.arrays["Et"][:] = self.arrays["CKE"][:] + self.arrays["Et"] += self.arrays["dS"] + + a = 0.778 + b = 2.61e6 / si.J**2 * si.m**2 + + self.arrays["tmp2"][:] = self.arrays["Et"] + self.arrays["tmp2"] **= 2 + self.arrays["tmp2"] *= -1.0 * b * self.const.sgm_w + self.arrays["tmp2"] /= self.arrays["Sc"] + + output[:] = self.arrays["ds"][:] + output /= self.arrays["dl"] + output += 1.0 + output **= -2.0 + output *= a + output *= np.exp(self.arrays["tmp2"]) diff --git a/tests/unit_tests/dynamics/collisions/test_efficiencies.py b/tests/unit_tests/dynamics/collisions/test_efficiencies.py index 74fcf3e4a6..564d1eb5a4 100644 --- a/tests/unit_tests/dynamics/collisions/test_efficiencies.py +++ b/tests/unit_tests/dynamics/collisions/test_efficiencies.py @@ -8,6 +8,7 @@ from PySDM.dynamics.collisions.coalescence_efficiencies import ( Berry1967, ConstEc, + LowList1982Ec, SpecifiedEff, Straub2010Ec, ) @@ -24,6 +25,7 @@ class TestEfficiencies: # pylint: disable=too-few-public-methods ConstEc(Ec=0.5), SpecifiedEff(A=0.8, B=0.6), Straub2010Ec(), + LowList1982Ec(), ConstEb(Eb=0.3), ], ) @@ -49,3 +51,4 @@ def test_efficiency_fn_call(efficiency, backend_class=CPU): # Assert np.testing.assert_array_less([0.0 - 1e-6], eff.to_ndarray()) + np.testing.assert_array_less(eff.to_ndarray(), [1.0]) From db532e9a1049f80fc96eccbd50b854b9ef5e8656 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 7 Mar 2023 10:19:31 -0700 Subject: [PATCH 43/86] Final fixes on LL82 efficiency --- PySDM/backends/impl_numba/methods/collisions_methods.py | 7 ++----- .../collisions/coalescence_efficiencies/lowlist1982.py | 4 ++-- .../collisions/coalescence_efficiencies/straub2010.py | 2 +- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 54da2827f0..51fe9a58ae 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ 
b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -697,11 +697,8 @@ def __fragmentation_limiters(n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y frag_size[i] = min(frag_size[i], x_plus_y[i]) frag_size[i] = max(frag_size[i], vmin) if nfmax is not None: - try: - if x_plus_y[i] / frag_size[i] > nfmax: - frag_size[i] = x_plus_y[i] / nfmax - except RuntimeWarning: - print(x_plus_y[i], frag_size[i], nfmax) + if x_plus_y[i] / frag_size[i] > nfmax: + frag_size[i] = x_plus_y[i] / nfmax if frag_size[i] == 0.0: frag_size[i] = x_plus_y[i] n_fragment[i] = x_plus_y[i] / frag_size[i] diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py index 93e675c491..7adf941245 100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py @@ -62,7 +62,7 @@ def __call__(self, output, is_first_in_pair): self.arrays["dS"][:] = self.arrays["St"][:] self.arrays["dS"] -= self.arrays["Sc"] - self.arrays["tmp"] *= 2 + self.arrays["tmp"].sum(self.particulator.attributes["volume"], is_first_in_pair) self.arrays["tmp2"].distance( self.particulator.attributes["terminal velocity"], is_first_in_pair ) @@ -72,7 +72,7 @@ def __call__(self, output, is_first_in_pair): ) self.arrays["CKE"].divide_if_not_zero(self.arrays["tmp"]) self.arrays["CKE"] *= self.arrays["tmp2"] - self.arrays["CKE"] *= self.const.rho_w + self.arrays["CKE"] *= self.const.rho_w / 2 self.arrays["Et"][:] = self.arrays["CKE"][:] self.arrays["Et"] += self.arrays["dS"] diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py b/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py index c152b47a96..263f704b68 100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py @@ -37,7 +37,7 @@ def __call__(self, output, is_first_in_pair): ) 
self.arrays["We"].divide_if_not_zero(self.arrays["tmp"]) self.arrays["We"] *= self.arrays["tmp2"] - self.arrays["We"] *= self.const.rho_w + self.arrays["We"] *= self.const.rho_w / 2 self.arrays["Sc"] **= 2 / 3 self.arrays["Sc"] *= ( From 1763590f51a841162b114025877e5cf235eb572c Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 7 Mar 2023 12:25:32 -0700 Subject: [PATCH 44/86] Fixes to MP distribution --- .../initialisation/spectra/marshall_palmer.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 PySDM/initialisation/spectra/marshall_palmer.py diff --git a/PySDM/initialisation/spectra/marshall_palmer.py b/PySDM/initialisation/spectra/marshall_palmer.py new file mode 100644 index 0000000000..720a3d587c --- /dev/null +++ b/PySDM/initialisation/spectra/marshall_palmer.py @@ -0,0 +1,46 @@ +""" +Marshall-Palmer spectrum +""" +import numpy as np +from scipy.interpolate import interp1d + +from PySDM.formulae import Formulae +from PySDM.initialisation.sampling.spectral_sampling import default_cdf_range +from PySDM.physics.constants import si + +default_interpolation_grid = tuple(np.linspace(*default_cdf_range, 999)) +MP_N0 = 8000 * si.m ** (-3) * si.mm ** (-1) +MP_LAMBDA0 = 4.1 * si.mm ** (-1) +MP_LAMBDA1 = -0.21 +TRIVIA = Formulae().trivia + + +class MarshallPalmer: + def __init__(self, rain_rate, dv, interpolation_grid=None): + self.rain_rate = rain_rate / si.mm * si.h + self.scale = MP_LAMBDA0 * self.rain_rate ** (MP_LAMBDA1) * si.mm ** (-1) + self.norm_factor = MP_LAMBDA0 * dv / self.scale + # interpolation_grid = interpolation_grid or default_interpolation_grid + # cdf_arg = np.zeros(len(interpolation_grid) + 1) + # cdf_arg[1:] = interpolation_grid + # cdf = self.cumulative(cdf_arg) / self.norm_factor + # print(cdf, cdf_arg) + # self.inverse_cdf = interp1d(cdf, cdf_arg) + + def size_distribution(self, arg): + diam_arg = 2 * TRIVIA.radius(volume=arg) + result = MP_N0 * np.exp(-self.scale * diam_arg) + return result + + def 
cumulative(self, arg): + diam_arg = 2 * TRIVIA.radius(volume=arg) + cdf = 1.0 - np.exp(-self.scale * diam_arg) + return self.norm_factor * cdf + + def percentiles(self, cdf_values): + print(cdf_values[0], cdf_values[-1]) + diams = np.array( + [-np.log(1 - cdf_values[i]) / self.scale for i in range(len(cdf_values))] + ) + print(diams[0], diams[-1]) + return TRIVIA.volume(radius=diams / 2) From 645862e639e7e097cd3783561ad9187fd603e35d Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 7 Mar 2023 17:27:42 -0700 Subject: [PATCH 45/86] remove MP separate file --- .../initialisation/spectra/marshall_palmer.py | 46 ------------------- 1 file changed, 46 deletions(-) delete mode 100644 PySDM/initialisation/spectra/marshall_palmer.py diff --git a/PySDM/initialisation/spectra/marshall_palmer.py b/PySDM/initialisation/spectra/marshall_palmer.py deleted file mode 100644 index 720a3d587c..0000000000 --- a/PySDM/initialisation/spectra/marshall_palmer.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Marshall-Palmer spectrum -""" -import numpy as np -from scipy.interpolate import interp1d - -from PySDM.formulae import Formulae -from PySDM.initialisation.sampling.spectral_sampling import default_cdf_range -from PySDM.physics.constants import si - -default_interpolation_grid = tuple(np.linspace(*default_cdf_range, 999)) -MP_N0 = 8000 * si.m ** (-3) * si.mm ** (-1) -MP_LAMBDA0 = 4.1 * si.mm ** (-1) -MP_LAMBDA1 = -0.21 -TRIVIA = Formulae().trivia - - -class MarshallPalmer: - def __init__(self, rain_rate, dv, interpolation_grid=None): - self.rain_rate = rain_rate / si.mm * si.h - self.scale = MP_LAMBDA0 * self.rain_rate ** (MP_LAMBDA1) * si.mm ** (-1) - self.norm_factor = MP_LAMBDA0 * dv / self.scale - # interpolation_grid = interpolation_grid or default_interpolation_grid - # cdf_arg = np.zeros(len(interpolation_grid) + 1) - # cdf_arg[1:] = interpolation_grid - # cdf = self.cumulative(cdf_arg) / self.norm_factor - # print(cdf, cdf_arg) - # self.inverse_cdf = interp1d(cdf, cdf_arg) - - 
def size_distribution(self, arg): - diam_arg = 2 * TRIVIA.radius(volume=arg) - result = MP_N0 * np.exp(-self.scale * diam_arg) - return result - - def cumulative(self, arg): - diam_arg = 2 * TRIVIA.radius(volume=arg) - cdf = 1.0 - np.exp(-self.scale * diam_arg) - return self.norm_factor * cdf - - def percentiles(self, cdf_values): - print(cdf_values[0], cdf_values[-1]) - diams = np.array( - [-np.log(1 - cdf_values[i]) / self.scale for i in range(len(cdf_values))] - ) - print(diams[0], diams[-1]) - return TRIVIA.volume(radius=diams / 2) From bace062f65d6f6b19e8d16558009d1c71325ae09 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 7 Mar 2023 18:52:56 -0700 Subject: [PATCH 46/86] Fixing issues with LL82 --- .../impl_numba/methods/collisions_methods.py | 6 ++- .../fragmentation_function/lowlist82.py | 12 ++--- .../physics/test_fragmentation_functions.py | 47 ++++++++++++------- 3 files changed, 40 insertions(+), 25 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 51fe9a58ae..f333161f14 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -270,7 +270,7 @@ def ll82_Nr( # pylint: disable=too-many-arguments,unused-argument W, W2, ): # pylint: disable=too-many-branches` - if CKE[i] >= 89.3: + if CKE[i] >= 89.3e-6: Rf[i] = 1.11e-4 * CKE[i] ** (-0.654) else: Rf[i] = 1.0 @@ -452,7 +452,9 @@ def __ll82_fragmentation_body( frag_size[i] = np.exp(lnarg) else: # disk breakup - (H1, mu1, sigma1) = ll82_params_d1(W[i], dl[i], CKE[i]) + (H1, mu1, sigma1) = ll82_params_d1( + W[i], dl[i], dcoal[i], CKE[i] + ) (H2, mu2, sigma2) = ll82_params_d2(ds[i], dl[i], CKE[i]) Hsum = H1 + H2 rand[i] = (rand[i] - Rf[i] - Rs[i]) / Rd[i] diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 6a6449f832..6e3f24f0c4 100644 --- 
a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -123,12 +123,12 @@ def params_s2(const, dl, ds, St): dsCM = ds / const.CM dlCM = dl / const.CM Dss2 = ( - 0.254 * (ds ** (0.413)) * np.exp(3.53 * dsCM ** (-2.51) * (dlCM - dsCM)) + 0.254 * (dsCM ** (0.413)) * np.exp(3.53 * dsCM ** (2.51) * (dlCM - dsCM)) ) # (4.27) bstar = 14.2 * np.exp(-17.2 * dsCM) Ps20 = 0.23 * dsCM ** (-3.93) * dlCM ** (bstar) # (4.29) sigmas2 = 10 * Dss2 # as in (4.22) - mus2 = np.log(Dss2) + sigmas2**22 # (4.32) + mus2 = np.log(Dss2) + sigmas2**2 # (4.32) Hs2 = Ps20 * Dss2 / np.exp(-0.5 * sigmas2**2) # (4.28) Fs = 5 * math.erf((St - 2.52e-6) / (1.85e-6)) + 6 # (3.7) @@ -140,15 +140,15 @@ def params_s2(const, dl, ds, St): / Hs2 / (1 - math.erf((np.log(0.01) - mus2) / np.sqrt(2) / sigmas2)) ) - mus2 = np.log(Dss2) + sigmas2 * 2 # (4.32) + mus2 = np.log(Dss2) + sigmas2**2 # (4.32) Hs2 = Ps20 * Dss2 / np.exp(-0.5 * sigmas2**2) # (4.28) return (Hs2, mus2, sigmas2) @staticmethod - def params_d1(const, W1, dl, CKE): + def params_d1(const, W1, dl, dcoal, CKE): dlCM = dl / const.CM - dcoalCM = dlCM / const.CM + dcoalCM = dcoal / const.CM mud1 = dlCM * (1 - np.exp(-3.70 * (3.10 - W1))) Hd1 = 1.58e-5 * CKE ** (-1.22) sigmad1 = 1 / Hd1 @@ -165,7 +165,7 @@ def params_d1(const, W1, dl, CKE): def params_d2(const, ds, dl, CKE): dsCM = ds / const.CM dlCM = dl / const.CM - Ddd2 = np.exp(-17.4 * dsCM - 0.671 * (dlCM - dsCM)) * dsCM # (4.37) + Ddd2 = np.exp(-1.74 * dsCM - 0.671 * (dlCM - dsCM)) * dsCM # (4.37) bstar = 0.007 * dsCM ** (-2.54) # (4.39) Pd20 = 0.0884 * dsCM ** (-2.52) * (dlCM - dsCM) ** (bstar) # (4.38) sigmad2 = 10 * Ddd2 diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py b/tests/unit_tests/physics/test_fragmentation_functions.py index 2415b77738..f7d8869408 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -67,11 +67,12 @@ def 
test_ll82_pf1(): # act params = formulae.fragmentation_function.params_f1( - dl=100 * si.um, dcoal=200 * si.um + dl=0.36 * si.cm, dcoal=0.3744 * si.cm ) - # assert - np.testing.assert_approx_equal(len(params), 3.0) + np.testing.assert_array_equal( + params, [105.78851401149461, 0.36, 0.003771383856549656] + ) @staticmethod def test_ll82_pf2(): @@ -79,10 +80,12 @@ def test_ll82_pf2(): formulae = Formulae(fragmentation_function="LowList1982Nf") # act - params = formulae.fragmentation_function.params_f2(ds=100 * si.um) + params = formulae.fragmentation_function.params_f2(ds=0.18 * si.cm) # assert - np.testing.assert_approx_equal(len(params), 3.0) + np.testing.assert_array_equal( + params, (31.081892267202157, 0.18, 0.01283519925273017) + ) @staticmethod def test_ll82_pf3(): @@ -91,11 +94,13 @@ def test_ll82_pf3(): # act params = formulae.fragmentation_function.params_f3( - ds=100 * si.um, dl=200 * si.um + ds=0.0715 * si.cm, dl=0.18 * si.cm ) # assert - np.testing.assert_approx_equal(len(params), 3.0) + np.testing.assert_array_equal( + params, (11.078017412424996, -3.4579794266811095, 0.21024917628814235) + ) @staticmethod def test_ll82_ps1(): @@ -104,11 +109,13 @@ def test_ll82_ps1(): # act params = formulae.fragmentation_function.params_s1( - dl=100 * si.um, ds=50 * si.um, dcoal=200 * si.um + dl=0.36 * si.cm, ds=0.18 * si.cm, dcoal=0.3744 * si.cm ) # assert - np.testing.assert_approx_equal(len(params), 3.0) + np.testing.assert_array_equal( + params, (55.710586181217394, 0.36, 0.007344262785151853) + ) @staticmethod def test_ll82_ps2(): @@ -117,11 +124,13 @@ def test_ll82_ps2(): # act params = formulae.fragmentation_function.params_s2( - dl=100 * si.um, ds=50 * si.um, St=1e-6 * si.J + dl=0.36 * si.cm, ds=0.18 * si.cm, St=3.705e-6 * si.J ) # assert - np.testing.assert_approx_equal(len(params), 3.0) + np.testing.assert_array_equal( + params, (13.120297517162507, -2.0082590717125437, 0.24857168491193957) + ) @staticmethod def test_ll82_pd1(): @@ -130,11 +139,13 @@ def 
test_ll82_pd1(): # act params = formulae.fragmentation_function.params_d1( - W1=1.0, dl=100 * si.um, CKE=1.0 + W1=2.67, dl=0.36 * si.cm, dcoal=0.3744 * si.cm, CKE=8.55e-6 * si.J ) # assert - np.testing.assert_approx_equal(len(params), 3.0) + np.testing.assert_array_equal( + params, (24.080107809942664, 0.28666015630152986, 0.016567297254868083) + ) @staticmethod def test_ll82_pd2(): @@ -143,11 +154,13 @@ def test_ll82_pd2(): # act params = formulae.fragmentation_function.params_d2( - ds=100 * si.um, dl=100 * si.um, CKE=200 * si.um + ds=0.18 * si.cm, dl=0.36 * si.cm, CKE=8.55e-6 * si.J ) # assert - np.testing.assert_approx_equal(len(params), 3.0) + np.testing.assert_array_equal( + params, (0.30464721998964595, -2.148778428091927, 3.1133226212867343e-147) + ) @staticmethod def test_erfinv(): @@ -155,8 +168,8 @@ def test_erfinv(): formulae = Formulae(fragmentation_function="LowList1982Nf") # act - params = formulae.fragmentation_function.erfinv(0.5) + params = formulae.fragmentation_function.erfinv(0.25) # assert - diff = np.abs(params - 0.476936) + diff = np.abs(params - 0.2253) np.testing.assert_array_less(diff, 1e-3) From 583538d2d4f0a5397926a1a1ebc407f340d82700 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Wed, 8 Mar 2023 13:42:39 -0700 Subject: [PATCH 47/86] Add'l limiters in LL82 --- .../impl_numba/methods/collisions_methods.py | 33 ++++++++++++++++--- .../breakup_fragmentations/lowlist82.py | 2 +- .../coalescence_efficiencies/lowlist1982.py | 4 +++ .../fragmentation_function/lowlist82.py | 12 +++++-- PySDM/products/size_spectral/__init__.py | 1 + 5 files changed, 44 insertions(+), 8 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index f333161f14..4408d8d4d8 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -270,7 +270,7 @@ def ll82_Nr( # pylint: 
disable=too-many-arguments,unused-argument W, W2, ): # pylint: disable=too-many-branches` - if CKE[i] >= 89.3e-6: + if CKE[i] >= 0.893e-6: Rf[i] = 1.11e-4 * CKE[i] ** (-0.654) else: Rf[i] = 1.0 @@ -354,6 +354,14 @@ def __collision_coalescence_breakup_body( self.__collision_coalescence_breakup_body = __collision_coalescence_breakup_body + @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) + def __ll82_coalescence_check_body(*, Ec, ds, dl): + for i in numba.prange(len(Ec)): # pylint: disable=not-an-iterable + if dl[i] < 0.4e-3: + Ec[i] = 1.0 + + self.__ll82_coalescence_check_body = __ll82_coalescence_check_body + if self.formulae.fragmentation_function.__name__ == "Straub2010Nf": straub_p1 = self.formulae.fragmentation_function.p1 straub_p2 = self.formulae.fragmentation_function.p2 @@ -406,7 +414,9 @@ def __ll82_fragmentation_body( for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): - if ds[i] == 0.0 or dl[i] == 0.0: + if dl[i] <= 0.4e-3: + frag_size[i] = dcoal[i] ** 3 * 3.1415 / 6 + elif ds[i] == 0.0 or dl[i] == 0.0: frag_size[i] = 1e-18 else: ll82_Nr(i, Rf, Rs, Rd, CKE, W, W2) @@ -414,11 +424,13 @@ def __ll82_fragmentation_body( (H1, mu1, sigma1) = ll82_params_f1(dl[i], dcoal[i]) (H2, mu2, sigma2) = ll82_params_f2(ds[i]) (H3, mu3, sigma3) = ll82_params_f3(ds[i], dl[i]) + H1 = H1 * mu1 + H2 = H2 * mu2 + H3 = H3 * np.exp(mu3) Hsum = H1 + H2 + H3 rand[i] = rand[i] / Rf[i] if rand[i] <= H1 / Hsum: X = max(rand[i] * Hsum / H1, tol) - X = rand[i] frag_size[i] = mu1 + np.sqrt(2) * sigma1 * ll82_erfinv( 2 * X - 1 ) @@ -437,6 +449,8 @@ def __ll82_fragmentation_body( elif rand[i] <= Rf[i] + Rs[i]: # sheet breakup (H1, mu1, sigma1) = ll82_params_s1(dl[i], ds[i], dcoal[i]) (H2, mu2, sigma2) = ll82_params_s2(dl[i], ds[i], St[i]) + H1 = H1 * mu1 + H2 = H2 * np.exp(mu2) Hsum = H1 + H2 rand[i] = (rand[i] - Rf[i]) / (Rs[i]) if rand[i] <= H1 / Hsum: @@ -445,7 +459,7 @@ def __ll82_fragmentation_body( 2 * X - 1 ) else: - X = min((rand[i] - H1) 
/ H2, 1.0 - tol) + X = min((rand[i] * Hsum - H1) / H2, 1.0 - tol) lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( 2 * X - 1 ) @@ -456,6 +470,7 @@ def __ll82_fragmentation_body( W[i], dl[i], dcoal[i], CKE[i] ) (H2, mu2, sigma2) = ll82_params_d2(ds[i], dl[i], CKE[i]) + H1 = H1 * mu1 Hsum = H1 + H2 rand[i] = (rand[i] - Rf[i] - Rs[i]) / Rd[i] if rand[i] <= H1 / Hsum: @@ -464,7 +479,7 @@ def __ll82_fragmentation_body( 2 * X - 1 ) else: - X = min((rand[i] - H1) / H2, 1 - tol) + X = min((rand[i] * Hsum - H1) / H2, 1 - tol) lnarg = mu2 + np.sqrt(2) * sigma2 * ll82_erfinv( 2 * X - 1 ) @@ -474,6 +489,7 @@ def __ll82_fragmentation_body( frag_size[i] * 0.01 ) # diameter in cm; convert to m frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 + # print(np.sum(Rf), np.sum(Rs), np.sum(Rd)) self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": @@ -920,6 +936,13 @@ def ll82_fragmentation( nfmax=nfmax, ) + def ll82_coalescence_check(self, *, Ec, ds, dl): + self.__ll82_coalescence_check_body( + Ec=Ec.data, + ds=ds.data, + dl=dl.data, + ) + @staticmethod @numba.njit(**conf.JIT_FLAGS) # pylint: disable=too-many-arguments,too-many-locals diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index 62f7537974..6f5e7cb7eb 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -68,7 +68,7 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): self.arrays["St"] += self.arrays["tmp"] self.arrays["St"] *= self.const.PI * self.const.sgm_w - self.arrays["tmp"] *= 2 + self.arrays["tmp"].sum(self.particulator.attributes["volume"], is_first_in_pair) self.arrays["tmp2"].distance( self.particulator.attributes["terminal velocity"], is_first_in_pair ) diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py 
b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py index 7adf941245..9ea658095d 100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py @@ -91,3 +91,7 @@ def __call__(self, output, is_first_in_pair): output **= -2.0 output *= a output *= np.exp(self.arrays["tmp2"]) + + self.particulator.backend.ll82_coalescence_check( + Ec=output, ds=self.arrays["ds"], dl=self.arrays["dl"] + ) diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 6e3f24f0c4..7d5fe57946 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -159,22 +159,30 @@ def params_d1(const, W1, dl, dcoal, CKE): * np.sqrt(2 / np.pi) / (1 + math.erf((dcoalCM - mud1) / (np.sqrt(2) * sigmad1))) ) + return (Hd1, mud1, sigmad1) # in cm @staticmethod def params_d2(const, ds, dl, CKE): dsCM = ds / const.CM dlCM = dl / const.CM - Ddd2 = np.exp(-1.74 * dsCM - 0.671 * (dlCM - dsCM)) * dsCM # (4.37) + Ddd2 = np.exp(-17.4 * dsCM - 0.671 * (dlCM - dsCM)) * dsCM # (4.37) bstar = 0.007 * dsCM ** (-2.54) # (4.39) Pd20 = 0.0884 * dsCM ** (-2.52) * (dlCM - dsCM) ** (bstar) # (4.38) sigmad2 = 10 * Ddd2 + mud2 = np.log(Ddd2) + sigmad2**2 Hd2 = Pd20 * Ddd2 / np.exp(-0.5 * sigmad2**2) - Fd = 297.5 + 23.7 * np.log(CKE) # (3.9) + Fd = max(1.0, 297.5 + 23.7 * np.log(CKE)) # (3.9) + if Fd == 1.0: + return (0.0, np.log(Ddd2), np.log(Ddd2)) for i in range(10): + if sigmad2 == 0.0 or Hd2 <= 0.1: + return (0.0, np.log(Ddd2), np.log(Ddd2)) + elif sigmad2 >= 1.0: + return (0.0, np.log(Ddd2), np.log(Ddd2)) sigmad2 = ( np.sqrt(2 / np.pi) * (Fd - 1) diff --git a/PySDM/products/size_spectral/__init__.py b/PySDM/products/size_spectral/__init__.py index 470715212e..bc2ef2b5ee 100644 --- a/PySDM/products/size_spectral/__init__.py +++ b/PySDM/products/size_spectral/__init__.py @@ -2,6 +2,7 @@ from 
.arbitrary_moment import RadiusFirstMoment, RadiusSixthMoment, VolumeFirstMoment from .effective_radius import EffectiveRadius from .mean_radius import MeanRadius +from .number_size_spectrum import NumberSizeSpectrum from .particle_concentration import ParticleConcentration, ParticleSpecificConcentration from .particle_size_spectrum import ( ParticleSizeSpectrumPerMass, From 3f1ddba3f3d2c3e2a95e580015f5323db3ce654c Mon Sep 17 00:00:00 2001 From: Oleksii Bulenok Date: Sat, 11 Mar 2023 18:02:51 +0100 Subject: [PATCH 48/86] Test LowList82 distribution --- .../collisions/test_fragmentations.py | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index e417884c28..8f3be41353 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -1,4 +1,7 @@ # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring +from timeit import default_timer as timer + +import matplotlib.pyplot as plt import numpy as np import pytest @@ -214,3 +217,96 @@ def test_fragmentation_limiters_nfmax( np.testing.assert_array_less( [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() ) + + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + ( + # AlwaysN(n=2), + ExponFrag(scale=1e6 * si.um**3), + # Feingold1988Frag(scale=1e6 * si.um**3), + # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), + # SLAMS(), + # Straub2010Nf(), + LowList1982Nf(), + ), + ) + def test_fragmentation_fn_distribution( + fragmentation_fn, + ): # pylint: disable=redefined-outer-name + # arrange + from PySDM.backends import CPU + + drop_size_L_diam = 0.4 * si.cm + drop_size_S_diam = 0.2 * si.cm + + get_volume_from_diam = lambda d: (4 / 3) * np.pi * (d / 2) ** 3 + + n = 100 + res = np.empty((n, 2), dtype=np.double) + + backend = CPU( + 
Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + ) + volume = np.asarray( + [ + get_volume_from_diam(drop_size_S_diam), + get_volume_from_diam(drop_size_L_diam), + ] + ) + fragments = np.asarray([-1.0]) + builder = Builder(volume.size, backend) + sut = fragmentation_fn + sut.vmin = 1 * si.um**3 + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + rns = np.linspace(0, 1, n) + for i, rn in enumerate(rns): + print("i", i) + start = timer() + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray( + np.zeros_like(fragments, dtype=np.double) + ) + frag_size = _PairwiseStorage.from_ndarray( + np.zeros_like(fragments, dtype=np.double) + ) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray( + np.asarray([rn]) + ) # (np.random.rand(*fragments.shape)) + print("u01", u01.data) + + end = timer() + print("elapsed time setup", end - start) + + start = timer() + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + end = timer() + print("elapsed time sut", end - start) + print(nf.data) + print(frag_size.data) + res[i][0] = nf[0] + res[i][1] = frag_size[0] + + # Assert + # np.testing.assert_array_less([0.99], nf.to_ndarray()) + # np.testing.assert_array_less([0.0], frag_size.to_ndarray()) + + # res = np.asarray(sorted(res, key=lambda x: x[1], reverse=True)) + print(res[:, 0]) + print(np.unique(res[:, 0]), len(np.unique(res[:, 0]))) + print(np.unique(res[:, 1]), len(np.unique(res[:, 1]))) + + plt.plot(rns, res[:, 0]) + plt.show() From 72f648068cb52b9833b0cfaf65dee5452d12fb5b Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 13 Mar 2023 21:04:21 -0600 Subject: [PATCH 49/86] Refactor Straub -> mass weighting --- 
.../impl_numba/methods/collisions_methods.py | 62 ++++++++++++++----- .../fragmentation_function/straub2010nf.py | 52 ++++++++++++++++ 2 files changed, 97 insertions(+), 17 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 4408d8d4d8..9cdb867e94 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -363,11 +363,16 @@ def __ll82_coalescence_check_body(*, Ec, ds, dl): self.__ll82_coalescence_check_body = __ll82_coalescence_check_body if self.formulae.fragmentation_function.__name__ == "Straub2010Nf": - straub_p1 = self.formulae.fragmentation_function.p1 - straub_p2 = self.formulae.fragmentation_function.p2 - straub_p3 = self.formulae.fragmentation_function.p3 - straub_p4 = self.formulae.fragmentation_function.p4 - straub_sigma1 = self.formulae.fragmentation_function.sigma1 + # straub_p1 = self.formulae.fragmentation_function.p1 + # straub_p2 = self.formulae.fragmentation_function.p2 + # straub_p3 = self.formulae.fragmentation_function.p3 + # straub_p4 = self.formulae.fragmentation_function.p4 + # straub_sigma1 = self.formulae.fragmentation_function.sigma1 + straub_paramsp1 = self.formulae.fragmentation_function.params_p1 + straub_paramsp2 = self.formulae.fragmentation_function.params_p2 + straub_paramsp3 = self.formulae.fragmentation_function.params_p3 + straub_paramsp4 = self.formulae.fragmentation_function.params_p4 + straub_erfinv = self.formulae.fragmentation_function.erfinv @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __straub_fragmentation_body( @@ -377,24 +382,47 @@ def __straub_fragmentation_body( len(frag_size) ): straub_Nr(i, Nr1, Nr2, Nr3, Nr4, Nrt, CW, gam) + (mu1, sigma1) = straub_paramsp1(CW[i]) + (mu2, sigma2) = straub_paramsp2(CW[i]) + (mu3, sigma3) = straub_paramsp3(CW[i], ds[i]) + (M31, M32, M33, M34, d34) = straub_paramsp4( + v_max[i], + ds[i], + 
mu1, + sigma1, + mu2, + sigma2, + mu3, + sigma3, + Nr1[i], + Nr2[i], + Nr3[i], + CW[i], + ) + Nr1[i] = Nr1[i] * M31 + Nr2[i] = Nr2[i] * M32 + Nr3[i] = Nr3[i] * M33 + Nr4[i] = Nr4[i] * M34 + Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i] + if rand[i] < Nr1[i] / Nrt[i]: - frag_size[i] = straub_p1( - rand[i] * Nrt[i] / Nr1[i], straub_sigma1(CW[i]) - ) + X = rand[i] * Nrt[i] / Nr1[i] + lnarg = mu1 + np.sqrt(2) * sigma1 * straub_erfinv(2 * X - 1) + frag_size[i] = np.exp(lnarg) elif rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]: - frag_size[i] = straub_p2( - CW[i], (rand[i] * Nrt[i] - Nr1[i]) / (Nr2[i] - Nr1[i]) + X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i] + frag_size[i] = mu2 + np.sqrt(2) * sigma2 * straub_erfinv( + 2 * X - 1 ) elif rand[i] < (Nr3[i] + Nr2[i] + Nr1[i]) / Nrt[i]: - frag_size[i] = straub_p3( - CW[i], - ds[i], - (rand[i] * Nrt[i] - Nr2[i]) / (Nr3[i] - Nr2[i]), + X = (rand[i] * Nrt[i] - Nr1[i] - Nr2[i]) / Nr3[i] + frag_size[i] = mu3 + np.sqrt(2) * sigma3 * straub_erfinv( + 2 * X - 1 ) else: - frag_size[i] = straub_p4( - CW[i], ds[i], v_max[i], Nr1[i], Nr2[i], Nr3[i] - ) + frag_size[i] = d34 + + frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 self.__straub_fragmentation_body = __straub_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "LowList1982Nf": diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 5d4584bab4..691b10f07f 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -40,6 +40,19 @@ def p1(const, rand, sigma1): ) ) + @staticmethod + def params_p1(const, CW): + sigma1 = np.sqrt( + np.log( + np.power((np.sqrt(CW) / 8) / 10, 2) + / 12 + / np.power(const.STRAUB_E_D1, const.TWO) + + 1 + ) + ) + mu1 = np.log(const.STRAUB_E_D1) - np.power(sigma1, const.TWO) / 2 + return (mu1, sigma1) + @staticmethod def p2(const, CW, rand): return ( @@ -56,6 +69,14 @@ def p2(const, CW, rand): ) ) + @staticmethod + def 
params_p2(const, CW): + mu2 = const.STRAUB_MU2 + deltaD2 = 7 * (CW - 21) / 1000 + deltaD2 = max(0.0, deltaD2) + sigma2 = deltaD2 / np.sqrt(12) + return (mu2, sigma2) + @staticmethod def p3(const, CW, ds, rand): return ( @@ -72,6 +93,13 @@ def p3(const, CW, ds, rand): ) ) + @staticmethod + def params_p3(const, CW, ds): + mu3 = 0.9 * ds + deltaD3 = (1 + 0.76 * np.sqrt(CW)) / 100 + sigma3 = deltaD3 / np.sqrt(12) + return (mu3, sigma3) + @staticmethod def p4(const, CW, ds, v_max, Nr1, Nr2, Nr3): # pylint: disable=too-many-arguments return ( @@ -115,3 +143,27 @@ def p4(const, CW, ds, v_max, Nr1, Nr2, Nr3): # pylint: disable=too-many-argumen ) ) ) + + @staticmethod + def params_p4(const, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3, CW): + M31 = N1 * np.exp(3 * mu1 + 9 * np.power(sigma1, 2) / 2) + M32 = N2 * (mu2**3 + 3 * mu2 * sigma2**2) + M33 = N3 * (mu3**3 + 3 * mu3 * sigma3**2) + dl = np.exp(np.log(vl / 4 * 3 / np.pi) / 3) + M34 = dl**3 + ds**3 - M31 - M32 - M33 + if M34 <= 0.0: + d34 = 0 + M34 = 0 + else: + d34 = np.exp(np.log(M34) / 3) + return (M31, M32, M33, M34, d34) + + @staticmethod + def erfinv(X): + a = 8 * (np.pi - 3) / (3 * np.pi * (4 - np.pi)) + arg = (2 / np.pi / a) + np.log(1 - X**2) / 2 + arg = arg * arg + arg = arg - np.log(1 - X**2) / a + arg = np.sqrt(arg) + arg = arg - (2 / np.pi / a + np.log(1 - X**2) / 2) + return np.sqrt(arg) From 0239839b0eb389571ed96166454c71581c773101 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 13 Mar 2023 22:06:42 -0600 Subject: [PATCH 50/86] Remove unnecessary limiter --- PySDM/backends/impl_numba/methods/collisions_methods.py | 1 - 1 file changed, 1 deletion(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 9cdb867e94..f9986e19d2 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -739,7 +739,6 @@ def 
__fragmentation_limiters(n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y for i in numba.prange(len(frag_size)): # pylint: disable=not-an-iterable if np.isnan(frag_size[i]): frag_size[i] = x_plus_y[i] - frag_size[i] = min(frag_size[i], v_max[i]) frag_size[i] = min(frag_size[i], x_plus_y[i]) frag_size[i] = max(frag_size[i], vmin) if nfmax is not None: From 8051f9f91f5c995217e4a3aa48808423408c5efc Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 13 Mar 2023 23:40:00 -0600 Subject: [PATCH 51/86] Factor 2 in CKE, factor CM in straub delta D's --- .../dynamics/collisions/breakup_fragmentations/lowlist82.py | 2 +- .../collisions/breakup_fragmentations/straub2010.py | 5 ++--- PySDM/physics/fragmentation_function/straub2010nf.py | 6 +++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index 6f5e7cb7eb..382737da34 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -78,7 +78,7 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): ) self.arrays["CKE"].divide_if_not_zero(self.arrays["tmp"]) self.arrays["CKE"] *= self.arrays["tmp2"] - self.arrays["CKE"] *= self.const.rho_w + self.arrays["CKE"] *= self.const.rho_w / 2 self.arrays["We"][:] = self.arrays["CKE"][:] # TODO #976 self.arrays["W2"][:] = self.arrays["CKE"][:] diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py b/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py index 99b3c6ecc1..bb04c78ac3 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py @@ -47,12 +47,11 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): # compute the dimensionless numbers and CW=CKE * We self.arrays["tmp"].sum(self.particulator.attributes["volume"], 
is_first_in_pair) - self.arrays["Sc"][:] = self.arrays["tmp"][:] # TODO #976 + self.arrays["Sc"].sum(self.particulator.attributes["volume"], is_first_in_pair) self.arrays["Sc"] **= 2 / 3 self.arrays["Sc"] *= ( self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) ) - self.arrays["tmp"] *= 2 self.arrays["tmp2"].distance( self.particulator.attributes["terminal velocity"], is_first_in_pair ) @@ -62,7 +61,7 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): ) self.arrays["CKE"].divide_if_not_zero(self.arrays["tmp"]) self.arrays["CKE"] *= self.arrays["tmp2"] - self.arrays["CKE"] *= self.const.rho_w + self.arrays["CKE"] *= self.const.rho_w / 2 self.arrays["We"][:] = self.arrays["CKE"][:] # TODO #976 self.arrays["We"].divide_if_not_zero(self.arrays["Sc"]) diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 691b10f07f..048b1f5ce5 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -44,7 +44,7 @@ def p1(const, rand, sigma1): def params_p1(const, CW): sigma1 = np.sqrt( np.log( - np.power((np.sqrt(CW) / 8) / 10, 2) + np.power((np.sqrt(CW) / 8) / 10 * const.CM, 2) / 12 / np.power(const.STRAUB_E_D1, const.TWO) + 1 @@ -72,7 +72,7 @@ def p2(const, CW, rand): @staticmethod def params_p2(const, CW): mu2 = const.STRAUB_MU2 - deltaD2 = 7 * (CW - 21) / 1000 + deltaD2 = 7 * (CW - 21) / 1000 * const.CM deltaD2 = max(0.0, deltaD2) sigma2 = deltaD2 / np.sqrt(12) return (mu2, sigma2) @@ -96,7 +96,7 @@ def p3(const, CW, ds, rand): @staticmethod def params_p3(const, CW, ds): mu3 = 0.9 * ds - deltaD3 = (1 + 0.76 * np.sqrt(CW)) / 100 + deltaD3 = (1 + 0.76 * np.sqrt(CW)) / 100 * const.CM sigma3 = deltaD3 / np.sqrt(12) return (mu3, sigma3) From d854234a2ed20432abd6ae0d6b5c5fc551bd4f87 Mon Sep 17 00:00:00 2001 From: Oleksii Bulenok Date: Tue, 14 Mar 2023 23:19:01 +0100 Subject: [PATCH 52/86] Enhance prints in 
test_fragmentation_fn_distribution --- .../dynamics/collisions/test_fragmentations.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 8f3be41353..f9cfcfdbad 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -223,7 +223,7 @@ def test_fragmentation_limiters_nfmax( "fragmentation_fn", ( # AlwaysN(n=2), - ExponFrag(scale=1e6 * si.um**3), + # ExponFrag(scale=1e6 * si.um**3), # Feingold1988Frag(scale=1e6 * si.um**3), # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), # SLAMS(), @@ -232,7 +232,7 @@ def test_fragmentation_limiters_nfmax( ), ) def test_fragmentation_fn_distribution( - fragmentation_fn, + fragmentation_fn, plot=False ): # pylint: disable=redefined-outer-name # arrange from PySDM.backends import CPU @@ -304,9 +304,12 @@ def test_fragmentation_fn_distribution( # np.testing.assert_array_less([0.0], frag_size.to_ndarray()) # res = np.asarray(sorted(res, key=lambda x: x[1], reverse=True)) - print(res[:, 0]) - print(np.unique(res[:, 0]), len(np.unique(res[:, 0]))) - print(np.unique(res[:, 1]), len(np.unique(res[:, 1]))) + # print(res[:, 0]) + unique_nfs, nfs_counts = np.unique(res[:, 0], return_counts=True) + unique_frag_size, frag_sizes_counts = np.unique(res[:, 1], return_counts=True) + print("nfs", unique_nfs, nfs_counts) + print("frag_sizes", unique_frag_size, frag_sizes_counts) plt.plot(rns, res[:, 0]) - plt.show() + if plot: + plt.show() From 13caca3f6bc600f254d1865d35fcdcd6245049cd Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 14 Mar 2023 21:28:47 -0600 Subject: [PATCH 53/86] Swap out erfinv approx --- .../impl_numba/methods/collisions_methods.py | 10 +- .../fragmentation_function/gaussian.py | 6 +- .../fragmentation_function/straub2010nf.py | 18 +- .../collisions/test_fragmentations.py | 394 
+++++++++--------- 4 files changed, 215 insertions(+), 213 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index f9986e19d2..5e12c74995 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -363,11 +363,6 @@ def __ll82_coalescence_check_body(*, Ec, ds, dl): self.__ll82_coalescence_check_body = __ll82_coalescence_check_body if self.formulae.fragmentation_function.__name__ == "Straub2010Nf": - # straub_p1 = self.formulae.fragmentation_function.p1 - # straub_p2 = self.formulae.fragmentation_function.p2 - # straub_p3 = self.formulae.fragmentation_function.p3 - # straub_p4 = self.formulae.fragmentation_function.p4 - # straub_sigma1 = self.formulae.fragmentation_function.sigma1 straub_paramsp1 = self.formulae.fragmentation_function.params_p1 straub_paramsp2 = self.formulae.fragmentation_function.params_p2 straub_paramsp3 = self.formulae.fragmentation_function.params_p3 @@ -399,6 +394,7 @@ def __straub_fragmentation_body( Nr3[i], CW[i], ) + print(mu1, sigma1, mu2, sigma2, mu3, sigma3, d34) Nr1[i] = Nr1[i] * M31 Nr2[i] = Nr2[i] * M32 Nr3[i] = Nr3[i] * M33 @@ -521,7 +517,7 @@ def __ll82_fragmentation_body( self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": - gaussian_frag_size = self.formulae.fragmentation_function.frag_size + gaussian_erfinv = self.formulae.fragmentation_function.erfinv @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __gauss_fragmentation_body( @@ -530,7 +526,7 @@ def __gauss_fragmentation_body( for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): - frag_size[i] = gaussian_frag_size(mu, sigma, rand[i]) + frag_size[i] = mu + sigma * gaussian_erfinv(rand[i]) self.__gauss_fragmentation_body = __gauss_fragmentation_body elif self.formulae.fragmentation_function.__name__ 
== "Feingold1988Frag": diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index 09ea86148f..bdbcfcfce6 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -4,11 +4,13 @@ """ import math +import numpy as np + class Gaussian: # pylint: disable=too-few-public-methods def __init__(self, _): pass @staticmethod - def frag_size(const, mu, sigma, rand): - return mu + sigma / 2 * (1 + math.erf(rand / const.sqrt_two)) + def erfinv(X): + return np.arctanh(2 * X - 1) * 2 * np.sqrt(3) / np.pi diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 048b1f5ce5..7177f58032 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -158,12 +158,16 @@ def params_p4(const, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3, d34 = np.exp(np.log(M34) / 3) return (M31, M32, M33, M34, d34) + # @staticmethod + # def erfinv(X): + # a = 8 * (np.pi - 3) / (3 * np.pi * (4 - np.pi)) + # arg = (2 / np.pi / a) + np.log(1 - X**2) / 2 + # arg = arg * arg + # arg = arg - np.log(1 - X**2) / a + # arg = np.sqrt(arg) + # arg = arg - (2 / np.pi / a + np.log(1 - X**2) / 2) + # return np.sqrt(arg) + @staticmethod def erfinv(X): - a = 8 * (np.pi - 3) / (3 * np.pi * (4 - np.pi)) - arg = (2 / np.pi / a) + np.log(1 - X**2) / 2 - arg = arg * arg - arg = arg - np.log(1 - X**2) / a - arg = np.sqrt(arg) - arg = arg - (2 / np.pi / a + np.log(1 - X**2) / 2) - return np.sqrt(arg) + return np.arctanh(2 * X - 1) * 2 * np.sqrt(3) / np.pi diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index f9cfcfdbad..4374fe853e 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -24,199 
+24,199 @@ class TestFragmentations: # pylint: disable=too-few-public-methods - @staticmethod - @pytest.mark.parametrize( - "fragmentation_fn", - ( - AlwaysN(n=2), - ExponFrag(scale=1e6 * si.um**3), - Feingold1988Frag(scale=1e6 * si.um**3), - Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), - SLAMS(), - Straub2010Nf(), - LowList1982Nf(), - ), - ) - def test_fragmentation_fn_call( - fragmentation_fn, backend_class - ): # pylint: disable=redefined-outer-name - # arrange - volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - fragments = np.asarray([-1.0]) - builder = Builder( - volume.size, - backend_class( - Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - ), - ) - sut = fragmentation_fn - sut.vmin = 1 * si.um**3 - sut.register(builder) - builder.set_environment(Box(dv=None, dt=None)) - _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - _PairwiseStorage = builder.particulator.PairwiseStorage - _Indicator = builder.particulator.PairIndicator - nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - is_first_in_pair = _Indicator(length=volume.size) - is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - np.asarray([True, False]) - ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) - - # act - sut(nf, frag_size, u01, is_first_in_pair) - - # Assert - np.testing.assert_array_less([0.99], nf.to_ndarray()) - np.testing.assert_array_less([0.0], frag_size.to_ndarray()) - - @staticmethod - @pytest.mark.parametrize( - "fragmentation_fn", - [ - ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - Gaussian(mu=2 * si.um**3, sigma=1 * si.um**3, vmin=6660.0 * si.um**3), - SLAMS(vmin=6660.0 * si.um**3), - Straub2010Nf(vmin=6660.0 * si.um**3), - LowList1982Nf(vmin=6660.0 * si.um**3), - pytest.param(AlwaysN(n=10), 
marks=pytest.mark.xfail(strict=True)), - ], - ) - def test_fragmentation_limiters_vmin( - fragmentation_fn, backend_class - ): # pylint: disable=redefined-outer-name - # arrange - volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - fragments = np.asarray([-1.0]) - builder = Builder( - volume.size, - backend_class( - Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), - double_precision=True, - ), - ) - sut = fragmentation_fn - sut.register(builder) - builder.set_environment(Box(dv=None, dt=None)) - _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - _PairwiseStorage = builder.particulator.PairwiseStorage - _Indicator = builder.particulator.PairIndicator - nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - is_first_in_pair = _Indicator(length=volume.size) - is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - np.asarray([True, False]) - ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # act - sut(nf, frag_size, u01, is_first_in_pair) - - # Assert - np.testing.assert_array_equal([(440.0 + 6660.0) / 6660.0], nf.to_ndarray()) - np.testing.assert_array_equal([6660.0 * si.um**3], frag_size.to_ndarray()) - - @staticmethod - @pytest.mark.parametrize( - "fragmentation_fn", - [ - ExponFrag(scale=1.0 * si.cm**3), - Feingold1988Frag(scale=1.0 * si.cm**3), - Gaussian(mu=1.0 * si.cm**3, sigma=1e6 * si.um**3), - SLAMS(), - Straub2010Nf(), - LowList1982Nf(), - pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), - ], - ) - def test_fragmentation_limiters_vmax( - fragmentation_fn, backend_class - ): # pylint: disable=redefined-outer-name - # arrange - volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - fragments = np.asarray([-1.0]) - builder = Builder( - volume.size, - backend_class( - Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - ), - ) - sut = 
fragmentation_fn - sut.vmin = 1 * si.um**3 - sut.register(builder) - builder.set_environment(Box(dv=None, dt=None)) - _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - _PairwiseStorage = builder.particulator.PairwiseStorage - _Indicator = builder.particulator.PairIndicator - nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - is_first_in_pair = _Indicator(length=volume.size) - is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - np.asarray([True, False]) - ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # act - sut(nf, frag_size, u01, is_first_in_pair) - - # Assert - np.testing.assert_array_less([(440.0 + 6660.0) / 6661.0], nf.to_ndarray()) - np.testing.assert_array_less(frag_size.to_ndarray(), [6661.0 * si.um**3]) - - @staticmethod - @pytest.mark.parametrize( - "fragmentation_fn", - [ - ExponFrag(scale=1.0 * si.um**3, nfmax=2), - Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), - Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), - SLAMS(nfmax=2), - Straub2010Nf(nfmax=2), - LowList1982Nf(nfmax=2), - pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), - ], - ) - def test_fragmentation_limiters_nfmax( - fragmentation_fn, backend_class - ): # pylint: disable=redefined-outer-name - # arrange - volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - fragments = np.asarray([-1.0]) - builder = Builder( - volume.size, - backend_class( - Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - ), - ) - sut = fragmentation_fn - sut.vmin = 1 * si.um**3 - sut.register(builder) - builder.set_environment(Box(dv=None, dt=None)) - _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - _PairwiseStorage = builder.particulator.PairwiseStorage - _Indicator = builder.particulator.PairIndicator - nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - frag_size 
= _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - is_first_in_pair = _Indicator(length=volume.size) - is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - np.asarray([True, False]) - ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # act - sut(nf, frag_size, u01, is_first_in_pair) - - # Assert - np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) - np.testing.assert_array_less( - [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() - ) + # @staticmethod + # @pytest.mark.parametrize( + # "fragmentation_fn", + # ( + # AlwaysN(n=2), + # ExponFrag(scale=1e6 * si.um**3), + # Feingold1988Frag(scale=1e6 * si.um**3), + # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), + # SLAMS(), + # Straub2010Nf(), + # LowList1982Nf(), + # ), + # ) + # def test_fragmentation_fn_call( + # fragmentation_fn, backend_class + # ): # pylint: disable=redefined-outer-name + # # arrange + # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + # fragments = np.asarray([-1.0]) + # builder = Builder( + # volume.size, + # backend_class( + # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + # ), + # ) + # sut = fragmentation_fn + # sut.vmin = 1 * si.um**3 + # sut.register(builder) + # builder.set_environment(Box(dv=None, dt=None)) + # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + # _PairwiseStorage = builder.particulator.PairwiseStorage + # _Indicator = builder.particulator.PairIndicator + # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # is_first_in_pair = _Indicator(length=volume.size) + # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + # np.asarray([True, False]) + # ) + # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) + + # # act + # sut(nf, frag_size, u01, is_first_in_pair) + + # # Assert + # np.testing.assert_array_less([0.99], 
nf.to_ndarray()) + # np.testing.assert_array_less([0.0], frag_size.to_ndarray()) + + # @staticmethod + # @pytest.mark.parametrize( + # "fragmentation_fn", + # [ + # ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + # Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + # Gaussian(mu=2 * si.um**3, sigma=1 * si.um**3, vmin=6660.0 * si.um**3), + # SLAMS(vmin=6660.0 * si.um**3), + # Straub2010Nf(vmin=6660.0 * si.um**3), + # LowList1982Nf(vmin=6660.0 * si.um**3), + # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + # ], + # ) + # def test_fragmentation_limiters_vmin( + # fragmentation_fn, backend_class + # ): # pylint: disable=redefined-outer-name + # # arrange + # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + # fragments = np.asarray([-1.0]) + # builder = Builder( + # volume.size, + # backend_class( + # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), + # double_precision=True, + # ), + # ) + # sut = fragmentation_fn + # sut.register(builder) + # builder.set_environment(Box(dv=None, dt=None)) + # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + # _PairwiseStorage = builder.particulator.PairwiseStorage + # _Indicator = builder.particulator.PairIndicator + # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # is_first_in_pair = _Indicator(length=volume.size) + # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + # np.asarray([True, False]) + # ) + # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # # act + # sut(nf, frag_size, u01, is_first_in_pair) + + # # Assert + # np.testing.assert_array_equal([(440.0 + 6660.0) / 6660.0], nf.to_ndarray()) + # np.testing.assert_array_equal([6660.0 * si.um**3], frag_size.to_ndarray()) + + # @staticmethod + # @pytest.mark.parametrize( + # "fragmentation_fn", + # [ + # ExponFrag(scale=1.0 * si.cm**3), + # 
Feingold1988Frag(scale=1.0 * si.cm**3), + # Gaussian(mu=1.0 * si.cm**3, sigma=1e6 * si.um**3), + # SLAMS(), + # Straub2010Nf(), + # LowList1982Nf(), + # pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), + # ], + # ) + # def test_fragmentation_limiters_vmax( + # fragmentation_fn, backend_class + # ): # pylint: disable=redefined-outer-name + # # arrange + # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + # fragments = np.asarray([-1.0]) + # builder = Builder( + # volume.size, + # backend_class( + # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + # ), + # ) + # sut = fragmentation_fn + # sut.vmin = 1 * si.um**3 + # sut.register(builder) + # builder.set_environment(Box(dv=None, dt=None)) + # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + # _PairwiseStorage = builder.particulator.PairwiseStorage + # _Indicator = builder.particulator.PairIndicator + # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # is_first_in_pair = _Indicator(length=volume.size) + # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + # np.asarray([True, False]) + # ) + # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # # act + # sut(nf, frag_size, u01, is_first_in_pair) + + # # Assert + # np.testing.assert_array_less([(440.0 + 6660.0) / 6661.0], nf.to_ndarray()) + # np.testing.assert_array_less(frag_size.to_ndarray(), [6661.0 * si.um**3]) + + # @staticmethod + # @pytest.mark.parametrize( + # "fragmentation_fn", + # [ + # ExponFrag(scale=1.0 * si.um**3, nfmax=2), + # Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), + # Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), + # SLAMS(nfmax=2), + # Straub2010Nf(nfmax=2), + # LowList1982Nf(nfmax=2), + # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + # ], + # ) + # def test_fragmentation_limiters_nfmax( + # 
fragmentation_fn, backend_class + # ): # pylint: disable=redefined-outer-name + # # arrange + # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + # fragments = np.asarray([-1.0]) + # builder = Builder( + # volume.size, + # backend_class( + # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + # ), + # ) + # sut = fragmentation_fn + # sut.vmin = 1 * si.um**3 + # sut.register(builder) + # builder.set_environment(Box(dv=None, dt=None)) + # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + # _PairwiseStorage = builder.particulator.PairwiseStorage + # _Indicator = builder.particulator.PairIndicator + # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + # is_first_in_pair = _Indicator(length=volume.size) + # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + # np.asarray([True, False]) + # ) + # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # # act + # sut(nf, frag_size, u01, is_first_in_pair) + + # # Assert + # np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) + # np.testing.assert_array_less( + # [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() + # ) @staticmethod @pytest.mark.parametrize( @@ -227,12 +227,12 @@ def test_fragmentation_limiters_nfmax( # Feingold1988Frag(scale=1e6 * si.um**3), # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), # SLAMS(), - # Straub2010Nf(), - LowList1982Nf(), + Straub2010Nf(), + # LowList1982Nf(), ), ) def test_fragmentation_fn_distribution( - fragmentation_fn, plot=False + fragmentation_fn, plot=True ): # pylint: disable=redefined-outer-name # arrange from PySDM.backends import CPU @@ -310,6 +310,6 @@ def test_fragmentation_fn_distribution( print("nfs", unique_nfs, nfs_counts) print("frag_sizes", unique_frag_size, frag_sizes_counts) - plt.plot(rns, res[:, 0]) + plt.hist(res[:, 0]) if plot: plt.show() From 
6533e5712e1805fe83e0e858ab4ba72934aafea7 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 14 Mar 2023 21:37:59 -0600 Subject: [PATCH 54/86] Fix error in straub p4 --- .../impl_numba/methods/collisions_methods.py | 11 +++-------- .../physics/fragmentation_function/straub2010nf.py | 13 +------------ 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 5e12c74995..93ce8bd878 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -394,7 +394,6 @@ def __straub_fragmentation_body( Nr3[i], CW[i], ) - print(mu1, sigma1, mu2, sigma2, mu3, sigma3, d34) Nr1[i] = Nr1[i] * M31 Nr2[i] = Nr2[i] * M32 Nr3[i] = Nr3[i] * M33 @@ -403,18 +402,14 @@ def __straub_fragmentation_body( if rand[i] < Nr1[i] / Nrt[i]: X = rand[i] * Nrt[i] / Nr1[i] - lnarg = mu1 + np.sqrt(2) * sigma1 * straub_erfinv(2 * X - 1) + lnarg = mu1 + np.sqrt(2) * sigma1 * straub_erfinv(X) frag_size[i] = np.exp(lnarg) elif rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]: X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i] - frag_size[i] = mu2 + np.sqrt(2) * sigma2 * straub_erfinv( - 2 * X - 1 - ) + frag_size[i] = mu2 + np.sqrt(2) * sigma2 * straub_erfinv(X) elif rand[i] < (Nr3[i] + Nr2[i] + Nr1[i]) / Nrt[i]: X = (rand[i] * Nrt[i] - Nr1[i] - Nr2[i]) / Nr3[i] - frag_size[i] = mu3 + np.sqrt(2) * sigma3 * straub_erfinv( - 2 * X - 1 - ) + frag_size[i] = mu3 + np.sqrt(2) * sigma3 * straub_erfinv(X) else: frag_size[i] = d34 diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 7177f58032..397fc39922 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -149,8 +149,7 @@ def params_p4(const, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3, M31 = N1 * np.exp(3 * mu1 + 9 * 
np.power(sigma1, 2) / 2) M32 = N2 * (mu2**3 + 3 * mu2 * sigma2**2) M33 = N3 * (mu3**3 + 3 * mu3 * sigma3**2) - dl = np.exp(np.log(vl / 4 * 3 / np.pi) / 3) - M34 = dl**3 + ds**3 - M31 - M32 - M33 + M34 = vl * 6 / np.pi + ds**3 - M31 - M32 - M33 if M34 <= 0.0: d34 = 0 M34 = 0 @@ -158,16 +157,6 @@ def params_p4(const, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3, d34 = np.exp(np.log(M34) / 3) return (M31, M32, M33, M34, d34) - # @staticmethod - # def erfinv(X): - # a = 8 * (np.pi - 3) / (3 * np.pi * (4 - np.pi)) - # arg = (2 / np.pi / a) + np.log(1 - X**2) / 2 - # arg = arg * arg - # arg = arg - np.log(1 - X**2) / a - # arg = np.sqrt(arg) - # arg = arg - (2 / np.pi / a + np.log(1 - X**2) / 2) - # return np.sqrt(arg) - @staticmethod def erfinv(X): return np.arctanh(2 * X - 1) * 2 * np.sqrt(3) / np.pi From ade98dbd0869cc20751bab6bcb7c697b032133b4 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Thu, 23 Mar 2023 12:19:03 -0600 Subject: [PATCH 55/86] Fixes to straub and fragmentation limiters --- .../impl_numba/methods/collisions_methods.py | 10 +++++----- PySDM/physics/fragmentation_function/straub2010nf.py | 12 ++++++++---- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 93ce8bd878..7b4d486659 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -731,11 +731,11 @@ def __fragmentation_limiters(n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y if np.isnan(frag_size[i]): frag_size[i] = x_plus_y[i] frag_size[i] = min(frag_size[i], x_plus_y[i]) - frag_size[i] = max(frag_size[i], vmin) - if nfmax is not None: - if x_plus_y[i] / frag_size[i] > nfmax: - frag_size[i] = x_plus_y[i] / nfmax - if frag_size[i] == 0.0: + if nfmax is not None and x_plus_y[i] / frag_size[i] > nfmax: + frag_size[i] = x_plus_y[i] / nfmax + elif frag_size[i] < vmin: + 
frag_size[i] = x_plus_y[i] + elif frag_size[i] == 0.0: frag_size[i] = x_plus_y[i] n_fragment[i] = x_plus_y[i] / frag_size[i] diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 397fc39922..fbf6976136 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -14,9 +14,8 @@ def __init__(self, _): def sigma1(const, CW): return np.sqrt( np.log( - np.power((np.sqrt(CW) / 8) / 10, 2) - / 12 - / np.power(const.STRAUB_E_D1, const.TWO) + # np.power((np.sqrt(CW) / 8) / 10, 2) + CW / 64 / 100 / 12 / np.power(const.STRAUB_E_D1, const.TWO) + 1 ) ) @@ -44,7 +43,12 @@ def p1(const, rand, sigma1): def params_p1(const, CW): sigma1 = np.sqrt( np.log( - np.power((np.sqrt(CW) / 8) / 10 * const.CM, 2) + # np.power((np.sqrt(CW) / 8) / 10 * const.CM, 2) + CW + / 64 + / 100 + * const.CM + * const.CM / 12 / np.power(const.STRAUB_E_D1, const.TWO) + 1 From 376eefe00c7756ef69576f2f97a426134acb698e Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Thu, 23 Mar 2023 13:29:25 -0600 Subject: [PATCH 56/86] Reorder limiters & fix fragmentation tests --- .../impl_numba/methods/collisions_methods.py | 1 - .../fragmentation_function/straub2010nf.py | 108 +---- .../collisions/test_fragmentations.py | 408 +++++++++--------- .../physics/test_fragmentation_functions.py | 51 ++- 4 files changed, 232 insertions(+), 336 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 7b4d486659..7679537aec 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -392,7 +392,6 @@ def __straub_fragmentation_body( Nr1[i], Nr2[i], Nr3[i], - CW[i], ) Nr1[i] = Nr1[i] * M31 Nr2[i] = Nr2[i] * M32 diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py 
index fbf6976136..b313afdc29 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -10,40 +10,10 @@ class Straub2010Nf: # pylint: disable=too-few-public-methods def __init__(self, _): pass - @staticmethod - def sigma1(const, CW): - return np.sqrt( - np.log( - # np.power((np.sqrt(CW) / 8) / 10, 2) - CW / 64 / 100 / 12 / np.power(const.STRAUB_E_D1, const.TWO) - + 1 - ) - ) - - @staticmethod - def p1(const, rand, sigma1): - return ( - const.PI - / 6 - * np.power( - np.exp( - np.log(const.STRAUB_E_D1) - - np.power(sigma1, const.TWO) / 2 - - sigma1 - / const.sqrt_two - / const.sqrt_pi - / const.LN_2 - * np.log((1 / const.TWO + rand) / (const.THREE / const.TWO - rand)) - ), - const.THREE, - ) - ) - @staticmethod def params_p1(const, CW): sigma1 = np.sqrt( np.log( - # np.power((np.sqrt(CW) / 8) / 10 * const.CM, 2) CW / 64 / 100 @@ -57,22 +27,6 @@ def params_p1(const, CW): mu1 = np.log(const.STRAUB_E_D1) - np.power(sigma1, const.TWO) / 2 return (mu1, sigma1) - @staticmethod - def p2(const, CW, rand): - return ( - const.PI - / 6 - * np.power( - const.STRAUB_MU2 - - (np.power(7 * (CW - 21) / 1000, const.TWO) / 12) - / const.sqrt_two - / const.sqrt_pi - / const.LN_2 - * np.log((1 / const.TWO + rand) / (const.THREE / const.TWO - rand)), - const.THREE, - ) - ) - @staticmethod def params_p2(const, CW): mu2 = const.STRAUB_MU2 @@ -81,22 +35,6 @@ def params_p2(const, CW): sigma2 = deltaD2 / np.sqrt(12) return (mu2, sigma2) - @staticmethod - def p3(const, CW, ds, rand): - return ( - const.PI - / 6 - * np.power( - (9 * ds / 10) - - (np.power((76 * np.sqrt(CW) / 100 + 1) / 100, const.TWO) / 12) - / const.sqrt_two - / const.sqrt_pi - / const.LN_2 - * np.log((1 / const.TWO + rand) / (const.THREE / const.TWO - rand)), - const.THREE, - ) - ) - @staticmethod def params_p3(const, CW, ds): mu3 = 0.9 * ds @@ -105,51 +43,7 @@ def params_p3(const, CW, ds): return (mu3, sigma3) @staticmethod - def p4(const, CW, ds, v_max, 
Nr1, Nr2, Nr3): # pylint: disable=too-many-arguments - return ( - const.PI - / 6 - * ( - v_max / const.PI_4_3 * 8 - + np.power(ds, const.THREE) - - Nr1 - * np.exp( - 3 * np.log(const.STRAUB_E_D1) - + 6 - * np.log( - np.power((np.sqrt(CW) / 8) / 10, const.TWO) - / 12 - / np.power(const.STRAUB_E_D1, const.TWO) - + 1 - ) - / 2 - ) - - Nr2 - * ( - np.power(const.STRAUB_MU2, const.THREE) - + 3 - * const.STRAUB_MU2 - * np.power( - np.power(7 * (CW - 21) / 1000, const.TWO) / 12, const.TWO - ) - ) - - Nr3 - * ( - np.power(9 * ds / 10, const.THREE) - + 3 - * 9 - * ds - / 10 - * np.power( - np.power((76 * np.sqrt(CW) / 100 + 1) / 100, const.TWO) / 12, - const.TWO, - ) - ) - ) - ) - - @staticmethod - def params_p4(const, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3, CW): + def params_p4(const, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3): M31 = N1 * np.exp(3 * mu1 + 9 * np.power(sigma1, 2) / 2) M32 = N2 * (mu2**3 + 3 * mu2 * sigma2**2) M33 = N3 * (mu3**3 + 3 * mu3 * sigma3**2) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 4374fe853e..53a7134fb0 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -12,7 +12,6 @@ ExponFrag, Feingold1988Frag, Gaussian, - LowList1982Nf, Straub2010Nf, ) from PySDM.environments import Box @@ -24,215 +23,214 @@ class TestFragmentations: # pylint: disable=too-few-public-methods - # @staticmethod - # @pytest.mark.parametrize( - # "fragmentation_fn", - # ( - # AlwaysN(n=2), - # ExponFrag(scale=1e6 * si.um**3), - # Feingold1988Frag(scale=1e6 * si.um**3), - # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), - # SLAMS(), - # Straub2010Nf(), - # LowList1982Nf(), - # ), - # ) - # def test_fragmentation_fn_call( - # fragmentation_fn, backend_class - # ): # pylint: disable=redefined-outer-name - # # arrange - # volume = np.asarray([440.0 * si.um**3, 
6660.0 * si.um**3]) - # fragments = np.asarray([-1.0]) - # builder = Builder( - # volume.size, - # backend_class( - # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - # ), - # ) - # sut = fragmentation_fn - # sut.vmin = 1 * si.um**3 - # sut.register(builder) - # builder.set_environment(Box(dv=None, dt=None)) - # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - # _PairwiseStorage = builder.particulator.PairwiseStorage - # _Indicator = builder.particulator.PairIndicator - # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # is_first_in_pair = _Indicator(length=volume.size) - # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - # np.asarray([True, False]) - # ) - # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) - - # # act - # sut(nf, frag_size, u01, is_first_in_pair) - - # # Assert - # np.testing.assert_array_less([0.99], nf.to_ndarray()) - # np.testing.assert_array_less([0.0], frag_size.to_ndarray()) - - # @staticmethod - # @pytest.mark.parametrize( - # "fragmentation_fn", - # [ - # ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - # Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), - # Gaussian(mu=2 * si.um**3, sigma=1 * si.um**3, vmin=6660.0 * si.um**3), - # SLAMS(vmin=6660.0 * si.um**3), - # Straub2010Nf(vmin=6660.0 * si.um**3), - # LowList1982Nf(vmin=6660.0 * si.um**3), - # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), - # ], - # ) - # def test_fragmentation_limiters_vmin( - # fragmentation_fn, backend_class - # ): # pylint: disable=redefined-outer-name - # # arrange - # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - # fragments = np.asarray([-1.0]) - # builder = Builder( - # volume.size, - # backend_class( - # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), - # double_precision=True, - # ), - # ) - # sut = 
fragmentation_fn - # sut.register(builder) - # builder.set_environment(Box(dv=None, dt=None)) - # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - # _PairwiseStorage = builder.particulator.PairwiseStorage - # _Indicator = builder.particulator.PairIndicator - # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # is_first_in_pair = _Indicator(length=volume.size) - # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - # np.asarray([True, False]) - # ) - # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # # act - # sut(nf, frag_size, u01, is_first_in_pair) - - # # Assert - # np.testing.assert_array_equal([(440.0 + 6660.0) / 6660.0], nf.to_ndarray()) - # np.testing.assert_array_equal([6660.0 * si.um**3], frag_size.to_ndarray()) - - # @staticmethod - # @pytest.mark.parametrize( - # "fragmentation_fn", - # [ - # ExponFrag(scale=1.0 * si.cm**3), - # Feingold1988Frag(scale=1.0 * si.cm**3), - # Gaussian(mu=1.0 * si.cm**3, sigma=1e6 * si.um**3), - # SLAMS(), - # Straub2010Nf(), - # LowList1982Nf(), - # pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), - # ], - # ) - # def test_fragmentation_limiters_vmax( - # fragmentation_fn, backend_class - # ): # pylint: disable=redefined-outer-name - # # arrange - # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - # fragments = np.asarray([-1.0]) - # builder = Builder( - # volume.size, - # backend_class( - # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - # ), - # ) - # sut = fragmentation_fn - # sut.vmin = 1 * si.um**3 - # sut.register(builder) - # builder.set_environment(Box(dv=None, dt=None)) - # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - # _PairwiseStorage = builder.particulator.PairwiseStorage - # _Indicator = builder.particulator.PairIndicator - # nf = 
_PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # is_first_in_pair = _Indicator(length=volume.size) - # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - # np.asarray([True, False]) - # ) - # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # # act - # sut(nf, frag_size, u01, is_first_in_pair) - - # # Assert - # np.testing.assert_array_less([(440.0 + 6660.0) / 6661.0], nf.to_ndarray()) - # np.testing.assert_array_less(frag_size.to_ndarray(), [6661.0 * si.um**3]) - - # @staticmethod - # @pytest.mark.parametrize( - # "fragmentation_fn", - # [ - # ExponFrag(scale=1.0 * si.um**3, nfmax=2), - # Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), - # Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), - # SLAMS(nfmax=2), - # Straub2010Nf(nfmax=2), - # LowList1982Nf(nfmax=2), - # pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), - # ], - # ) - # def test_fragmentation_limiters_nfmax( - # fragmentation_fn, backend_class - # ): # pylint: disable=redefined-outer-name - # # arrange - # volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) - # fragments = np.asarray([-1.0]) - # builder = Builder( - # volume.size, - # backend_class( - # Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) - # ), - # ) - # sut = fragmentation_fn - # sut.vmin = 1 * si.um**3 - # sut.register(builder) - # builder.set_environment(Box(dv=None, dt=None)) - # _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - - # _PairwiseStorage = builder.particulator.PairwiseStorage - # _Indicator = builder.particulator.PairIndicator - # nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) - # is_first_in_pair = _Indicator(length=volume.size) - # is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( - # np.asarray([True, False]) - # 
) - # u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) - - # # act - # sut(nf, frag_size, u01, is_first_in_pair) - - # # Assert - # np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) - # np.testing.assert_array_less( - # [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() - # ) + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + ( + AlwaysN(n=2), + ExponFrag(scale=1e6 * si.um**3), + Feingold1988Frag(scale=1e6 * si.um**3), + Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), + SLAMS(), + Straub2010Nf(), + ), + ) + def test_fragmentation_fn_call( + fragmentation_fn, backend_class + ): # pylint: disable=redefined-outer-name + # arrange + volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + fragments = np.asarray([-1.0]) + builder = Builder( + volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + ), + ) + sut = fragmentation_fn + sut.vmin = 1 * si.um**3 + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_less([0.99], nf.to_ndarray()) + np.testing.assert_array_less([0.0], frag_size.to_ndarray()) + + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + [ + ExponFrag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + Feingold1988Frag(scale=1 * si.um**3, vmin=6660.0 * si.um**3), + Gaussian(mu=2 * si.um**3, 
sigma=1 * si.um**3, vmin=6660.0 * si.um**3), + SLAMS(vmin=6660.0 * si.um**3), + Straub2010Nf(vmin=6660.0 * si.um**3), + pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + ], + ) + def test_fragmentation_limiters_vmin( + fragmentation_fn, backend_class + ): # pylint: disable=redefined-outer-name + # arrange + volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + fragments = np.asarray([-1.0]) + builder = Builder( + volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__), + double_precision=True, + ), + ) + sut = fragmentation_fn + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_equal([1.0], nf.to_ndarray()) + np.testing.assert_array_equal( + [(6660.0 + 440.0) * si.um**3], frag_size.to_ndarray() + ) + + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + [ + ExponFrag(scale=1.0 * si.cm**3), + Feingold1988Frag(scale=1.0 * si.cm**3), + Gaussian(mu=1.0 * si.cm**3, sigma=1e6 * si.um**3), + SLAMS(), + Straub2010Nf(), + pytest.param(AlwaysN(n=0.01), marks=pytest.mark.xfail(strict=True)), + ], + ) + def test_fragmentation_limiters_vmax( + fragmentation_fn, backend_class + ): # pylint: disable=redefined-outer-name + # arrange + volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + fragments = np.asarray([-1.0]) + builder = Builder( + 
volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + ), + ) + sut = fragmentation_fn + sut.vmin = 1 * si.um**3 + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_less([0.999], nf.to_ndarray()) + np.testing.assert_array_less( + frag_size.to_ndarray(), [(6661.0 + 440.0) * si.um**3] + ) + + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn", + [ + ExponFrag(scale=1.0 * si.um**3, nfmax=2), + Feingold1988Frag(scale=1.0 * si.um**3, nfmax=2), + Gaussian(mu=1.0 * si.um**3, sigma=1e6 * si.um**3, nfmax=2), + SLAMS(nfmax=2), + Straub2010Nf(nfmax=2), + pytest.param(AlwaysN(n=10), marks=pytest.mark.xfail(strict=True)), + ], + ) + def test_fragmentation_limiters_nfmax( + fragmentation_fn, backend_class + ): # pylint: disable=redefined-outer-name + # arrange + volume = np.asarray([440.0 * si.um**3, 6660.0 * si.um**3]) + fragments = np.asarray([-1.0]) + builder = Builder( + volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + ), + ) + sut = fragmentation_fn + sut.vmin = 1 * si.um**3 + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = 
builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_less(nf.to_ndarray(), [2.0 + 1e-6]) + np.testing.assert_array_less( + [((6660.0 + 440.0) / 2 - 1) * si.um**3], frag_size.to_ndarray() + ) @staticmethod @pytest.mark.parametrize( "fragmentation_fn", ( - # AlwaysN(n=2), - # ExponFrag(scale=1e6 * si.um**3), - # Feingold1988Frag(scale=1e6 * si.um**3), - # Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), - # SLAMS(), + AlwaysN(n=2), + ExponFrag(scale=1e6 * si.um**3), + Feingold1988Frag(scale=1e6 * si.um**3), + Gaussian(mu=2e6 * si.um**3, sigma=1e6 * si.um**3), + SLAMS(), Straub2010Nf(), - # LowList1982Nf(), ), ) def test_fragmentation_fn_distribution( - fragmentation_fn, plot=True + fragmentation_fn, plot=False ): # pylint: disable=redefined-outer-name # arrange from PySDM.backends import CPU @@ -300,11 +298,11 @@ def test_fragmentation_fn_distribution( res[i][1] = frag_size[0] # Assert - # np.testing.assert_array_less([0.99], nf.to_ndarray()) - # np.testing.assert_array_less([0.0], frag_size.to_ndarray()) + np.testing.assert_array_less([0.99], nf.to_ndarray()) + np.testing.assert_array_less([0.0], frag_size.to_ndarray()) - # res = np.asarray(sorted(res, key=lambda x: x[1], reverse=True)) - # print(res[:, 0]) + res = np.asarray(sorted(res, key=lambda x: x[1], reverse=True)) + print(res[:, 0]) unique_nfs, nfs_counts = np.unique(res[:, 0], return_counts=True) unique_frag_size, frag_sizes_counts = np.unique(res[:, 1], return_counts=True) print("nfs", unique_nfs, nfs_counts) diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py 
b/tests/unit_tests/physics/test_fragmentation_functions.py index f7d8869408..ad9ee337cb 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -12,13 +12,12 @@ class TestFragmentationFunctions: # pylint:disable=too-few-public-methods def test_straub_p1(): # arrange formulae = Formulae(fragmentation_function="Straub2010Nf") - sigma1 = formulae.fragmentation_function.sigma1(CW=0.666) # act - frag_size = formulae.fragmentation_function.p1(sigma1=sigma1, rand=0) + params = formulae.fragmentation_function.params_p1(CW=30.0) # assert - np.testing.assert_approx_equal(frag_size, 3.6490627e-12) + np.testing.assert_array_almost_equal(params, [-7.933269, 0.467381]) @staticmethod def test_straub_p2(): @@ -26,10 +25,10 @@ def test_straub_p2(): formulae = Formulae(fragmentation_function="Straub2010Nf") # act - frag_size = formulae.fragmentation_function.p2(CW=0.666, rand=0) + params = formulae.fragmentation_function.params_p2(CW=30.0) # assert - np.testing.assert_approx_equal(frag_size, 4.3000510e-09) + np.testing.assert_array_almost_equal(params, [0.00095, 0.000182]) @staticmethod def test_straub_p3(): @@ -37,10 +36,10 @@ def test_straub_p3(): formulae = Formulae(fragmentation_function="Straub2010Nf") # act - frag_size = formulae.fragmentation_function.p3(CW=0.666, ds=0, rand=0) + params = formulae.fragmentation_function.params_p3(CW=30.0, ds=0.18 * si.cm) # assert - np.testing.assert_approx_equal(frag_size, 1.3857897e-15) + np.testing.assert_array_almost_equal(params, [0.00162, 0.000149]) @staticmethod def test_straub_p4(): @@ -48,17 +47,25 @@ def test_straub_p4(): formulae = Formulae(fragmentation_function="Straub2010Nf") # act - frag_size = formulae.fragmentation_function.p4( - CW=0.666, - ds=0, - v_max=0, - Nr1=1, - Nr2=2, - Nr3=0, + params = formulae.fragmentation_function.params_p4( + vl=(0.36 * si.cm) ** 3 * np.pi / 6, + ds=0.18 * si.cm, + mu1=-7.933269, + sigma1=0.467381, + mu2=0.00095, + 
sigma2=0.000182, + mu3=0.00162, + sigma3=0.000149, + N1=2.0, + N2=1.0, + N3=1.0, ) # assert - np.testing.assert_approx_equal(frag_size, -5.6454883153e-06) + np.testing.assert_array_almost_equal( + params, + [2.465004e-10, 9.517784e-10, 4.359425e-09, 4.693030e-08, 3.607041e-03], + ) @staticmethod def test_ll82_pf1(): @@ -83,7 +90,7 @@ def test_ll82_pf2(): params = formulae.fragmentation_function.params_f2(ds=0.18 * si.cm) # assert - np.testing.assert_array_equal( + np.testing.assert_array_almost_equal( params, (31.081892267202157, 0.18, 0.01283519925273017) ) @@ -98,7 +105,7 @@ def test_ll82_pf3(): ) # assert - np.testing.assert_array_equal( + np.testing.assert_array_almost_equal( params, (11.078017412424996, -3.4579794266811095, 0.21024917628814235) ) @@ -113,7 +120,7 @@ def test_ll82_ps1(): ) # assert - np.testing.assert_array_equal( + np.testing.assert_array_almost_equal( params, (55.710586181217394, 0.36, 0.007344262785151853) ) @@ -128,7 +135,7 @@ def test_ll82_ps2(): ) # assert - np.testing.assert_array_equal( + np.testing.assert_array_almost_equal( params, (13.120297517162507, -2.0082590717125437, 0.24857168491193957) ) @@ -143,7 +150,7 @@ def test_ll82_pd1(): ) # assert - np.testing.assert_array_equal( + np.testing.assert_array_almost_equal( params, (24.080107809942664, 0.28666015630152986, 0.016567297254868083) ) @@ -158,9 +165,7 @@ def test_ll82_pd2(): ) # assert - np.testing.assert_array_equal( - params, (0.30464721998964595, -2.148778428091927, 3.1133226212867343e-147) - ) + np.testing.assert_array_almost_equal(params, [0.0, -4.967578, -4.967578]) @staticmethod def test_erfinv(): From 45d2931613ca027dc7f08cb04f80dd54ea2be8d9 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Thu, 23 Mar 2023 18:23:21 -0600 Subject: [PATCH 57/86] Make pylint happy --- .../impl_numba/methods/collisions_methods.py | 30 ++++++------------- .../breakup_fragmentations/exponential.py | 6 ---- .../breakup_fragmentations/feingold1988.py | 6 ---- 
.../breakup_fragmentations/gaussian.py | 6 ---- .../breakup_fragmentations/lowlist82.py | 1 - .../breakup_fragmentations/slams.py | 6 ---- .../coalescence_efficiencies/lowlist1982.py | 2 +- .../fragmentation_function/gaussian.py | 2 -- .../fragmentation_function/lowlist82.py | 14 ++++----- .../fragmentation_function/straub2010nf.py | 3 +- tests/backends_fixture.py | 2 +- .../collisions/test_fragmentations.py | 8 +++-- .../physics/test_fragmentation_functions.py | 2 -- 13 files changed, 25 insertions(+), 63 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 7679537aec..1eb80b7656 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -285,7 +285,7 @@ def ll82_Nr( # pylint: disable=too-many-arguments,unused-argument class CollisionsMethods(BackendMethods): - def __init__(self): + def __init__(self): # pylint: disable=too-many-statements,too-many-locals BackendMethods.__init__(self) _break_up = break_up_while if self.formulae.handle_all_breakups else break_up @@ -355,7 +355,7 @@ def __collision_coalescence_breakup_body( self.__collision_coalescence_breakup_body = __collision_coalescence_breakup_body @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) - def __ll82_coalescence_check_body(*, Ec, ds, dl): + def __ll82_coalescence_check_body(*, Ec, dl): for i in numba.prange(len(Ec)): # pylint: disable=not-an-iterable if dl[i] < 0.4e-3: Ec[i] = 1.0 @@ -372,7 +372,7 @@ def __ll82_coalescence_check_body(*, Ec, ds, dl): @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __straub_fragmentation_body( *, CW, gam, ds, v_max, frag_size, rand, Nr1, Nr2, Nr3, Nr4, Nrt - ): + ): # pylint: disable=too-many-arguments,too-many-locals for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): @@ -428,7 +428,7 @@ def __straub_fragmentation_body( 
@numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __ll82_fragmentation_body( *, CKE, W, W2, St, ds, dl, dcoal, frag_size, rand, Rf, Rs, Rd, tol - ): + ): # pylint: disable=too-many-branches,too-many-locals,too-many-statements for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): @@ -725,7 +725,7 @@ def collision_coalescence_breakup( @staticmethod @numba.njit(**{**conf.JIT_FLAGS}) # pylint: disable=too-many-arguments - def __fragmentation_limiters(n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y): + def __fragmentation_limiters(n_fragment, frag_size, vmin, nfmax, x_plus_y): for i in numba.prange(len(frag_size)): # pylint: disable=not-an-iterable if np.isnan(frag_size[i]): frag_size[i] = x_plus_y[i] @@ -738,13 +738,10 @@ def __fragmentation_limiters(n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y frag_size[i] = x_plus_y[i] n_fragment[i] = x_plus_y[i] / frag_size[i] - def fragmentation_limiters( - self, *, n_fragment, frag_size, v_max, vmin, nfmax, x_plus_y - ): + def fragmentation_limiters(self, *, n_fragment, frag_size, vmin, nfmax, x_plus_y): self.__fragmentation_limiters( n_fragment=n_fragment.data, frag_size=frag_size.data, - v_max=v_max.data, vmin=vmin, nfmax=nfmax, x_plus_y=x_plus_y.data, @@ -764,7 +761,7 @@ def __slams_fragmentation_body(n_fragment, frag_size, x_plus_y, probs, rand): frag_size[i] = x_plus_y[i] / n_fragment[i] def slams_fragmentation( - self, n_fragment, frag_size, v_max, x_plus_y, probs, rand, vmin, nfmax + self, n_fragment, frag_size, x_plus_y, probs, rand, vmin, nfmax ): # pylint: disable=too-many-arguments self.__slams_fragmentation_body( n_fragment.data, frag_size.data, x_plus_y.data, probs.data, rand.data @@ -772,7 +769,6 @@ def slams_fragmentation( self.__fragmentation_limiters( n_fragment=n_fragment.data, frag_size=frag_size.data, - v_max=v_max.data, vmin=vmin, nfmax=nfmax, x_plus_y=x_plus_y.data, @@ -794,7 +790,6 @@ def exp_fragmentation( n_fragment, scale, frag_size, - v_max, 
x_plus_y, rand, vmin, @@ -810,7 +805,6 @@ def exp_fragmentation( self.__fragmentation_limiters( n_fragment=n_fragment.data, frag_size=frag_size.data, - v_max=v_max.data, x_plus_y=x_plus_y.data, vmin=vmin, nfmax=nfmax, @@ -822,7 +816,6 @@ def feingold1988_fragmentation( n_fragment, scale, frag_size, - v_max, x_plus_y, rand, fragtol, @@ -840,14 +833,13 @@ def feingold1988_fragmentation( self.__fragmentation_limiters( n_fragment=n_fragment.data, frag_size=frag_size.data, - v_max=v_max.data, x_plus_y=x_plus_y.data, vmin=vmin, nfmax=nfmax, ) def gauss_fragmentation( - self, *, n_fragment, mu, sigma, frag_size, v_max, x_plus_y, rand, vmin, nfmax + self, *, n_fragment, mu, sigma, frag_size, x_plus_y, rand, vmin, nfmax ): self.__gauss_fragmentation_body( mu=mu, @@ -858,7 +850,6 @@ def gauss_fragmentation( self.__fragmentation_limiters( n_fragment=n_fragment.data, frag_size=frag_size.data, - v_max=v_max.data, x_plus_y=x_plus_y.data, vmin=vmin, nfmax=nfmax, @@ -900,7 +891,6 @@ def straub_fragmentation( self.__fragmentation_limiters( n_fragment=n_fragment.data, frag_size=frag_size.data, - v_max=v_max.data, x_plus_y=x_plus_y.data, vmin=vmin, nfmax=nfmax, @@ -947,16 +937,14 @@ def ll82_fragmentation( self.__fragmentation_limiters( n_fragment=n_fragment.data, frag_size=frag_size.data, - v_max=v_max.data, x_plus_y=x_plus_y.data, vmin=vmin, nfmax=nfmax, ) - def ll82_coalescence_check(self, *, Ec, ds, dl): + def ll82_coalescence_check(self, *, Ec, dl): self.__ll82_coalescence_check_body( Ec=Ec.data, - ds=ds.data, dl=dl.data, ) diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/exponential.py b/PySDM/dynamics/collisions/breakup_fragmentations/exponential.py index b286c2968d..3b64bfccc4 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/exponential.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/exponential.py @@ -10,20 +10,15 @@ def __init__(self, scale, vmin=0.0, nfmax=None): self.scale = scale self.vmin = vmin self.nfmax = nfmax - self.max_size = 
None self.sum_of_volumes = None def register(self, builder): self.particulator = builder.particulator - self.max_size = self.particulator.PairwiseStorage.empty( - self.particulator.n_sd // 2, dtype=float - ) self.sum_of_volumes = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) def __call__(self, nf, frag_size, u01, is_first_in_pair): - self.max_size.max(self.particulator.attributes["volume"], is_first_in_pair) self.sum_of_volumes.sum( self.particulator.attributes["volume"], is_first_in_pair ) @@ -31,7 +26,6 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): n_fragment=nf, scale=self.scale, frag_size=frag_size, - v_max=self.max_size, x_plus_y=self.sum_of_volumes, rand=u01, vmin=self.vmin, diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/feingold1988.py b/PySDM/dynamics/collisions/breakup_fragmentations/feingold1988.py index 726e5b8696..0943e3f0b9 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/feingold1988.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/feingold1988.py @@ -11,21 +11,16 @@ def __init__(self, scale, fragtol=1e-3, vmin=0.0, nfmax=None): self.fragtol = fragtol self.vmin = vmin self.nfmax = nfmax - self.max_size = None self.sum_of_volumes = None def register(self, builder): self.particulator = builder.particulator builder.request_attribute("volume") - self.max_size = self.particulator.PairwiseStorage.empty( - self.particulator.n_sd // 2, dtype=float - ) self.sum_of_volumes = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) def __call__(self, nf, frag_size, u01, is_first_in_pair): - self.max_size.max(self.particulator.attributes["volume"], is_first_in_pair) self.sum_of_volumes.sum( self.particulator.attributes["volume"], is_first_in_pair ) @@ -33,7 +28,6 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): n_fragment=nf, scale=self.scale, frag_size=frag_size, - v_max=self.max_size, x_plus_y=self.sum_of_volumes, rand=u01, 
fragtol=self.fragtol, diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/gaussian.py b/PySDM/dynamics/collisions/breakup_fragmentations/gaussian.py index 4d15f90255..e65b223e7f 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/gaussian.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/gaussian.py @@ -10,20 +10,15 @@ def __init__(self, mu, sigma, vmin=0.0, nfmax=None): self.sigma = sigma self.vmin = vmin self.nfmax = nfmax - self.max_size = None self.sum_of_volumes = None def register(self, builder): self.particulator = builder.particulator - self.max_size = self.particulator.PairwiseStorage.empty( - self.particulator.n_sd // 2, dtype=float - ) self.sum_of_volumes = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) def __call__(self, nf, frag_size, u01, is_first_in_pair): - self.max_size.max(self.particulator.attributes["volume"], is_first_in_pair) self.sum_of_volumes.sum( self.particulator.attributes["volume"], is_first_in_pair ) @@ -32,7 +27,6 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): mu=self.mu, sigma=self.sigma, frag_size=frag_size, - v_max=self.max_size, x_plus_y=self.sum_of_volumes, rand=u01, vmin=self.vmin, diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index 382737da34..c8728df1be 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -1,7 +1,6 @@ """ See Low & List 1982 """ -from PySDM.physics.constants import si class LowList1982Nf: diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/slams.py b/PySDM/dynamics/collisions/breakup_fragmentations/slams.py index 95cb0ba9c9..f64c219e46 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/slams.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/slams.py @@ -7,20 +7,17 @@ class SLAMS: def __init__(self, vmin=0.0, nfmax=None): 
self.particulator = None self.p_vec = None - self.max_size = None self.sum_of_volumes = None self.vmin = vmin self.nfmax = nfmax def __call__(self, nf, frag_size, u01, is_first_in_pair): - self.max_size.max(self.particulator.attributes["volume"], is_first_in_pair) self.sum_of_volumes.sum( self.particulator.attributes["volume"], is_first_in_pair ) self.particulator.backend.slams_fragmentation( n_fragment=nf, frag_size=frag_size, - v_max=self.max_size, x_plus_y=self.sum_of_volumes, probs=self.p_vec, rand=u01, @@ -33,9 +30,6 @@ def register(self, builder): self.p_vec = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) - self.max_size = self.particulator.PairwiseStorage.empty( - self.particulator.n_sd // 2, dtype=float - ) self.sum_of_volumes = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py index 9ea658095d..83d0aaa274 100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py @@ -93,5 +93,5 @@ def __call__(self, output, is_first_in_pair): output *= np.exp(self.arrays["tmp2"]) self.particulator.backend.ll82_coalescence_check( - Ec=output, ds=self.arrays["ds"], dl=self.arrays["dl"] + Ec=output, dl=self.arrays["dl"] ) diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index bdbcfcfce6..c122e1253f 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -2,8 +2,6 @@ Gaussian PDF CDF = 1/2(1 + erf(x/sqrt(2))); """ -import math - import numpy as np diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 7d5fe57946..1ddbd24a8d 100644 --- 
a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -17,7 +17,7 @@ def params_f1(const, dl, dcoal): Hf1 = 50.8 * (dlCM) ** (-0.718) mu = dlCM sigma = 1 / Hf1 - for i in range(10): + for _ in range(10): sigma = ( 1 / Hf1 @@ -77,7 +77,7 @@ def params_f3(const, ds, dl): sigmaf3 = 10 * Dff3 muf3 = np.log(Dff3) + sigmaf3**2 Hf3 = Pf0 * Dff3 / np.exp(-0.5 * sigmaf3**2) - for i in range(10): + for _ in range(10): if sigmaf3 == 0.0 or Hf3 == 0: return (0.0, np.log(ds0), np.log(ds0)) sigmaf3 = ( @@ -109,7 +109,7 @@ def params_s1(const, dl, ds, dcoal): Hs1 = 100 * np.exp(-3.25 * dsCM) mus1 = dlCM sigmas1 = 1 / Hs1 - for i in range(10): + for _ in range(10): sigmas1 = ( 1 / Hs1 @@ -133,7 +133,7 @@ def params_s2(const, dl, ds, St): Fs = 5 * math.erf((St - 2.52e-6) / (1.85e-6)) + 6 # (3.7) - for i in range(10): + for _ in range(10): sigmas2 = ( np.sqrt(2 / np.pi) * (Fs - 1) @@ -152,7 +152,7 @@ def params_d1(const, W1, dl, dcoal, CKE): mud1 = dlCM * (1 - np.exp(-3.70 * (3.10 - W1))) Hd1 = 1.58e-5 * CKE ** (-1.22) sigmad1 = 1 / Hd1 - for i in range(10): + for _ in range(10): sigmad1 = ( 1 / Hd1 @@ -178,10 +178,10 @@ def params_d2(const, ds, dl, CKE): if Fd == 1.0: return (0.0, np.log(Ddd2), np.log(Ddd2)) - for i in range(10): + for _ in range(10): if sigmad2 == 0.0 or Hd2 <= 0.1: return (0.0, np.log(Ddd2), np.log(Ddd2)) - elif sigmad2 >= 1.0: + if sigmad2 >= 1.0: return (0.0, np.log(Ddd2), np.log(Ddd2)) sigmad2 = ( np.sqrt(2 / np.pi) diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index b313afdc29..ffcb4eca0a 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -43,7 +43,8 @@ def params_p3(const, CW, ds): return (mu3, sigma3) @staticmethod - def params_p4(const, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3): + def params_p4(vl, ds, mu1, sigma1, mu2, sigma2, mu3, 
sigma3, N1, N2, N3): + # pylint: disable=too-many-arguments, too-many-locals M31 = N1 * np.exp(3 * mu1 + 9 * np.power(sigma1, 2) / 2) M32 = N2 * (mu2**3 + 3 * mu2 * sigma2**2) M33 = N3 * (mu3**3 + 3 * mu3 * sigma3**2) diff --git a/tests/backends_fixture.py b/tests/backends_fixture.py index f830869828..3983e21275 100644 --- a/tests/backends_fixture.py +++ b/tests/backends_fixture.py @@ -4,6 +4,6 @@ from PySDM.backends import CPU, GPU -@pytest.fixture(params=(CPU, CPU)) +@pytest.fixture(params=(CPU, GPU)) def backend_class(request): return request.param diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 53a7134fb0..636564b653 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -6,6 +6,7 @@ import pytest from PySDM import Builder, Formulae +from PySDM.backends import CPU from PySDM.dynamics.collisions.breakup_fragmentations import ( SLAMS, AlwaysN, @@ -231,14 +232,15 @@ def test_fragmentation_limiters_nfmax( ) def test_fragmentation_fn_distribution( fragmentation_fn, plot=False - ): # pylint: disable=redefined-outer-name + ): # pylint: disable=redefined-outer-name, too-many-locals # arrange - from PySDM.backends import CPU drop_size_L_diam = 0.4 * si.cm drop_size_S_diam = 0.2 * si.cm - get_volume_from_diam = lambda d: (4 / 3) * np.pi * (d / 2) ** 3 + get_volume_from_diam = ( + lambda d: (4 / 3) * np.pi * (d / 2) ** 3 + ) # pylint: disable=unnecessary-lambda-assignment n = 100 res = np.empty((n, 2), dtype=np.double) diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py b/tests/unit_tests/physics/test_fragmentation_functions.py index ad9ee337cb..db4d177aa9 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -1,6 +1,4 @@ # pylint: 
disable=missing-module-docstring,missing-class-docstring,missing-function-docstring -import os - import numpy as np from PySDM import Formulae From 281587d6fd0dde1f8e9af21a07cd0868e420d2cf Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Thu, 23 Mar 2023 18:29:52 -0600 Subject: [PATCH 58/86] Small modifications to tests --- tests/unit_tests/dynamics/collisions/test_efficiencies.py | 2 +- .../unit_tests/dynamics/collisions/test_fragmentations.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/unit_tests/dynamics/collisions/test_efficiencies.py b/tests/unit_tests/dynamics/collisions/test_efficiencies.py index 564d1eb5a4..8f604bff1c 100644 --- a/tests/unit_tests/dynamics/collisions/test_efficiencies.py +++ b/tests/unit_tests/dynamics/collisions/test_efficiencies.py @@ -51,4 +51,4 @@ def test_efficiency_fn_call(efficiency, backend_class=CPU): # Assert np.testing.assert_array_less([0.0 - 1e-6], eff.to_ndarray()) - np.testing.assert_array_less(eff.to_ndarray(), [1.0]) + np.testing.assert_array_less(eff.to_ndarray(), [1.0 + 1e-6]) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 636564b653..ebaa9e7c1a 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -109,7 +109,7 @@ def test_fragmentation_limiters_vmin( is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( np.asarray([True, False]) ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) # act sut(nf, frag_size, u01, is_first_in_pair) @@ -158,7 +158,7 @@ def test_fragmentation_limiters_vmax( is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( np.asarray([True, False]) ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + u01 = 
_PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) # act sut(nf, frag_size, u01, is_first_in_pair) @@ -207,7 +207,7 @@ def test_fragmentation_limiters_nfmax( is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( np.asarray([True, False]) ) - u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments) * 0.5) # act sut(nf, frag_size, u01, is_first_in_pair) @@ -262,7 +262,7 @@ def test_fragmentation_fn_distribution( builder.set_environment(Box(dv=None, dt=None)) _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) - rns = np.linspace(0, 1, n) + rns = np.linspace(1e-6, 1 - 1e-6, n) for i, rn in enumerate(rns): print("i", i) start = timer() From cab269c25fec76ba37b1c2ad8a6352346b50e044 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Mon, 27 Mar 2023 14:45:10 -0600 Subject: [PATCH 59/86] Attempting to fix GPU straub backend --- .../impl_numba/methods/collisions_methods.py | 1 - .../methods/collisions_methods.py | 77 ++++++++++--------- .../breakup_fragmentations/lowlist82.py | 6 -- .../fragmentation_function/lowlist82.py | 4 +- .../collisions/test_fragmentations.py | 6 +- 5 files changed, 46 insertions(+), 48 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 1eb80b7656..e497d8ce05 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -909,7 +909,6 @@ def ll82_fragmentation( dl, dcoal, frag_size, - v_max, x_plus_y, rand, vmin, diff --git a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py index 76c05441cb..90a40da356 100644 --- a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py +++ b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py @@ -521,7 +521,6 @@ def __init__(self): param_names=( 
"n_fragment", "frag_size", - "v_max", "x_plus_y", "vmin", "nfmax", @@ -529,7 +528,6 @@ def __init__(self): ), name_iter="i", body=""" - frag_size[i] = min(frag_size[i], v_max[i]); frag_size[i] = max(frag_size[i], vmin); if (nfmax_is_not_none) { @@ -537,7 +535,10 @@ def __init__(self): frag_size[i] = x_plus_y[i] / nfmax; } } - if (frag_size[i] == 0.0) { + else if (frag_size[i] < vmin) { + frag_size[i] = x_plus_y[i]; + } + else if (frag_size[i] == 0.0) { frag_size[i] = x_plus_y[i]; } n_fragment[i] = x_plus_y[i] / frag_size[i]; @@ -549,10 +550,8 @@ def __init__(self): param_names=("mu", "sigma", "frag_size", "rand"), name_iter="i", body=f""" - frag_size[i] = {self.formulae.fragmentation_function.frag_size.c_inline( - mu="mu", - sigma="sigma", - rand="rand[i]" + frag_size[i] = mu[i] + sigma[i] * {self.formulae.fragmentation_function.erfinv.c_inline( + X="rand[i]" )}; """.replace( "real_type", self._get_c_type() @@ -628,37 +627,52 @@ def __init__(self): name_iter="i", body=f""" {self.__straub_Nr_body} + auto mu1, sigma1 = {self.formulae.fragmentation_function.params_p1.c_inline(CW="CW[i]")}; + auto mu2, sigma2 = {self.formulae.fragmentation_function.params_p2.c_inline(CW="CW[i]")}; + auto mu3, sigma3 = {self.formulae.fragmentation_function.params_p3.c_inline(CW="CW[i]", ds="ds[i]")}; + auto M31, M32, M33, M34, d34 = {self.formulae.fragmentation_function.params_p4.c_inline( + v_max="v_max[i]", + ds="ds[i]", + mu1="mu1", + sigma1="sigma1", + mu2="mu2", + sigma2="sigma2", + mu3="mu3", + sigma3="sigma3", + Nr1="Nr1[i]", + Nr2="Nr2[i]", + Nr3="Nr3[i]", + )}; + Nr1[i] = Nr1[i] * M31; + Nr2[i] = Nr2[i] * M32; + Nr3[i] = Nr3[i] * M33; + Nr4[i] = Nr4[i] * M34; + Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i]; if (rand[i] < Nr1[i] / Nrt[i]) {{ - auto sigma1 = {self.formulae.fragmentation_function.sigma1.c_inline(CW="CW[i]")}; - frag_size[i] = {self.formulae.fragmentation_function.p1.c_inline( - sigma1="sigma1", - rand="rand[i] * Nrt[i] / Nr1[i]" + auto X = rand[i] * Nrt[i] / Nr1[i]; 
+ auto lnarg = mu1[i] + sqrt(2.0) * sigma1 * {self.formulae.fragmentation_function.erfinv.c_inline( + X="X" )}; + frag_size[i] = expf(lnarg); }} else if (rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]) {{ - frag_size[i] = {self.formulae.fragmentation_function.p2.c_inline( - CW="CW[i]", - rand="(rand[i] * Nrt[i] - Nr1[i]) / (Nr2[i] - Nr1[i])" + auto X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i], + frag_size[i] = mu2 + sqrt(2.0) * sigma2 * {self.formulae.fragmentation_function.erfinv.c_inline( + X="X" )}; }} else if (rand[i] < (Nr3[i] + Nr2[i] + Nr1[i]) / Nrt[i]) {{ - frag_size[i] = {self.formulae.fragmentation_function.p3.c_inline( - CW="CW[i]", - ds="ds[i]", - rand="(rand[i] * Nrt[i] - Nr2[i]) / (Nr3[i] - Nr2[i])" + auto X = (rand[i] * Nrt[i] - Nr1[i] - Nr2[i]) / Nr3[i]; + frag_size[i] = mu3 + sqrt(2.0) * sigma3 * {self.formulae.fragmentation_function.erfinv.c_inline( + X="X" )}; }} else {{ - frag_size[i] = {self.formulae.fragmentation_function.p4.c_inline( - CW="CW[i]", - ds="ds[i]", - v_max="v_max[i]", - Nr1="Nr1[i]", - Nr2="Nr2[i]", - Nr3="Nr3[i]" - )}; + frag_size[i] = d34; }} + + frag_size[i] = pow(frag_size[i], 3.0) * 3.141592654f / 6 """.replace( "real_type", self._get_c_type() ), @@ -910,7 +924,6 @@ def exp_fragmentation( n_fragment, scale, frag_size, - v_max, x_plus_y, rand, vmin, @@ -932,7 +945,6 @@ def exp_fragmentation( args=( n_fragment.data, frag_size.data, - v_max.data, x_plus_y.data, self._get_floating_point(vmin), self._get_floating_point(nfmax if nfmax else -1), @@ -941,7 +953,7 @@ def exp_fragmentation( ) def gauss_fragmentation( - self, *, n_fragment, mu, sigma, frag_size, v_max, x_plus_y, rand, vmin, nfmax + self, *, n_fragment, mu, sigma, frag_size, x_plus_y, rand, vmin, nfmax ): self.__gauss_fragmentation_body.launch_n( n=len(frag_size), @@ -958,7 +970,6 @@ def gauss_fragmentation( args=( n_fragment.data, frag_size.data, - v_max.data, x_plus_y.data, self._get_floating_point(vmin), self._get_floating_point(nfmax if nfmax else -1), @@ -967,7 +978,7 @@ def 
gauss_fragmentation( ) def slams_fragmentation( - self, n_fragment, frag_size, v_max, x_plus_y, probs, rand, vmin, nfmax + self, n_fragment, frag_size, x_plus_y, probs, rand, vmin, nfmax ): # pylint: disable=too-many-arguments self.__slams_fragmentation_body.launch_n( n=(len(n_fragment)), @@ -985,7 +996,6 @@ def slams_fragmentation( args=( n_fragment.data, frag_size.data, - v_max.data, x_plus_y.data, self._get_floating_point(vmin), self._get_floating_point(nfmax if nfmax else -1), @@ -999,7 +1009,6 @@ def feingold1988_fragmentation( n_fragment, scale, frag_size, - v_max, x_plus_y, rand, fragtol, @@ -1022,7 +1031,6 @@ def feingold1988_fragmentation( args=( n_fragment.data, frag_size.data, - v_max.data, x_plus_y.data, self._get_floating_point(vmin), self._get_floating_point(nfmax if nfmax else -1), @@ -1072,7 +1080,6 @@ def straub_fragmentation( args=( n_fragment.data, frag_size.data, - v_max.data, x_plus_y.data, self._get_floating_point(vmin), self._get_floating_point(nfmax if nfmax else -1), diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index c8728df1be..abf9f73131 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -11,15 +11,11 @@ def __init__(self, vmin=0.0, nfmax=None): self.nfmax = nfmax self.arrays = {} self.ll82_tmp = {} - self.max_size = None self.sum_of_volumes = None self.const = None def register(self, builder): self.particulator = builder.particulator - self.max_size = self.particulator.PairwiseStorage.empty( - self.particulator.n_sd // 2, dtype=float - ) self.sum_of_volumes = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) @@ -37,7 +33,6 @@ def register(self, builder): ) def __call__(self, nf, frag_size, u01, is_first_in_pair): - self.max_size.max(self.particulator.attributes["volume"], is_first_in_pair) self.sum_of_volumes.sum( 
self.particulator.attributes["volume"], is_first_in_pair ) @@ -97,7 +92,6 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): dl=self.arrays["dl"], dcoal=self.arrays["dcoal"], frag_size=frag_size, - v_max=self.max_size, x_plus_y=self.sum_of_volumes, rand=u01, vmin=self.vmin, diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index 1ddbd24a8d..eb3fe8bfa8 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -6,7 +6,7 @@ import numpy as np -class LowList1982Nf: # pylint: disable=too-few-public-methods +class LowList1982Nf: # pylint: disable=too-few-public-methods, too-many-locals def __init__(self, _): pass @@ -35,7 +35,7 @@ def params_f2(const, ds): return (Hf2, mu, sigma) @staticmethod - def params_f3(const, ds, dl): + def params_f3(const, ds, dl): # pylint: disable=too-many-locals dsCM = ds / const.CM dlCM = dl / const.CM # eq (3.3), (3.4) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index ebaa9e7c1a..66a277487a 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -232,15 +232,13 @@ def test_fragmentation_limiters_nfmax( ) def test_fragmentation_fn_distribution( fragmentation_fn, plot=False - ): # pylint: disable=redefined-outer-name, too-many-locals + ): # pylint: disable=redefined-outer-name, too-many-locals, unnecessary-lambda-assignment # arrange drop_size_L_diam = 0.4 * si.cm drop_size_S_diam = 0.2 * si.cm - get_volume_from_diam = ( - lambda d: (4 / 3) * np.pi * (d / 2) ** 3 - ) # pylint: disable=unnecessary-lambda-assignment + get_volume_from_diam = lambda d: (4 / 3) * np.pi * (d / 2) ** 3 n = 100 res = np.empty((n, 2), dtype=np.double) From 6bba2d26b6d2e733ba1117e71d0d04f6f13a105c Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sat, 1 
Apr 2023 20:35:14 +0200 Subject: [PATCH 60/86] precommit formatting change --- PySDM/physics/fragmentation_function/straub2010nf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index 1046140ba7..ffcb4eca0a 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -58,4 +58,4 @@ def params_p4(vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3): @staticmethod def erfinv(X): - return np.arctanh(2 * X - 1) * 2 * np.sqrt(3) / np.pi \ No newline at end of file + return np.arctanh(2 * X - 1) * 2 * np.sqrt(3) / np.pi From 1b68af909b0856c7cb64e67aecbf0cc9cc0c3515 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sat, 1 Apr 2023 20:40:44 +0200 Subject: [PATCH 61/86] reinstantiate changes in paper files --- paper/paper.bib | 2 +- paper/paper.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 70e969c4a7..925c822e76 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -314,7 +314,7 @@ @inproceedings{Bartman_et_al_2023 } @inproceedings{PyPartMC_AMS2023, - title = {PyPartMC: a Pythonic Interface to a Particle-Resolved Monte-Carlo Aerosol Simulation Framework}, + title = {PyPartMC: a {P}ythonic Interface to a Particle-Resolved {M}onte-{C}arlo Aerosol Simulation Framework}, author = {D'Aquino, Z. and Arabas, S. and Curtis, J.H. and Vaishnav, A. and Choi, J. and Riemer, N. 
and West, M.}, booktitle = {103nd American Meteorological Society Annual Meeting}, year = {2023}, diff --git a/paper/paper.md b/paper/paper.md index 73e125f380..1b771486be 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -40,7 +40,7 @@ authors: orcid: 0000-0003-2317-3310 - name: Sylwester Arabas orcid: 0000-0003-2361-0082 - affiliation: "5,3" + affiliation: "3,5" affiliations: - name: Department of Mechanical and Civil Engineering, California Institute of Technology, Pasadena, CA, United States of America index: 1 @@ -310,7 +310,7 @@ Development of ice-phase microphysics representation has been supported through grant no. DE-SC0021034 by the Atmospheric System Research Program and Atmospheric Radiation Measurement Program sponsored by the U.S. Department of Energy (DOE). EdJ's contributions were made possible by support from the Department of Energy Computational Sciences Graduate Research Fellowship. -SAr acknowledges support from the Polish National Science Centre (grant no. 2020/39/D/ST10/01220). +SAr, OB and KD acknowledge support from the Polish National Science Centre (grant no. 2020/39/D/ST10/01220). 
# References From fbafd6bcf4e8da3e74147c37e5a710f1e687de05 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sun, 2 Apr 2023 20:23:57 +0200 Subject: [PATCH 62/86] adapting products test_impl to work with the new NumberSizeSpectrum product --- tests/unit_tests/products/test_impl.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit_tests/products/test_impl.py b/tests/unit_tests/products/test_impl.py index bfa463075e..8e56204426 100644 --- a/tests/unit_tests/products/test_impl.py +++ b/tests/unit_tests/products/test_impl.py @@ -19,6 +19,7 @@ FrozenParticleConcentration, FrozenParticleSpecificConcentration, GaseousMoleFraction, + NumberSizeSpectrum, ParticleSizeSpectrumPerMass, ParticleSizeSpectrumPerVolume, ParticleVolumeVersusRadiusLogarithmSpectrum, @@ -45,6 +46,7 @@ "count_unactivated": True, "count_activated": True, }, + NumberSizeSpectrum: {"radius_bins_edges": (0, np.inf)}, } From 5d1f0c80d9d604390624e0ee8b417fcecf90c1bb Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Mon, 3 Apr 2023 00:50:37 +0200 Subject: [PATCH 63/86] erfinv cleanup (introducing Vedder 1987 approximation to Trivia) --- .../impl_numba/methods/collisions_methods.py | 7 ++- .../methods/collisions_methods.py | 4 +- .../test_helpers/cpp2python.py | 3 +- PySDM/physics/constants.py | 1 + PySDM/physics/constants_defaults.py | 3 ++ .../fragmentation_function/gaussian.py | 7 --- .../fragmentation_function/lowlist82.py | 10 ----- .../fragmentation_function/straub2010nf.py | 4 -- PySDM/physics/trivia.py | 18 ++++++++ .../physics/test_fragmentation_functions.py | 12 ------ tests/unit_tests/physics/test_trivia.py | 43 +++++++++++++++++++ 11 files changed, 71 insertions(+), 41 deletions(-) create mode 100644 tests/unit_tests/physics/test_trivia.py diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 7ceebb0b95..f6b76d2a44 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ 
b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -363,7 +363,7 @@ def __ll82_coalescence_check_body(*, Ec, dl): straub_paramsp2 = self.formulae.fragmentation_function.params_p2 straub_paramsp3 = self.formulae.fragmentation_function.params_p3 straub_paramsp4 = self.formulae.fragmentation_function.params_p4 - straub_erfinv = self.formulae.fragmentation_function.erfinv + straub_erfinv = self.formulae.trivia.erfinv_approx @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __straub_fragmentation_body( @@ -503,11 +503,10 @@ def __ll82_fragmentation_body( frag_size[i] * 0.01 ) # diameter in cm; convert to m frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 - # print(np.sum(Rf), np.sum(Rs), np.sum(Rd)) self.__ll82_fragmentation_body = __ll82_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Gaussian": - gaussian_erfinv = self.formulae.fragmentation_function.erfinv + erfinv_approx = self.formulae.trivia.erfinv_approx @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __gauss_fragmentation_body( @@ -516,7 +515,7 @@ def __gauss_fragmentation_body( for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): - frag_size[i] = mu + sigma * gaussian_erfinv(rand[i]) + frag_size[i] = mu + sigma * erfinv_approx(rand[i]) self.__gauss_fragmentation_body = __gauss_fragmentation_body elif self.formulae.fragmentation_function.__name__ == "Feingold1988Frag": diff --git a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py index 90a40da356..191164b612 100644 --- a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py +++ b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py @@ -550,8 +550,8 @@ def __init__(self): param_names=("mu", "sigma", "frag_size", "rand"), name_iter="i", body=f""" - frag_size[i] = mu[i] + sigma[i] * {self.formulae.fragmentation_function.erfinv.c_inline( - X="rand[i]" + frag_size[i] = mu + 
sigma * {self.formulae.trivia.erfinv_approx.c_inline( + c="rand[i]" )}; """.replace( "real_type", self._get_c_type() diff --git a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py index 724ca3fc84..2b64a39921 100644 --- a/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py +++ b/PySDM/backends/impl_thrust_rtc/test_helpers/cpp2python.py @@ -189,9 +189,8 @@ def to_numba(name, args, iter_var, body): result = ( f""" def make(self): - from math import erf import numpy as np - from numpy import floor, ceil, exp, log, power, sqrt, arctanh + from numpy import floor, ceil, exp, log, power, sqrt, arctanh, sinh, arcsinh import numba @numba.njit(parallel=False, {JIT_OPTS}) diff --git a/PySDM/physics/constants.py b/PySDM/physics/constants.py index 05b4282355..d6de5b23fe 100644 --- a/PySDM/physics/constants.py +++ b/PySDM/physics/constants.py @@ -32,6 +32,7 @@ def convert_to(value, unit): FOUR = 4 ONE_THIRD = 1 / 3 TWO_THIRDS = 2 / 3 +ONE_AND_A_HALF = 3 / 2 NaN = np.nan default_random_seed = ( diff --git a/PySDM/physics/constants_defaults.py b/PySDM/physics/constants_defaults.py index 8222364629..4f744369b5 100644 --- a/PySDM/physics/constants_defaults.py +++ b/PySDM/physics/constants_defaults.py @@ -166,3 +166,6 @@ CM = 1 * si.cm UM = 1 * si.um + +VEDDER_1987_b = 89 / 880 +VEDDER_1987_A = 993 / 880 / 3 / VEDDER_1987_b diff --git a/PySDM/physics/fragmentation_function/gaussian.py b/PySDM/physics/fragmentation_function/gaussian.py index 7ebe63d761..cc698c3ddf 100644 --- a/PySDM/physics/fragmentation_function/gaussian.py +++ b/PySDM/physics/fragmentation_function/gaussian.py @@ -1,15 +1,8 @@ """ Gaussian PDF -CDF = 1/2(1 + erf((x-mu)/sigma/sqrt(2))); -Approx erf(x/sqrt(2)) ~ tanh(x*pi/2/sqrt(3)) """ -import numpy as np class Gaussian: # pylint: disable=too-few-public-methods def __init__(self, _): pass - - @staticmethod - def frag_size(const, mu, sigma, rand): - return mu + sigma * 2 * np.sqrt(3) / const.PI 
* np.arctanh(2 * rand - 1) diff --git a/PySDM/physics/fragmentation_function/lowlist82.py b/PySDM/physics/fragmentation_function/lowlist82.py index eb3fe8bfa8..4eab421a5f 100644 --- a/PySDM/physics/fragmentation_function/lowlist82.py +++ b/PySDM/physics/fragmentation_function/lowlist82.py @@ -91,16 +91,6 @@ def params_f3(const, ds, dl): # pylint: disable=too-many-locals return (Hf3, muf3, sigmaf3) - @staticmethod - def erfinv(X): - a = 8 * (np.pi - 3) / (3 * np.pi * (4 - np.pi)) - arg = (2 / np.pi / a) + np.log(1 - X**2) / 2 - arg = arg * arg - arg = arg - np.log(1 - X**2) / a - arg = np.sqrt(arg) - arg = arg - (2 / np.pi / a + np.log(1 - X**2) / 2) - return np.sqrt(arg) - @staticmethod def params_s1(const, dl, ds, dcoal): dsCM = ds / const.CM diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index ffcb4eca0a..207672802a 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -55,7 +55,3 @@ def params_p4(vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3): else: d34 = np.exp(np.log(M34) / 3) return (M31, M32, M33, M34, d34) - - @staticmethod - def erfinv(X): - return np.arctanh(2 * X - 1) * 2 * np.sqrt(3) / np.pi diff --git a/PySDM/physics/trivia.py b/PySDM/physics/trivia.py index 8f18f5c77d..8dba50e0d3 100644 --- a/PySDM/physics/trivia.py +++ b/PySDM/physics/trivia.py @@ -1,5 +1,7 @@ """ Various (hopefully) undebatable formulae + +`erfinv` approximation based on eqs. 
11-12 from Vedder 1987, https://doi.org/10.1119/1.15018 """ import numpy as np @@ -79,3 +81,19 @@ def unfrozen_and_saturated(_, volume, relative_humidity): @staticmethod def frozen_and_above_freezing_point(const, volume, temperature): return volume < 0 and temperature > const.T0 + + @staticmethod + def erfinv_approx(const, c): + return ( + 2 + * np.sqrt(const.VEDDER_1987_A) + * np.sinh( + np.arcsinh( + np.arctanh(c) + / 2 + / const.VEDDER_1987_b + / np.power(const.VEDDER_1987_A, const.ONE_AND_A_HALF) + ) + / 3 + ) + ) diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py b/tests/unit_tests/physics/test_fragmentation_functions.py index db4d177aa9..2e55e46a76 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -164,15 +164,3 @@ def test_ll82_pd2(): # assert np.testing.assert_array_almost_equal(params, [0.0, -4.967578, -4.967578]) - - @staticmethod - def test_erfinv(): - # arrange - formulae = Formulae(fragmentation_function="LowList1982Nf") - - # act - params = formulae.fragmentation_function.erfinv(0.25) - - # assert - diff = np.abs(params - 0.2253) - np.testing.assert_array_less(diff, 1e-3) diff --git a/tests/unit_tests/physics/test_trivia.py b/tests/unit_tests/physics/test_trivia.py new file mode 100644 index 0000000000..21d86bf4e2 --- /dev/null +++ b/tests/unit_tests/physics/test_trivia.py @@ -0,0 +1,43 @@ +# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring +import numpy as np +import pytest +from scipy.special import erfinv + +from PySDM import Formulae + + +class TestTrivia: + @staticmethod + @pytest.mark.parametrize( + "x", (-100, -10, -1, -0.1, -0.01, 0, 0.01, 0.1, 1, 10, 100) + ) + def test_erfinv_approx_reltol(x): + # arrange + trivia = Formulae().trivia + expected = erfinv(x) + + # act + actual = trivia.erfinv_approx(x) + + # assert + if expected == 0: + assert actual == 0 + elif np.isnan(expected): + assert 
np.isnan(actual) + elif np.isinf(expected): + assert np.isinf(actual) + assert np.sign(actual) == np.sign(expected) + else: + assert np.abs(np.log(actual / expected)) < 1e-4 + + @staticmethod + def test_erfinv_approx_abstol(): + # arrange + formulae = Formulae() + + # act + params = formulae.trivia.erfinv_approx(0.25) + + # assert + diff = np.abs(params - 0.2253) + np.testing.assert_array_less(diff, 1e-3) From 3b4bf673efc772d78a0dcf3ec2977b8f6f82a65f Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Mon, 3 Apr 2023 01:04:10 +0200 Subject: [PATCH 64/86] make erfinv test work with JIT disabled --- tests/unit_tests/physics/test_trivia.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/unit_tests/physics/test_trivia.py b/tests/unit_tests/physics/test_trivia.py index 21d86bf4e2..cf8dff0f1a 100644 --- a/tests/unit_tests/physics/test_trivia.py +++ b/tests/unit_tests/physics/test_trivia.py @@ -1,16 +1,14 @@ # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import numpy as np import pytest -from scipy.special import erfinv +from scipy.special import erfinv # pylint: disable=no-name-in-module from PySDM import Formulae class TestTrivia: @staticmethod - @pytest.mark.parametrize( - "x", (-100, -10, -1, -0.1, -0.01, 0, 0.01, 0.1, 1, 10, 100) - ) + @pytest.mark.parametrize("x", (-0.9, -0.1, -0.01, 0, 0.01, 0.1, 0.9)) def test_erfinv_approx_reltol(x): # arrange trivia = Formulae().trivia @@ -28,7 +26,7 @@ def test_erfinv_approx_reltol(x): assert np.isinf(actual) assert np.sign(actual) == np.sign(expected) else: - assert np.abs(np.log(actual / expected)) < 1e-4 + assert np.abs(np.log(actual / expected)) < 1e-3 @staticmethod def test_erfinv_approx_abstol(): From 9f51534be1e5e772f567ac5e2bf4f8b28f98a610 Mon Sep 17 00:00:00 2001 From: Emily de Jong Date: Fri, 7 Apr 2023 13:36:25 -0700 Subject: [PATCH 65/86] Rewrite straub2010 as one-liners --- .../impl_numba/methods/collisions_methods.py | 60 
+++++++++++++------ .../methods/collisions_methods.py | 52 ++++++++-------- .../breakup_fragmentations/straub2010.py | 3 +- .../fragmentation_function/straub2010nf.py | 42 +++++-------- 4 files changed, 87 insertions(+), 70 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index f6b76d2a44..7123658f57 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -256,6 +256,22 @@ def straub_Nr( # pylint: disable=too-many-arguments,unused-argument Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i] +@numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) +def straub_mass_remainder( # pylint: disable=too-many-arguments,unused-argument + i, vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, d34, Nr1, Nr2, Nr3, Nr4 +): + # pylint: disable=too-many-arguments, too-many-locals + Nr1[i] = Nr1[i] * np.exp(3 * mu1 + 9 * np.power(sigma1, 2) / 2) + Nr2[i] = Nr2[i] * (mu2**3 + 3 * mu2 * sigma2**2) + Nr3[i] = Nr3[i] * (mu3**3 + 3 * mu3 * sigma3**2) + Nr4[i] = vl[i] * 6 / np.pi + ds[i] ** 3 - Nr1[i] - Nr2[i] - Nr3[i] + if Nr4[i] <= 0.0: + d34[i] = 0 + Nr4[i] = 0 + else: + d34[i] = np.exp(np.log(Nr4[i]) / 3) + + @numba.njit(**{**conf.JIT_FLAGS, **{"parallel": False}}) def ll82_Nr( # pylint: disable=too-many-arguments,unused-argument i, @@ -359,40 +375,44 @@ def __ll82_coalescence_check_body(*, Ec, dl): self.__ll82_coalescence_check_body = __ll82_coalescence_check_body if self.formulae.fragmentation_function.__name__ == "Straub2010Nf": - straub_paramsp1 = self.formulae.fragmentation_function.params_p1 - straub_paramsp2 = self.formulae.fragmentation_function.params_p2 - straub_paramsp3 = self.formulae.fragmentation_function.params_p3 - straub_paramsp4 = self.formulae.fragmentation_function.params_p4 + straub_sigma1 = self.formulae.fragmentation_function.params_sigma1 + straub_mu1 = self.formulae.fragmentation_function.params_mu1 + 
straub_sigma2 = self.formulae.fragmentation_function.params_sigma2 + straub_mu2 = self.formulae.fragmentation_function.params_mu2 + straub_sigma3 = self.formulae.fragmentation_function.params_sigma3 + straub_mu3 = self.formulae.fragmentation_function.params_mu3 straub_erfinv = self.formulae.trivia.erfinv_approx @numba.njit(**{**conf.JIT_FLAGS, "fastmath": self.formulae.fastmath}) def __straub_fragmentation_body( - *, CW, gam, ds, v_max, frag_size, rand, Nr1, Nr2, Nr3, Nr4, Nrt + *, CW, gam, ds, v_max, frag_size, rand, Nr1, Nr2, Nr3, Nr4, Nrt, d34 ): # pylint: disable=too-many-arguments,too-many-locals for i in numba.prange( # pylint: disable=not-an-iterable len(frag_size) ): straub_Nr(i, Nr1, Nr2, Nr3, Nr4, Nrt, CW, gam) - (mu1, sigma1) = straub_paramsp1(CW[i]) - (mu2, sigma2) = straub_paramsp2(CW[i]) - (mu3, sigma3) = straub_paramsp3(CW[i], ds[i]) - (M31, M32, M33, M34, d34) = straub_paramsp4( - v_max[i], - ds[i], + sigma1 = straub_sigma1(CW[i]) + mu1 = straub_mu1(sigma1) + sigma2 = straub_sigma2(CW[i]) + mu2 = straub_mu2(ds[i]) + sigma3 = straub_sigma3(CW[i]) + mu3 = straub_mu3(ds[i]) + straub_mass_remainder( + i, + v_max, + ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, - Nr1[i], - Nr2[i], - Nr3[i], + d34, + Nr1, + Nr2, + Nr3, + Nr4, ) - Nr1[i] = Nr1[i] * M31 - Nr2[i] = Nr2[i] * M32 - Nr3[i] = Nr3[i] * M33 - Nr4[i] = Nr4[i] * M34 Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i] if rand[i] < Nr1[i] / Nrt[i]: @@ -406,7 +426,7 @@ def __straub_fragmentation_body( X = (rand[i] * Nrt[i] - Nr1[i] - Nr2[i]) / Nr3[i] frag_size[i] = mu3 + np.sqrt(2) * sigma3 * straub_erfinv(X) else: - frag_size[i] = d34 + frag_size[i] = d34[i] frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 @@ -869,6 +889,7 @@ def straub_fragmentation( Nr3, Nr4, Nrt, + d34, ): self.__straub_fragmentation_body( CW=CW.data, @@ -882,6 +903,7 @@ def straub_fragmentation( Nr3=Nr3.data, Nr4=Nr4.data, Nrt=Nrt.data, + d34=d34.data, ) self.__fragmentation_limiters( n_fragment=n_fragment.data, diff --git 
a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py index d3ccdf19d8..ec8fd3e817 100644 --- a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py +++ b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py @@ -609,6 +609,20 @@ def __init__(self): Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i]; """ + self.__straub_mass_remainder = """ + Nr1[i] = Nr1[i] * exp(3 * mu1 + 9 * pow(sigma1, 2.0) / 2); + Nr2[i] = Nr2[i] * (pow(mu2, 3.0) + 3 * mu2 * pow(sigma2, 2.0)); + Nr3[i] = Nr3[i] * (pow(mu3, 3.0) + 3 * mu3 * pow(sigma3, 2.0)); + Nr4[i] = v_max[i] * 6.0 / 3.141592654 + pow(ds[i], 3.0) - Nr1[i] - Nr2[i] - Nr3[i]; + if Nr4[i] <= 0.0 { + d34[i] = 0; + Nr4[i] = 0; + } + else { + d34[i] = exp(log(Nr4[i]) / 3); + } + """ + if self.formulae.fragmentation_function.__name__ == "Straub2010Nf": self.__straub_fragmentation_body = trtc.For( param_names=( @@ -623,38 +637,26 @@ def __init__(self): "Nr3", "Nr4", "Nrt", + "d34", ), name_iter="i", body=f""" {self.__straub_Nr_body} - auto mu1, sigma1 = {self.formulae.fragmentation_function.params_p1.c_inline(CW="CW[i]")}; - auto mu2, sigma2 = {self.formulae.fragmentation_function.params_p2.c_inline(CW="CW[i]")}; - auto mu3, sigma3 = {self.formulae.fragmentation_function.params_p3.c_inline(CW="CW[i]", ds="ds[i]")}; - auto M31, M32, M33, M34, d34 = {self.formulae.fragmentation_function.params_p4.c_inline( - vl="v_max[i]", - ds="ds[i]", - mu1="mu1", - sigma1="sigma1", - mu2="mu2", - sigma2="sigma2", - mu3="mu3", - sigma3="sigma3", - N1="Nr1[i]", - N2="Nr2[i]", - N3="Nr3[i]", - )}; - Nr1[i] = Nr1[i] * M31; - Nr2[i] = Nr2[i] * M32; - Nr3[i] = Nr3[i] * M33; - Nr4[i] = Nr4[i] * M34; - Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i]; + auto sigma1 = {self.formulae.fragmentation_function.params_sigma1.c_inline(CW="CW[i]")}; + auto mu1 = {self.formulae.fragmentation_function.params_mu1.c_inline(sigma1="sigma1")}; + auto sigma2 = 
{self.formulae.fragmentation_function.params_sigma2.c_inline(CW="CW[i]")}; + auto mu2 = {self.formulae.fragmentation_function.params_mu2.c_inline(ds="ds[i]")}; + auto sigma3 = {self.formulae.fragmentation_function.params_sigma3.c_inline(CW="CW[i]")}; + auto mu3 = {self.formulae.fragmentation_function.params_mu3.c_inline(ds="ds[i]")}; + {self.__straub_mass_remainder} + Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i] if (rand[i] < Nr1[i] / Nrt[i]) {{ auto X = rand[i] * Nrt[i] / Nr1[i]; auto lnarg = mu1[i] + sqrt(2.0) * sigma1 * {self.formulae.trivia.erfinv_approx.c_inline( c="X" )}; - frag_size[i] = expf(lnarg); + frag_size[i] = exp(lnarg); }} else if (rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]) {{ auto X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i], @@ -669,10 +671,10 @@ def __init__(self): )}; }} else {{ - frag_size[i] = d34; + frag_size[i] = d34[i]; }} - frag_size[i] = pow(frag_size[i], 3.0) * 3.141592654f / 6 + frag_size[i] = pow(frag_size[i], 3.0) * 3.141592654 / 6.0 """.replace( "real_type", self._get_c_type() ), @@ -1057,6 +1059,7 @@ def straub_fragmentation( Nr3, Nr4, Nrt, + d34, ): self.__straub_fragmentation_body.launch_n( n=len(frag_size), @@ -1072,6 +1075,7 @@ def straub_fragmentation( Nr3.data, Nr4.data, Nrt.data, + d34.data, ), ) diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py b/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py index 2d067911e7..202e4f2877 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/straub2010.py @@ -32,7 +32,7 @@ def register(self, builder): self.arrays[key] = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) - for key in ("Nr1", "Nr2", "Nr3", "Nr4", "Nrt"): + for key in ("Nr1", "Nr2", "Nr3", "Nr4", "Nrt", "d34"): self.straub_tmp[key] = self.particulator.PairwiseStorage.empty( self.particulator.n_sd // 2, dtype=float ) @@ -93,4 +93,5 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): 
Nr3=self.straub_tmp["Nr3"], Nr4=self.straub_tmp["Nr4"], Nrt=self.straub_tmp["Nrt"], + d34=self.straub_tmp["d34"], ) diff --git a/PySDM/physics/fragmentation_function/straub2010nf.py b/PySDM/physics/fragmentation_function/straub2010nf.py index e6e71f5ae1..db8727ac51 100644 --- a/PySDM/physics/fragmentation_function/straub2010nf.py +++ b/PySDM/physics/fragmentation_function/straub2010nf.py @@ -11,8 +11,8 @@ def __init__(self, _): pass @staticmethod - def params_p1(const, CW): - sigma1 = np.sqrt( + def params_sigma1(const, CW): + return np.sqrt( np.log( CW / 64 @@ -24,33 +24,23 @@ def params_p1(const, CW): + 1 ) ) - mu1 = np.log(const.STRAUB_E_D1) - np.power(sigma1, const.TWO) / 2 - return (mu1, sigma1) @staticmethod - def params_p2(const, CW): - deltaD2 = 7 * (CW - 21) * const.CM / 1000 - deltaD2 = max(0.0, deltaD2) - sigma2 = deltaD2 / np.sqrt(12) - return (const.STRAUB_MU2, sigma2) + def params_mu1(const, sigma1): + return np.log(const.STRAUB_E_D1) - np.power(sigma1, const.TWO) / 2 @staticmethod - def params_p3(const, CW, ds): - mu3 = 0.9 * ds - deltaD3 = (1 + 0.76 * np.sqrt(CW)) * const.CM / 100 - sigma3 = deltaD3 / np.sqrt(12) - return (mu3, sigma3) + def params_sigma2(const, CW): + return max(0.0, 7 * (CW - 21) * const.CM / 1000) / np.sqrt(12) @staticmethod - def params_p4(vl, ds, mu1, sigma1, mu2, sigma2, mu3, sigma3, N1, N2, N3): - # pylint: disable=too-many-arguments, too-many-locals - M31 = N1 * np.exp(3 * mu1 + 9 * np.power(sigma1, 2) / 2) - M32 = N2 * (mu2**3 + 3 * mu2 * sigma2**2) - M33 = N3 * (mu3**3 + 3 * mu3 * sigma3**2) - M34 = vl * 6 / np.pi + ds**3 - M31 - M32 - M33 - if M34 <= 0.0: - d34 = 0 - M34 = 0 - else: - d34 = np.exp(np.log(M34) / 3) - return (M31, M32, M33, M34, d34) + def params_mu2(const, ds): # pylint: disable=unused-argument + return const.STRAUB_MU2 + + @staticmethod + def params_sigma3(const, CW): + return (1 + 0.76 * np.sqrt(CW)) * const.CM / 100 / np.sqrt(12) + + @staticmethod + def params_mu3(ds): + return 0.9 * ds From 
a35910696b8307919b224d465f9fa32fa9f5a799 Mon Sep 17 00:00:00 2001 From: Emily de Jong Date: Fri, 7 Apr 2023 13:45:08 -0700 Subject: [PATCH 66/86] Fix physics unit test --- .../physics/test_fragmentation_functions.py | 61 +++++++++++-------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/tests/unit_tests/physics/test_fragmentation_functions.py b/tests/unit_tests/physics/test_fragmentation_functions.py index 2e55e46a76..6fb1887f9a 100644 --- a/tests/unit_tests/physics/test_fragmentation_functions.py +++ b/tests/unit_tests/physics/test_fragmentation_functions.py @@ -7,63 +7,70 @@ class TestFragmentationFunctions: # pylint:disable=too-few-public-methods @staticmethod - def test_straub_p1(): + def test_straub_sigma1(): # arrange formulae = Formulae(fragmentation_function="Straub2010Nf") # act - params = formulae.fragmentation_function.params_p1(CW=30.0) + params = formulae.fragmentation_function.params_sigma1(CW=30.0) # assert - np.testing.assert_array_almost_equal(params, [-7.933269, 0.467381]) + np.testing.assert_array_almost_equal(params, [0.467381]) @staticmethod - def test_straub_p2(): + def test_straub_mu1(): # arrange formulae = Formulae(fragmentation_function="Straub2010Nf") # act - params = formulae.fragmentation_function.params_p2(CW=30.0) + params = formulae.fragmentation_function.params_mu1(sigma1=0.467381) # assert - np.testing.assert_array_almost_equal(params, [0.00095, 0.000182]) + np.testing.assert_array_almost_equal(params, [-7.933269]) @staticmethod - def test_straub_p3(): + def test_straub_sigma2(): # arrange formulae = Formulae(fragmentation_function="Straub2010Nf") # act - params = formulae.fragmentation_function.params_p3(CW=30.0, ds=0.18 * si.cm) + params = formulae.fragmentation_function.params_sigma2(CW=30.0) # assert - np.testing.assert_array_almost_equal(params, [0.00162, 0.000149]) + np.testing.assert_array_almost_equal(params, [0.000182]) @staticmethod - def test_straub_p4(): + def test_straub_mu2(): # arrange formulae = 
Formulae(fragmentation_function="Straub2010Nf") # act - params = formulae.fragmentation_function.params_p4( - vl=(0.36 * si.cm) ** 3 * np.pi / 6, - ds=0.18 * si.cm, - mu1=-7.933269, - sigma1=0.467381, - mu2=0.00095, - sigma2=0.000182, - mu3=0.00162, - sigma3=0.000149, - N1=2.0, - N2=1.0, - N3=1.0, - ) + params = formulae.fragmentation_function.params_mu2(ds=0.0) # assert - np.testing.assert_array_almost_equal( - params, - [2.465004e-10, 9.517784e-10, 4.359425e-09, 4.693030e-08, 3.607041e-03], - ) + np.testing.assert_array_almost_equal(params, [0.00095]) + + @staticmethod + def test_straub_sigma3(): + # arrange + formulae = Formulae(fragmentation_function="Straub2010Nf") + + # act + params = formulae.fragmentation_function.params_sigma3(CW=30.0) + + # assert + np.testing.assert_array_almost_equal(params, [0.000149]) + + @staticmethod + def test_straub_mu3(): + # arrange + formulae = Formulae(fragmentation_function="Straub2010Nf") + + # act + params = formulae.fragmentation_function.params_mu3(ds=0.18 * si.cm) + + # assert + np.testing.assert_array_almost_equal(params, [0.00162]) @staticmethod def test_ll82_pf1(): From 3d483347ba4aa31f97aa2937da22f72768b2773e Mon Sep 17 00:00:00 2001 From: Emily de Jong Date: Fri, 7 Apr 2023 13:45:52 -0700 Subject: [PATCH 67/86] Remove unused import --- tests/unit_tests/dynamics/collisions/test_fragmentations.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 24846fde18..ad346f40be 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -10,7 +10,6 @@ from PySDM.dynamics.collisions.breakup_fragmentations import ( SLAMS, AlwaysN, - ConstantSize, ExponFrag, Feingold1988Frag, Gaussian, From 6370177f3749bdf742a7a9b99cd458d1d070bb9c Mon Sep 17 00:00:00 2001 From: Emily de Jong Date: Fri, 7 Apr 2023 16:25:28 -0700 Subject: 
[PATCH 68/86] Add new test on efficiencies --- .../coalescence_efficiencies/straub2010.py | 7 +- .../dynamics/collisions/test_efficiencies.py | 64 +++++++++++++++++++ 2 files changed, 67 insertions(+), 4 deletions(-) diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py b/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py index 71cbede8f4..a737e3057d 100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py @@ -26,6 +26,7 @@ def register(self, builder): def __call__(self, output, is_first_in_pair): self.arrays["tmp"].sum(self.particulator.attributes["volume"], is_first_in_pair) self.arrays["Sc"].fill(self.arrays["tmp"]) + self.arrays["Sc"] *= 6 / self.const.PI self.arrays["tmp"] *= 2 self.arrays["tmp2"].distance( @@ -37,12 +38,10 @@ def __call__(self, output, is_first_in_pair): ) self.arrays["We"].divide_if_not_zero(self.arrays["tmp"]) self.arrays["We"] *= self.arrays["tmp2"] - self.arrays["We"] *= self.const.rho_w / 2 + self.arrays["We"] *= self.const.rho_w self.arrays["Sc"] **= 2 / 3 - self.arrays["Sc"] *= ( - self.const.PI * self.const.sgm_w * (6 / self.const.PI) ** (2 / 3) - ) + self.arrays["Sc"] *= self.const.PI * self.const.sgm_w self.arrays["We"].divide_if_not_zero(self.arrays["Sc"]) self.arrays["We"] *= -1.15 diff --git a/tests/unit_tests/dynamics/collisions/test_efficiencies.py b/tests/unit_tests/dynamics/collisions/test_efficiencies.py index 8f604bff1c..00be2a3429 100644 --- a/tests/unit_tests/dynamics/collisions/test_efficiencies.py +++ b/tests/unit_tests/dynamics/collisions/test_efficiencies.py @@ -1,4 +1,5 @@ # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring +import matplotlib.pyplot as plt import numpy as np import pytest @@ -52,3 +53,66 @@ def test_efficiency_fn_call(efficiency, backend_class=CPU): # Assert np.testing.assert_array_less([0.0 - 1e-6], eff.to_ndarray()) 
np.testing.assert_array_less(eff.to_ndarray(), [1.0 + 1e-6]) + + @staticmethod + @pytest.mark.parametrize( + "efficiency", + [ + Straub2010Ec(), + ], + ) + def test_efficiency_dist(efficiency, backend_class=CPU, plot=False): + # arrange + n_per = 20 + + drop_size_L_diam = np.linspace(0.01, 0.5, n_per) * si.cm + drop_size_S_diam = np.linspace(0.01, 0.2, n_per) * si.cm + + get_volume_from_diam = lambda d: (4 / 3) * np.pi * (d / 2) ** 3 + + res = np.ones((n_per, n_per), dtype=np.double) * -1.0 + + for i in range(n_per): + for j in range(n_per): + dl = drop_size_L_diam[i] + ds = drop_size_S_diam[j] + if dl >= ds: + volume = np.asarray( + [ + get_volume_from_diam(ds), + get_volume_from_diam(dl), + ] + ) + builder = Builder(volume.size, backend_class()) + sut = efficiency + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build( + attributes={"volume": volume, "n": np.ones_like(volume)} + ) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + eff = _PairwiseStorage.from_ndarray(np.asarray([-1.0])) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = ( + builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + ) + + # act + sut(eff, is_first_in_pair) + res[i, j] = eff.data + + # Assert + np.testing.assert_array_less([0.0 - 1e-6], eff.to_ndarray()) + np.testing.assert_array_less(eff.to_ndarray(), [1.0 + 1e-6]) + + (dl, ds) = np.meshgrid(drop_size_L_diam, drop_size_S_diam) + levels = np.linspace(0.0, 1.0, 11) + cbar = plt.contourf(dl, ds, res.T, levels=levels, cmap="jet") + plt.colorbar(cbar) + + if plot: + plt.show() From 5ac513f1f11af10e815e4415073b91cdadf608e4 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Sat, 8 Apr 2023 21:34:04 -0700 Subject: [PATCH 69/86] Make pylint happy --- tests/unit_tests/dynamics/collisions/test_efficiencies.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/tests/unit_tests/dynamics/collisions/test_efficiencies.py b/tests/unit_tests/dynamics/collisions/test_efficiencies.py index 00be2a3429..c6e9e39fab 100644 --- a/tests/unit_tests/dynamics/collisions/test_efficiencies.py +++ b/tests/unit_tests/dynamics/collisions/test_efficiencies.py @@ -62,6 +62,7 @@ def test_efficiency_fn_call(efficiency, backend_class=CPU): ], ) def test_efficiency_dist(efficiency, backend_class=CPU, plot=False): + # pylint: disable=redefined-outer-name, too-many-locals, unnecessary-lambda-assignment # arrange n_per = 20 From d79541bec42ab6b89cf8aeb530d7018c641ec36f Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sun, 9 Apr 2023 08:45:56 +0200 Subject: [PATCH 70/86] remove getitem from float reference --- PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py index ec8fd3e817..d652626888 100644 --- a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py +++ b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py @@ -653,7 +653,7 @@ def __init__(self): if (rand[i] < Nr1[i] / Nrt[i]) {{ auto X = rand[i] * Nrt[i] / Nr1[i]; - auto lnarg = mu1[i] + sqrt(2.0) * sigma1 * {self.formulae.trivia.erfinv_approx.c_inline( + auto lnarg = mu1 + sqrt(2.0) * sigma1 * {self.formulae.trivia.erfinv_approx.c_inline( c="X" )}; frag_size[i] = exp(lnarg); From 8c973f670dcd3d5dbafa1bde94731d346968fe56 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sun, 9 Apr 2023 09:33:44 +0200 Subject: [PATCH 71/86] speeding up lowlist efficiencies by replacing [:] with fill() --- .../collisions/coalescence_efficiencies/lowlist1982.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py index 83d0aaa274..968431324e 
100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/lowlist1982.py @@ -59,7 +59,7 @@ def __call__(self, output, is_first_in_pair): self.arrays["tmp"] **= 2 self.arrays["St"] += self.arrays["tmp"] self.arrays["St"] *= self.const.PI * self.const.sgm_w - self.arrays["dS"][:] = self.arrays["St"][:] + self.arrays["dS"].fill(self.arrays["St"]) self.arrays["dS"] -= self.arrays["Sc"] self.arrays["tmp"].sum(self.particulator.attributes["volume"], is_first_in_pair) @@ -74,18 +74,18 @@ def __call__(self, output, is_first_in_pair): self.arrays["CKE"] *= self.arrays["tmp2"] self.arrays["CKE"] *= self.const.rho_w / 2 - self.arrays["Et"][:] = self.arrays["CKE"][:] + self.arrays["Et"].fill(self.arrays["CKE"]) self.arrays["Et"] += self.arrays["dS"] a = 0.778 b = 2.61e6 / si.J**2 * si.m**2 - self.arrays["tmp2"][:] = self.arrays["Et"] + self.arrays["tmp2"].fill(self.arrays["Et"]) self.arrays["tmp2"] **= 2 self.arrays["tmp2"] *= -1.0 * b * self.const.sgm_w self.arrays["tmp2"] /= self.arrays["Sc"] - output[:] = self.arrays["ds"][:] + output.fill(self.arrays["ds"]) output /= self.arrays["dl"] output += 1.0 output **= -2.0 From ee4bf4be54167a113f40cd75820b618415f18706 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sun, 9 Apr 2023 09:35:31 +0200 Subject: [PATCH 72/86] speeding up lowlist fragmentation func. 
by replacing [:] with fill() --- PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py index abf9f73131..70971723c5 100644 --- a/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py +++ b/PySDM/dynamics/collisions/breakup_fragmentations/lowlist82.py @@ -74,8 +74,8 @@ def __call__(self, nf, frag_size, u01, is_first_in_pair): self.arrays["CKE"] *= self.arrays["tmp2"] self.arrays["CKE"] *= self.const.rho_w / 2 - self.arrays["We"][:] = self.arrays["CKE"][:] # TODO #976 - self.arrays["W2"][:] = self.arrays["CKE"][:] + self.arrays["We"].fill(self.arrays["CKE"]) + self.arrays["W2"].fill(self.arrays["CKE"]) self.arrays["We"].divide_if_not_zero(self.arrays["Sc"]) self.arrays["W2"].divide_if_not_zero(self.arrays["St"]) From d91836b53f13c55d289baabebee06057308b3afd Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Sun, 9 Apr 2023 20:08:12 +0200 Subject: [PATCH 73/86] fix C syntax --- PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py index d652626888..db1ee4b54a 100644 --- a/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py +++ b/PySDM/backends/impl_thrust_rtc/methods/collisions_methods.py @@ -659,7 +659,7 @@ def __init__(self): frag_size[i] = exp(lnarg); }} else if (rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]) {{ - auto X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i], + auto X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i]; frag_size[i] = mu2 + sqrt(2.0) * sigma2 * {self.formulae.trivia.erfinv_approx.c_inline( c="X" )}; From 3ebf0ffc128a70fafe9b576abdfb24b02195ce97 Mon Sep 17 00:00:00 2001 From: Emily de Jong Date: Mon, 10 Apr 2023 16:31:24 -0700 Subject: [PATCH 74/86] Fix 
fig5 dJ smoke test --- .../impl_numba/methods/collisions_methods.py | 4 +--- .../dejong_and_mackay_2022/test_fig_5.py | 20 ++++++------------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 7123658f57..ad9daf4870 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -742,15 +742,13 @@ def collision_coalescence_breakup( # pylint: disable=too-many-arguments def __fragmentation_limiters(n_fragment, frag_size, vmin, nfmax, x_plus_y): for i in numba.prange(len(frag_size)): # pylint: disable=not-an-iterable - if np.isnan(frag_size[i]): + if np.isnan(frag_size[i]) or frag_size[i] == 0.0: frag_size[i] = x_plus_y[i] frag_size[i] = min(frag_size[i], x_plus_y[i]) if nfmax is not None and x_plus_y[i] / frag_size[i] > nfmax: frag_size[i] = x_plus_y[i] / nfmax elif frag_size[i] < vmin: frag_size[i] = x_plus_y[i] - elif frag_size[i] == 0.0: - frag_size[i] = x_plus_y[i] n_fragment[i] = x_plus_y[i] / frag_size[i] def fragmentation_limiters(self, *, n_fragment, frag_size, vmin, nfmax, x_plus_y): diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py index b9e1d5bc9b..dbb6f4586b 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py @@ -23,11 +23,11 @@ def test_fig_5(backend_class, plot=False): seed=44, warn_overflows=False, ) - steps = [0, 30, 60, 180, 540] + steps = [0, 1200, 3600] settings._steps = steps # pylint: disable=protected-access settings.n_sd = 2**11 settings.radius_bins_edges = np.logspace( - np.log10(10 * si.um), np.log10(2e3 * si.um), num=32, endpoint=True + np.log10(4 * si.um), np.log10(5e3 * si.um), num=64, endpoint=True ) settings.coal_eff = Straub2010Ec() @@ -58,21 +58,13 @@ def test_fig_5(backend_class, 
plot=False): # assert peaks_expected = { - 0: (33, 0.018), - 30: (92, 0.011), - 60: (305, 0.012), - 180: (717, 0.015), - 540: (717, 0.015), + 0: (34, 0.019), + 1200: (2839, 0.03), + 3600: (2839, 0.03), } for j, step in enumerate(steps): - print(step) peak = np.argmax(data_y[j]) np.testing.assert_approx_equal( - actual=data_x[peak], desired=peaks_expected[step][0], significant=2 - ) - np.testing.assert_approx_equal( - actual=data_y[j][peak] * settings.rho, - desired=peaks_expected[step][1], - significant=2, + actual=data_x[peak], desired=peaks_expected[step][0], significant=1 ) From 5dc99643b74b4262e84edf0806edc9cfb0c24f2b Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 11 Apr 2023 12:13:30 +0200 Subject: [PATCH 75/86] remove GPU-xfail in deJong fig 5 smoke test --- tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py index dbb6f4586b..53f8823e47 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py @@ -10,12 +10,12 @@ from PySDM.dynamics.collisions.coalescence_efficiencies import Straub2010Ec from PySDM.physics import si +from ...backends_fixture import backend_class + +assert hasattr(backend_class, "_pytestfixturefunction") + # pylint: disable=redefined-outer-name -@pytest.mark.parametrize( - "backend_class", - (CPU, pytest.param(GPU, marks=pytest.mark.xfail(strict=True))), # TODO #987 -) def test_fig_5(backend_class, plot=False): # arrange settings = Settings0D( From 485e6a031bf56994d816c4e36a06622474da7db0 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 11 Apr 2023 12:16:23 +0200 Subject: [PATCH 76/86] remove unused import --- tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py 
b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py index 53f8823e47..d77573fc74 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py @@ -5,7 +5,6 @@ from matplotlib import pyplot from PySDM_examples.deJong_Mackay_2022 import Settings0D, run_box_breakup -from PySDM.backends import CPU, GPU from PySDM.dynamics.collisions.breakup_fragmentations import Straub2010Nf from PySDM.dynamics.collisions.coalescence_efficiencies import Straub2010Ec from PySDM.physics import si From f84cb4fb90beb1be013f2ec062348b95e17859af Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Tue, 11 Apr 2023 12:23:17 +0200 Subject: [PATCH 77/86] one more unused import --- tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py index d77573fc74..09f28bb5b6 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py @@ -1,7 +1,6 @@ # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import matplotlib import numpy as np -import pytest from matplotlib import pyplot from PySDM_examples.deJong_Mackay_2022 import Settings0D, run_box_breakup From fb7165862f284330fbcda6ef6e94a994c7927129 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 11 Apr 2023 11:58:30 -0700 Subject: [PATCH 78/86] Use fill command in straub Ec --- .../dynamics/collisions/coalescence_efficiencies/straub2010.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py b/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py index a737e3057d..3beedc2417 100644 --- a/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py +++ b/PySDM/dynamics/collisions/coalescence_efficiencies/straub2010.py 
@@ -46,4 +46,4 @@ def __call__(self, output, is_first_in_pair): self.arrays["We"].divide_if_not_zero(self.arrays["Sc"]) self.arrays["We"] *= -1.15 - output[:] = np.exp(self.arrays["We"]) # TODO #976 + output.fill(np.exp(self.arrays["We"])) From 5fa21906c2cb9f0890c97ec3381ef80b7953de40 Mon Sep 17 00:00:00 2001 From: edejong-caltech Date: Tue, 11 Apr 2023 12:56:10 -0700 Subject: [PATCH 79/86] Update smoke test --- tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py index 09f28bb5b6..dbb6f4586b 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py @@ -1,19 +1,21 @@ # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import matplotlib import numpy as np +import pytest from matplotlib import pyplot from PySDM_examples.deJong_Mackay_2022 import Settings0D, run_box_breakup +from PySDM.backends import CPU, GPU from PySDM.dynamics.collisions.breakup_fragmentations import Straub2010Nf from PySDM.dynamics.collisions.coalescence_efficiencies import Straub2010Ec from PySDM.physics import si -from ...backends_fixture import backend_class - -assert hasattr(backend_class, "_pytestfixturefunction") - # pylint: disable=redefined-outer-name +@pytest.mark.parametrize( + "backend_class", + (CPU, pytest.param(GPU, marks=pytest.mark.xfail(strict=True))), # TODO #987 +) def test_fig_5(backend_class, plot=False): # arrange settings = Settings0D( From 4ae6e9878671f552912ae2876d714dd4640eeee8 Mon Sep 17 00:00:00 2001 From: Emily de Jong Date: Tue, 11 Apr 2023 16:51:19 -0700 Subject: [PATCH 80/86] dJ smoke test + case for x_plus_y = 0 --- .../impl_numba/methods/collisions_methods.py | 46 +++++++++++-------- .../dejong_and_mackay_2022/test_fig_3.py | 11 +++-- 2 files changed, 32 
insertions(+), 25 deletions(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index ad9daf4870..4e697f38e7 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -414,19 +414,21 @@ def __straub_fragmentation_body( Nr4, ) Nrt[i] = Nr1[i] + Nr2[i] + Nr3[i] + Nr4[i] - - if rand[i] < Nr1[i] / Nrt[i]: - X = rand[i] * Nrt[i] / Nr1[i] - lnarg = mu1 + np.sqrt(2) * sigma1 * straub_erfinv(X) - frag_size[i] = np.exp(lnarg) - elif rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]: - X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i] - frag_size[i] = mu2 + np.sqrt(2) * sigma2 * straub_erfinv(X) - elif rand[i] < (Nr3[i] + Nr2[i] + Nr1[i]) / Nrt[i]: - X = (rand[i] * Nrt[i] - Nr1[i] - Nr2[i]) / Nr3[i] - frag_size[i] = mu3 + np.sqrt(2) * sigma3 * straub_erfinv(X) + if Nrt[i] == 0.0: + frag_size[i] = 0.0 else: - frag_size[i] = d34[i] + if rand[i] < Nr1[i] / Nrt[i]: + X = rand[i] * Nrt[i] / Nr1[i] + lnarg = mu1 + np.sqrt(2) * sigma1 * straub_erfinv(X) + frag_size[i] = np.exp(lnarg) + elif rand[i] < (Nr2[i] + Nr1[i]) / Nrt[i]: + X = (rand[i] * Nrt[i] - Nr1[i]) / Nr2[i] + frag_size[i] = mu2 + np.sqrt(2) * sigma2 * straub_erfinv(X) + elif rand[i] < (Nr3[i] + Nr2[i] + Nr1[i]) / Nrt[i]: + X = (rand[i] * Nrt[i] - Nr1[i] - Nr2[i]) / Nr3[i] + frag_size[i] = mu3 + np.sqrt(2) * sigma3 * straub_erfinv(X) + else: + frag_size[i] = d34[i] frag_size[i] = frag_size[i] ** 3 * 3.1415 / 6 @@ -742,14 +744,18 @@ def collision_coalescence_breakup( # pylint: disable=too-many-arguments def __fragmentation_limiters(n_fragment, frag_size, vmin, nfmax, x_plus_y): for i in numba.prange(len(frag_size)): # pylint: disable=not-an-iterable - if np.isnan(frag_size[i]) or frag_size[i] == 0.0: - frag_size[i] = x_plus_y[i] - frag_size[i] = min(frag_size[i], x_plus_y[i]) - if nfmax is not None and x_plus_y[i] / frag_size[i] > nfmax: - frag_size[i] = x_plus_y[i] / nfmax - elif 
frag_size[i] < vmin: - frag_size[i] = x_plus_y[i] - n_fragment[i] = x_plus_y[i] / frag_size[i] + if x_plus_y[i] == 0.0: + frag_size[i] == 0.0 + n_fragment[i] = 1.0 + else: + if np.isnan(frag_size[i]) or frag_size[i] == 0.0: + frag_size[i] = x_plus_y[i] + frag_size[i] = min(frag_size[i], x_plus_y[i]) + if nfmax is not None and x_plus_y[i] / frag_size[i] > nfmax: + frag_size[i] = x_plus_y[i] / nfmax + elif frag_size[i] < vmin: + frag_size[i] = x_plus_y[i] + n_fragment[i] = x_plus_y[i] / frag_size[i] def fragmentation_limiters(self, *, n_fragment, frag_size, vmin, nfmax, x_plus_y): self.__fragmentation_limiters( diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_3.py b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_3.py index 6bc7cb5495..c6fcd23f2f 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_3.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_3.py @@ -1,18 +1,15 @@ # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring - import matplotlib import numpy as np +import pytest from matplotlib import pyplot from PySDM_examples.deJong_Mackay_2022 import Settings0D, run_box_breakup +from PySDM.backends import CPU, GPU from PySDM.dynamics.collisions.breakup_fragmentations import AlwaysN from PySDM.dynamics.collisions.coalescence_efficiencies import ConstEc, Straub2010Ec from PySDM.physics.constants import si -from ...backends_fixture import backend_class - -assert hasattr(backend_class, "_pytestfixturefunction") - R_MIN = 0.1 * si.um V_MIN = 4 / 3 * np.pi * R_MIN**3 EC_VALS = [1.0, 0.95, 0.9, 0.8] @@ -21,6 +18,10 @@ # pylint: disable=redefined-outer-name +@pytest.mark.parametrize( + "backend_class", + (CPU, pytest.param(GPU, marks=pytest.mark.xfail(strict=True))), # TODO #987 +) def test_fig_3_reduced_resolution(backend_class, plot=False): # arrange settings = Settings0D(fragmentation=AlwaysN(n=8, vmin=V_MIN), seed=44) From 357bf8933b31b91103f5ebc8a06ff625f2e76d75 Mon Sep 17 00:00:00 2001 From: 
Emily de Jong Date: Tue, 11 Apr 2023 17:37:01 -0700 Subject: [PATCH 81/86] 'pointless statement' --- PySDM/backends/impl_numba/methods/collisions_methods.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PySDM/backends/impl_numba/methods/collisions_methods.py b/PySDM/backends/impl_numba/methods/collisions_methods.py index 4e697f38e7..865dc496d6 100644 --- a/PySDM/backends/impl_numba/methods/collisions_methods.py +++ b/PySDM/backends/impl_numba/methods/collisions_methods.py @@ -745,7 +745,7 @@ def collision_coalescence_breakup( def __fragmentation_limiters(n_fragment, frag_size, vmin, nfmax, x_plus_y): for i in numba.prange(len(frag_size)): # pylint: disable=not-an-iterable if x_plus_y[i] == 0.0: - frag_size[i] == 0.0 + frag_size[i] = 0.0 n_fragment[i] = 1.0 else: if np.isnan(frag_size[i]) or frag_size[i] == 0.0: From 630b94a1cddccc6b84e5129e54b4139b853803dd Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 13 Apr 2023 08:39:50 +0200 Subject: [PATCH 82/86] bring back y-assert code in test_fig_5 smoke test as commented out and todo-labelled block --- tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py index dbb6f4586b..fabcfffdb9 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py @@ -68,3 +68,9 @@ def test_fig_5(backend_class, plot=False): np.testing.assert_approx_equal( actual=data_x[peak], desired=peaks_expected[step][0], significant=1 ) +# TODO #1048 +# np.testing.assert_approx_equal( +# actual=data_y[j][peak] * settings.rho, +# desired=peaks_expected[step][1], +# significant=2, +# ) From a84531d2a7dc23bffb0483672343b8cd97b6cb63 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 13 Apr 2023 08:42:42 +0200 Subject: [PATCH 83/86] reinstantiate test_fragmentation_nf_and_frag_size_equals 
--- .../collisions/test_fragmentations.py | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index ad346f40be..940bdd0b19 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -319,3 +319,54 @@ def test_fragmentation_fn_distribution( plt.hist(res[:, 0]) if plot: plt.show() + + @staticmethod + @pytest.mark.parametrize( + "fragmentation_fn, volume, expected_nf", + ( + ( + ConstantSize(c=4 * si.um**3), + np.asarray([400.0 * si.um**3, 600.0 * si.um**3]), + 250, + ), + (AlwaysN(n=250), np.asarray([400.0 * si.um**3, 600.0 * si.um**3]), 250), + ), + ) + def test_fragmentation_nf_and_frag_size_equals( # TODO #987 + fragmentation_fn, + volume, + expected_nf, + backend_class=CPU, # pylint:disable=redefined-outer-name + ): + # arrange + expected_frag_size = np.sum(volume) / expected_nf + + fragments = np.asarray([-1.0]) + builder = Builder( + volume.size, + backend_class( + Formulae(fragmentation_function=fragmentation_fn.__class__.__name__) + ), + ) + sut = fragmentation_fn + sut.vmin = 1 * si.um**3 + sut.register(builder) + builder.set_environment(Box(dv=None, dt=None)) + _ = builder.build(attributes={"volume": volume, "n": np.ones_like(volume)}) + + _PairwiseStorage = builder.particulator.PairwiseStorage + _Indicator = builder.particulator.PairIndicator + nf = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + frag_size = _PairwiseStorage.from_ndarray(np.zeros_like(fragments)) + is_first_in_pair = _Indicator(length=volume.size) + is_first_in_pair.indicator = builder.particulator.Storage.from_ndarray( + np.asarray([True, False]) + ) + u01 = _PairwiseStorage.from_ndarray(np.ones_like(fragments)) + + # act + sut(nf, frag_size, u01, is_first_in_pair) + + # Assert + np.testing.assert_array_equal(nf.to_ndarray(), expected_nf) + 
np.testing.assert_array_equal([expected_frag_size], frag_size.to_ndarray()) From bc85acb9e247704ec485462383bc0ece3599d182 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 13 Apr 2023 08:47:15 +0200 Subject: [PATCH 84/86] add missing white spaces (pre commit) --- tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py index fabcfffdb9..db565d3d88 100644 --- a/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py +++ b/tests/smoke_tests/dejong_and_mackay_2022/test_fig_5.py @@ -68,6 +68,8 @@ def test_fig_5(backend_class, plot=False): np.testing.assert_approx_equal( actual=data_x[peak], desired=peaks_expected[step][0], significant=1 ) + + # TODO #1048 # np.testing.assert_approx_equal( # actual=data_y[j][peak] * settings.rho, From b528479fba4569047b23a6143c933b782ce0b09d Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 13 Apr 2023 08:48:13 +0200 Subject: [PATCH 85/86] white-space cleanup --- tests/unit_tests/dynamics/collisions/test_fragmentations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 940bdd0b19..7e1879605b 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -331,13 +331,13 @@ def test_fragmentation_fn_distribution( ), (AlwaysN(n=250), np.asarray([400.0 * si.um**3, 600.0 * si.um**3]), 250), ), - ) + ) def test_fragmentation_nf_and_frag_size_equals( # TODO #987 fragmentation_fn, volume, expected_nf, backend_class=CPU, # pylint:disable=redefined-outer-name - ): + ): # arrange expected_frag_size = np.sum(volume) / expected_nf From c04168a2788e31b72a80201ca0b3ec298e0412a3 Mon Sep 17 00:00:00 2001 From: Sylwester Arabas Date: Thu, 13 Apr 2023 
08:53:34 +0200 Subject: [PATCH 86/86] bring back ConstantSize import --- tests/unit_tests/dynamics/collisions/test_fragmentations.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit_tests/dynamics/collisions/test_fragmentations.py b/tests/unit_tests/dynamics/collisions/test_fragmentations.py index 7e1879605b..1ebc943de6 100644 --- a/tests/unit_tests/dynamics/collisions/test_fragmentations.py +++ b/tests/unit_tests/dynamics/collisions/test_fragmentations.py @@ -10,6 +10,7 @@ from PySDM.dynamics.collisions.breakup_fragmentations import ( SLAMS, AlwaysN, + ConstantSize, ExponFrag, Feingold1988Frag, Gaussian,