From 77a1cea9a49ffbcfcddbea9286cf232b5a9e7b25 Mon Sep 17 00:00:00 2001
From: Hala Jeet
Date: Sun, 23 Nov 2025 08:57:12 +0000
Subject: [PATCH] feat: Add comprehensive tests for NLG generator functions

---
 .../app/services/nlg/tests/test_nlg_engine.py | 437 +++++++-----------
 .../nlg/tests/test_report_nlg_engine.py       | 143 ++++++
 2 files changed, 320 insertions(+), 260 deletions(-)
 create mode 100644 backend/app/services/nlg/tests/test_report_nlg_engine.py

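NOTE: The prompt-validation asserts in these tests call get_template() and
fill_template() from backend/app/services/nlg/prompt_templates.py. As a rough
sketch of the surface the tests assume (illustrative only; the template bodies
and exact helper signatures below are assumptions, not the module's real
contents):

def get_template(section_id: str) -> str:
    # Hypothetical stand-in, keyed the way the tests call it,
    # e.g. get_template("tokenomics") or get_template("code_audit_summary").
    templates = {
        "tokenomics": "Summarize this tokenomics data:\n{data}",
        "onchain_metrics": "Summarize these on-chain metrics:\n{data}",
        "social_sentiment": "Summarize this sentiment data:\n{data}",
        "code_audit_summary": "Summarize code stats:\n{code_data}\nand audit findings:\n{audit_data}",
        "team_documentation": "Summarize team analysis:\n{team_analysis}\nand whitepaper:\n{whitepaper_summary}",
    }
    return templates[section_id]

def fill_template(template: str, **kwargs) -> str:
    # The tests pass pre-serialized JSON strings as keyword arguments.
    return template.format(**kwargs)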
diff --git a/backend/app/services/nlg/tests/test_nlg_engine.py b/backend/app/services/nlg/tests/test_nlg_engine.py
--- a/backend/app/services/nlg/tests/test_nlg_engine.py
+++ b/backend/app/services/nlg/tests/test_nlg_engine.py
     async def generate_section_text(self, section_id: str) -> str:
-        return self._format_output({"section_id": section_id, "text": "Mocked base section text."})
-
-    async def generate_full_report(self, data: dict) -> str:
-        sections = []
-        sections.append(json.loads(await super().generate_tokenomics_text(data.get("tokenomics_data", {}))))
-        sections.append(json.loads(await super().generate_onchain_text(data.get("onchain_data", {}))))
-        sections.append(json.loads(await super().generate_sentiment_text(data.get("sentiment_data", {}))))
-        sections.append(json.loads(await super().generate_code_audit_text(data.get("code_data", {}), data.get("audit_data", {}))))
-        return self._format_output({"sections": sections})
-
-# Mock the environment variable for testing
-@pytest.fixture(autouse=True)
-def mock_env_vars():
-    os.environ["OPENAI_API_KEY"] = "test_api_key"
-    yield
-    del os.environ["OPENAI_API_KEY"]
+        # This is a placeholder implementation for the abstract method
+        return json.dumps({"section_id": section_id, "text": "Generated by ConcreteNLGEngine"})
+
+# Mock the LLMClient for all tests in this module
+@pytest.fixture
+def mock_llm_client():
+    with patch('backend.app.services.nlg.nlg_engine.LLMClient', autospec=True) as MockLLMClient:
+        mock_instance = MockLLMClient.return_value
+        mock_instance.__aenter__.return_value = mock_instance
+        mock_instance.__aexit__.return_value = None
+        mock_instance.generate_text = AsyncMock()
+        yield mock_instance
+
+@pytest.fixture
+def nlg_engine():
+    return ConcreteNLGEngine()
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_success():
-    engine = ConcreteNLGEngine() # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-        "transaction_flows": "10M USD",
-        "liquidity": "20M USD"
+async def test_generate_tokenomics_text_success(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": "This is a generated tokenomics summary."}}]
     }
-    expected_llm_response = {
-        "choices": [{"message": {"content": "On-chain metrics show strong activity with 1000 active addresses and 500 holders. Transaction flows are at 10M USD and liquidity is 20M USD."}}]
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "On-chain metrics show strong activity" in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"supply": "1B", "distribution": "fair"}
+
+    result = await nlg_engine.generate_tokenomics_text(raw_data)
+
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "This is a generated tokenomics summary."
+    })
+    assert result == expected_output
+    mock_llm_client.generate_text.assert_called_once()
+
+    # Validate prompt correctness
+    expected_template = get_template("tokenomics")
+    expected_prompt = fill_template(
+        expected_template,
+        data=json.dumps(raw_data, indent=2)
+    )
+    mock_llm_client.generate_text.assert_called_with(expected_prompt)
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_empty_data():
-    engine = ConcreteNLGEngine() # Use the concrete implementation
-    raw_data = {}
-
-    response = await engine.generate_onchain_text(raw_data)
-    parsed_response = json.loads(response)
-
-    assert parsed_response["section_id"] == "onchain_metrics"
-    assert "On-chain metrics data is not available at this time." in parsed_response["text"]
+async def test_generate_tokenomics_text_missing_data(nlg_engine):
+    result = await nlg_engine.generate_tokenomics_text({})
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "Tokenomics data is not available at this time. Please check back later for updates."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_incomplete_data():
-    engine = ConcreteNLGEngine() # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-    }
-    expected_llm_response = {
-        "choices": [{"message": {"content": "On-chain metrics show strong activity with 1000 active addresses and 500 holders. Transaction flows and liquidity data are not available."}}]
+async def test_generate_tokenomics_text_empty_llm_response(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": ""}}]
     }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "On-chain metrics show strong activity" in parsed_response["text"]
-        assert "Transaction flows and liquidity data are not available." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"supply": "1B"}
+
+    result = await nlg_engine.generate_tokenomics_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "Failed to generate tokenomics summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_llm_error():
-    engine = ConcreteNLGEngine() # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-        "transaction_flows": "10M USD",
-        "liquidity": "20M USD"
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(500, text="Internal Server Error")
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "Failed to generate on-chain metrics summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+async def test_generate_tokenomics_text_llm_exception(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.side_effect = Exception("LLM connection error")
+    raw_data = {"supply": "1B"}
+
+    result = await nlg_engine.generate_tokenomics_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "Failed to generate tokenomics summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_llm_empty_content():
-    engine = ConcreteNLGEngine() # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-        "transaction_flows": "10M USD",
-        "liquidity": "20M USD"
+async def test_generate_onchain_text_success(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": "This is a generated on-chain metrics summary."}}]
     }
-    expected_llm_response = {
-        "choices": [{"message": {"content": ""}}]
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "Failed to generate on-chain metrics summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"active_addresses": 1000, "holders": 500, "transaction_flows": "high", "liquidity": "good"}
+
+    result = await nlg_engine.generate_onchain_text(raw_data)
+
+    expected_output = json.dumps({
+        "section_id": "onchain_metrics",
+        "text": "This is a generated on-chain metrics summary."
+    })
+    assert result == expected_output
+    mock_llm_client.generate_text.assert_called_once()
+
+    # Validate prompt correctness
+    expected_template = get_template("onchain_metrics")
+    expected_prompt = fill_template(
+        expected_template,
+        data=json.dumps({
+            "active_addresses": 1000,
+            "holders": 500,
+            "transaction_flows": "high",
+            "liquidity": "good",
+        }, indent=2)
+    )
+    mock_llm_client.generate_text.assert_called_with(expected_prompt)
 
 @pytest.mark.asyncio
-async def test_generate_sentiment_text_success():
-    engine = ConcreteNLGEngine()
-    raw_data = {
-        "overall_sentiment_score": 0.75,
-        "community_perception": "positive",
-        "trends": ["growing adoption", "strong community engagement"],
-        "direction": "upward"
-    }
-    expected_llm_response = {
-        "choices": [{"message": {"content": "Overall sentiment is highly positive (0.75) with a strong upward community direction. Key trends include growing adoption and strong community engagement."}}]
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_sentiment_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "social_sentiment"
-        assert "Overall sentiment is highly positive (0.75)" in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+async def test_generate_onchain_text_missing_data(nlg_engine):
+    result = await nlg_engine.generate_onchain_text({})
+    expected_output = json.dumps({
+        "section_id": "onchain_metrics",
+        "text": "On-chain metrics data is not available at this time. Please check back later for updates."
+ }) + assert result == expected_output @pytest.mark.asyncio -async def test_generate_sentiment_text_empty_data(): - engine = ConcreteNLGEngine() - raw_data = {} - - response = await engine.generate_sentiment_text(raw_data) - parsed_response = json.loads(response) - - assert parsed_response["section_id"] == "social_sentiment" - assert "Social sentiment data is not available at this time." in parsed_response["text"] +async def test_generate_onchain_text_failed_status(nlg_engine): + result = await nlg_engine.generate_onchain_text({"status": "failed"}) + expected_output = json.dumps({ + "section_id": "onchain_metrics", + "text": "On-chain metrics data is not available at this time. Please check back later for updates." + }) + assert result == expected_output @pytest.mark.asyncio -async def test_generate_sentiment_text_llm_error(): - engine = ConcreteNLGEngine() - raw_data = { - "overall_sentiment_score": 0.75, - "community_perception": "positive" +async def test_generate_onchain_text_empty_llm_response(mock_llm_client, nlg_engine): + mock_llm_client.generate_text.return_value = { + "choices": [{"message": {"content": ""}}] } - - with respx.mock as respx_mock: - respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(500, text="Internal Server Error") - - response = await engine.generate_sentiment_text(raw_data) - parsed_response = json.loads(response) - - assert parsed_response["section_id"] == "social_sentiment" - assert "Failed to generate social sentiment summary due to an internal error." in parsed_response["text"] - assert respx_mock.calls.call_count == 1 - - assert parsed_response["section_id"] == "social_sentiment" - assert "Failed to generate social sentiment summary due to an internal error." in parsed_response["text"] - assert respx_mock.calls.call_count == 1 + raw_data = {"active_addresses": 1000} + + result = await nlg_engine.generate_onchain_text(raw_data) + expected_output = json.dumps({ + "section_id": "onchain_metrics", + "text": "Failed to generate on-chain metrics summary due to an internal error. Please try again later." + }) + assert result == expected_output @pytest.mark.asyncio -async def test_generate_code_audit_text_success(): - engine = ConcreteNLGEngine() - code_data = {"files": [{"name": "main.py", "lines": 100}]} - audit_data = {"risks": [{"type": "security", "severity": "high"}]} - expected_llm_response = { - "choices": [{"message": {"content": "Code audit summary: Clarity is good. High security risk identified. Recent code activity. Good repository quality."}}] - } - - with respx.mock as respx_mock: - respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response) - - response = await engine.generate_code_audit_text(code_data, audit_data) - parsed_response = json.loads(response) - - assert parsed_response["section_id"] == "code_audit_summary" - assert "Code audit summary: Clarity is good." in parsed_response["text"] - assert respx_mock.calls.call_count == 1 +async def test_generate_onchain_text_llm_exception(mock_llm_client, nlg_engine): + mock_llm_client.generate_text.side_effect = Exception("LLM connection error") + raw_data = {"active_addresses": 1000} + + result = await nlg_engine.generate_onchain_text(raw_data) + expected_output = json.dumps({ + "section_id": "onchain_metrics", + "text": "Failed to generate on-chain metrics summary due to an internal error. Please try again later." 
+ }) + assert result == expected_output @pytest.mark.asyncio -async def test_generate_code_audit_text_empty_data(): - engine = ConcreteNLGEngine() - code_data = {} - audit_data = {} - - response = await engine.generate_code_audit_text(code_data, audit_data) - parsed_response = json.loads(response) - - assert parsed_response["section_id"] == "code_audit_summary" - assert "Code audit and repository data are not available at this time." in parsed_response["text"] +async def test_generate_sentiment_text_success(mock_llm_client, nlg_engine): + mock_llm_client.generate_text.return_value = { + "choices": [{"message": {"content": "This is a generated social sentiment summary."}}] + } + raw_data = {"sentiment_score": 0.8, "trends": "positive"} + + result = await nlg_engine.generate_sentiment_text(raw_data) + + expected_output = json.dumps({ + "section_id": "social_sentiment", + "text": "This is a generated social sentiment summary." + }) + assert result == expected_output + mock_llm_client.generate_text.assert_called_once() + + # Validate prompt correctness + expected_template = get_template("social_sentiment") + expected_prompt = fill_template( + expected_template, + data=json.dumps(raw_data, indent=2) + ) + mock_llm_client.generate_text.assert_called_with(expected_prompt) @pytest.mark.asyncio -async def test_generate_code_audit_text_llm_error(): - engine = ConcreteNLGEngine() - code_data = {"files": [{"name": "main.py", "lines": 100}]} - audit_data = {"risks": [{"type": "security", "severity": "high"}]} - - with respx.mock as respx_mock: - respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(500, text="Internal Server Error") - - response = await engine.generate_code_audit_text(code_data, audit_data) - parsed_response = json.loads(response) - - assert parsed_response["section_id"] == "code_audit_summary" - assert "Failed to generate code audit summary due to an internal error." in parsed_response["text"] - assert respx_mock.calls.call_count == 1 +async def test_generate_sentiment_text_missing_data(nlg_engine): + result = await nlg_engine.generate_sentiment_text({}) + expected_output = json.dumps({ + "section_id": "social_sentiment", + "text": "Social sentiment data is not available at this time. Please check back later for updates." + }) + assert result == expected_output @pytest.mark.asyncio -async def test_generate_code_audit_text_llm_empty_content(): - engine = ConcreteNLGEngine() - code_data = {"files": [{"name": "main.py", "lines": 100}]} - audit_data = {"risks": [{"type": "security", "severity": "high"}]} - expected_llm_response = { +async def test_generate_sentiment_text_empty_llm_response(mock_llm_client, nlg_engine): + mock_llm_client.generate_text.return_value = { "choices": [{"message": {"content": ""}}] } - - with respx.mock as respx_mock: - respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response) - - response = await engine.generate_code_audit_text(code_data, audit_data) - parsed_response = json.loads(response) - - assert parsed_response["section_id"] == "code_audit_summary" - assert "Failed to generate code audit summary due to an internal error." in parsed_response["text"] - assert respx_mock.calls.call_count == 1 + raw_data = {"sentiment_score": 0.8} + + result = await nlg_engine.generate_sentiment_text(raw_data) + expected_output = json.dumps({ + "section_id": "social_sentiment", + "text": "Failed to generate social sentiment summary due to an internal error. Please try again later." 
+ }) + assert result == expected_output @pytest.mark.asyncio -async def test_generate_full_report_success(): - engine = ConcreteNLGEngine() +async def test_generate_sentiment_text_llm_exception(mock_llm_client, nlg_engine): + mock_llm_client.generate_text.side_effect = Exception("LLM connection error") + raw_data = {"sentiment_score": 0.8} - mock_tokenomics_data = {"key": "value"} - mock_onchain_data = {"active_addresses": 1000} - mock_sentiment_data = {"score": 0.8} - mock_code_data = {"files": ["file1.py"]} - mock_audit_data = {"risks": ["high"]} - - full_report_data = { - "tokenomics_data": mock_tokenomics_data, - "onchain_data": mock_onchain_data, - "sentiment_data": mock_sentiment_data, - "code_data": mock_code_data, - "audit_data": mock_audit_data, - } - - with respx.mock as respx_mock: - respx_mock.post("https://api.openai.com/v1/chat/completions").side_effect = [ - Response(200, json={"choices": [{"message": {"content": "Mocked tokenomics text."}}]}), - Response(200, json={"choices": [{"message": {"content": "Mocked on-chain text."}}]}), - Response(200, json={"choices": [{"message": {"content": "Mocked sentiment text."}}]}), - Response(200, json={"choices": [{"message": {"content": "Mocked code audit text."}}]}), - ] - - response = await engine.generate_full_report(full_report_data) - parsed_response = json.loads(response) - - assert "sections" in parsed_response - assert len(parsed_response["sections"]) == 4 - - section_ids = [s["section_id"] for s in parsed_response["sections"]] - assert "tokenomics" in section_ids - assert "onchain_metrics" in section_ids - assert "social_sentiment" in section_ids - assert "code_audit_summary" in section_ids - - for section in parsed_response["sections"]: - if section["section_id"] == "tokenomics": - assert "Mocked tokenomics text." in section["text"] - elif section["section_id"] == "onchain_metrics": - assert "Mocked on-chain text." in section["text"] - elif section["section_id"] == "social_sentiment": - assert "Mocked sentiment text." in section["text"] - elif section["section_id"] == "code_audit_summary": - assert "Mocked code audit text." in section["text"] - - assert respx_mock.calls.call_count == 4 + result = await nlg_engine.generate_sentiment_text(raw_data) + expected_output = json.dumps({ + "section_id": "social_sentiment", + "text": "Failed to generate social sentiment summary due to an internal error. Please try again later." 
+ }) + assert result == expected_output diff --git a/backend/app/services/nlg/tests/test_report_nlg_engine.py b/backend/app/services/nlg/tests/test_report_nlg_engine.py new file mode 100644 index 00000000..9cd2a133 --- /dev/null +++ b/backend/app/services/nlg/tests/test_report_nlg_engine.py @@ -0,0 +1,143 @@ +import pytest +import json +from unittest.mock import AsyncMock, patch +from backend.app.services.nlg.report_nlg_engine import ReportNLGEngine +from backend.app.services.nlg.llm_client import LLMClient +from backend.app.services.nlg.prompt_templates import get_template, fill_template + +# Mock the LLMClient for all tests in this module +@pytest.fixture +def mock_llm_client(): + with patch('backend.app.services.nlg.report_nlg_engine.LLMClient', autospec=True) as MockLLMClient: + mock_instance = MockLLMClient.return_value + mock_instance.__aenter__.return_value = mock_instance + mock_instance.__aexit__.return_value = None + mock_instance.generate_text = AsyncMock() + yield mock_instance + +@pytest.fixture +def report_nlg_engine(): + return ReportNLGEngine() + +@pytest.mark.asyncio +async def test_generate_code_audit_text_success(mock_llm_client, report_nlg_engine): + mock_llm_client.generate_text.return_value = { + "choices": [{"message": {"content": "This is a generated code audit summary."}}] + } + code_data = {"lines": 100, "files": 10} + audit_data = [{"finding": "High severity bug"}] + + result = await report_nlg_engine.generate_code_audit_text(code_data, audit_data) + + expected_output = json.dumps({ + "section_id": "code_audit_summary", + "text": "This is a generated code audit summary." + }) + assert result == expected_output + mock_llm_client.generate_text.assert_called_once() + + # Validate prompt correctness + expected_template = get_template("code_audit_summary") + expected_prompt = fill_template( + expected_template, + code_data=json.dumps(code_data, indent=2), + audit_data=json.dumps(audit_data, indent=2) + ) + mock_llm_client.generate_text.assert_called_with(expected_prompt) + +@pytest.mark.asyncio +async def test_generate_code_audit_text_missing_data(report_nlg_engine): + result = await report_nlg_engine.generate_code_audit_text({}, []) + expected_output = json.dumps({ + "section_id": "code_audit_summary", + "text": "Code audit and repository data are not available at this time. Please check back later for updates." + }) + assert result == expected_output + +@pytest.mark.asyncio +async def test_generate_code_audit_text_empty_llm_response(mock_llm_client, report_nlg_engine): + mock_llm_client.generate_text.return_value = { + "choices": [{"message": {"content": ""}}] + } + code_data = {"lines": 100} + audit_data = [{"finding": "Low"}] + + result = await report_nlg_engine.generate_code_audit_text(code_data, audit_data) + expected_output = json.dumps({ + "section_id": "code_audit_summary", + "text": "Failed to generate code audit summary due to an internal error. Please try again later." + }) + assert result == expected_output + +@pytest.mark.asyncio +async def test_generate_code_audit_text_llm_exception(mock_llm_client, report_nlg_engine): + mock_llm_client.generate_text.side_effect = Exception("LLM connection error") + code_data = {"lines": 100} + audit_data = [{"finding": "Low"}] + + result = await report_nlg_engine.generate_code_audit_text(code_data, audit_data) + expected_output = json.dumps({ + "section_id": "code_audit_summary", + "text": "Failed to generate code audit summary due to an internal error. Please try again later." 
+ }) + assert result == expected_output + +@pytest.mark.asyncio +async def test_generate_team_documentation_text_success(mock_llm_client, report_nlg_engine): + mock_llm_client.generate_text.return_value = { + "choices": [{"message": {"content": "This is a generated team documentation summary."}}] + } + raw_data = {"team_analysis": ["Strong team"], "whitepaper_summary": {"version": "1.0"}} + + result = await report_nlg_engine.generate_team_documentation_text(raw_data) + + expected_output = json.dumps({ + "section_id": "team_documentation", + "text": "This is a generated team documentation summary." + }) + assert result == expected_output + mock_llm_client.generate_text.assert_called_once() + + # Validate prompt correctness + expected_template = get_template("team_documentation") + expected_prompt = fill_template( + expected_template, + team_analysis=json.dumps(raw_data["team_analysis"], indent=2), + whitepaper_summary=json.dumps(raw_data["whitepaper_summary"], indent=2) + ) + mock_llm_client.generate_text.assert_called_with(expected_prompt) + +@pytest.mark.asyncio +async def test_generate_team_documentation_text_missing_data(report_nlg_engine): + result = await report_nlg_engine.generate_team_documentation_text({}) + expected_output = json.dumps({ + "section_id": "team_documentation", + "text": "Team and documentation data is not available at this time. Please check back later for updates." + }) + assert result == expected_output + +@pytest.mark.asyncio +async def test_generate_team_documentation_text_empty_llm_response(mock_llm_client, report_nlg_engine): + mock_llm_client.generate_text.return_value = { + "choices": [{"message": {"content": ""}}] + } + raw_data = {"team_analysis": ["Strong team"]} + + result = await report_nlg_engine.generate_team_documentation_text(raw_data) + expected_output = json.dumps({ + "section_id": "team_documentation", + "text": "Failed to generate team and documentation summary due to an internal error. Please try again later." + }) + assert result == expected_output + +@pytest.mark.asyncio +async def test_generate_team_documentation_text_llm_exception(mock_llm_client, report_nlg_engine): + mock_llm_client.generate_text.side_effect = Exception("LLM connection error") + raw_data = {"team_analysis": ["Strong team"]} + + result = await report_nlg_engine.generate_team_documentation_text(raw_data) + expected_output = json.dumps({ + "section_id": "team_documentation", + "text": "Failed to generate team and documentation summary due to an internal error. Please try again later." + }) + assert result == expected_output
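
NOTE: The fixtures in both test modules stub LLMClient as an async context
manager whose generate_text() coroutine returns an OpenAI-style completion
payload. The sketch below mirrors only the surface the mocks rely on;
FakeLLMClient and demo() are hypothetical names, and the real client in
backend/app/services/nlg/llm_client.py may differ:

import asyncio
import json


class FakeLLMClient:
    """Mirrors the mocked surface: async context manager plus generate_text()."""

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        return None

    async def generate_text(self, prompt: str) -> dict:
        # The engines read choices[0].message.content from this payload.
        return {"choices": [{"message": {"content": "A generated summary."}}]}


async def demo() -> str:
    async with FakeLLMClient() as client:
        response = await client.generate_text("Summarize the tokenomics data.")
    content = response["choices"][0]["message"]["content"]
    # Each engine wraps the text with its section_id, matching the
    # expected_output values asserted throughout these tests.
    return json.dumps({"section_id": "tokenomics", "text": content})


if __name__ == "__main__":
    print(asyncio.run(demo()))

Because each fixture patches the class where the engine module imports it
(backend.app.services.nlg.nlg_engine.LLMClient versus
backend.app.services.nlg.report_nlg_engine.LLMClient), the two test files
must target different patch paths even though they stub the same interface.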