From a2739f7be3c196180f3216da492c8ad90f2f7725 Mon Sep 17 00:00:00 2001
From: Chengjia Jiang <46401978+ChasonJiang@users.noreply.github.com>
Date: Tue, 19 Sep 2023 20:17:48 +0800
Subject: [PATCH 01/26] Optimize latency (#1259)

* add attribute: configs/config.py
  Optimize latency: tools/rvc_for_realtime.py

* new file: assets/Synthesizer_inputs.pth

* fix: configs/config.py
  fix: tools/rvc_for_realtime.py

* fix bug: infer/lib/infer_pack/models.py

* new file: assets/hubert_inputs.pth
  new file: assets/rmvpe_inputs.pth
  modified: configs/config.py
  new features: infer/lib/rmvpe.py
  new features: tools/jit_export/__init__.py
  new features: tools/jit_export/get_hubert.py
  new features: tools/jit_export/get_rmvpe.py
  new features: tools/jit_export/get_synthesizer.py
  optimize: tools/rvc_for_realtime.py

* optimize: tools/jit_export/get_synthesizer.py
  fix bug: tools/jit_export/__init__.py

* Fixed a bug caused by using half on the CPU: infer/lib/rmvpe.py
  Fixed a bug caused by using half on the CPU: tools/jit_export/__init__.py
  Fixed CIRCULAR IMPORT: tools/jit_export/get_rmvpe.py
  Fixed CIRCULAR IMPORT: tools/jit_export/get_synthesizer.py
  Fixed a bug caused by using half on the CPU: tools/rvc_for_realtime.py

* Remove useless code: infer/lib/rmvpe.py

* Delete gui_v1 copy.py

* Delete .vscode/launch.json

* Delete jit_export_test.py

* Delete tools/rvc_for_realtime copy.py

* Delete configs/config.json

* Delete .gitignore

* Fix exceptions caused by switching inference devices: infer/lib/rmvpe.py
  Fix exceptions caused by switching inference devices: tools/jit_export/__init__.py
  Fix exceptions caused by switching inference devices: tools/rvc_for_realtime.py

* restore

* replace (you can undo this commit)

* remove debug_print

---------

Co-authored-by: Ftps
---
 assets/Synthesizer_inputs.pth    | Bin 0 -> 122495 bytes
 assets/hubert_inputs.pth         | Bin 0 -> 169434 bytes
 assets/rmvpe_inputs.pth          | Bin 0 -> 33527 bytes
 configs/config.json              |  26 +--
 configs/config.py                |   4 +-
 gui_v1.py                        |  20 +-
 infer/lib/infer_pack/models.py   |  48 ++---
 infer/lib/jit/__init__.py        | 109 ++++++++++
 infer/lib/jit/get_hubert.py      | 330 +++++++++++++++++++++++++++++++
 infer/lib/jit/get_rmvpe.py       |  13 ++
 infer/lib/jit/get_synthesizer.py |  35 ++++
 infer/lib/rmvpe.py               |  73 +++++--
 tools/rvc_for_realtime.py        | 119 +++++++----
 13 files changed, 685 insertions(+), 92 deletions(-)
 create mode 100644 assets/Synthesizer_inputs.pth
 create mode 100644 assets/hubert_inputs.pth
 create mode 100644 assets/rmvpe_inputs.pth
 create mode 100644 infer/lib/jit/__init__.py
 create mode 100644 infer/lib/jit/get_hubert.py
 create mode 100644 infer/lib/jit/get_rmvpe.py
 create mode 100644 infer/lib/jit/get_synthesizer.py

diff --git a/assets/Synthesizer_inputs.pth b/assets/Synthesizer_inputs.pth
new file mode 100644
index 0000000000000000000000000000000000000000..faa509ef7123ae84237f8a59105949014d56bb26
GIT binary patch
literal 122495
[base85-encoded binary payload omitted]
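
For context on what the new infer/lib/jit helpers and the bundled *_inputs.pth assets are for: a common way to cut per-call latency is to trace each model once with saved example inputs and reuse the resulting TorchScript module at inference time. The sketch below shows only that general torch.jit.trace pattern under that assumption; the function name export_traced_module and the usage comment are hypothetical illustrations, not code taken from this patch.

import torch


def export_traced_module(model: torch.nn.Module, inputs_path: str, output_path: str) -> None:
    """Trace `model` with pre-saved example inputs and store the TorchScript
    artifact so later real-time runs can load a compiled module."""
    model.eval()
    # Assumption: the .pth file holds a tensor or tuple of tensors matching
    # the model's forward() signature (e.g. assets/Synthesizer_inputs.pth).
    example_inputs = torch.load(inputs_path, map_location="cpu")
    with torch.no_grad():
        traced = torch.jit.trace(model, example_inputs)
    traced.save(output_path)


# Hypothetical usage; `synthesizer` would be an already-constructed model instance:
# export_traced_module(synthesizer, "assets/Synthesizer_inputs.pth", "synthesizer_jit.pt")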
zG@jqNj%h#4Yfjt5Csd+Y> zADAC_;D6v}-?cNe);B>X!2IGj$b}jVpB;PQ^xv_6%xl@(+uM5@SXFr%(rmQiy0`oABXo-7?oBe9;PrgP@)g~!BDfZ9`j`RQopFWs7e#k1ilLo8V?%Ffjh{9 zT?(H`g`^5eWy&%o=Q(gsf821~z&%2NP#|S1{iFCtF&o|+>PMKz!R#9Y{G>B%Gi;;s zN9A)*O0BqDEmyBmu2J?c>R)upe#yQme^dSj{RX`co4>CMzAm8On^|n^b+aBj0$%J+ zLMNfjQRcXU`4^ip2d7*?xq^ydj&+Vy!|Q@v>KDj-Vx7B+r;3L<2WlnCz-d6g-BaIF zDZ`T*s2!Lc73YhSGU8T1+pY4ceEr@1r5On)FfZBev^$w&%AVT^&k4_X%*Nzi@lQ}c zSYOg9Pe;xebtmjqtpU`=WdUl{_#k)Q(WcydI1L^fU6T%X+|p zz=6PzfPKIZfebVRe_c`~w^fWgFtm#qDZ~zPiF9FwHo`GK@AN?w3 z%S-@$F_VJ(*DIbYp6AZzPOis4Aq#f{7~>q{WFO-+;JVZW&mC%Oe7v~rAxs12QTiL&vQ9PqKD>f_kQqHBEHR2j^u5qrhk-m|heY2kMG#d>@ zgWM!HB>>jTnWgfv@nho_!2JQDL`?Up^MNlKZE(aHN~1@FQJz( zO+8J`z8;^W3ZSy4vc>?qtGlZ=DK{yTF>LWFI0Qz4czwK{zOGVDspcK>AUIF&Cy)iG z{SuOdB>K%7sO8iRvE$_Fyeb#+e>a%7dR~;jy-V1TJ*>~QNNkZf3)ztykh#e-8}D1rm|725I~f3WfN_BOL)OVxfkKc8Sa+EKz5$~F zzy3bv(+&pwyOe-4UFbdGe1dX-_jsl))0S(=wS?t|<nB|S^D;T(o4!_R6`Fx*+G%jYR*(zz{VEc8WRhtP^Hd+HKT@AnomDMUEK{_PX&=M=L#ij$^WOd5J<2u8rFZI`g8+3#Tj7II`jt>u zmh*3i#)(<=8Gr|4)}i<=ynz|;d}p$U&7YAvPh~)fN@Irr>r&K#cJg-ex?OJ975^2< zXWRyvk;9?iWRB^P#3PAu>Nxcz?F*1Oir{K`)DPWBR4iED~?iuV`5 zYl&Bfm433(p3xpXM$V;; z1JiP*TC{^dM}`!0On9DKtY3`N(;hvR zAua0%FSIYT6QK2fWPD`gneP{H5gZ3Rk6i|=&$UQxk-7|=2X)fwr0GF_FbVwdA77?@ znU)N0AR~q~yuVWZO6dz&yXpx_j3q|usw)BR6O=loZh>Zj<|*=)>DORp9(^6PK~Nr) z4^A4Kv`w~6mIf*%R7z+dYalxh>Vrjbi{gg19NIE-OXilch_VR!GSeHUH&)~Tm<=G$ zvmUe^wDq?3wvIH9G^aO8Z&WF!Qp~};gL#oVBX_=*zn1S)?o&1snh9J3s4e5Z_!;(ba=KQf8@?; zU(G`QLjNML6&a=6H*pU|9R~k9?~pUk`XBdCS)r`ZZZHb-WT;_ft!}q>xA(K)XF(Q; z_lNd}8oP)sQdGJ^wLyzCHtJX9jqtcx-uWp^tfVaCFcU@<^&8uBFTySd_OY zPvup4&j-(A<+~*9jrobu+MhLa7fPK zt}s=YIES;Yk2asFKwF?a?L6%qBn}cKfSjgpeBb!8z1iNg?z8SjSEFmAbE7i_k15C| zc|rez9`=pK(WcR+0{B7Ge~;d&r2yAMR{{DUv;v<2)b4BnMgy(pR(bAYE$$F_!`F`#~_!=rD{PQ0ESErtoKvt zEyt{>hXy`0FrW=+ztw-MZ_qSowz{^us>CX>#oyvTSaGmoo_3zLgT8~l{eboZ*z?Cw zDYXV#18T3@3tO9{JawMBi>r%kM_`BSUCko@D&&AtYkfj@LdVS~xpBiB!yKS^?A+$u z=A=m&wwmUsBkE{&Hal5A-0j%y;68A%2Oz zuFzNL>*($1{T7;T_UQRqevUi=)^x}{_Qt%pQjWyR8_FBVjhhGGVP0>CfRzm^Wgp@l z^*ic2HguFVtDn_>R{t%K+>qQ*1@Q4S@Bp9$x&s}6o&c}u3BYtqK*a%5Y^^K!d6o*Z^?fJ3KgC&Vaibxf%i0A*QHT)bEPz zim|r87`YN}gx&~ojNsfq3+M%Kf5v_*>*KtyIQQ|M=e~~lBjf|MSGHGLpe48j9QPmh z4+{(nP(#9XaGh8uE`YbyZOHKZ2;g(P0xmt*Oss1a`-}YxT?<{?0^0)RL&}GA@^$jD z?+GcUnmud2mgGjaB5VFp%cGV?cu7_x4|t$$psh?O6X;dJTyuJAk$=o_G*OeNX+<`` z6=W-Y3s_-Vcc$!2*<9~jFFA)>;IYm68f#{J&;2U<)#T%r_)2_Ehtttf*-<$HIq9r> zhs+`KW#O{mMix8uED_*$K$TD>RGKTzZy~eDX0RD%=w|4qYo}{D)tLY@-~o!Y#oErg z&bl<<3ur;EYOW%wri-Q`qap)saE({%)q>)vy9@e8>Xzt@#3IaQ^=37-bS1hH-7@Vm zEp?T}z%=Ait<$g5pMf827jqZ$Nz+Laz1+$SWd`z#UBE?T;O;;+4toUT{jN2vHN*oX za??MG+8uHU7aA8Dm+6-23epSGGqsso9y{AI+p|)cud_nB6wUM_&)HBm&VKe zOOCHg%uCEZn9-7t$@x1jBQ9gDXRU`>MxC~G+BU9yT=^yVSf)ACoK>nS6<*#!^qr}x zt*KQEQVhBe9ArMHhb<3V>a2BEvAZ|*X4A$wtR5?XeVoa z63vO`T>yREHkdY;HW@d`TxRN2*>^Y&d~N#L^bmYT*b5k|9jhfbzOS*b@dKb#U#c%k zElNG%KH)y+JLl_F1HgO3qxa}9q+Up!=mDVVGQnq)`JykXUsTiQ<&yT2Rr`=Dx zXZdFNY8|x>*wLwvnvR-U4XuVAnjV_cl+u*R-pSrQ>OJa<$laj#3k$Q2$Q@$Oy1k{n zg&f!ALzlx@^^2hsZ4+(m6?1>S8#+7IrFKGhJl8hYHUK*0p75sqJHU146u@XTT6tem z6SWcC#V>)?@JSqH7-i_I?5kuwtSX}_BUziQJ#RQ~c*6LE@eQCSeD_0dm>YU zH6HS#sqI+pUF{v^80Bc)-nu=0K>UF6{^k8ytGKV?zKYpZv#ZY7&)9h_aqREt@91aU z_7UXxE`fIwbD1v!tm&Bm`qz~NOM-7mZ%AhXXP}(x6c{cJ7shB!m+$EN$!{j2I$)g2ykct}ZgN%aoz4(}=7DPL!QXMZIgjqvSt z>0G)_$fCTX=8l?W&SlP%?vw7vl#eO9>$~d@LjOn(|GDkwwo4$mEY&U5ac*b7ki9Do za9xG2U>JWTbhmW3921TS%T3GWH3(l{`n{oLl}WGMLZJ}V!gAqj{nz?t_%(4nW}Wzy z0Z^Y(PtZ=#9y1&>@U^^6xJ~G6?kx8mlby-Vx8Vo<6|$6PYG-Pf0$1QGxFNJ5L;?VL zF6^@|2rUSG1H1*0bCw&<4YMZ9erawj7c0Qoa$R?NY`V;|;eK-_upHO~>;(P_@N=#Y zv9*wSHfNEkNUsF$y*MA`0^ECl3cpw^59OXTYY?1|sPSS=j=kw*AQfPLg`AvsfJT6| 
zi{AqyfXx6oXqN%bW84Eagd61EHF<&00keU7fS%!=Vd^Y$0e*Z7JaBS>rvR=iFpNkO zf)j%GNA8boi*7@jVpFt#tbc4Id{A;jxgmSl9wyhNu)a{9@6*8<;$EOzpj#jm4Miid zi2VGi1t8yrnuLfa;;BVg8NCw5N@JyJ=y=(~&aBO>Mf(Ju?cE{nyH^`l8-@mk2Aaes z@qPFEZq6ww$U5h1NpdhXK=T6g0%%#uJUQ0yx$kF>ybt<7T$|?r_%qPqzAdyrxIf5x z5bO320$iW7t~fI=Gmz)Y^Ch{GTqkNy)Ff<9*t{lbO_Cx-k@8&fbIGP1rXBR|J3RRC z;Kk{S(?_XBsg47u0D6PIqI^Yp4cXP@{mT1+8fzy9$&S!?*6>*inJi6~z5<2?hXr#1 zdgG)6>`UnZW|Xf2x&Ti~Pf8iUMj#`YfwbtYP%#}wLd@DAf5wLZKPQ)g`?1df1K^aL zvgVc=t`)#l;2B`Ov|eJy!%O~`{M@IPg8P{XPI?-^TF_Q_SFUldao-`{A(A`zZ0K3J z*0Ia8%cF9r97h5AB(aV=3Ax0Jf{TJR$Rau9JLIEY?@j4VsmNdCr?25`|7`zQ%~;LI z+L5*6g5!cy!&Aeor9B>dJk}Cw37w0bi`@_00UQ8F;%Q`+^0lO|dvX8b{-D@uzI1%) z_z3yN2gC#71F8p9o6|O@wehs!)>5vA77i{PJXJeY_EMbYp5`9u9qD}x_^0cit^_<5 zz^jUyYx1;w2wiE`Hfvd99i|?prf>W}`#?L#S8^am!E4zObOgzl;yup&9cyAw0M!8R z6W0C6wPX#kAK*fMOZ$gx6eNFfT}5Ki^aPL>*yg z-~#joTYXz)ZOB*tul&45*avWioS~1ZKdOGp@RY%*GwK#qE~?~pDue|jPPYk@gwM4L z_b9E&t;yu+K7fqk=YUD-NowZs94tRr&b=A?sNCn0*Fn8g2S*3TWaQbKpzYzfpoX{N zA@~YBg3Q_PV&BPqrDR94Bd#H?p-p|8daf4=kR`)D0QHa?;SW)U4B6X|&ASQuCs>af zsN3LcNgqA#;}7W$>DX88q3)sfEB(p?=?BtPN|o|(%Hb5F*XR{QK`g5)tK-)`sr;mp z*Ax47{iJ>p=S|jh*$-pyk$n^P!pLjkK8)J#tWXxW39=?r4Wj-~U=Bc?5qo-kUK~R> ze&325fN6ng0sfBHov%BGR}8OMm9#49Q=mDyIhk2g#c9Q9d>@|>pKy20-8FC3zEwNi zIow%PQ&eL}HKbzNRV_|goWlBCKW{(p7|$3Fb>~g0Ce^FjS7i;%oZ30HOVEqz>h9`( zL-~f1{wdV;K^|hHS5BHI&9gXhabk)#MT=6L`D@{8fqmnI%7n@|f1Ljrcw!&idT?tt zG86tw_%DIIHCXAoI!GO)cf@zZqyD3Qdb3iaz<%Ag;ADFv-U!#BtYyb1#V6qs57;8M z$OYd8pAZvb7or!Utj)0(IU+hDI@>oFdLFZ- zdP_BibT9ip_ACj;b(v(NLS z|4DznyWYK_d_#GGsz61Z;rGCosxMW!%3QfVmyn*2em?npvIK&{8rK>Z^K$0<=KI+9 zKJPy7rnl}X_bE5mN+ZG5O$MgGEpDD`o~utnp9EOIZ)Yz3N6^;h19{Fo=ZD%4wMUFc zjMMbf^xUI!&2<^MV>z`swLOs2{9E&H&4cxW^{2#BA~`mH3H~K$h4J%_v>j>5#$=;g zY4%44)sbi^w z_hp^0&bI|9>|5AZ*e+}z<{svDAytW$hm83#-Gc z!^Q9p>l5u0#qH;ftBtFr=Mv{oa?P8-#b<4PIr94GA)#~Xob=k86qpqFJ$(NQ1BC&u zx2VZz^fmgZiv>L~a1b2g4*=Hm*qc6)bRubzev&?v6iVu*>Zbas7d_Q8)iWKy6yY2m z7#?7qgnUHy{8>Zk9qb)sF2V}$3hz{8rr+ba$1^^4d@9c~>)egijnxwa69a#R#-3wm zMW`aQU)nEmFP3M|vu{vsP}S;cbz|Z2NbTS}>pWSTLtY>GSp@+5pOc?b>QD8vUvU_I-2LFk#<^p3cy#z}fF66~Di{HNO#gxbkpXi3 zhrNpS@b^0uIV5}HkyA7a;JxF55!3Cl+hy+;UZ}BfOwKmt^IjNRDA#&f4-z3}oEDk} z6=wesYf5*cro;8;`;qq}oU8v3{6nxGz%}!@*f@D@{w=ud{;)s1JhU7~P7AFE@BK1# zCtRm;T|O;1E!b1+DY6H>F}g9@3%+%o!A<=%@zca*>Sb!`b$e@iYu*2bM{l<3#?*`6#&i0B$U6;Nt0u{JzYIa7IAds&O9;B;malvqkE2dxLK zKAX?>2r_DpSdLi8x!z*iVtYLE@l5*PmH}M*+=fhSa?vkjUXb&4=w-q&_9fj*I_k<< zLm@w~7IMPLzN1irU@ zZ>t0*0G$Ey7fS&4nmPgW&84sUZ-6!cJ=fWA>krV&g@5P0z|+7vfO>$bz<0uT^4IPc z?neiIpRmNT#Ine`$hrwAvJ_c#f)31_V!6I}+$PHz+zNN*?z?WEZj?KtITN@K$dE z`2D|Y_+0~aWbI<@VipuuSw~<$nS0n?0G5X=Hv~ENa!k$-=Z9;c%Qt}6#X5BpFah=V z2+(s^&s{kHYy8WAHNc;N1wbP3Hauzm2vE~L1(*c92s{dO1xo8n>sJB?fRPO&(Mew= z`@%QYH`eQccEC%(y$$!uo;5iQISm8q2gqX&>)KxdZ5!G)>;uGlvHp1Uxa{3Pf6`0Q zOVNJtO#CZ6*0;j(GBuhST?@WB_cO)F(Hv7hroQ{G?z_H+KO^V7RsL0e&I>DoD}wz| zQ(zyST!5EVFRQv4x*49Ye7-WvkYyl8^}*nS!D3CZW`=2oiQG&23zDS7{TF+a?Emcb z?)83*9OoU;9nl`(P*KOl`;uOx5#Tl5YdZFusFirc_K1zOz$|l?nLo2msN-^3U>|l2 zFiDsMWlOs75q!o6q6?O#&(bSRN)v0SldO}hvu(3&)VfXvcs&LI>Py(et_L`F*Z{o5 zwl}SBTG2YNM1+XYXl^u*G>tSdTWv4$)~GFQ%4o_cR2C|gNy;R{PQy;6L+Pk?SG$|N z&EC`g(|-2)=LY5mJ_ZVQ#>1 z7;;z0|KS*W4SMlC(4JB&H$XW+NltD_RY}!+?|eDqb`SDC@_qTf@pa>6U9{j6d=>r* z|A4@Nz&Q6f_xs2PDG8JWo^U_mPC+g=Ib-`A`y3NeCZx=*np=e_#nsK(&3Q(3Mn$c+ z2@emDZ_YPUZxDwpXV!~ZD`$;SiF`ZA(+%(G-qT%CUr}%ByQ%Mo?hoB3;2FtU?Bnp| zX9gTS{K;SOieB*;@>ka)e|O^ciQ8Yxcr7E{kZxe+v8+uHJ`qZhZB2dpULY5FS$hFq zJIs+-1~4z}Ie^zu9Y8)A=NEDzwgCLTB!Jv>;x_;_)(HT0In$BrOb#XW;2gWDaeo3? 
(binary data omitted)
literal 0
HcmV?d00001

diff --git a/assets/hubert_inputs.pth b/assets/hubert_inputs.pth
new file mode 100644
index 0000000000000000000000000000000000000000..46d28868e5b7107a209af2ac9b75473575308e12
GIT binary patch
literal 169434
(binary data omitted)
literal 0
HcmV?d00001

diff --git a/assets/rmvpe_inputs.pth b/assets/rmvpe_inputs.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a4cfb8607af1de19648868e84e8e3d30939d359e
GIT binary patch
literal 33527
zwjJ3L&qXns=CPlQ^GVs6#zmQA<9_d%82*2I?3xw){Ce`XrXOj#CoOYWt>NwK3nDZ_Zz8;>kwi!E)eT r`{8Pey*&GS{J#ACOW(#cJ+EnwUp|^|A@u6>wVj4JxAn)2UyS_&{WX(w literal 0 HcmV?d00001 diff --git a/configs/config.json b/configs/config.json index 8e9c176..668299d 100644 --- a/configs/config.json +++ b/configs/config.json @@ -1,15 +1,15 @@ { - "pth_path": "assets/weights/kikiV1.pth", - "index_path": "logs/kikiV1.index", - "sg_input_device": "VoiceMeeter Output (VB-Audio Vo (MME)", - "sg_output_device": "VoiceMeeter Aux Input (VB-Audio (MME)", - "threhold": -45.0, - "pitch": 12.0, - "index_rate": 0.0, - "rms_mix_rate": 0.0, - "block_time": 0.25, - "crossfade_length": 0.04, - "extra_time": 2.0, - "n_cpu": 6.0, - "f0method": "rmvpe" + "pth_path": "assets/weights/kikiV1.pth", + "index_path": "logs/kikiV1.index", + "sg_input_device": "VoiceMeeter Output (VB-Audio Vo (MME)", + "sg_output_device": "VoiceMeeter Aux Input (VB-Audio (MME)", + "threhold": -45.0, + "pitch": 12.0, + "index_rate": 0.0, + "rms_mix_rate": 0.0, + "block_time": 0.25, + "crossfade_length": 0.04, + "extra_time": 2.0, + "n_cpu": 6.0, + "f0method": "rmvpe" } diff --git a/configs/config.py b/configs/config.py index 20bbb36..6c9620f 100644 --- a/configs/config.py +++ b/configs/config.py @@ -43,7 +43,9 @@ def wrapper(*args, **kwargs): class Config: def __init__(self): self.device = "cuda:0" - self.is_half = True + # Float is faster than Half, on Rtx4070 + self.is_half = False + self.use_jit = True self.n_cpu = 0 self.gpu_name = None self.json_config = self.load_config_json() diff --git a/gui_v1.py b/gui_v1.py index f86809d..3f2073d 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -51,7 +51,7 @@ def run(self): from queue import Empty import librosa - from tools.torchgate import TorchGate + from infer.lib.jit.torchgate import TorchGate import numpy as np import PySimpleGUI as sg import sounddevice as sd @@ -59,7 +59,7 @@ def run(self): import torch.nn.functional as F import torchaudio.transforms as tat - import tools.rvc_for_realtime as rvc_for_realtime + import infer.lib.jit.rvc_for_realtime as rvc_for_realtime from i18n.i18n import I18nAuto i18n = I18nAuto() @@ -100,7 +100,7 @@ class GUI: def __init__(self) -> None: self.config = GUIConfig() self.flag_vc = False - + # self.device_latency=0.1 self.launcher() def load(self): @@ -288,6 +288,17 @@ def launcher(self): enable_events=True, ), ], + # [ + # sg.Text("设备延迟"), + # sg.Slider( + # range=(0, 1), + # key="device_latency", + # resolution=0.001, + # orientation="h", + # default_value=data.get("device_latency", "0.1"), + # enable_events=True, + # ), + # ], [ sg.Text(i18n("harvest进程数")), sg.Slider( @@ -388,6 +399,7 @@ def event_handler(self): "pitch": values["pitch"], "rms_mix_rate": values["rms_mix_rate"], "index_rate": values["index_rate"], + # "device_latency": values["device_latency"], "block_time": values["block_time"], "crossfade_length": values["crossfade_length"], "extra_time": values["extra_time"], @@ -444,6 +456,7 @@ def set_values(self, values): sg.popup(i18n("index文件路径不可包含中文")) return False self.set_devices(values["sg_input_device"], values["sg_output_device"]) + # self.device_latency = values["device_latency"] self.config.pth_path = values["pth_path"] self.config.index_path = values["index_path"] self.config.threhold = values["threhold"] @@ -565,6 +578,7 @@ def soundinput(self): blocksize=self.block_frame, samplerate=self.config.samplerate, dtype="float32", + # latency=self.device_latency ): while self.flag_vc: time.sleep(self.config.block_time) diff --git a/infer/lib/infer_pack/models.py 
b/infer/lib/infer_pack/models.py index 711db22..30ab539 100644 --- a/infer/lib/infer_pack/models.py +++ b/infer/lib/infer_pack/models.py @@ -564,7 +564,7 @@ def __init__( **kwargs ): super().__init__() - if type(sr) == type("strr"): + if isinstance(sr,str): sr = sr2sr[sr] self.spec_channels = spec_channels self.inter_channels = inter_channels @@ -647,15 +647,16 @@ def forward( o = self.dec(z_slice, pitchf, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] + # 这里不使用分支,因为当使用torch.jit.trace()时, + # 只编译一个分支,使用其他分支时会导致错误! + head = int(z_p.shape[2] * (1.0-rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, nsff0, g=g) return o, x_mask, (z, z_p, m_p, logs_p) @@ -685,7 +686,7 @@ def __init__( **kwargs ): super().__init__() - if type(sr) == type("strr"): + if isinstance(sr,str): sr = sr2sr[sr] self.spec_channels = spec_channels self.inter_channels = inter_channels @@ -768,15 +769,14 @@ def forward( o = self.dec(z_slice, pitchf, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): + def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] + head = int(z_p.shape[2] * (1.0-rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, nsff0, g=g) return o, x_mask, (z, z_p, m_p, logs_p) @@ -880,14 +880,14 @@ def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[b o = self.dec(z_slice, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, sid, rate=None): + def infer(self, phone, phone_lengths, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] + head = int(z_p.shape[2] * (1.0-rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, g=g) return o, x_mask, (z, z_p, m_p, logs_p) @@ -991,14 +991,14 @@ def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[b o = self.dec(z_slice, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, sid, rate=None): + def infer(self, phone, phone_lengths, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): 
g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] + head = int(z_p.shape[2] * (1.0-rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + nsff0 = nsff0[:, head:] z = self.flow(z_p, x_mask, g=g, reverse=True) o = self.dec(z * x_mask, g=g) return o, x_mask, (z, z_p, m_p, logs_p) diff --git a/infer/lib/jit/__init__.py b/infer/lib/jit/__init__.py new file mode 100644 index 0000000..1c42934 --- /dev/null +++ b/infer/lib/jit/__init__.py @@ -0,0 +1,109 @@ +from io import BytesIO +import pickle +import time +import torch +from tqdm import tqdm +from collections import OrderedDict + +def load_inputs(path,device,is_half=False): + parm=torch.load(path,map_location=torch.device("cpu")) + for key in parm.keys(): + parm[key] = parm[key].to(device) + if is_half and parm[key].dtype == torch.float32: + parm[key] = parm[key].half() + elif not is_half and parm[key].dtype == torch.float16: + parm[key] = parm[key].float() + return parm + +def benchmark(model,inputs_path,device=torch.device("cpu"),epoch=1000,is_half=False): + parm=load_inputs(inputs_path,device,is_half) + total_ts = 0.0 + bar=tqdm(range(epoch)) + for i in bar: + start_time=time.perf_counter() + o=model(**parm) + total_ts+=time.perf_counter()-start_time + print(f"num_epoch: {epoch} | avg time(ms): {(total_ts*1000)/epoch}") + +def jit_warm_up(model,inputs_path,device=torch.device("cpu"),epoch=5,is_half=False): + benchmark(model,inputs_path,device,epoch=epoch,is_half=is_half) + +def to_jit_model(model_path,model_type:str,inputs_path:str,device=torch.device("cpu"),is_half=False): + model=None + if model_type.lower()=="synthesizer": + from infer.lib.jit.get_synthesizer import get_synthesizer + model,_=get_synthesizer(model_path,device) + model.forward = model.infer + elif model_type.lower()=="rmvpe": + from infer.lib.jit.jit_export.get_rmvpe import get_rmvpe + model=get_rmvpe(model_path,device) + elif model_type.lower()=="hubert": + from infer.lib.jit.jit_export.get_hubert import get_hubert_model + model=get_hubert_model(model_path,device) + model.forward = model.infer + else: + raise ValueError(f"No model type named {model_type}") + model = model.eval() + model = model.half() if is_half else model.float() + inputs = load_inputs(inputs_path,device,is_half) + model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) + model_jit.to(device) + model_jit = model_jit.half() if is_half else model_jit.float() + # model = model.half() if is_half else model.float() + return (model,model_jit) + +def export(model:torch.nn.Module,inputs:dict,device=torch.device("cpu"),is_half:bool=False)->dict: + model = model.half() if is_half else model.float() + model.eval() + model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) + model_jit.to(device) + model_jit = model_jit.half() if is_half else model_jit.float() + buffer = BytesIO() + model_jit=model_jit.cpu() + torch.jit.save(model_jit,buffer) + del model_jit + cpt=OrderedDict() + cpt["model"]=buffer.getvalue() + cpt["is_half"]=is_half + return cpt + +def load(path:str): + with open(path,"rb") as f: + return pickle.load(f) + +def save(ckpt:dict, save_path:str): + with open(save_path,"wb") as f: + pickle.dump(ckpt,f) + + +def rmvpe_jit_export(model_path:str,inputs_path:str,save_path:str=None,device=torch.device("cpu"),is_half=False): + if not save_path: + 
save_path=model_path.rstrip(".pth") + save_path+=".half.jit" if is_half else ".jit" + if "cuda" in str(device) and ":" not in str(device): + device = torch.device("cuda:0" ) + from infer.lib.jit.jit_export.get_rmvpe import get_rmvpe + model = get_rmvpe(model_path,device) + inputs = load_inputs(inputs_path,device,is_half) + ckpt = export(model,inputs,device,is_half) + ckpt["device"]=str(device) + save(ckpt,save_path) + return ckpt + +def synthesizer_jit_export(model_path:str,inputs_path:str,save_path:str=None,device=torch.device("cpu"),is_half=False): + if not save_path: + save_path=model_path.rstrip(".pth") + save_path+=".half.jit" if is_half else ".jit" + if "cuda" in str(device) and ":" not in str(device): + device = torch.device("cuda:0" ) + from infer.lib.jit.jit_export.get_synthesizer import get_synthesizer + model,cpt=get_synthesizer(model_path,device) + assert isinstance(cpt,dict) + model.forward = model.infer + inputs =load_inputs(inputs_path,device,is_half) + ckpt = export(model,inputs,device,is_half) + cpt.pop("weight") + cpt["model"] = ckpt["model"] + cpt["device"] = device + save(cpt,save_path) + return cpt diff --git a/infer/lib/jit/get_hubert.py b/infer/lib/jit/get_hubert.py new file mode 100644 index 0000000..1194be4 --- /dev/null +++ b/infer/lib/jit/get_hubert.py @@ -0,0 +1,330 @@ +import math +import random +from typing import Optional,Tuple +from fairseq.checkpoint_utils import load_model_ensemble_and_task +import numpy as np +import torch +import torch.nn.functional as F +# from fairseq.data.data_utils import compute_mask_indices +from fairseq.utils import index_put +# @torch.jit.script +def pad_to_multiple(x, multiple, dim=-1, value=0): + # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41 + if x is None: + return None, 0 + tsz = x.size(dim) + m = tsz / multiple + remainder = math.ceil(m) * multiple - tsz + if int(tsz % multiple)==0: + return x, 0 + pad_offset = (0,) * (-1 - dim) * 2 + + return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder + + +def extract_features( + self, + x, + padding_mask=None, + tgt_layer=None, + min_layer=0, +): + + if padding_mask is not None: + x = index_put(x, padding_mask, 0) + + x_conv = self.pos_conv(x.transpose(1, 2)) + x_conv = x_conv.transpose(1, 2) + x = x + x_conv + + if not self.layer_norm_first: + x = self.layer_norm(x) + + # pad to the sequence length dimension + x, pad_length = pad_to_multiple( + x, self.required_seq_len_multiple, dim=-2, value=0 + ) + if pad_length > 0 and padding_mask is None: + padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool) + padding_mask[:, -pad_length:] = True + else: + padding_mask, _ = pad_to_multiple( + padding_mask, self.required_seq_len_multiple, dim=-1, value=True + ) + x = F.dropout(x, p=self.dropout, training=self.training) + + # B x T x C -> T x B x C + x = x.transpose(0, 1) + + layer_results = [] + r = None + for i, layer in enumerate(self.layers): + dropout_probability = np.random.random() if self.layerdrop > 0 else 1 + if not self.training or (dropout_probability > self.layerdrop): + x, (z, lr) = layer( + x, self_attn_padding_mask=padding_mask, need_weights=False + ) + if i >= min_layer: + layer_results.append((x, z, lr)) + if i == tgt_layer: + r = x + break + + if r is not None: + x = r + + # T x B x C -> B x T x C + x = x.transpose(0, 1) + + # undo paddding + if pad_length > 0: + x = x[:, :-pad_length] + + def undo_pad(a, b, c): + return ( + a[:-pad_length], + b[:-pad_length] if b is not None else 
b, + c[:-pad_length], + ) + + layer_results = [undo_pad(*u) for u in layer_results] + + return x, layer_results + +def compute_mask_indices( + shape: Tuple[int, int], + padding_mask: Optional[torch.Tensor], + mask_prob: float, + mask_length: int, + mask_type: str = "static", + mask_other: float = 0.0, + min_masks: int = 0, + no_overlap: bool = False, + min_space: int = 0, + require_same_masks: bool = True, + mask_dropout: float = 0.0, +) -> torch.Tensor: + """ + Computes random mask spans for a given shape + + Args: + shape: the the shape for which to compute masks. + should be of size 2 where first element is batch size and 2nd is timesteps + padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements + mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by + number of timesteps divided by length of mask span to mask approximately this percentage of all elements. + however due to overlaps, the actual number will be smaller (unless no_overlap is True) + mask_type: how to compute mask lengths + static = fixed size + uniform = sample from uniform distribution [mask_other, mask_length*2] + normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element + poisson = sample from possion distribution with lambda = mask length + min_masks: minimum number of masked spans + no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping + min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans + require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample + mask_dropout: randomly dropout this percentage of masks in each example + """ + + bsz, all_sz = shape + mask = torch.full((bsz, all_sz), False) + + all_num_mask = int( + # add a random number for probabilistic rounding + mask_prob * all_sz / float(mask_length) + + torch.rand([1]).item() + ) + + all_num_mask = max(min_masks, all_num_mask) + + mask_idcs = [] + for i in range(bsz): + if padding_mask is not None: + sz = all_sz - padding_mask[i].long().sum().item() + num_mask = int( + mask_prob * sz / float(mask_length) + + np.random.rand() + ) + num_mask = max(min_masks, num_mask) + else: + sz = all_sz + num_mask = all_num_mask + + if mask_type == "static": + lengths = torch.full([num_mask], mask_length) + elif mask_type == "uniform": + lengths = torch.randint(mask_other, mask_length*2+1, size=[num_mask]) + elif mask_type == "normal": + lengths = torch.normal(mask_length, mask_other, size=[num_mask]) + lengths = [max(1, int(round(x))) for x in lengths] + else: + raise Exception("unknown mask selection " + mask_type) + + if sum(lengths) == 0: + lengths[0] = min(mask_length, sz - 1) + + if no_overlap: + mask_idc = [] + + def arrange(s, e, length, keep_length): + span_start = torch.randint(low=s, high=e - length,size=[1]).item() + mask_idc.extend(span_start + i for i in range(length)) + + new_parts = [] + if span_start - s - min_space >= keep_length: + new_parts.append((s, span_start - min_space + 1)) + if e - span_start - length - min_space > keep_length: + new_parts.append((span_start + length + min_space, e)) + return new_parts + + parts = [(0, sz)] + min_length = min(lengths) + for length in sorted(lengths, reverse=True): + t=[e - s if e - s >= length + min_space else 0 for s, e in parts] + lens=torch.asarray(t,dtype=torch.int) + l_sum = torch.sum(lens) + if l_sum == 0: 
+ break + probs = lens / torch.sum(lens) + c = torch.multinomial(probs.float(),len(parts)).item() + s, e = parts.pop(c) + parts.extend(arrange(s, e, length, min_length)) + mask_idc = torch.asarray(mask_idc) + else: + min_len = min(lengths) + if sz - min_len <= num_mask: + min_len = sz - num_mask - 1 + mask_idc=torch.asarray(random.sample([i for i in range(sz - min_len)],num_mask)) + mask_idc = torch.asarray( + [ + mask_idc[j] + offset + for j in range(len(mask_idc)) + for offset in range(lengths[j]) + ] + ) + + mask_idcs.append(torch.unique(mask_idc[mask_idc < sz])) + + min_len = min([len(m) for m in mask_idcs]) + for i, mask_idc in enumerate(mask_idcs): + if isinstance(mask_idc,torch.Tensor): + mask_idc=torch.asarray(mask_idc,dtype=torch.float) + if len(mask_idc) > min_len and require_same_masks: + mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],min_len)) + if mask_dropout > 0: + num_holes = int(round(len(mask_idc) * mask_dropout)) + mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],len(mask_idc) - num_holes)) + + mask[i, mask_idc.int()] = True + + return mask + +def apply_mask(self, x, padding_mask, target_list): + B, T, C = x.shape + torch.zeros_like(x) + if self.mask_prob > 0: + mask_indices = compute_mask_indices( + (B, T), + padding_mask, + self.mask_prob, + self.mask_length, + self.mask_selection, + self.mask_other, + min_masks=2, + no_overlap=self.no_mask_overlap, + min_space=self.mask_min_space, + ) + mask_indices = mask_indices.to(x.device) + x[mask_indices] = self.mask_emb + else: + mask_indices = None + + if self.mask_channel_prob > 0: + mask_channel_indices = compute_mask_indices( + (B, C), + None, + self.mask_channel_prob, + self.mask_channel_length, + self.mask_channel_selection, + self.mask_channel_other, + no_overlap=self.no_mask_channel_overlap, + min_space=self.mask_channel_min_space, + ) + mask_channel_indices = ( + mask_channel_indices + .to(x.device) + .unsqueeze(1) + .expand(-1, T, -1) + ) + x[mask_channel_indices] = 0 + + return x, mask_indices + + +def get_hubert_model(model_path="assets/hubert/hubert_base.pt",device=torch.device("cpu")): + models, _, _ = load_model_ensemble_and_task( + [model_path], + suffix="", + ) + hubert_model = models[0] + hubert_model = hubert_model.to(device) + def _apply_mask(x, padding_mask, target_list): + return apply_mask(hubert_model,x, padding_mask, target_list) + hubert_model.apply_mask = _apply_mask + + def _extract_features(x, + padding_mask=None, + tgt_layer=None, + min_layer=0, + ): + return extract_features(hubert_model.encoder,x, + padding_mask=padding_mask, + tgt_layer=tgt_layer, + min_layer=min_layer, + ) + hubert_model.encoder.extract_features = _extract_features + + hubert_model._forward=hubert_model.forward + + def hubert_extract_features( + self, + source: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + mask: bool = False, + ret_conv: bool = False, + output_layer: Optional[int] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + res = self._forward( + source, + padding_mask=padding_mask, + mask=mask, + features_only=True, + output_layer=output_layer, + ) + feature = res["features"] if ret_conv else res["x"] + return feature, res["padding_mask"] + + def _hubert_extract_features( + source: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + mask: bool = False, + ret_conv: bool = False, + output_layer: Optional[int] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + return hubert_extract_features(hubert_model,source,padding_mask,mask,ret_conv,output_layer) + 
hubert_model.extract_features = _hubert_extract_features + + def infer(source,padding_mask,output_layer:torch.Tensor): + output_layer=output_layer.item() + logits = hubert_model.extract_features(source=source,padding_mask=padding_mask,output_layer=output_layer) + feats = ( + hubert_model.final_proj(logits[0]) if output_layer == 9 else logits[0] + ) + return feats + + + hubert_model.infer=infer + # hubert_model.forward=infer + # hubert_model.forward + + return hubert_model \ No newline at end of file diff --git a/infer/lib/jit/get_rmvpe.py b/infer/lib/jit/get_rmvpe.py new file mode 100644 index 0000000..3c2ed7e --- /dev/null +++ b/infer/lib/jit/get_rmvpe.py @@ -0,0 +1,13 @@ + +import torch + + + +def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt",device=torch.device("cpu")): + from infer.lib.rmvpe import E2E + model = E2E(4, 1, (2, 2)) + ckpt = torch.load(model_path, map_location=device) + model.load_state_dict(ckpt) + model.eval() + model = model.to(device) + return model \ No newline at end of file diff --git a/infer/lib/jit/get_synthesizer.py b/infer/lib/jit/get_synthesizer.py new file mode 100644 index 0000000..70458f0 --- /dev/null +++ b/infer/lib/jit/get_synthesizer.py @@ -0,0 +1,35 @@ +import torch + + +def get_synthesizer(pth_path,device=torch.device("cpu")): + from infer.lib.infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono + cpt = torch.load(pth_path, map_location=torch.device("cpu")) + tgt_sr = cpt["config"][-1] + cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] + if_f0 = cpt.get("f0", 1) + version = cpt.get("version", "v1") + if version == "v1": + if if_f0 == 1: + net_g = SynthesizerTrnMs256NSFsid( + *cpt["config"], is_half=False + ) + else: + net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) + elif version == "v2": + if if_f0 == 1: + net_g = SynthesizerTrnMs768NSFsid( + *cpt["config"], is_half=False + ) + else: + net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) + del net_g.enc_q + net_g.forward = net_g.infer + ckpt={} + ckpt["config"]=cpt["config"] + ckpt["f0"] = if_f0 + ckpt["version"]=version + ckpt["info"]=cpt.get("info","0epoch") + net_g.load_state_dict(cpt["weight"], strict=False) + net_g=net_g.float() + net_g.eval().to(device) + return net_g,cpt \ No newline at end of file diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index d305b53..c2e4480 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -1,8 +1,11 @@ -import pdb, os +from io import BytesIO +import os import numpy as np import torch +from infer.lib import jit + try: # Fix "Torch not compiled with CUDA enabled" import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import @@ -276,17 +279,14 @@ def __init__(self, in_channels, out_channels, momentum=0.01): nn.BatchNorm2d(out_channels, momentum=momentum), nn.ReLU(), ) + self.shortcut=lambda x:x if in_channels != out_channels: self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False + + def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: - return self.conv(x) + x + return self.conv(x) + self.shortcut(x) class Encoder(nn.Module): @@ -578,12 +578,12 @@ def forward(self, audio, keyshift=0, speed=1, center=True): class RMVPE: - def __init__(self, model_path, is_half, device=None): + def __init__(self, model_path:str, is_half, device=None,use_jit=False): self.resample_kernel = {} self.resample_kernel = {} self.is_half = 
is_half if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" + device = "cuda:0" if torch.cuda.is_available() else "cpu" self.device = device self.mel_extractor = MelSpectrogram( is_half, 128, 16000, 1024, 160, None, 30, 8000 @@ -597,13 +597,49 @@ def __init__(self, model_path, is_half, device=None): ) self.model = ort_session else: - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model + if str(self.device)=="cuda": + self.device=torch.device("cuda:0") + def get_jit_model(): + jit_model_path=model_path.rstrip(".pth") + jit_model_path+=".half.jit" if is_half else ".jit" + reload=False + if os.path.exists(jit_model_path): + ckpt=jit_export.load(jit_model_path) + model_device = ckpt["device"] + if model_device != str(self.device): + reload =True + else: + reload=True + + if reload: + ckpt=jit_export.rmvpe_jit_export( + model_path, + "assets/rmvpe/rmvpe_inputs.pth", + save_path=jit_model_path, + device=device,is_half=is_half) + model=torch.jit.load(BytesIO(ckpt["model"]),map_location=device) + return model + + def get_default_model(): + model = E2E(4, 1, (2, 2)) + ckpt = torch.load(model_path, map_location="cpu") + model.load_state_dict(ckpt) + model.eval() + if is_half: + model = model.half() + else: + model = model.float() + return model + if use_jit: + if is_half and "cpu" in str(self.device): + logger.warning("Use default rmvpe model. \ + Jit is not supported on the CPU for half floating point") + self.model = get_default_model() + else: + self.model = get_jit_model() + else: + self.model = get_default_model() + self.model = self.model.to(device) cents_mapping = 20 * np.arange(360) + 1997.3794084376191 self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 @@ -622,6 +658,7 @@ def mel2hidden(self, mel): input_feed={onnx_input_name: mel.cpu().numpy()}, )[0] else: + # mel = mel.half() if self.is_half else mel.float() hidden = self.model(mel) return hidden[:, :n_frames] @@ -683,7 +720,7 @@ def to_local_average_cents(self, salience, thred=0.05): # t4 = ttime() # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) return devided - + if __name__ == "__main__": import librosa diff --git a/tools/rvc_for_realtime.py b/tools/rvc_for_realtime.py index 094e307..c259a7d 100644 --- a/tools/rvc_for_realtime.py +++ b/tools/rvc_for_realtime.py @@ -1,10 +1,14 @@ +from io import BytesIO import os +import pickle import sys import traceback import logging -logger = logging.getLogger(__name__) +from infer.lib import jit +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) from time import time as ttime import fairseq @@ -81,7 +85,7 @@ def __init__( self.index = faiss.read_index(index_path) self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) logger.info("Index search enabled") - self.pth_path = pth_path + self.pth_path:str = pth_path self.index_path = index_path self.index_rate = index_rate @@ -102,34 +106,75 @@ def __init__( self.model = last_rvc.model if last_rvc is None or last_rvc.pth_path != self.pth_path: - cpt = torch.load(self.pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) + self.is_half = config.is_half + + def 
set_default_model(): + cpt = torch.load(self.pth_path, map_location="cpu") + self.tgt_sr = cpt["config"][-1] + cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] + self.if_f0 = cpt.get("f0", 1) + self.version = cpt.get("version", "v1") + if self.version == "v1": + if self.if_f0 == 1: + self.net_g = SynthesizerTrnMs256NSFsid( + *cpt["config"], is_half=config.is_half + ) + else: + self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) + elif self.version == "v2": + if self.if_f0 == 1: + self.net_g = SynthesizerTrnMs768NSFsid( + *cpt["config"], is_half=config.is_half + ) + else: + self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) + del self.net_g.enc_q + logger.debug(self.net_g.load_state_dict(cpt["weight"], strict=False)) + self.net_g.eval().to(device) + if self.is_half: + self.net_g = self.net_g.half() else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) + self.net_g = self.net_g.float() + + def set_jit_model(): + jit_pth_path=self.pth_path.rstrip(".pth") + jit_pth_path+=".half.jit" if self.is_half else ".jit" + reload=False + if str(self.device)=="cuda": + self.device = torch.device("cuda:0") + if os.path.exists(jit_pth_path): + cpt=jit_export.load(jit_pth_path) + model_device = cpt["device"] + if model_device != str(self.device): + reload =True else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - logger.debug(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - # print(2333333333,device,config.device,self.device)#net_g是device,hubert是config.device - if config.is_half: - self.net_g = self.net_g.half() + reload=True + + if reload: + cpt=jit_export.synthesizer_jit_export(self.pth_path, + "assets\Synthesizer_inputs.pth", + device=self.device,is_half=self.is_half + ) + + self.tgt_sr = cpt["config"][-1] + self.if_f0 = cpt.get("f0", 1) + self.version = cpt.get("version", "v1") + self.net_g = torch.jit.load(BytesIO(cpt["model"]),map_location=device) + self.net_g.infer = self.net_g.forward + self.net_g.eval().to(device) + + if config.use_jit and not config.dml: + if self.is_half and "cpu" in str(self.device): + logger.warning("Use default Synthesizer model. 
\ + Jit is not supported on the CPU for half floating point") + set_default_model() + else: + set_jit_model() else: - self.net_g = self.net_g.float() - self.is_half = config.is_half + set_default_model() + + # print(2333333333,device,config.device,self.device)#net_g是device,hubert是config.device + else: self.tgt_sr = last_rvc.tgt_sr self.if_f0 = last_rvc.if_f0 @@ -142,6 +187,7 @@ def __init__( except: logger.warning(traceback.format_exc()) + def change_key(self, new_key): self.f0_up_key = new_key @@ -275,6 +321,7 @@ def get_f0_rmvpe(self, x, f0_up_key): "assets/rmvpe/rmvpe.pt", is_half=self.is_half, device=self.device, ####正常逻辑 + use_jit=config.use_jit ) # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device) f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) @@ -357,13 +404,19 @@ def infer( with torch.no_grad(): if self.if_f0 == 1: # print(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2) - infered_audio = self.net_g.infer( - feats, p_len, cache_pitch, cache_pitchf, sid, rate - )[0][0, 0].data.float() + infered_audio = ( + self.net_g.infer( + feats, p_len, cache_pitch, cache_pitchf, sid, torch.FloatTensor([rate]) + )[0][0, 0] + .data + .float() + ) else: - infered_audio = self.net_g.infer(feats, p_len, sid, rate)[0][ - 0, 0 - ].data.float() + infered_audio = ( + self.net_g.infer(feats, p_len, sid, torch.FloatTensor([rate]))[0][0, 0] + .data + .float() + ) t5 = ttime() logger.info( "Spent time: fea = %.2fs, index = %.2fs, f0 = %.2fs, model = %.2fs", From 074605b4b82a9a6a5ec87bdb751e69c1e9b56699 Mon Sep 17 00:00:00 2001 From: shizuku_nia <102004222+ShizukuNia@users.noreply.github.com> Date: Tue, 19 Sep 2023 20:26:48 +0800 Subject: [PATCH 02/26] Fixed some bugs when exporting ONNX model (#1254) --- infer-web.py | 4 ++-- infer/lib/infer_pack/models_onnx.py | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/infer-web.py b/infer-web.py index 542b42b..e3565d8 100644 --- a/infer-web.py +++ b/infer-web.py @@ -165,10 +165,10 @@ def clean(): return {"value": "", "__type__": "update"} -def export_onnx(): +def export_onnx(ModelPath, ExportedPath): from infer.modules.onnx.export import export_onnx as eo - eo() + eo(ModelPath, ExportedPath) sr_dict = { diff --git a/infer/lib/infer_pack/models_onnx.py b/infer/lib/infer_pack/models_onnx.py index 3e99763..e9928de 100644 --- a/infer/lib/infer_pack/models_onnx.py +++ b/infer/lib/infer_pack/models_onnx.py @@ -621,10 +621,7 @@ def __init__( self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) self.speaker_map = None logger.debug( - "gin_channels: " - + gin_channels - + ", self.spk_embed_dim: " - + self.spk_embed_dim + f"gin_channels: {gin_channels}, self.spk_embed_dim: {self.spk_embed_dim}" ) def remove_weight_norm(self): From 768ec52283fa20b7c3b807bacb1998028ea70af2 Mon Sep 17 00:00:00 2001 From: Ftps <63702646+Tps-F@users.noreply.github.com> Date: Wed, 20 Sep 2023 09:55:04 +0900 Subject: [PATCH 03/26] fix import (#1280) * fix import * lint --- infer/lib/infer_pack/models.py | 40 ++++++-- infer/lib/infer_pack/models_onnx.py | 2 +- infer/lib/jit/__init__.py | 152 +++++++++++++++++----------- infer/lib/jit/get_hubert.py | 110 +++++++++++--------- infer/lib/jit/get_rmvpe.py | 7 +- infer/lib/jit/get_synthesizer.py | 30 +++--- infer/lib/rmvpe.py | 48 +++++---- tools/rvc_for_realtime.py | 72 +++++++------ 8 files changed, 271 insertions(+), 190 deletions(-) diff --git a/infer/lib/infer_pack/models.py b/infer/lib/infer_pack/models.py index 30ab539..ec13fb5 
100644 --- a/infer/lib/infer_pack/models.py +++ b/infer/lib/infer_pack/models.py @@ -564,7 +564,7 @@ def __init__( **kwargs ): super().__init__() - if isinstance(sr,str): + if isinstance(sr, str): sr = sr2sr[sr] self.spec_channels = spec_channels self.inter_channels = inter_channels @@ -647,13 +647,21 @@ def forward( o = self.dec(z_slice, pitchf, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): + def infer( + self, + phone, + phone_lengths, + pitch, + nsff0, + sid, + rate: torch.Tensor = torch.FloatTensor([1.0]), + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask # 这里不使用分支,因为当使用torch.jit.trace()时, # 只编译一个分支,使用其他分支时会导致错误! - head = int(z_p.shape[2] * (1.0-rate.item())) + head = int(z_p.shape[2] * (1.0 - rate.item())) z_p = z_p[:, :, head:] x_mask = x_mask[:, :, head:] nsff0 = nsff0[:, head:] @@ -686,7 +694,7 @@ def __init__( **kwargs ): super().__init__() - if isinstance(sr,str): + if isinstance(sr, str): sr = sr2sr[sr] self.spec_channels = spec_channels self.inter_channels = inter_channels @@ -769,11 +777,19 @@ def forward( o = self.dec(z_slice, pitchf, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): + def infer( + self, + phone, + phone_lengths, + pitch, + nsff0, + sid, + rate: torch.Tensor = torch.FloatTensor([1.0]), + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - head = int(z_p.shape[2] * (1.0-rate.item())) + head = int(z_p.shape[2] * (1.0 - rate.item())) z_p = z_p[:, :, head:] x_mask = x_mask[:, :, head:] nsff0 = nsff0[:, head:] @@ -880,11 +896,13 @@ def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[b o = self.dec(z_slice, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): + def infer( + self, phone, phone_lengths, sid, rate: torch.Tensor = torch.FloatTensor([1.0]) + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - head = int(z_p.shape[2] * (1.0-rate.item())) + head = int(z_p.shape[2] * (1.0 - rate.item())) z_p = z_p[:, :, head:] x_mask = x_mask[:, :, head:] nsff0 = nsff0[:, head:] @@ -991,11 +1009,13 @@ def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[b o = self.dec(z_slice, g=g) return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, phone, phone_lengths, sid, rate:torch.Tensor=torch.FloatTensor([1.0])): + def infer( + self, phone, phone_lengths, sid, rate: torch.Tensor = torch.FloatTensor([1.0]) + ): g = self.emb_g(sid).unsqueeze(-1) m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - head = int(z_p.shape[2] * (1.0-rate.item())) + head = int(z_p.shape[2] * (1.0 - rate.item())) z_p = z_p[:, :, head:] x_mask = x_mask[:, :, head:] nsff0 = nsff0[:, head:] diff --git a/infer/lib/infer_pack/models_onnx.py b/infer/lib/infer_pack/models_onnx.py index e9928de..ff60414 100644 --- 
a/infer/lib/infer_pack/models_onnx.py +++ b/infer/lib/infer_pack/models_onnx.py @@ -551,7 +551,7 @@ def __init__( gin_channels, sr, version, - **kwargs + **kwargs, ): super().__init__() if type(sr) == type("strr"): diff --git a/infer/lib/jit/__init__.py b/infer/lib/jit/__init__.py index 1c42934..2d3332d 100644 --- a/infer/lib/jit/__init__.py +++ b/infer/lib/jit/__init__.py @@ -5,105 +5,143 @@ from tqdm import tqdm from collections import OrderedDict -def load_inputs(path,device,is_half=False): - parm=torch.load(path,map_location=torch.device("cpu")) + +def load_inputs(path, device, is_half=False): + parm = torch.load(path, map_location=torch.device("cpu")) for key in parm.keys(): parm[key] = parm[key].to(device) if is_half and parm[key].dtype == torch.float32: - parm[key] = parm[key].half() + parm[key] = parm[key].half() elif not is_half and parm[key].dtype == torch.float16: - parm[key] = parm[key].float() + parm[key] = parm[key].float() return parm -def benchmark(model,inputs_path,device=torch.device("cpu"),epoch=1000,is_half=False): - parm=load_inputs(inputs_path,device,is_half) + +def benchmark( + model, inputs_path, device=torch.device("cpu"), epoch=1000, is_half=False +): + parm = load_inputs(inputs_path, device, is_half) total_ts = 0.0 - bar=tqdm(range(epoch)) + bar = tqdm(range(epoch)) for i in bar: - start_time=time.perf_counter() - o=model(**parm) - total_ts+=time.perf_counter()-start_time + start_time = time.perf_counter() + o = model(**parm) + total_ts += time.perf_counter() - start_time print(f"num_epoch: {epoch} | avg time(ms): {(total_ts*1000)/epoch}") -def jit_warm_up(model,inputs_path,device=torch.device("cpu"),epoch=5,is_half=False): - benchmark(model,inputs_path,device,epoch=epoch,is_half=is_half) -def to_jit_model(model_path,model_type:str,inputs_path:str,device=torch.device("cpu"),is_half=False): - model=None - if model_type.lower()=="synthesizer": +def jit_warm_up(model, inputs_path, device=torch.device("cpu"), epoch=5, is_half=False): + benchmark(model, inputs_path, device, epoch=epoch, is_half=is_half) + + +def to_jit_model( + model_path, + model_type: str, + inputs_path: str, + device=torch.device("cpu"), + is_half=False, +): + model = None + if model_type.lower() == "synthesizer": from infer.lib.jit.get_synthesizer import get_synthesizer - model,_=get_synthesizer(model_path,device) + + model, _ = get_synthesizer(model_path, device) model.forward = model.infer - elif model_type.lower()=="rmvpe": - from infer.lib.jit.jit_export.get_rmvpe import get_rmvpe - model=get_rmvpe(model_path,device) - elif model_type.lower()=="hubert": - from infer.lib.jit.jit_export.get_hubert import get_hubert_model - model=get_hubert_model(model_path,device) + elif model_type.lower() == "rmvpe": + from infer.lib.jit.get_rmvpe import get_rmvpe + + model = get_rmvpe(model_path, device) + elif model_type.lower() == "hubert": + from infer.lib.jit.get_hubert import get_hubert_model + + model = get_hubert_model(model_path, device) model.forward = model.infer else: raise ValueError(f"No model type named {model_type}") model = model.eval() model = model.half() if is_half else model.float() - inputs = load_inputs(inputs_path,device,is_half) - model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) + inputs = load_inputs(inputs_path, device, is_half) + model_jit = torch.jit.trace(model, example_kwarg_inputs=inputs) model_jit.to(device) model_jit = model_jit.half() if is_half else model_jit.float() # model = model.half() if is_half else model.float() - return (model,model_jit) + return (model, 
model_jit) -def export(model:torch.nn.Module,inputs:dict,device=torch.device("cpu"),is_half:bool=False)->dict: + +def export( + model: torch.nn.Module, + inputs: dict, + device=torch.device("cpu"), + is_half: bool = False, +) -> dict: model = model.half() if is_half else model.float() model.eval() - model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) + model_jit = torch.jit.trace(model, example_kwarg_inputs=inputs) model_jit.to(device) model_jit = model_jit.half() if is_half else model_jit.float() buffer = BytesIO() - model_jit=model_jit.cpu() - torch.jit.save(model_jit,buffer) + model_jit = model_jit.cpu() + torch.jit.save(model_jit, buffer) del model_jit - cpt=OrderedDict() - cpt["model"]=buffer.getvalue() - cpt["is_half"]=is_half + cpt = OrderedDict() + cpt["model"] = buffer.getvalue() + cpt["is_half"] = is_half return cpt -def load(path:str): - with open(path,"rb") as f: + +def load(path: str): + with open(path, "rb") as f: return pickle.load(f) -def save(ckpt:dict, save_path:str): - with open(save_path,"wb") as f: - pickle.dump(ckpt,f) + +def save(ckpt: dict, save_path: str): + with open(save_path, "wb") as f: + pickle.dump(ckpt, f) -def rmvpe_jit_export(model_path:str,inputs_path:str,save_path:str=None,device=torch.device("cpu"),is_half=False): +def rmvpe_jit_export( + model_path: str, + inputs_path: str, + save_path: str = None, + device=torch.device("cpu"), + is_half=False, +): if not save_path: - save_path=model_path.rstrip(".pth") - save_path+=".half.jit" if is_half else ".jit" + save_path = model_path.rstrip(".pth") + save_path += ".half.jit" if is_half else ".jit" if "cuda" in str(device) and ":" not in str(device): - device = torch.device("cuda:0" ) - from infer.lib.jit.jit_export.get_rmvpe import get_rmvpe - model = get_rmvpe(model_path,device) - inputs = load_inputs(inputs_path,device,is_half) - ckpt = export(model,inputs,device,is_half) - ckpt["device"]=str(device) - save(ckpt,save_path) + device = torch.device("cuda:0") + from infer.lib.jit.get_rmvpe import get_rmvpe + + model = get_rmvpe(model_path, device) + inputs = load_inputs(inputs_path, device, is_half) + ckpt = export(model, inputs, device, is_half) + ckpt["device"] = str(device) + save(ckpt, save_path) return ckpt -def synthesizer_jit_export(model_path:str,inputs_path:str,save_path:str=None,device=torch.device("cpu"),is_half=False): + +def synthesizer_jit_export( + model_path: str, + inputs_path: str, + save_path: str = None, + device=torch.device("cpu"), + is_half=False, +): if not save_path: - save_path=model_path.rstrip(".pth") - save_path+=".half.jit" if is_half else ".jit" + save_path = model_path.rstrip(".pth") + save_path += ".half.jit" if is_half else ".jit" if "cuda" in str(device) and ":" not in str(device): - device = torch.device("cuda:0" ) - from infer.lib.jit.jit_export.get_synthesizer import get_synthesizer - model,cpt=get_synthesizer(model_path,device) - assert isinstance(cpt,dict) + device = torch.device("cuda:0") + from infer.lib.jit.get_synthesizer import get_synthesizer + + model, cpt = get_synthesizer(model_path, device) + assert isinstance(cpt, dict) model.forward = model.infer - inputs =load_inputs(inputs_path,device,is_half) - ckpt = export(model,inputs,device,is_half) + inputs = load_inputs(inputs_path, device, is_half) + ckpt = export(model, inputs, device, is_half) cpt.pop("weight") cpt["model"] = ckpt["model"] cpt["device"] = device - save(cpt,save_path) + save(cpt, save_path) return cpt diff --git a/infer/lib/jit/get_hubert.py b/infer/lib/jit/get_hubert.py index 
1194be4..aec7132 100644 --- a/infer/lib/jit/get_hubert.py +++ b/infer/lib/jit/get_hubert.py @@ -1,12 +1,15 @@ import math import random -from typing import Optional,Tuple +from typing import Optional, Tuple from fairseq.checkpoint_utils import load_model_ensemble_and_task import numpy as np import torch import torch.nn.functional as F + # from fairseq.data.data_utils import compute_mask_indices from fairseq.utils import index_put + + # @torch.jit.script def pad_to_multiple(x, multiple, dim=-1, value=0): # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41 @@ -15,7 +18,7 @@ def pad_to_multiple(x, multiple, dim=-1, value=0): tsz = x.size(dim) m = tsz / multiple remainder = math.ceil(m) * multiple - tsz - if int(tsz % multiple)==0: + if int(tsz % multiple) == 0: return x, 0 pad_offset = (0,) * (-1 - dim) * 2 @@ -29,7 +32,6 @@ def extract_features( tgt_layer=None, min_layer=0, ): - if padding_mask is not None: x = index_put(x, padding_mask, 0) @@ -41,9 +43,7 @@ def extract_features( x = self.layer_norm(x) # pad to the sequence length dimension - x, pad_length = pad_to_multiple( - x, self.required_seq_len_multiple, dim=-2, value=0 - ) + x, pad_length = pad_to_multiple(x, self.required_seq_len_multiple, dim=-2, value=0) if pad_length > 0 and padding_mask is None: padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool) padding_mask[:, -pad_length:] = True @@ -91,6 +91,7 @@ def undo_pad(a, b, c): return x, layer_results + def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], @@ -141,10 +142,7 @@ def compute_mask_indices( for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() - num_mask = int( - mask_prob * sz / float(mask_length) - + np.random.rand() - ) + num_mask = int(mask_prob * sz / float(mask_length) + np.random.rand()) num_mask = max(min_masks, num_mask) else: sz = all_sz @@ -153,7 +151,7 @@ def compute_mask_indices( if mask_type == "static": lengths = torch.full([num_mask], mask_length) elif mask_type == "uniform": - lengths = torch.randint(mask_other, mask_length*2+1, size=[num_mask]) + lengths = torch.randint(mask_other, mask_length * 2 + 1, size=[num_mask]) elif mask_type == "normal": lengths = torch.normal(mask_length, mask_other, size=[num_mask]) lengths = [max(1, int(round(x))) for x in lengths] @@ -167,7 +165,7 @@ def compute_mask_indices( mask_idc = [] def arrange(s, e, length, keep_length): - span_start = torch.randint(low=s, high=e - length,size=[1]).item() + span_start = torch.randint(low=s, high=e - length, size=[1]).item() mask_idc.extend(span_start + i for i in range(length)) new_parts = [] @@ -176,17 +174,17 @@ def arrange(s, e, length, keep_length): if e - span_start - length - min_space > keep_length: new_parts.append((span_start + length + min_space, e)) return new_parts - + parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): - t=[e - s if e - s >= length + min_space else 0 for s, e in parts] - lens=torch.asarray(t,dtype=torch.int) + t = [e - s if e - s >= length + min_space else 0 for s, e in parts] + lens = torch.asarray(t, dtype=torch.int) l_sum = torch.sum(lens) if l_sum == 0: break probs = lens / torch.sum(lens) - c = torch.multinomial(probs.float(),len(parts)).item() + c = torch.multinomial(probs.float(), len(parts)).item() s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = torch.asarray(mask_idc) @@ -194,7 +192,9 @@ def arrange(s, e, length, 
keep_length): min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 - mask_idc=torch.asarray(random.sample([i for i in range(sz - min_len)],num_mask)) + mask_idc = torch.asarray( + random.sample([i for i in range(sz - min_len)], num_mask) + ) mask_idc = torch.asarray( [ mask_idc[j] + offset @@ -207,18 +207,23 @@ def arrange(s, e, length, keep_length): min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): - if isinstance(mask_idc,torch.Tensor): - mask_idc=torch.asarray(mask_idc,dtype=torch.float) + if isinstance(mask_idc, torch.Tensor): + mask_idc = torch.asarray(mask_idc, dtype=torch.float) if len(mask_idc) > min_len and require_same_masks: - mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],min_len)) + mask_idc = torch.asarray( + random.sample([i for i in range(mask_idc)], min_len) + ) if mask_dropout > 0: num_holes = int(round(len(mask_idc) * mask_dropout)) - mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],len(mask_idc) - num_holes)) + mask_idc = torch.asarray( + random.sample([i for i in range(mask_idc)], len(mask_idc) - num_holes) + ) mask[i, mask_idc.int()] = True return mask + def apply_mask(self, x, padding_mask, target_list): B, T, C = x.shape torch.zeros_like(x) @@ -251,40 +256,45 @@ def apply_mask(self, x, padding_mask, target_list): min_space=self.mask_channel_min_space, ) mask_channel_indices = ( - mask_channel_indices - .to(x.device) - .unsqueeze(1) - .expand(-1, T, -1) + mask_channel_indices.to(x.device).unsqueeze(1).expand(-1, T, -1) ) x[mask_channel_indices] = 0 return x, mask_indices -def get_hubert_model(model_path="assets/hubert/hubert_base.pt",device=torch.device("cpu")): +def get_hubert_model( + model_path="assets/hubert/hubert_base.pt", device=torch.device("cpu") +): models, _, _ = load_model_ensemble_and_task( - [model_path], - suffix="", - ) + [model_path], + suffix="", + ) hubert_model = models[0] hubert_model = hubert_model.to(device) + def _apply_mask(x, padding_mask, target_list): - return apply_mask(hubert_model,x, padding_mask, target_list) + return apply_mask(hubert_model, x, padding_mask, target_list) + hubert_model.apply_mask = _apply_mask - def _extract_features(x, + def _extract_features( + x, padding_mask=None, tgt_layer=None, min_layer=0, ): - return extract_features(hubert_model.encoder,x, - padding_mask=padding_mask, - tgt_layer=tgt_layer, - min_layer=min_layer, - ) + return extract_features( + hubert_model.encoder, + x, + padding_mask=padding_mask, + tgt_layer=tgt_layer, + min_layer=min_layer, + ) + hubert_model.encoder.extract_features = _extract_features - hubert_model._forward=hubert_model.forward + hubert_model._forward = hubert_model.forward def hubert_extract_features( self, @@ -303,7 +313,7 @@ def hubert_extract_features( ) feature = res["features"] if ret_conv else res["x"] return feature, res["padding_mask"] - + def _hubert_extract_features( source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, @@ -311,20 +321,22 @@ def _hubert_extract_features( ret_conv: bool = False, output_layer: Optional[int] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: - return hubert_extract_features(hubert_model,source,padding_mask,mask,ret_conv,output_layer) + return hubert_extract_features( + hubert_model, source, padding_mask, mask, ret_conv, output_layer + ) + hubert_model.extract_features = _hubert_extract_features - - def infer(source,padding_mask,output_layer:torch.Tensor): - output_layer=output_layer.item() - logits = 
hubert_model.extract_features(source=source,padding_mask=padding_mask,output_layer=output_layer) - feats = ( - hubert_model.final_proj(logits[0]) if output_layer == 9 else logits[0] - ) + + def infer(source, padding_mask, output_layer: torch.Tensor): + output_layer = output_layer.item() + logits = hubert_model.extract_features( + source=source, padding_mask=padding_mask, output_layer=output_layer + ) + feats = hubert_model.final_proj(logits[0]) if output_layer == 9 else logits[0] return feats - - hubert_model.infer=infer + hubert_model.infer = infer # hubert_model.forward=infer # hubert_model.forward - return hubert_model \ No newline at end of file + return hubert_model diff --git a/infer/lib/jit/get_rmvpe.py b/infer/lib/jit/get_rmvpe.py index 3c2ed7e..e71c39f 100644 --- a/infer/lib/jit/get_rmvpe.py +++ b/infer/lib/jit/get_rmvpe.py @@ -1,13 +1,12 @@ - import torch - -def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt",device=torch.device("cpu")): +def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt", device=torch.device("cpu")): from infer.lib.rmvpe import E2E + model = E2E(4, 1, (2, 2)) ckpt = torch.load(model_path, map_location=device) model.load_state_dict(ckpt) model.eval() model = model.to(device) - return model \ No newline at end of file + return model diff --git a/infer/lib/jit/get_synthesizer.py b/infer/lib/jit/get_synthesizer.py index 70458f0..79bb8cf 100644 --- a/infer/lib/jit/get_synthesizer.py +++ b/infer/lib/jit/get_synthesizer.py @@ -1,8 +1,14 @@ import torch -def get_synthesizer(pth_path,device=torch.device("cpu")): - from infer.lib.infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono +def get_synthesizer(pth_path, device=torch.device("cpu")): + from infer.lib.infer_pack.models import ( + SynthesizerTrnMs256NSFsid, + SynthesizerTrnMs256NSFsid_nono, + SynthesizerTrnMs768NSFsid, + SynthesizerTrnMs768NSFsid_nono, + ) + cpt = torch.load(pth_path, map_location=torch.device("cpu")) tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] @@ -10,26 +16,22 @@ def get_synthesizer(pth_path,device=torch.device("cpu")): version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=False - ) + net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=False) else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) elif version == "v2": if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=False - ) + net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=False) else: net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) del net_g.enc_q net_g.forward = net_g.infer - ckpt={} - ckpt["config"]=cpt["config"] + ckpt = {} + ckpt["config"] = cpt["config"] ckpt["f0"] = if_f0 - ckpt["version"]=version - ckpt["info"]=cpt.get("info","0epoch") + ckpt["version"] = version + ckpt["info"] = cpt.get("info", "0epoch") net_g.load_state_dict(cpt["weight"], strict=False) - net_g=net_g.float() + net_g = net_g.float() net_g.eval().to(device) - return net_g,cpt \ No newline at end of file + return net_g, cpt diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index c2e4480..0e34725 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -279,11 +279,9 @@ def __init__(self, in_channels, out_channels, momentum=0.01): nn.BatchNorm2d(out_channels, momentum=momentum), nn.ReLU(), ) - self.shortcut=lambda x:x + self.shortcut = lambda x: x if in_channels != out_channels: self.shortcut = 
nn.Conv2d(in_channels, out_channels, (1, 1)) - - def forward(self, x): return self.conv(x) + self.shortcut(x) @@ -578,7 +576,7 @@ def forward(self, audio, keyshift=0, speed=1, center=True): class RMVPE: - def __init__(self, model_path:str, is_half, device=None,use_jit=False): + def __init__(self, model_path: str, is_half, device=None, use_jit=False): self.resample_kernel = {} self.resample_kernel = {} self.is_half = is_half @@ -597,29 +595,32 @@ def __init__(self, model_path:str, is_half, device=None,use_jit=False): ) self.model = ort_session else: - if str(self.device)=="cuda": - self.device=torch.device("cuda:0") + if str(self.device) == "cuda": + self.device = torch.device("cuda:0") + def get_jit_model(): - jit_model_path=model_path.rstrip(".pth") - jit_model_path+=".half.jit" if is_half else ".jit" - reload=False + jit_model_path = model_path.rstrip(".pth") + jit_model_path += ".half.jit" if is_half else ".jit" + reload = False if os.path.exists(jit_model_path): - ckpt=jit_export.load(jit_model_path) + ckpt = jit.load(jit_model_path) model_device = ckpt["device"] if model_device != str(self.device): - reload =True + reload = True else: - reload=True + reload = True if reload: - ckpt=jit_export.rmvpe_jit_export( - model_path, - "assets/rmvpe/rmvpe_inputs.pth", - save_path=jit_model_path, - device=device,is_half=is_half) - model=torch.jit.load(BytesIO(ckpt["model"]),map_location=device) + ckpt = jit.rmvpe_jit_export( + model_path, + "assets/rmvpe/rmvpe_inputs.pth", + save_path=jit_model_path, + device=device, + is_half=is_half, + ) + model = torch.jit.load(BytesIO(ckpt["model"]), map_location=device) return model - + def get_default_model(): model = E2E(4, 1, (2, 2)) ckpt = torch.load(model_path, map_location="cpu") @@ -630,10 +631,13 @@ def get_default_model(): else: model = model.float() return model + if use_jit: if is_half and "cpu" in str(self.device): - logger.warning("Use default rmvpe model. \ - Jit is not supported on the CPU for half floating point") + logger.warning( + "Use default rmvpe model. 
\ + Jit is not supported on the CPU for half floating point" + ) self.model = get_default_model() else: self.model = get_jit_model() @@ -720,7 +724,7 @@ def to_local_average_cents(self, salience, thred=0.05): # t4 = ttime() # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) return devided - + if __name__ == "__main__": import librosa diff --git a/tools/rvc_for_realtime.py b/tools/rvc_for_realtime.py index c259a7d..7da5169 100644 --- a/tools/rvc_for_realtime.py +++ b/tools/rvc_for_realtime.py @@ -85,7 +85,7 @@ def __init__( self.index = faiss.read_index(index_path) self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) logger.info("Index search enabled") - self.pth_path:str = pth_path + self.pth_path: str = pth_path self.index_path = index_path self.index_rate = index_rate @@ -129,7 +129,9 @@ def set_default_model(): else: self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) del self.net_g.enc_q - logger.debug(self.net_g.load_state_dict(cpt["weight"], strict=False)) + logger.debug( + self.net_g.load_state_dict(cpt["weight"], strict=False) + ) self.net_g.eval().to(device) if self.is_half: self.net_g = self.net_g.half() @@ -137,36 +139,42 @@ def set_default_model(): self.net_g = self.net_g.float() def set_jit_model(): - jit_pth_path=self.pth_path.rstrip(".pth") - jit_pth_path+=".half.jit" if self.is_half else ".jit" - reload=False - if str(self.device)=="cuda": + jit_pth_path = self.pth_path.rstrip(".pth") + jit_pth_path += ".half.jit" if self.is_half else ".jit" + reload = False + if str(self.device) == "cuda": self.device = torch.device("cuda:0") if os.path.exists(jit_pth_path): - cpt=jit_export.load(jit_pth_path) - model_device = cpt["device"] - if model_device != str(self.device): - reload =True + cpt = jit.load(jit_pth_path) + model_device = cpt["device"] + if model_device != str(self.device): + reload = True else: - reload=True + reload = True if reload: - cpt=jit_export.synthesizer_jit_export(self.pth_path, - "assets\Synthesizer_inputs.pth", - device=self.device,is_half=self.is_half - ) - + cpt = jit.synthesizer_jit_export( + self.pth_path, + "assets\Synthesizer_inputs.pth", + device=self.device, + is_half=self.is_half, + ) + self.tgt_sr = cpt["config"][-1] self.if_f0 = cpt.get("f0", 1) self.version = cpt.get("version", "v1") - self.net_g = torch.jit.load(BytesIO(cpt["model"]),map_location=device) + self.net_g = torch.jit.load( + BytesIO(cpt["model"]), map_location=device + ) self.net_g.infer = self.net_g.forward self.net_g.eval().to(device) if config.use_jit and not config.dml: if self.is_half and "cpu" in str(self.device): - logger.warning("Use default Synthesizer model. \ - Jit is not supported on the CPU for half floating point") + logger.warning( + "Use default Synthesizer model. 
\ + Jit is not supported on the CPU for half floating point" + ) set_default_model() else: set_jit_model() @@ -187,7 +195,6 @@ def set_jit_model(): except: logger.warning(traceback.format_exc()) - def change_key(self, new_key): self.f0_up_key = new_key @@ -321,7 +328,7 @@ def get_f0_rmvpe(self, x, f0_up_key): "assets/rmvpe/rmvpe.pt", is_half=self.is_half, device=self.device, ####正常逻辑 - use_jit=config.use_jit + use_jit=config.use_jit, ) # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device) f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) @@ -404,19 +411,18 @@ def infer( with torch.no_grad(): if self.if_f0 == 1: # print(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2) - infered_audio = ( - self.net_g.infer( - feats, p_len, cache_pitch, cache_pitchf, sid, torch.FloatTensor([rate]) - )[0][0, 0] - .data - .float() - ) + infered_audio = self.net_g.infer( + feats, + p_len, + cache_pitch, + cache_pitchf, + sid, + torch.FloatTensor([rate]), + )[0][0, 0].data.float() else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid, torch.FloatTensor([rate]))[0][0, 0] - .data - .float() - ) + infered_audio = self.net_g.infer( + feats, p_len, sid, torch.FloatTensor([rate]) + )[0][0, 0].data.float() t5 = ttime() logger.info( "Spent time: fea = %.2fs, index = %.2fs, f0 = %.2fs, model = %.2fs", From 5be7f10e6c6218611cc794993e7eb26308980d7c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 20 Sep 2023 09:56:24 +0900 Subject: [PATCH 04/26] =?UTF-8?q?=F0=9F=8E=A8=20=E5=90=8C=E6=AD=A5=20local?= =?UTF-8?q?e=20(#1242)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: github-actions[bot] --- i18n/locale/fr_FR.json | 250 ++++++++++++++++++++--------------------- 1 file changed, 125 insertions(+), 125 deletions(-) diff --git a/i18n/locale/fr_FR.json b/i18n/locale/fr_FR.json index 5991dab..1caeb0d 100644 --- a/i18n/locale/fr_FR.json +++ b/i18n/locale/fr_FR.json @@ -1,125 +1,125 @@ -{ - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >=3 : appliquer un filtrage médian aux résultats de la reconnaissance de la hauteur de récolte. La valeur représente le rayon du filtre et peut réduire la respiration.", - "A模型权重": "Poids (w) pour le modèle A :", - "A模型路径": "Chemin d'accès au modèle A :", - "B模型路径": "Chemin d'accès au modèle B :", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0 (facultatif). Une hauteur par ligne. Remplace la fréquence fondamentale par défaut et la modulation de la hauteur :", - "Index Rate": "Taux d'indexation", - "Onnx导出": "Exporter en ONNX", - "Onnx输出路径": "Chemin d'exportation ONNX :", - "RVC模型路径": "Chemin du modèle RVC :", - "ckpt处理": "Traitement des fichiers .ckpt", - "harvest进程数": "Nombre de processus CPU utilisés pour l'algorithme de reconnaissance de la hauteur (pitch) dans le cadre de la récolte (harvest).", - "index文件路径不可包含中文": "Le chemin du fichier d'index ne doit pas contenir de caractères chinois.", - "pth文件路径不可包含中文": "Le chemin du fichier .pth ne doit pas contenir de caractères chinois.", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte RMVPE : séparez les index GPU par des tirets \"-\", par exemple, 0-0-1 pour utiliser 2 processus sur GPU0 et 1 processus sur GPU1.", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration expérimentale. 
Les données expérimentales sont stockées dans le dossier 'logs', avec chaque expérience ayant un dossier distinct. Entrez manuellement le chemin du nom de l'expérience, qui contient la configuration expérimentale, les journaux et les fichiers de modèle entraînés.", - "step1:正在处理数据": "Étape 1 : Traitement des données en cours.", - "step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur et extraction des caractéristiques en cours.", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers du dossier d'entraînement qui peuvent être décodés en fichiers audio et réalisation d'une normalisation par tranches. Génère 2 dossiers wav dans le répertoire de l'expérience. Actuellement, seule la formation avec un seul chanteur/locuteur est prise en charge.", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Utilisez le CPU pour extraire la hauteur (si le modèle le permet), utilisez le GPU pour extraire les caractéristiques (sélectionnez l'index du GPU) :", - "step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et démarrez l'entraînement du modèle ainsi que l'indexation.", - "step3a:正在训练模型": "Étape 3a : L'entraînement du modèle a commencé.", - "一键训练": "Entraînement en un clic", - "也可批量输入音频文件, 二选一, 优先读文件夹": "Il est également possible d'importer plusieurs fichiers audio. Si un chemin de dossier existe, cette entrée est ignorée.", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Traitement en lot pour la séparation de la voix et de l'accompagnement vocal à l'aide du modèle UVR5.
Exemple d'un format de chemin de dossier valide : D:\\chemin\\vers\\dossier\\d'entrée (copiez-le depuis la barre d'adresse du gestionnaire de fichiers).
Le modèle est divisé en trois catégories :
1. Préserver la voix : Choisissez cette option pour l'audio sans harmonies. Elle préserve la voix mieux que HP5. Il comprend deux modèles intégrés : HP2 et HP3. HP3 peut légèrement laisser passer l'accompagnement mais préserve légèrement mieux la voix que HP2.
2. Préserver uniquement la voix principale : Choisissez cette option pour l'audio avec harmonies. Cela peut affaiblir la voix principale. Il comprend un modèle intégré : HP5.
3. Modèles de suppression de la réverbération et du délai (par FoxJoy) :
  (1) MDX-Net : Le meilleur choix pour la suppression de la réverbération stéréo, mais ne peut pas supprimer la réverbération mono.
  (234) DeEcho : Supprime les effets de délai. Le mode Aggressive supprime plus efficacement que le mode Normal. DeReverb supprime également la réverbération et peut supprimer la réverbération mono, mais pas très efficacement pour les contenus à haute fréquence fortement réverbérés.
Notes sur la suppression de la réverbération et du délai :
1. Le temps de traitement pour le modèle DeEcho-DeReverb est environ deux fois plus long que pour les autres deux modèles DeEcho.
2. Le modèle MDX-Net-Dereverb est assez lent.
3. La configuration la plus propre recommandée est d'appliquer d'abord MDX-Net, puis DeEcho-Aggressive.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Entrez le(s) index GPU séparé(s) par '-', par exemple, 0-1-2 pour utiliser les GPU 0, 1 et 2 :", - "伴奏人声分离&去混响&去回声": "Séparation des voix/accompagnement et suppression de la réverbération", - "保存名": "Nom de sauvegarde :", - "保存的文件名, 默认空为和源文件同名": "Nom du fichier de sauvegarde (par défaut : identique au nom du fichier source) :", - "保存的模型名不带后缀": "Nom du modèle enregistré (sans extension) :", - "保存频率save_every_epoch": "Fréquence de sauvegarde (save_every_epoch) :", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes sourdes et les bruits de respiration pour éviter les artefacts tels que le déchirement dans la musique électronique. Réglez à 0,5 pour désactiver. Diminuez la valeur pour renforcer la protection, mais cela peut réduire la précision de l'indexation :", - "修改": "Modifier", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pris en charge pour les petits fichiers de modèle extraits du dossier 'weights')", - "停止音频转换": "Arrêter la conversion audio", - "全流程结束!": "Toutes les étapes ont été terminées !", - "刷新音色列表和索引路径": "Actualiser la liste des voix et le vers l'index.", - "加载模型": "Charger le modèle.", - "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D :", - "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G :", - "卸载音色省显存": "Décharger la voix pour économiser la mémoire GPU.", - "变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :", - "后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. Réglez sur 0 pour ne pas effectuer de rééchantillonnage :", - "否": "Non", - "响应阈值": "Seuil de réponse", - "响度因子": "Facteur de volume sonore", - "处理数据": "Traitement des données", - "导出Onnx模型": "Exporter le modèle au format ONNX.", - "导出文件格式": "Format de fichier d'exportation", - "常见问题解答": "FAQ (Foire Aux Questions)", - "常规设置": "Paramètres généraux", - "开始音频转换": "Démarrer la conversion audio.", - "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, il n'y a pas de GPU compatible disponible pour prendre en charge votre entrainement.", - "性能设置": "Paramètres de performance", - "总训练轮数total_epoch": "Nombre total d'époques d'entraînement (total_epoch) :", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot. Entrez le dossier contenant les fichiers audio à convertir ou téléchargez plusieurs fichiers audio. Les fichiers audio convertis seront enregistrés dans le dossier spécifié (par défaut : 'opt').", - "指定输出主人声文件夹": "Spécifiez le dossier de sortie pour les fichiers de voix :", - "指定输出文件夹": "Spécifiez le dossier de sortie :", - "指定输出非主人声文件夹": "Spécifiez le dossier de sortie pour l'accompagnement :", - "推理时间(ms):": "Temps d'inférence (ms) :", - "推理音色": "Voix pour l'inférence", - "提取": "Extraire", - "提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour l'extraction de la hauteur et le traitement des données :", - "是": "Oui", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Enregistrer uniquement le dernier fichier '.ckpt' pour économiser de l'espace disque :", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Enregistrer un petit modèle final dans le dossier 'weights' à chaque point de sauvegarde :", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache tous les ensembles d'entrainement dans la mémoire GPU. 
Mettre en cache de petits ensembles de données (moins de 10 minutes) peut accélérer l'entrainement, mais mettre en cache de grands ensembles de données consommera beaucoup de mémoire GPU et peut ne pas apporter beaucoup d'amélioration de vitesse :", - "显卡信息": "Informations sur la carte graphique (GPU)", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs qui utilisent le logiciel et distribuent les sons exportés par le logiciel en sont entièrement responsables.
Si vous n'acceptez pas cette clause, vous ne pouvez pas utiliser ou faire référence à aucun code ni fichier contenu dans le package logiciel. Consultez le fichier Agreement-LICENSE.txt dans le répertoire racine pour plus de détails.", - "查看": "Voir", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Afficher les informations sur le modèle (uniquement pour les petits fichiers de modèle extraits du dossier \"weights\")", - "检索特征占比": "Rapport de recherche de caractéristiques (contrôle l'intensité de l'accent, un rapport trop élevé provoque des artefacts) :", - "模型": "Modèle", - "模型推理": "Inférence du modèle", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin d'accès au modèle du grand fichier dans le dossier \"logs\"). Cette fonction est utile si vous souhaitez arrêter l'entrainement à mi-chemin et extraire et enregistrer manuellement un petit fichier de modèle, ou si vous souhaitez tester un modèle intermédiaire :", - "模型是否带音高指导": "Indique si le modèle dispose d'un guidage en hauteur :", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Indique si le modèle dispose d'un système de guidage de la hauteur (obligatoire pour le chant, facultatif pour la parole) :", - "模型是否带音高指导,1是0否": "Le modèle dispose-t-il d'un guide de hauteur (1 : oui, 0 : non) ?", - "模型版本型号": "Version de l'architecture du modèle :", - "模型融合, 可用于测试音色融合": "Fusion de modèles, peut être utilisée pour tester la fusion de timbres", - "模型路径": "Le chemin vers le modèle :", - "每张显卡的batch_size": "Taille du batch par GPU :", - "淡入淡出长度": "Longueur de la transition", - "版本": "Version", - "特征提取": "Extraction des caractéristiques", - "特征检索库文件路径,为空则使用下拉的选择结果": "Chemin d'accès au fichier d'index des caractéristiques. Laisser vide pour utiliser le résultat sélectionné dans la liste déroulante :", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Il est recommandé d'utiliser la clé +12 pour la conversion homme-femme et la clé -12 pour la conversion femme-homme. Si la plage sonore est trop large et que la voix est déformée, vous pouvez également l'ajuster vous-même à la plage appropriée.", - "目标采样率": "Taux d'échantillonnage cible :", - "自动检测index路径,下拉式选择(dropdown)": "Détecter automatiquement le chemin d'accès à l'index et le sélectionner dans la liste déroulante :", - "融合": "Fusion", - "要改的模型信息": "Informations sur le modèle à modifier :", - "要置入的模型信息": "Informations sur le modèle à placer :", - "训练": "Entraîner", - "训练模型": "Entraîner le modèle", - "训练特征索引": "Entraîner l'index des caractéristiques", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé. Vous pouvez consulter les rapports d'entraînement dans la console ou dans le fichier 'train.log' situé dans le dossier de l'expérience.", - "请指定说话人id": "Veuillez spécifier l'ID de l'orateur ou du chanteur :", - "请选择index文件": "Veuillez sélectionner le fichier d'index", - "请选择pth文件": "Veuillez sélectionner le fichier pth", - "请选择说话人id": "Sélectionner l'ID de l'orateur ou du chanteur :", - "转换": "Convertir", - "输入实验名": "Saisissez le nom de l'expérience :", - "输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter :", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers) :", - "输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin d'accès du fichier audio à traiter (par défaut, l'exemple de format correct) :", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ajustez l'échelle de l'enveloppe de volume. Plus il est proche de 0, plus il imite le volume des voix originales. 
Cela peut aider à masquer les bruits et à rendre le volume plus naturel lorsqu'il est réglé relativement bas. Plus le volume est proche de 1, plus le volume sera fort et constant :", - "输入训练文件夹路径": "Indiquez le chemin d'accès au dossier d'entraînement :", - "输入设备": "Dispositif d'entrée", - "输入降噪": "Réduction du bruit d'entrée", - "输出信息": "Informations sur la sortie", - "输出设备": "Dispositif de sortie", - "输出降噪": "Réduction du bruit de sortie", - "输出音频(右下角三个点,点了可以下载)": "Exporter l'audio (cliquer sur les trois points dans le coin inférieur droit pour télécharger)", - "选择.index文件": "Sélectionner le fichier .index", - "选择.pth文件": "Sélectionner le fichier .pth", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Sélection de l'algorithme d'extraction de la hauteur, les voix d'entrée peuvent être accélérées avec pm, harvest a de bonnes basses mais est très lent, crepe est bon mais consomme beaucoup de ressources GPU.", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Sélectionnez l'algorithme d'extraction de la hauteur de ton (\"pm\" : extraction plus rapide mais parole de moindre qualité ; \"harvest\" : meilleure basse mais extrêmement lente ; \"crepe\" : meilleure qualité mais utilisation intensive du GPU), \"rmvpe\" : meilleure qualité et peu d'utilisation du GPU.", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Sélection de l'algorithme d'extraction de la hauteur : la chanson d'entrée peut être traitée plus rapidement par pm, avec une voix de haute qualité mais un CPU médiocre, par dio, harvest est meilleur mais plus lent, rmvpe est le meilleur, mais consomme légèrement le CPU/GPU.", - "采样长度": "Longueur de l'échantillon", - "重载设备列表": "Recharger la liste des dispositifs", - "音调设置": "Réglages de la hauteur", - "音频设备(请使用同种类驱动)": "Périphérique audio (veuillez utiliser le même type de pilote)", - "音高算法": "algorithme de détection de la hauteur", - "额外推理时长": "Temps d'inférence supplémentaire" -} \ No newline at end of file +{ + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >=3 : appliquer un filtrage médian aux résultats de la reconnaissance de la hauteur de récolte. La valeur représente le rayon du filtre et peut réduire la respiration.", + "A模型权重": "Poids (w) pour le modèle A :", + "A模型路径": "Chemin d'accès au modèle A :", + "B模型路径": "Chemin d'accès au modèle B :", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0 (facultatif). Une hauteur par ligne. Remplace la fréquence fondamentale par défaut et la modulation de la hauteur :", + "Index Rate": "Taux d'indexation", + "Onnx导出": "Exporter en ONNX", + "Onnx输出路径": "Chemin d'exportation ONNX :", + "RVC模型路径": "Chemin du modèle RVC :", + "ckpt处理": "Traitement des fichiers .ckpt", + "harvest进程数": "Nombre de processus CPU utilisés pour l'algorithme de reconnaissance de la hauteur (pitch) dans le cadre de la récolte (harvest).", + "index文件路径不可包含中文": "Le chemin du fichier d'index ne doit pas contenir de caractères chinois.", + "pth文件路径不可包含中文": "Le chemin du fichier .pth ne doit pas contenir de caractères chinois.", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte RMVPE : séparez les index GPU par des tirets \"-\", par exemple, 0-0-1 pour utiliser 2 processus sur GPU0 et 1 processus sur GPU1.", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration expérimentale. Les données expérimentales sont stockées dans le dossier 'logs', avec chaque expérience ayant un dossier distinct. 
Entrez manuellement le chemin du nom de l'expérience, qui contient la configuration expérimentale, les journaux et les fichiers de modèle entraînés.", + "step1:正在处理数据": "Étape 1 : Traitement des données en cours.", + "step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur et extraction des caractéristiques en cours.", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers du dossier d'entraînement qui peuvent être décodés en fichiers audio et réalisation d'une normalisation par tranches. Génère 2 dossiers wav dans le répertoire de l'expérience. Actuellement, seule la formation avec un seul chanteur/locuteur est prise en charge.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Utilisez le CPU pour extraire la hauteur (si le modèle le permet), utilisez le GPU pour extraire les caractéristiques (sélectionnez l'index du GPU) :", + "step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et démarrez l'entraînement du modèle ainsi que l'indexation.", + "step3a:正在训练模型": "Étape 3a : L'entraînement du modèle a commencé.", + "一键训练": "Entraînement en un clic", + "也可批量输入音频文件, 二选一, 优先读文件夹": "Il est également possible d'importer plusieurs fichiers audio. Si un chemin de dossier existe, cette entrée est ignorée.", + "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Traitement en lot pour la séparation de la voix et de l'accompagnement vocal à l'aide du modèle UVR5.
Exemple d'un format de chemin de dossier valide : D:\\chemin\\vers\\dossier\\d'entrée (copiez-le depuis la barre d'adresse du gestionnaire de fichiers).
Le modèle est divisé en trois catégories :
1. Préserver la voix : Choisissez cette option pour l'audio sans harmonies. Elle préserve la voix mieux que HP5. Il comprend deux modèles intégrés : HP2 et HP3. HP3 peut légèrement laisser passer l'accompagnement mais préserve légèrement mieux la voix que HP2.
2. Préserver uniquement la voix principale : Choisissez cette option pour l'audio avec harmonies. Cela peut affaiblir la voix principale. Il comprend un modèle intégré : HP5.
3. Modèles de suppression de la réverbération et du délai (par FoxJoy) :
  (1) MDX-Net : Le meilleur choix pour la suppression de la réverbération stéréo, mais ne peut pas supprimer la réverbération mono.
  (234) DeEcho : Supprime les effets de délai. Le mode Aggressive supprime plus efficacement que le mode Normal. DeReverb supprime également la réverbération et peut supprimer la réverbération mono, mais pas très efficacement pour les contenus à haute fréquence fortement réverbérés.
Notes sur la suppression de la réverbération et du délai :
1. Le temps de traitement pour le modèle DeEcho-DeReverb est environ deux fois plus long que pour les autres deux modèles DeEcho.
2. Le modèle MDX-Net-Dereverb est assez lent.
3. La configuration la plus propre recommandée est d'appliquer d'abord MDX-Net, puis DeEcho-Aggressive.", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Entrez le(s) index GPU séparé(s) par '-', par exemple, 0-1-2 pour utiliser les GPU 0, 1 et 2 :", + "伴奏人声分离&去混响&去回声": "Séparation des voix/accompagnement et suppression de la réverbération", + "保存名": "Nom de sauvegarde :", + "保存的文件名, 默认空为和源文件同名": "Nom du fichier de sauvegarde (par défaut : identique au nom du fichier source) :", + "保存的模型名不带后缀": "Nom du modèle enregistré (sans extension) :", + "保存频率save_every_epoch": "Fréquence de sauvegarde (save_every_epoch) :", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes sourdes et les bruits de respiration pour éviter les artefacts tels que le déchirement dans la musique électronique. Réglez à 0,5 pour désactiver. Diminuez la valeur pour renforcer la protection, mais cela peut réduire la précision de l'indexation :", + "修改": "Modifier", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pris en charge pour les petits fichiers de modèle extraits du dossier 'weights')", + "停止音频转换": "Arrêter la conversion audio", + "全流程结束!": "Toutes les étapes ont été terminées !", + "刷新音色列表和索引路径": "Actualiser la liste des voix et le vers l'index.", + "加载模型": "Charger le modèle.", + "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D :", + "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G :", + "卸载音色省显存": "Décharger la voix pour économiser la mémoire GPU.", + "变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :", + "后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. Réglez sur 0 pour ne pas effectuer de rééchantillonnage :", + "否": "Non", + "响应阈值": "Seuil de réponse", + "响度因子": "Facteur de volume sonore", + "处理数据": "Traitement des données", + "导出Onnx模型": "Exporter le modèle au format ONNX.", + "导出文件格式": "Format de fichier d'exportation", + "常见问题解答": "FAQ (Foire Aux Questions)", + "常规设置": "Paramètres généraux", + "开始音频转换": "Démarrer la conversion audio.", + "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, il n'y a pas de GPU compatible disponible pour prendre en charge votre entrainement.", + "性能设置": "Paramètres de performance", + "总训练轮数total_epoch": "Nombre total d'époques d'entraînement (total_epoch) :", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot. Entrez le dossier contenant les fichiers audio à convertir ou téléchargez plusieurs fichiers audio. Les fichiers audio convertis seront enregistrés dans le dossier spécifié (par défaut : 'opt').", + "指定输出主人声文件夹": "Spécifiez le dossier de sortie pour les fichiers de voix :", + "指定输出文件夹": "Spécifiez le dossier de sortie :", + "指定输出非主人声文件夹": "Spécifiez le dossier de sortie pour l'accompagnement :", + "推理时间(ms):": "Temps d'inférence (ms) :", + "推理音色": "Voix pour l'inférence", + "提取": "Extraire", + "提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour l'extraction de la hauteur et le traitement des données :", + "是": "Oui", + "是否仅保存最新的ckpt文件以节省硬盘空间": "Enregistrer uniquement le dernier fichier '.ckpt' pour économiser de l'espace disque :", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "Enregistrer un petit modèle final dans le dossier 'weights' à chaque point de sauvegarde :", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache tous les ensembles d'entrainement dans la mémoire GPU. 
Mettre en cache de petits ensembles de données (moins de 10 minutes) peut accélérer l'entrainement, mais mettre en cache de grands ensembles de données consommera beaucoup de mémoire GPU et peut ne pas apporter beaucoup d'amélioration de vitesse :", + "显卡信息": "Informations sur la carte graphique (GPU)", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs qui utilisent le logiciel et distribuent les sons exportés par le logiciel en sont entièrement responsables.
Si vous n'acceptez pas cette clause, vous ne pouvez pas utiliser ou faire référence à aucun code ni fichier contenu dans le package logiciel. Consultez le fichier Agreement-LICENSE.txt dans le répertoire racine pour plus de détails.", + "查看": "Voir", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Afficher les informations sur le modèle (uniquement pour les petits fichiers de modèle extraits du dossier \"weights\")", + "检索特征占比": "Rapport de recherche de caractéristiques (contrôle l'intensité de l'accent, un rapport trop élevé provoque des artefacts) :", + "模型": "Modèle", + "模型推理": "Inférence du modèle", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin d'accès au modèle du grand fichier dans le dossier \"logs\"). Cette fonction est utile si vous souhaitez arrêter l'entrainement à mi-chemin et extraire et enregistrer manuellement un petit fichier de modèle, ou si vous souhaitez tester un modèle intermédiaire :", + "模型是否带音高指导": "Indique si le modèle dispose d'un guidage en hauteur :", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Indique si le modèle dispose d'un système de guidage de la hauteur (obligatoire pour le chant, facultatif pour la parole) :", + "模型是否带音高指导,1是0否": "Le modèle dispose-t-il d'un guide de hauteur (1 : oui, 0 : non) ?", + "模型版本型号": "Version de l'architecture du modèle :", + "模型融合, 可用于测试音色融合": "Fusion de modèles, peut être utilisée pour tester la fusion de timbres", + "模型路径": "Le chemin vers le modèle :", + "每张显卡的batch_size": "Taille du batch par GPU :", + "淡入淡出长度": "Longueur de la transition", + "版本": "Version", + "特征提取": "Extraction des caractéristiques", + "特征检索库文件路径,为空则使用下拉的选择结果": "Chemin d'accès au fichier d'index des caractéristiques. Laisser vide pour utiliser le résultat sélectionné dans la liste déroulante :", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Il est recommandé d'utiliser la clé +12 pour la conversion homme-femme et la clé -12 pour la conversion femme-homme. Si la plage sonore est trop large et que la voix est déformée, vous pouvez également l'ajuster vous-même à la plage appropriée.", + "目标采样率": "Taux d'échantillonnage cible :", + "自动检测index路径,下拉式选择(dropdown)": "Détecter automatiquement le chemin d'accès à l'index et le sélectionner dans la liste déroulante :", + "融合": "Fusion", + "要改的模型信息": "Informations sur le modèle à modifier :", + "要置入的模型信息": "Informations sur le modèle à placer :", + "训练": "Entraîner", + "训练模型": "Entraîner le modèle", + "训练特征索引": "Entraîner l'index des caractéristiques", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé. Vous pouvez consulter les rapports d'entraînement dans la console ou dans le fichier 'train.log' situé dans le dossier de l'expérience.", + "请指定说话人id": "Veuillez spécifier l'ID de l'orateur ou du chanteur :", + "请选择index文件": "Veuillez sélectionner le fichier d'index", + "请选择pth文件": "Veuillez sélectionner le fichier pth", + "请选择说话人id": "Sélectionner l'ID de l'orateur ou du chanteur :", + "转换": "Convertir", + "输入实验名": "Saisissez le nom de l'expérience :", + "输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter :", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers) :", + "输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin d'accès du fichier audio à traiter (par défaut, l'exemple de format correct) :", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ajustez l'échelle de l'enveloppe de volume. Plus il est proche de 0, plus il imite le volume des voix originales. 
Cela peut aider à masquer les bruits et à rendre le volume plus naturel lorsqu'il est réglé relativement bas. Plus le volume est proche de 1, plus le volume sera fort et constant :", + "输入训练文件夹路径": "Indiquez le chemin d'accès au dossier d'entraînement :", + "输入设备": "Dispositif d'entrée", + "输入降噪": "Réduction du bruit d'entrée", + "输出信息": "Informations sur la sortie", + "输出设备": "Dispositif de sortie", + "输出降噪": "Réduction du bruit de sortie", + "输出音频(右下角三个点,点了可以下载)": "Exporter l'audio (cliquer sur les trois points dans le coin inférieur droit pour télécharger)", + "选择.index文件": "Sélectionner le fichier .index", + "选择.pth文件": "Sélectionner le fichier .pth", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Sélection de l'algorithme d'extraction de la hauteur, les voix d'entrée peuvent être accélérées avec pm, harvest a de bonnes basses mais est très lent, crepe est bon mais consomme beaucoup de ressources GPU.", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Sélectionnez l'algorithme d'extraction de la hauteur de ton (\"pm\" : extraction plus rapide mais parole de moindre qualité ; \"harvest\" : meilleure basse mais extrêmement lente ; \"crepe\" : meilleure qualité mais utilisation intensive du GPU), \"rmvpe\" : meilleure qualité et peu d'utilisation du GPU.", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Sélection de l'algorithme d'extraction de la hauteur : la chanson d'entrée peut être traitée plus rapidement par pm, avec une voix de haute qualité mais un CPU médiocre, par dio, harvest est meilleur mais plus lent, rmvpe est le meilleur, mais consomme légèrement le CPU/GPU.", + "采样长度": "Longueur de l'échantillon", + "重载设备列表": "Recharger la liste des dispositifs", + "音调设置": "Réglages de la hauteur", + "音频设备(请使用同种类驱动)": "Périphérique audio (veuillez utiliser le même type de pilote)", + "音高算法": "algorithme de détection de la hauteur", + "额外推理时长": "Temps d'inférence supplémentaire" +} From 5da29628827ce6bd14c3a80366f6bc0a7d64b945 Mon Sep 17 00:00:00 2001 From: Chengjia Jiang <46401978+ChasonJiang@users.noreply.github.com> Date: Wed, 20 Sep 2023 21:37:58 +0800 Subject: [PATCH 05/26] Fix jit load and import issue (#1282) * fix jit model loading : infer/lib/rmvpe.py * modified: assets/hubert/.gitignore move file: assets/hubert_inputs.pth -> assets/hubert/hubert_inputs.pth modified: assets/rmvpe/.gitignore move file: assets/rmvpe_inputs.pth -> assets/rmvpe/rmvpe_inputs.pth fix import: gui_v1.py --- assets/hubert/.gitignore | 1 + assets/{ => hubert}/hubert_inputs.pth | Bin assets/rmvpe/.gitignore | 1 + assets/{ => rmvpe}/rmvpe_inputs.pth | Bin gui_v1.py | 4 ++-- infer/lib/rmvpe.py | 2 +- 6 files changed, 5 insertions(+), 3 deletions(-) rename assets/{ => hubert}/hubert_inputs.pth (100%) rename assets/{ => rmvpe}/rmvpe_inputs.pth (100%) diff --git a/assets/hubert/.gitignore b/assets/hubert/.gitignore index d6b7ef3..03dfb38 100644 --- a/assets/hubert/.gitignore +++ b/assets/hubert/.gitignore @@ -1,2 +1,3 @@ * !.gitignore +!hubert_inputs.pth \ No newline at end of file diff --git a/assets/hubert_inputs.pth b/assets/hubert/hubert_inputs.pth similarity index 100% rename from assets/hubert_inputs.pth rename to assets/hubert/hubert_inputs.pth diff --git a/assets/rmvpe/.gitignore b/assets/rmvpe/.gitignore index d6b7ef3..dbb24a6 100644 --- a/assets/rmvpe/.gitignore +++ b/assets/rmvpe/.gitignore @@ -1,2 +1,3 @@ * !.gitignore +!rmvpe_inputs.pth \ No newline at end of file diff --git a/assets/rmvpe_inputs.pth b/assets/rmvpe/rmvpe_inputs.pth 
similarity index 100% rename from assets/rmvpe_inputs.pth rename to assets/rmvpe/rmvpe_inputs.pth diff --git a/gui_v1.py b/gui_v1.py index 3f2073d..3171bca 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -51,7 +51,7 @@ def run(self): from queue import Empty import librosa - from infer.lib.jit.torchgate import TorchGate + from tools.torchgate import TorchGate import numpy as np import PySimpleGUI as sg import sounddevice as sd @@ -59,7 +59,7 @@ def run(self): import torch.nn.functional as F import torchaudio.transforms as tat - import infer.lib.jit.rvc_for_realtime as rvc_for_realtime + import tools.rvc_for_realtime as rvc_for_realtime from i18n.i18n import I18nAuto i18n = I18nAuto() diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index 0e34725..91d3e1a 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -618,7 +618,7 @@ def get_jit_model(): device=device, is_half=is_half, ) - model = torch.jit.load(BytesIO(ckpt["model"]), map_location=device) + model = torch.jit.load(BytesIO(ckpt["model"]), map_location=device) return model def get_default_model(): From e9d9f242e2d67c250bae349ee6506b9ebf84e3ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Wed, 20 Sep 2023 22:48:16 +0900 Subject: [PATCH 06/26] feat(workflow): trigger on dev --- .github/workflows/genlocale.yml | 1 + .github/workflows/push_format.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/genlocale.yml b/.github/workflows/genlocale.yml index 96a29e8..7a7bf0f 100644 --- a/.github/workflows/genlocale.yml +++ b/.github/workflows/genlocale.yml @@ -3,6 +3,7 @@ on: push: branches: - main + - dev jobs: genlocale: name: genlocale diff --git a/.github/workflows/push_format.yml b/.github/workflows/push_format.yml index 0a72789..19011d3 100644 --- a/.github/workflows/push_format.yml +++ b/.github/workflows/push_format.yml @@ -4,6 +4,7 @@ on: push: branches: - main + - dev permissions: contents: write From a0ccf2cdaedd17c8e3e0c853a4f2406269ac94a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Wed, 20 Sep 2023 23:03:24 +0900 Subject: [PATCH 07/26] feat(workflow): add close-pr on non-dev branch --- .github/workflows/pull_format.yml | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pull_format.yml b/.github/workflows/pull_format.yml index 57a830d..e0d5ddf 100644 --- a/.github/workflows/pull_format.yml +++ b/.github/workflows/pull_format.yml @@ -1,13 +1,30 @@ name: pull format -on: [pull_request] - -permissions: - contents: write +on: + pull_request_target: + types: [assigned, opened, synchronize, reopened] jobs: + # This workflow closes invalid PR + close-pr: + name: closepr + # The type of runner that the job will run on + runs-on: ubuntu-latest + permissions: write-all + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + - name: Close PR if it is not pointed to dev branch + if: github.event.pull_request.base.ref != 'dev' + uses: superbrothers/close-pull-request@v3 + with: + # Optional. Post a issue comment just before closing a pull request. + comment: "Invalid PR to non-dev branch ${{ github.event.pull_request.base.ref }}." 
+ pull_format: runs-on: ${{ matrix.os }} + permissions: + contents: write strategy: matrix: From e65a8b4661d3d28e00041f282cd7da66321b961f Mon Sep 17 00:00:00 2001 From: yxlllc <33565655+yxlllc@users.noreply.github.com> Date: Thu, 21 Sep 2023 20:44:21 +0800 Subject: [PATCH 08/26] Add input wav and delay time monitor for real-time gui (#1293) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(workflow): trigger on dev * feat(workflow): add close-pr on non-dev branch * 🎨 同步 locale (#1289) Co-authored-by: github-actions[bot] * feat: edit PR template * add input wav and delay time monitor --------- Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- gui_v1.py | 86 +++++++++++++++++++++++--------- tools/torchgate/torchgate.py | 7 --- 3 files changed, 64 insertions(+), 31 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 99eafa2..a6acd35 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,7 @@ # Pull request checklist - [ ] The PR has a proper title. Use [Semantic Commit Messages](https://seesparkbox.com/foundry/semantic_commit_messages). (No more branch-name title please) -- [ ] Make sure you are requesting the right branch. +- [ ] Make sure you are requesting the right branch: `dev`. - [ ] Make sure this is ready to be merged into the relevant branch. Please don't create a PR and let it hang for a few days. - [ ] Ensure all tests are passing. - [ ] Ensure linting is passing. 
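Editor's note (not part of the patch): the gui_v1.py hunks that follow wire in the new "算法延迟(ms)" (algorithmic delay) readout. As a minimal sketch of the arithmetic the diff performs, assuming the same quantities it reads from the GUI (`stream_latency`, `block_time`, `crossfade_length`, `I_noise_reduce`); the helper name is illustrative only, not part of the code:

```python
# Sketch only: mirrors how the displayed delay is assembled in the gui_v1.py diff below.
def estimated_delay_ms(stream_latency: float, block_time: float,
                       crossfade_length: float, i_noise_reduce: bool) -> int:
    # device stream latency + one processing block + crossfade window + ~10 ms margin
    delay = stream_latency + block_time + crossfade_length + 0.01
    if i_noise_reduce:
        # input denoising needs one extra crossfade window of look-back
        delay += crossfade_length
    return int(delay * 1000)  # the GUI label shows milliseconds
```

When input noise reduction is toggled while the stream is running, the patch adjusts the same figure in place by plus or minus `crossfade_length` instead of recomputing it, and it resets `stream_latency` to -1 whenever conversion stops.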
diff --git a/gui_v1.py b/gui_v1.py index 3171bca..f245e70 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -14,7 +14,7 @@ import multiprocessing logger = logging.getLogger(__name__) - +stream_latency = -1 class Harvest(multiprocessing.Process): def __init__(self, inp_q, opt_q): @@ -100,7 +100,8 @@ class GUI: def __init__(self) -> None: self.config = GUIConfig() self.flag_vc = False - # self.device_latency=0.1 + self.function = 'vc' + self.delay_time = 0 self.launcher() def load(self): @@ -112,6 +113,10 @@ def load(self): data["harvest"] = data["f0method"] == "harvest" data["crepe"] = data["f0method"] == "crepe" data["rmvpe"] = data["f0method"] == "rmvpe" + if data["sg_input_device"] not in input_devices: + data["sg_input_device"] = input_devices[sd.default.device[0]] + if data["sg_output_device"] not in output_devices: + data["sg_output_device"] = output_devices[sd.default.device[1]] except: with open("configs/config.json", "w") as j: data = { @@ -353,6 +358,22 @@ def launcher(self): [ sg.Button(i18n("开始音频转换"), key="start_vc"), sg.Button(i18n("停止音频转换"), key="stop_vc"), + sg.Radio( + i18n("输入监听"), + "function", + key="im", + default=False, + enable_events=True, + ), + sg.Radio( + i18n("输出变声"), + "function", + key="vc", + default=True, + enable_events=True, + ), + sg.Text(i18n("算法延迟(ms):")), + sg.Text("0", key="delay_time"), sg.Text(i18n("推理时间(ms):")), sg.Text("0", key="infer_time"), ], @@ -415,9 +436,16 @@ def event_handler(self): } with open("configs/config.json", "w") as j: json.dump(settings, j) + global stream_latency + while stream_latency < 0: + time.sleep(0.01) + self.delay_time = stream_latency + values["block_time"] + values["crossfade_length"] + 0.01 + if values["I_noise_reduce"]: + self.delay_time += values["crossfade_length"] + self.window["delay_time"].update(int(self.delay_time * 1000)) if event == "stop_vc" and self.flag_vc == True: self.flag_vc = False - + stream_latency = -1 # Parameter hot update if event == "threhold": self.config.threhold = values["threhold"] @@ -435,11 +463,17 @@ def event_handler(self): self.config.f0method = event elif event == "I_noise_reduce": self.config.I_noise_reduce = values["I_noise_reduce"] + if stream_latency > 0: + self.delay_time += (1 if values["I_noise_reduce"] else -1) * values["crossfade_length"] + self.window["delay_time"].update(int(self.delay_time * 1000)) elif event == "O_noise_reduce": self.config.O_noise_reduce = values["O_noise_reduce"] + elif event in ["vc", "im"]: + self.function = event elif event != "start_vc" and self.flag_vc == True: # Other parameters do not support hot update self.flag_vc = False + stream_latency = -1 def set_values(self, values): if len(values["pth_path"].strip()) == 0: @@ -578,8 +612,9 @@ def soundinput(self): blocksize=self.block_frame, samplerate=self.config.samplerate, dtype="float32", - # latency=self.device_latency - ): + ) as stream: + global stream_latency + stream_latency = stream.latency[-1] while self.flag_vc: time.sleep(self.config.block_time) logger.debug("Audio block passed.") @@ -611,7 +646,7 @@ def audio_callback( self.block_frame_16k : ].clone() # input noise reduction and resampling - if self.config.I_noise_reduce: + if self.config.I_noise_reduce and self.function == 'vc': input_wav = self.input_wav[ -self.crossfade_frame - self.block_frame - 2 * self.zc : ] @@ -635,23 +670,28 @@ def audio_callback( self.input_wav[-self.block_frame - 2 * self.zc :] )[160:] # infer - f0_extractor_frame = self.block_frame_16k + 800 - if self.config.f0method == "rmvpe": - f0_extractor_frame = 5120 * 
((f0_extractor_frame - 1) // 5120 + 1) - 160 - infer_wav = self.rvc.infer( - self.input_wav_res, - self.input_wav_res[-f0_extractor_frame:].cpu().numpy(), - self.block_frame_16k, - self.valid_rate, - self.pitch, - self.pitchf, - self.config.f0method, - ) - infer_wav = infer_wav[ - -self.crossfade_frame - self.sola_search_frame - self.block_frame : - ] + if self.function == 'vc': + f0_extractor_frame = self.block_frame_16k + 800 + if self.config.f0method == "rmvpe": + f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160 + infer_wav = self.rvc.infer( + self.input_wav_res, + self.input_wav_res[-f0_extractor_frame:].cpu().numpy(), + self.block_frame_16k, + self.valid_rate, + self.pitch, + self.pitchf, + self.config.f0method, + ) + infer_wav = infer_wav[ + -self.crossfade_frame - self.sola_search_frame - self.block_frame : + ] + else: + infer_wav = self.input_wav[ + -self.crossfade_frame - self.sola_search_frame - self.block_frame : + ].clone() # output noise reduction - if self.config.O_noise_reduce: + if (self.config.O_noise_reduce and self.function == 'vc') or (self.config.I_noise_reduce and self.function == 'im'): self.output_buffer[: -self.block_frame] = self.output_buffer[ self.block_frame : ].clone() @@ -660,7 +700,7 @@ def audio_callback( infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0) ).squeeze(0) # volume envelop mixing - if self.config.rms_mix_rate < 1: + if self.config.rms_mix_rate < 1 and self.function == 'vc': rms1 = librosa.feature.rms( y=self.input_wav_res[-160 * infer_wav.shape[0] // self.zc :] .cpu() diff --git a/tools/torchgate/torchgate.py b/tools/torchgate/torchgate.py index 086f2ab..f95ffef 100644 --- a/tools/torchgate/torchgate.py +++ b/tools/torchgate/torchgate.py @@ -211,13 +211,6 @@ def forward( Returns: torch.Tensor: The denoised audio signal, with the same shape as the input signal. 
""" - assert x.ndim == 2 - if x.shape[-1] < self.win_length * 2: - raise Exception(f"x must be bigger than {self.win_length * 2}") - - assert xn is None or xn.ndim == 1 or xn.ndim == 2 - if xn is not None and xn.shape[-1] < self.win_length * 2: - raise Exception(f"xn must be bigger than {self.win_length * 2}") # Compute short-time Fourier transform (STFT) X = torch.stft( From 39b758254dfea771bdaf018ff09b63888350bebf Mon Sep 17 00:00:00 2001 From: Chengjia Jiang <46401978+ChasonJiang@users.noreply.github.com> Date: Thu, 21 Sep 2023 22:20:38 +0800 Subject: [PATCH 09/26] Optimize latency using scripted jit (#1291) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(workflow): trigger on dev * feat(workflow): add close-pr on non-dev branch * 🎨 同步 locale (#1289) Co-authored-by: github-actions[bot] * feat: edit PR template * Optimize-latency-using-scripted: configs/config.py Optimize-latency-using-scripted: infer/lib/infer_pack/attentions.py Optimize-latency-using-scripted: infer/lib/infer_pack/commons.py Optimize-latency-using-scripted: infer/lib/infer_pack/models.py Optimize-latency-using-scripted: infer/lib/infer_pack/modules.py Optimize-latency-using-scripted: infer/lib/jit/__init__.py Optimize-latency-using-scripted: infer/lib/jit/get_hubert.py Optimize-latency-using-scripted: infer/lib/jit/get_rmvpe.py Optimize-latency-using-scripted: infer/lib/jit/get_synthesizer.py Optimize-latency-using-scripted: infer/lib/rmvpe.py Optimize-latency-using-scripted: tools/rvc_for_realtime.py * modified: infer/lib/infer_pack/models.py * fix some bug: configs/config.py fix some bug: infer/lib/infer_pack/models.py fix some bug: infer/lib/rmvpe.py * Fixed abnormal reference of logger in multiprocessing: infer/modules/train/train.py --------- Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] --- configs/config.py | 1 - infer/lib/infer_pack/attentions.py | 113 ++++++---- infer/lib/infer_pack/commons.py | 22 +- infer/lib/infer_pack/models.py | 351 +++++++++++++++++++++-------- infer/lib/infer_pack/modules.py | 113 +++++++--- infer/lib/jit/__init__.py | 166 ++++++-------- infer/lib/jit/get_hubert.py | 110 ++++----- infer/lib/jit/get_rmvpe.py | 7 +- infer/lib/jit/get_synthesizer.py | 32 ++- infer/lib/rmvpe.py | 47 ++-- infer/modules/train/train.py | 6 +- tools/rvc_for_realtime.py | 3 +- 12 files changed, 596 insertions(+), 375 deletions(-) diff --git a/configs/config.py b/configs/config.py index 6c9620f..b40a9af 100644 --- a/configs/config.py +++ b/configs/config.py @@ -43,7 +43,6 @@ def wrapper(*args, **kwargs): class Config: def __init__(self): self.device = "cuda:0" - # Float is faster than Half, on Rtx4070 self.is_half = False self.use_jit = True self.n_cpu = 0 diff --git a/infer/lib/infer_pack/attentions.py b/infer/lib/infer_pack/attentions.py index 2b6060c..b24cb7c 100644 --- a/infer/lib/infer_pack/attentions.py +++ b/infer/lib/infer_pack/attentions.py @@ -1,5 +1,6 @@ import copy import math +from typing import Optional import numpy as np import torch @@ -22,11 +23,11 @@ def __init__( window_size=10, **kwargs ): - super().__init__() + super(Encoder,self).__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads - self.n_layers = n_layers + self.n_layers = int(n_layers) self.kernel_size = kernel_size self.p_dropout = p_dropout self.window_size = window_size @@ -61,14 
+62,16 @@ def __init__( def forward(self, x, x_mask): attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) + zippep=zip(self.attn_layers,self.norm_layers_1,\ + self.ffn_layers,self.norm_layers_2) + for attn_layers,norm_layers_1,ffn_layers,norm_layers_2 in zippep: + y = attn_layers(x, x, attn_mask) y = self.drop(y) - x = self.norm_layers_1[i](x + y) + x = norm_layers_1(x + y) - y = self.ffn_layers[i](x, x_mask) + y = ffn_layers(x, x_mask) y = self.drop(y) - x = self.norm_layers_2[i](x + y) + x = norm_layers_2(x + y) x = x * x_mask return x @@ -86,7 +89,7 @@ def __init__( proximal_init=True, **kwargs ): - super().__init__() + super(Decoder,self).__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads @@ -172,7 +175,7 @@ def __init__( proximal_bias=False, proximal_init=False, ): - super().__init__() + super(MultiHeadAttention,self).__init__() assert channels % n_heads == 0 self.channels = channels @@ -213,19 +216,20 @@ def __init__( self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) - def forward(self, x, c, attn_mask=None): + def forward(self, x:torch.Tensor, c:torch.Tensor, attn_mask:Optional[torch.Tensor]=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) - x, self.attn = self.attention(q, k, v, mask=attn_mask) + x, _= self.attention(q, k, v,mask=attn_mask) x = self.conv_o(x) return x - def attention(self, query, key, value, mask=None): + def attention(self, query:torch.Tensor, key:torch.Tensor, value:torch.Tensor, mask:Optional[torch.Tensor]=None): # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) + b, d, t_s= key.size() + t_t=query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) @@ -292,16 +296,17 @@ def _matmul_with_relative_keys(self, x, y): ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret - def _get_relative_embeddings(self, relative_embeddings, length): + def _get_relative_embeddings(self, relative_embeddings, length:int): max_relative_position = 2 * self.window_size + 1 # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) + pad_length:int = max(length - (self.window_size + 1), 0) slice_start_position = max((self.window_size + 1) - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad( relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + # commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + [0, 0, pad_length, pad_length,0,0] ) else: padded_relative_embeddings = relative_embeddings @@ -317,12 +322,17 @@ def _relative_position_to_absolute_position(self, x): """ batch, heads, length, _ = x.size() # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + x = F.pad(x, + # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]) + [0,1,0,0,0,0,0,0] + ) # Concat extra elements so to add up to shape (len+1, 2*len-1). 
x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) + x_flat, + # commons.convert_pad_shape([[0, 0], [0, 0], [0, int(length) - 1]]) + [0, int(length) - 1, 0,0,0,0] ) # Reshape and slice out the padded elements. @@ -339,15 +349,20 @@ def _absolute_position_to_relative_position(self, x): batch, heads, length, _ = x.size() # padd along column x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) + x, + # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, int(length) - 1]]) + [0, int(length) - 1,0,0,0,0,0,0] ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) + x_flat = x.view([batch, heads, int(length**2) + int(length * (length - 1))]) # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_flat = F.pad(x_flat, + # commons.convert_pad_shape([[0, 0], [0, 0], [int(length), 0]]) + [length,0,0,0,0,0] + ) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final - def _attention_bias_proximal(self, length): + def _attention_bias_proximal(self, length:int): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. @@ -367,10 +382,10 @@ def __init__( filter_channels, kernel_size, p_dropout=0.0, - activation=None, + activation:str=None, causal=False, ): - super().__init__() + super(FFN,self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.filter_channels = filter_channels @@ -378,40 +393,54 @@ def __init__( self.p_dropout = p_dropout self.activation = activation self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding + self.is_activation = True if activation=="gelu" else False + # if causal: + # self.padding = self._causal_padding + # else: + # self.padding = self._same_padding self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": + + def padding(self,x:torch.Tensor,x_mask:torch.Tensor)->torch.Tensor: + if self.causal: + padding=self._causal_padding(x * x_mask) + else: + padding=self._same_padding(x * x_mask) + return padding + + def forward(self, x:torch.Tensor, x_mask:torch.Tensor): + x = self.conv_1(self.padding(x,x_mask)) + if self.is_activation: x = x * torch.sigmoid(1.702 * x) else: x = torch.relu(x) x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) + + x = self.conv_2(self.padding(x,x_mask)) return x * x_mask def _causal_padding(self, x): if self.kernel_size == 1: return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) + pad_l:int = self.kernel_size - 1 + pad_r:int = 0 + # padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, + # commons.convert_pad_shape(padding) + [pad_l, pad_r,0,0,0,0] + ) return x def _same_padding(self, x): if self.kernel_size == 1: return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) + pad_l:int = (self.kernel_size - 1) // 2 + pad_r:int = self.kernel_size // 2 + # padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, + # 
commons.convert_pad_shape(padding) + [pad_l, pad_r,0,0,0,0] + ) return x diff --git a/infer/lib/infer_pack/commons.py b/infer/lib/infer_pack/commons.py index 7ba7d21..9b2c93f 100644 --- a/infer/lib/infer_pack/commons.py +++ b/infer/lib/infer_pack/commons.py @@ -1,3 +1,4 @@ +from typing import List, Optional import math import numpy as np @@ -16,10 +17,10 @@ def get_padding(kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape +# def convert_pad_shape(pad_shape): +# l = pad_shape[::-1] +# pad_shape = [item for sublist in l for item in sublist] +# return pad_shape def kl_divergence(m_p, logs_p, m_q, logs_q): @@ -113,10 +114,13 @@ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): return acts -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape +# def convert_pad_shape(pad_shape): +# l = pad_shape[::-1] +# pad_shape = [item for sublist in l for item in sublist] +# return pad_shape + +def convert_pad_shape(pad_shape:List[List[int]])->List[int]: + return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist() def shift_1d(x): @@ -124,7 +128,7 @@ def shift_1d(x): return x -def sequence_mask(length, max_length=None): +def sequence_mask(length:torch.Tensor, max_length:Optional[int]=None): if max_length is None: max_length = length.max() x = torch.arange(max_length, dtype=length.dtype, device=length.device) diff --git a/infer/lib/infer_pack/models.py b/infer/lib/infer_pack/models.py index ec13fb5..f861b98 100644 --- a/infer/lib/infer_pack/models.py +++ b/infer/lib/infer_pack/models.py @@ -1,5 +1,6 @@ import math import logging +from typing import Optional logger = logging.getLogger(__name__) @@ -28,25 +29,25 @@ def __init__( p_dropout, f0=True, ): - super().__init__() + super(TextEncoder256,self).__init__() self.out_channels = out_channels self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.emb_phone = nn.Linear(256, hidden_channels) self.lrelu = nn.LeakyReLU(0.1, inplace=True) if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, float(p_dropout) ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, phone, pitch, lengths): - if pitch == None: + def forward(self, phone:torch.Tensor, pitch:Optional[torch.Tensor], lengths:torch.Tensor): + if pitch is None: x = self.emb_phone(phone) else: x = self.emb_phone(phone) + self.emb_pitch(pitch) @@ -75,25 +76,25 @@ def __init__( p_dropout, f0=True, ): - super().__init__() + super(TextEncoder768,self).__init__() self.out_channels = out_channels self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.emb_phone = nn.Linear(768, hidden_channels) self.lrelu = nn.LeakyReLU(0.1, inplace=True) if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 self.encoder = attentions.Encoder( - hidden_channels, filter_channels, 
n_heads, n_layers, kernel_size, p_dropout + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, float(p_dropout) ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, phone, pitch, lengths): - if pitch == None: + def forward(self, phone:torch.Tensor, pitch:torch.Tensor, lengths:torch.Tensor): + if pitch is None: x = self.emb_phone(phone) else: x = self.emb_phone(phone) + self.emb_pitch(pitch) @@ -121,7 +122,7 @@ def __init__( n_flows=4, gin_channels=0, ): - super().__init__() + super(ResidualCouplingBlock,self).__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size @@ -145,19 +146,28 @@ def __init__( ) self.flows.append(modules.Flip()) - def forward(self, x, x_mask, g=None, reverse=False): + def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse:bool=False): if not reverse: for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) + for flow in self.flows[::-1]: + x,_ = flow.forward(x, x_mask, g=g, reverse=reverse) return x def remove_weight_norm(self): for i in range(self.n_flows): self.flows[i * 2].remove_weight_norm() + def __prepare_scriptable__(self): + for i in range(self.n_flows): + for hook in self.flows[i * 2]._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(self.flows[i * 2]) + + return self + class PosteriorEncoder(nn.Module): def __init__( @@ -170,7 +180,7 @@ def __init__( n_layers, gin_channels=0, ): - super().__init__() + super(PosteriorEncoder,self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.hidden_channels = hidden_channels @@ -189,7 +199,7 @@ def __init__( ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, x, x_lengths, g=None): + def forward(self, x:torch.Tensor, x_lengths:torch.Tensor, g:Optional[torch.Tensor]=None): x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( x.dtype ) @@ -204,6 +214,15 @@ def remove_weight_norm(self): self.enc.remove_weight_norm() + def __prepare_scriptable__(self): + for hook in self.enc._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(self.enc) + return self + + + class Generator(torch.nn.Module): def __init__( self, @@ -252,7 +271,7 @@ def __init__( if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - def forward(self, x, g=None): + def forward(self, x:torch.Tensor, g:Optional[torch.Tensor]=None): x = self.conv_pre(x) if g is not None: x = x + self.cond(g) @@ -272,6 +291,25 @@ def forward(self, x, g=None): x = torch.tanh(x) return x + + def __prepare_scriptable__(self): + for l in self.ups: + for hook in l._forward_pre_hooks.values(): + # The hook we want to remove is an instance of WeightNorm class, so + # normally we would do `if isinstance(...)` but this class is not accessible + # because of shadowing, so we check the module name directly. 
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(l) + + for l in self.resblocks: + for hook in l._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(l) + return self + def remove_weight_norm(self): for l in self.ups: @@ -293,7 +331,7 @@ class SineGen(torch.nn.Module): voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) + segment is always sin(torch.pi) or cos(0) """ def __init__( @@ -321,7 +359,7 @@ def _f02uv(self, f0): uv = uv.float() return uv - def forward(self, f0, upp): + def forward(self, f0:torch.Tensor, upp:int): """sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 @@ -333,7 +371,7 @@ def forward(self, f0, upp): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) # fundamental component f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): + for idx in range(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( idx + 2 ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic @@ -347,12 +385,12 @@ def forward(self, f0, upp): tmp_over_one *= upp tmp_over_one = F.interpolate( tmp_over_one.transpose(2, 1), - scale_factor=upp, + scale_factor=float(upp), mode="linear", align_corners=True, ).transpose(2, 1) rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" + rad_values.transpose(2, 1), scale_factor=float(upp), mode="nearest" ).transpose( 2, 1 ) ####### @@ -361,12 +399,12 @@ def forward(self, f0, upp): cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi + torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * torch.pi ) sine_waves = sine_waves * self.sine_amp uv = self._f02uv(f0) uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" + uv.transpose(2, 1), scale_factor=float(upp), mode="nearest" ).transpose(2, 1) noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) @@ -414,18 +452,19 @@ def __init__( # to merge source harmonics into a single excitation self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) self.l_tanh = torch.nn.Tanh() + # self.ddtype:int = -1 - def forward(self, x, upp=None): - if hasattr(self, "ddtype") == False: - self.ddtype = self.l_linear.weight.dtype + def forward(self, x:torch.Tensor, upp:int=1): + # if self.ddtype ==-1: + # self.ddtype = self.l_linear.weight.dtype sine_wavs, uv, _ = self.l_sin_gen(x, upp) # print(x.dtype,sine_wavs.dtype,self.l_linear.weight.dtype) # if self.is_half: # sine_wavs = sine_wavs.half() # sine_merge = self.l_tanh(self.l_linear(sine_wavs.to(x))) # print(sine_wavs.dtype,self.ddtype) - if sine_wavs.dtype != self.ddtype: - sine_wavs = sine_wavs.to(self.ddtype) + # if sine_wavs.dtype != self.l_linear.weight.dtype: + sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype) sine_merge = self.l_tanh(self.l_linear(sine_wavs)) return sine_merge, None, None # noise, uv @@ -448,7 +487,7 @@ def __init__( 
self.num_kernels = len(resblock_kernel_sizes) self.num_upsamples = len(upsample_rates) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) + self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates)) self.m_source = SourceModuleHnNSF( sampling_rate=sr, harmonic_num=0, is_half=is_half ) @@ -473,7 +512,7 @@ def __init__( ) ) if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) + stride_f0 = math.prod(upsample_rates[i + 1 :]) self.noise_convs.append( Conv1d( 1, @@ -500,27 +539,36 @@ def __init__( if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - self.upp = np.prod(upsample_rates) + self.upp = math.prod(upsample_rates) + + self.lrelu_slope = modules.LRELU_SLOPE - def forward(self, x, f0, g=None): + def forward(self, x, f0, g:Optional[torch.Tensor]=None): har_source, noi_source, uv = self.m_source(f0, self.upp) har_source = har_source.transpose(1, 2) x = self.conv_pre(x) if g is not None: x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels + # torch.jit.script() does not support direct indexing of torch modules + # That's why I wrote this + for i,(ups,noise_convs) in enumerate(zip(self.ups,self.noise_convs)): + if i 1, "Number of layers should be larger than 0." self.conv_layers = nn.ModuleList() @@ -58,7 +59,7 @@ def __init__( ) ) self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(float(p_dropout))) for _ in range(n_layers - 1): self.conv_layers.append( nn.Conv1d( @@ -89,13 +90,13 @@ class DDSConv(nn.Module): """ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() + super(DDSConv,self).__init__() self.channels = channels self.kernel_size = kernel_size self.n_layers = n_layers - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) - self.drop = nn.Dropout(p_dropout) + self.drop = nn.Dropout(float(p_dropout)) self.convs_sep = nn.ModuleList() self.convs_1x1 = nn.ModuleList() self.norms_1 = nn.ModuleList() @@ -117,7 +118,7 @@ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) - def forward(self, x, x_mask, g=None): + def forward(self, x, x_mask, g:Optional[torch.Tensor]=None): if g is not None: x = x + g for i in range(self.n_layers): @@ -149,11 +150,11 @@ def __init__( self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels - self.p_dropout = p_dropout + self.p_dropout = float(p_dropout) self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) + self.drop = nn.Dropout(float(p_dropout)) if gin_channels != 0: cond_layer = torch.nn.Conv1d( @@ -184,15 +185,15 @@ def __init__( res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) - def forward(self, x, x_mask, g=None, **kwargs): + def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None): output = torch.zeros_like(x) n_channels_tensor = 
torch.IntTensor([self.hidden_channels]) if g is not None: g = self.cond_layer(g) - for i in range(self.n_layers): - x_in = self.in_layers[i](x) + for i,(in_layer,res_skip_layer) in enumerate(zip(self.in_layers,self.res_skip_layers)): + x_in = in_layer(x) if g is not None: cond_offset = i * 2 * self.hidden_channels g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] @@ -202,7 +203,7 @@ def forward(self, x, x_mask, g=None, **kwargs): acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) acts = self.drop(acts) - res_skip_acts = self.res_skip_layers[i](acts) + res_skip_acts = res_skip_layer(acts) if i < self.n_layers - 1: res_acts = res_skip_acts[:, : self.hidden_channels, :] x = (x + res_acts) * x_mask @@ -220,6 +221,25 @@ def remove_weight_norm(self): torch.nn.utils.remove_weight_norm(l) + def __prepare_scriptable__(self): + if self.gin_channels != 0: + for hook in self.cond_layer._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: + for hook in l._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: + for hook in l._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(l) + return self + + class ResBlock1(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() @@ -294,14 +314,15 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): ] ) self.convs2.apply(init_weights) + self.lrelu_slope = LRELU_SLOPE - def forward(self, x, x_mask=None): + def forward(self, x:torch.Tensor,x_mask:Optional[torch.Tensor]=None): for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) + xt = F.leaky_relu(x, self.lrelu_slope) if x_mask is not None: xt = xt * x_mask xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) + xt = F.leaky_relu(xt, self.lrelu_slope) if x_mask is not None: xt = xt * x_mask xt = c2(xt) @@ -317,6 +338,21 @@ def remove_weight_norm(self): remove_weight_norm(l) + def __prepare_scriptable__(self): + for l in self.convs1: + for hook in l._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(l) + for l in self.convs2: + for hook in l._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(l) + return self + + + class ResBlock2(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3)): super(ResBlock2, self).__init__() @@ -345,10 +381,11 @@ def __init__(self, channels, kernel_size=3, dilation=(1, 3)): ] ) self.convs.apply(init_weights) + self.lrelu_slope = LRELU_SLOPE - def forward(self, x, x_mask=None): + def forward(self, x, x_mask:Optional[torch.Tensor]=None): for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) + xt = F.leaky_relu(x, self.lrelu_slope) if x_mask is not None: xt = xt * x_mask xt = c(xt) @@ -361,9 +398,16 @@ def remove_weight_norm(self): for l in self.convs: remove_weight_norm(l) + def __prepare_scriptable__(self): + for l in self.convs: + for hook in l._forward_pre_hooks.values(): + if hook.__module__ 
== "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(l) + return self class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): + def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse:bool=False)->Tuple[torch.Tensor,Optional[torch.Tensor]]: if not reverse: y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask logdet = torch.sum(-y, [1, 2]) @@ -374,18 +418,21 @@ def forward(self, x, x_mask, reverse=False, **kwargs): class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): + # torch.jit.script() Compiled functions \ + # can't take variable number of arguments or \ + # use keyword-only arguments with defaults + def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse:bool=False)->Tuple[torch.Tensor,Optional[torch.Tensor]]: x = torch.flip(x, [1]) if not reverse: logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) return x, logdet else: - return x + return x, torch.zeros([1],device=x.device) class ElementwiseAffine(nn.Module): def __init__(self, channels): - super().__init__() + super(ElementwiseAffine,self).__init__() self.channels = channels self.m = nn.Parameter(torch.zeros(channels, 1)) self.logs = nn.Parameter(torch.zeros(channels, 1)) @@ -414,7 +461,7 @@ def __init__( mean_only=False, ): assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() + super(ResidualCouplingLayer,self).__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size @@ -429,14 +476,14 @@ def __init__( kernel_size, dilation_rate, n_layers, - p_dropout=p_dropout, + p_dropout=float(p_dropout), gin_channels=gin_channels, ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() - def forward(self, x, x_mask, g=None, reverse=False): + def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse:bool=False): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) * x_mask h = self.enc(h, x_mask, g=g) @@ -455,11 +502,19 @@ def forward(self, x, x_mask, g=None, reverse=False): else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) - return x + return x,torch.zeros([1]) def remove_weight_norm(self): self.enc.remove_weight_norm() + + def __prepare_scriptable__(self): + for hook in self.enc._forward_pre_hooks.values(): + if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": + + torch.nn.utils.remove_weight_norm(self.enc) + return self + class ConvFlow(nn.Module): def __init__( @@ -471,7 +526,7 @@ def __init__( num_bins=10, tail_bound=5.0, ): - super().__init__() + super(ConvFlow,self).__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size @@ -488,7 +543,7 @@ def __init__( self.proj.weight.data.zero_() self.proj.bias.data.zero_() - def forward(self, x, x_mask, g=None, reverse=False): + def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse=False): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) h = self.convs(h, x_mask, g=g) diff --git a/infer/lib/jit/__init__.py b/infer/lib/jit/__init__.py index 2d3332d..943fd16 100644 --- a/infer/lib/jit/__init__.py +++ b/infer/lib/jit/__init__.py @@ -5,143 +5,117 @@ from tqdm import tqdm from collections import OrderedDict - -def 
load_inputs(path, device, is_half=False): - parm = torch.load(path, map_location=torch.device("cpu")) +def load_inputs(path,device,is_half=False): + parm=torch.load(path,map_location=torch.device("cpu")) for key in parm.keys(): parm[key] = parm[key].to(device) if is_half and parm[key].dtype == torch.float32: - parm[key] = parm[key].half() + parm[key] = parm[key].half() elif not is_half and parm[key].dtype == torch.float16: - parm[key] = parm[key].float() + parm[key] = parm[key].float() return parm - -def benchmark( - model, inputs_path, device=torch.device("cpu"), epoch=1000, is_half=False -): - parm = load_inputs(inputs_path, device, is_half) +def benchmark(model,inputs_path,device=torch.device("cpu"),epoch=1000,is_half=False): + parm=load_inputs(inputs_path,device,is_half) total_ts = 0.0 - bar = tqdm(range(epoch)) + bar=tqdm(range(epoch)) for i in bar: - start_time = time.perf_counter() - o = model(**parm) - total_ts += time.perf_counter() - start_time + start_time=time.perf_counter() + o=model(**parm) + total_ts+=time.perf_counter()-start_time print(f"num_epoch: {epoch} | avg time(ms): {(total_ts*1000)/epoch}") +def jit_warm_up(model,inputs_path,device=torch.device("cpu"),epoch=5,is_half=False): + benchmark(model,inputs_path,device,epoch=epoch,is_half=is_half) -def jit_warm_up(model, inputs_path, device=torch.device("cpu"), epoch=5, is_half=False): - benchmark(model, inputs_path, device, epoch=epoch, is_half=is_half) - - -def to_jit_model( - model_path, - model_type: str, - inputs_path: str, - device=torch.device("cpu"), - is_half=False, -): - model = None - if model_type.lower() == "synthesizer": - from infer.lib.jit.get_synthesizer import get_synthesizer - - model, _ = get_synthesizer(model_path, device) +def to_jit_model(model_path,model_type:str,mode:str="trace",inputs_path:str=None,device=torch.device("cpu"),is_half=False): + model=None + if model_type.lower()=="synthesizer": + from .get_synthesizer import get_synthesizer + model,_=get_synthesizer(model_path,device) model.forward = model.infer - elif model_type.lower() == "rmvpe": - from infer.lib.jit.get_rmvpe import get_rmvpe - - model = get_rmvpe(model_path, device) - elif model_type.lower() == "hubert": - from infer.lib.jit.get_hubert import get_hubert_model - - model = get_hubert_model(model_path, device) + elif model_type.lower()=="rmvpe": + from .get_rmvpe import get_rmvpe + model=get_rmvpe(model_path,device) + elif model_type.lower()=="hubert": + from .get_hubert import get_hubert_model + model=get_hubert_model(model_path,device) model.forward = model.infer else: raise ValueError(f"No model type named {model_type}") model = model.eval() model = model.half() if is_half else model.float() - inputs = load_inputs(inputs_path, device, is_half) - model_jit = torch.jit.trace(model, example_kwarg_inputs=inputs) + if mode=="trace": + assert not inputs_path + inputs = load_inputs(inputs_path,device,is_half) + model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) + elif mode=="script": + model_jit=torch.jit.script(model) model_jit.to(device) model_jit = model_jit.half() if is_half else model_jit.float() # model = model.half() if is_half else model.float() - return (model, model_jit) + return (model,model_jit) - -def export( - model: torch.nn.Module, - inputs: dict, - device=torch.device("cpu"), - is_half: bool = False, -) -> dict: +def export(model:torch.nn.Module,mode:str="trace",inputs:dict=None,device=torch.device("cpu"),is_half:bool=False)->dict: model = model.half() if is_half else model.float() model.eval() - model_jit = 
torch.jit.trace(model, example_kwarg_inputs=inputs) + if mode=="trace": + assert inputs is not None + model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) + elif mode=="script": + model_jit=torch.jit.script(model) model_jit.to(device) model_jit = model_jit.half() if is_half else model_jit.float() buffer = BytesIO() - model_jit = model_jit.cpu() - torch.jit.save(model_jit, buffer) + # model_jit=model_jit.cpu() + torch.jit.save(model_jit,buffer) del model_jit - cpt = OrderedDict() - cpt["model"] = buffer.getvalue() - cpt["is_half"] = is_half + cpt=OrderedDict() + cpt["model"]=buffer.getvalue() + cpt["is_half"]=is_half return cpt - -def load(path: str): - with open(path, "rb") as f: +def load(path:str): + with open(path,"rb") as f: return pickle.load(f) - -def save(ckpt: dict, save_path: str): - with open(save_path, "wb") as f: - pickle.dump(ckpt, f) +def save(ckpt:dict, save_path:str): + with open(save_path,"wb") as f: + pickle.dump(ckpt,f) -def rmvpe_jit_export( - model_path: str, - inputs_path: str, - save_path: str = None, - device=torch.device("cpu"), - is_half=False, -): +def rmvpe_jit_export(model_path:str,mode:str="script",inputs_path:str=None,save_path:str=None,device=torch.device("cpu"),is_half=False): if not save_path: - save_path = model_path.rstrip(".pth") - save_path += ".half.jit" if is_half else ".jit" + save_path=model_path.rstrip(".pth") + save_path+=".half.jit" if is_half else ".jit" if "cuda" in str(device) and ":" not in str(device): - device = torch.device("cuda:0") - from infer.lib.jit.get_rmvpe import get_rmvpe - - model = get_rmvpe(model_path, device) - inputs = load_inputs(inputs_path, device, is_half) - ckpt = export(model, inputs, device, is_half) - ckpt["device"] = str(device) - save(ckpt, save_path) + device = torch.device("cuda:0" ) + from .get_rmvpe import get_rmvpe + model = get_rmvpe(model_path,device) + inputs =None + if mode=="trace": + inputs =load_inputs(inputs_path,device,is_half) + ckpt = export(model,mode,inputs,device,is_half) + ckpt["device"]=str(device) + save(ckpt,save_path) return ckpt - -def synthesizer_jit_export( - model_path: str, - inputs_path: str, - save_path: str = None, - device=torch.device("cpu"), - is_half=False, -): +def synthesizer_jit_export(model_path:str,mode:str="script",inputs_path:str=None,save_path:str=None,device=torch.device("cpu"),is_half=False): if not save_path: - save_path = model_path.rstrip(".pth") - save_path += ".half.jit" if is_half else ".jit" + save_path=model_path.rstrip(".pth") + save_path+=".half.jit" if is_half else ".jit" if "cuda" in str(device) and ":" not in str(device): - device = torch.device("cuda:0") - from infer.lib.jit.get_synthesizer import get_synthesizer - - model, cpt = get_synthesizer(model_path, device) - assert isinstance(cpt, dict) + device = torch.device("cuda:0" ) + from .get_synthesizer import get_synthesizer + model,cpt=get_synthesizer(model_path,device) + assert isinstance(cpt,dict) model.forward = model.infer - inputs = load_inputs(inputs_path, device, is_half) - ckpt = export(model, inputs, device, is_half) + inputs =None + if mode=="trace": + inputs =load_inputs(inputs_path,device,is_half) + ckpt = export(model,mode,inputs,device,is_half) cpt.pop("weight") cpt["model"] = ckpt["model"] cpt["device"] = device - save(cpt, save_path) + save(cpt,save_path) return cpt diff --git a/infer/lib/jit/get_hubert.py b/infer/lib/jit/get_hubert.py index aec7132..1194be4 100644 --- a/infer/lib/jit/get_hubert.py +++ b/infer/lib/jit/get_hubert.py @@ -1,15 +1,12 @@ import math import random -from 
typing import Optional, Tuple +from typing import Optional,Tuple from fairseq.checkpoint_utils import load_model_ensemble_and_task import numpy as np import torch import torch.nn.functional as F - # from fairseq.data.data_utils import compute_mask_indices from fairseq.utils import index_put - - # @torch.jit.script def pad_to_multiple(x, multiple, dim=-1, value=0): # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41 @@ -18,7 +15,7 @@ def pad_to_multiple(x, multiple, dim=-1, value=0): tsz = x.size(dim) m = tsz / multiple remainder = math.ceil(m) * multiple - tsz - if int(tsz % multiple) == 0: + if int(tsz % multiple)==0: return x, 0 pad_offset = (0,) * (-1 - dim) * 2 @@ -32,6 +29,7 @@ def extract_features( tgt_layer=None, min_layer=0, ): + if padding_mask is not None: x = index_put(x, padding_mask, 0) @@ -43,7 +41,9 @@ def extract_features( x = self.layer_norm(x) # pad to the sequence length dimension - x, pad_length = pad_to_multiple(x, self.required_seq_len_multiple, dim=-2, value=0) + x, pad_length = pad_to_multiple( + x, self.required_seq_len_multiple, dim=-2, value=0 + ) if pad_length > 0 and padding_mask is None: padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool) padding_mask[:, -pad_length:] = True @@ -91,7 +91,6 @@ def undo_pad(a, b, c): return x, layer_results - def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], @@ -142,7 +141,10 @@ def compute_mask_indices( for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() - num_mask = int(mask_prob * sz / float(mask_length) + np.random.rand()) + num_mask = int( + mask_prob * sz / float(mask_length) + + np.random.rand() + ) num_mask = max(min_masks, num_mask) else: sz = all_sz @@ -151,7 +153,7 @@ def compute_mask_indices( if mask_type == "static": lengths = torch.full([num_mask], mask_length) elif mask_type == "uniform": - lengths = torch.randint(mask_other, mask_length * 2 + 1, size=[num_mask]) + lengths = torch.randint(mask_other, mask_length*2+1, size=[num_mask]) elif mask_type == "normal": lengths = torch.normal(mask_length, mask_other, size=[num_mask]) lengths = [max(1, int(round(x))) for x in lengths] @@ -165,7 +167,7 @@ def compute_mask_indices( mask_idc = [] def arrange(s, e, length, keep_length): - span_start = torch.randint(low=s, high=e - length, size=[1]).item() + span_start = torch.randint(low=s, high=e - length,size=[1]).item() mask_idc.extend(span_start + i for i in range(length)) new_parts = [] @@ -174,17 +176,17 @@ def arrange(s, e, length, keep_length): if e - span_start - length - min_space > keep_length: new_parts.append((span_start + length + min_space, e)) return new_parts - + parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): - t = [e - s if e - s >= length + min_space else 0 for s, e in parts] - lens = torch.asarray(t, dtype=torch.int) + t=[e - s if e - s >= length + min_space else 0 for s, e in parts] + lens=torch.asarray(t,dtype=torch.int) l_sum = torch.sum(lens) if l_sum == 0: break probs = lens / torch.sum(lens) - c = torch.multinomial(probs.float(), len(parts)).item() + c = torch.multinomial(probs.float(),len(parts)).item() s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = torch.asarray(mask_idc) @@ -192,9 +194,7 @@ def arrange(s, e, length, keep_length): min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 - mask_idc = torch.asarray( - random.sample([i for i in 
range(sz - min_len)], num_mask) - ) + mask_idc=torch.asarray(random.sample([i for i in range(sz - min_len)],num_mask)) mask_idc = torch.asarray( [ mask_idc[j] + offset @@ -207,23 +207,18 @@ def arrange(s, e, length, keep_length): min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): - if isinstance(mask_idc, torch.Tensor): - mask_idc = torch.asarray(mask_idc, dtype=torch.float) + if isinstance(mask_idc,torch.Tensor): + mask_idc=torch.asarray(mask_idc,dtype=torch.float) if len(mask_idc) > min_len and require_same_masks: - mask_idc = torch.asarray( - random.sample([i for i in range(mask_idc)], min_len) - ) + mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],min_len)) if mask_dropout > 0: num_holes = int(round(len(mask_idc) * mask_dropout)) - mask_idc = torch.asarray( - random.sample([i for i in range(mask_idc)], len(mask_idc) - num_holes) - ) + mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],len(mask_idc) - num_holes)) mask[i, mask_idc.int()] = True return mask - def apply_mask(self, x, padding_mask, target_list): B, T, C = x.shape torch.zeros_like(x) @@ -256,45 +251,40 @@ def apply_mask(self, x, padding_mask, target_list): min_space=self.mask_channel_min_space, ) mask_channel_indices = ( - mask_channel_indices.to(x.device).unsqueeze(1).expand(-1, T, -1) + mask_channel_indices + .to(x.device) + .unsqueeze(1) + .expand(-1, T, -1) ) x[mask_channel_indices] = 0 return x, mask_indices -def get_hubert_model( - model_path="assets/hubert/hubert_base.pt", device=torch.device("cpu") -): +def get_hubert_model(model_path="assets/hubert/hubert_base.pt",device=torch.device("cpu")): models, _, _ = load_model_ensemble_and_task( - [model_path], - suffix="", - ) + [model_path], + suffix="", + ) hubert_model = models[0] hubert_model = hubert_model.to(device) - def _apply_mask(x, padding_mask, target_list): - return apply_mask(hubert_model, x, padding_mask, target_list) - + return apply_mask(hubert_model,x, padding_mask, target_list) hubert_model.apply_mask = _apply_mask - def _extract_features( - x, + def _extract_features(x, padding_mask=None, tgt_layer=None, min_layer=0, ): - return extract_features( - hubert_model.encoder, - x, - padding_mask=padding_mask, - tgt_layer=tgt_layer, - min_layer=min_layer, - ) - + return extract_features(hubert_model.encoder,x, + padding_mask=padding_mask, + tgt_layer=tgt_layer, + min_layer=min_layer, + ) hubert_model.encoder.extract_features = _extract_features - hubert_model._forward = hubert_model.forward + hubert_model._forward=hubert_model.forward def hubert_extract_features( self, @@ -313,7 +303,7 @@ def hubert_extract_features( ) feature = res["features"] if ret_conv else res["x"] return feature, res["padding_mask"] - + def _hubert_extract_features( source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, @@ -321,22 +311,20 @@ def _hubert_extract_features( ret_conv: bool = False, output_layer: Optional[int] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: - return hubert_extract_features( - hubert_model, source, padding_mask, mask, ret_conv, output_layer - ) - + return hubert_extract_features(hubert_model,source,padding_mask,mask,ret_conv,output_layer) hubert_model.extract_features = _hubert_extract_features - - def infer(source, padding_mask, output_layer: torch.Tensor): - output_layer = output_layer.item() - logits = hubert_model.extract_features( - source=source, padding_mask=padding_mask, output_layer=output_layer - ) - feats = hubert_model.final_proj(logits[0]) if output_layer == 9 else 
logits[0] + + def infer(source,padding_mask,output_layer:torch.Tensor): + output_layer=output_layer.item() + logits = hubert_model.extract_features(source=source,padding_mask=padding_mask,output_layer=output_layer) + feats = ( + hubert_model.final_proj(logits[0]) if output_layer == 9 else logits[0] + ) return feats - hubert_model.infer = infer + + hubert_model.infer=infer # hubert_model.forward=infer # hubert_model.forward - return hubert_model + return hubert_model \ No newline at end of file diff --git a/infer/lib/jit/get_rmvpe.py b/infer/lib/jit/get_rmvpe.py index e71c39f..3c2ed7e 100644 --- a/infer/lib/jit/get_rmvpe.py +++ b/infer/lib/jit/get_rmvpe.py @@ -1,12 +1,13 @@ + import torch -def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt", device=torch.device("cpu")): - from infer.lib.rmvpe import E2E +def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt",device=torch.device("cpu")): + from infer.lib.rmvpe import E2E model = E2E(4, 1, (2, 2)) ckpt = torch.load(model_path, map_location=device) model.load_state_dict(ckpt) model.eval() model = model.to(device) - return model + return model \ No newline at end of file diff --git a/infer/lib/jit/get_synthesizer.py b/infer/lib/jit/get_synthesizer.py index 79bb8cf..1498b64 100644 --- a/infer/lib/jit/get_synthesizer.py +++ b/infer/lib/jit/get_synthesizer.py @@ -1,14 +1,8 @@ import torch -def get_synthesizer(pth_path, device=torch.device("cpu")): - from infer.lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, - ) - +def get_synthesizer(pth_path,device=torch.device("cpu")): + from infer.lib.infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono cpt = torch.load(pth_path, map_location=torch.device("cpu")) tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] @@ -16,22 +10,26 @@ def get_synthesizer(pth_path, device=torch.device("cpu")): version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=False) + net_g = SynthesizerTrnMs256NSFsid( + *cpt["config"], is_half=False + ) else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) elif version == "v2": if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=False) + net_g = SynthesizerTrnMs768NSFsid( + *cpt["config"], is_half=False + ) else: net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) del net_g.enc_q - net_g.forward = net_g.infer - ckpt = {} - ckpt["config"] = cpt["config"] + # net_g.forward = net_g.infer + ckpt={} + ckpt["config"]=cpt["config"] ckpt["f0"] = if_f0 - ckpt["version"] = version - ckpt["info"] = cpt.get("info", "0epoch") + ckpt["version"]=version + ckpt["info"]=cpt.get("info","0epoch") net_g.load_state_dict(cpt["weight"], strict=False) - net_g = net_g.float() + net_g=net_g.float() net_g.eval().to(device) - return net_g, cpt + return net_g,cpt \ No newline at end of file diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index 91d3e1a..5d1565b 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -1,5 +1,6 @@ from io import BytesIO import os +from typing import List, Optional, Tuple import numpy as np import torch @@ -279,12 +280,15 @@ def __init__(self, in_channels, out_channels, momentum=0.01): nn.BatchNorm2d(out_channels, momentum=momentum), nn.ReLU(), ) - self.shortcut = lambda x: x + # self.shortcut:Optional[nn.Module] = None if in_channels != out_channels: 
self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - def forward(self, x): - return self.conv(x) + self.shortcut(x) + def forward(self, x:torch.Tensor): + if not hasattr(self,"shortcut"): + return self.conv(x) + x + else: + return self.conv(x) + self.shortcut(x) class Encoder(nn.Module): @@ -316,12 +320,12 @@ def __init__( self.out_size = in_size self.out_channel = out_channels - def forward(self, x): - concat_tensors = [] + def forward(self, x:torch.Tensor): + concat_tensors:List[torch.Tensor]= [] x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) + for i, layer in enumerate(self.layers): + t, x = layer(x) + concat_tensors.append(t) return x, concat_tensors @@ -340,8 +344,8 @@ def __init__( self.pool = nn.AvgPool2d(kernel_size=kernel_size) def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) + for i,conv in enumerate(self.conv): + x = conv(x) if self.kernel_size is not None: return x, self.pool(x) else: @@ -362,8 +366,8 @@ def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01) ) def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) + for i, layer in enumerate(self.layers): + x = layer(x) return x @@ -393,8 +397,8 @@ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01) def forward(self, x, concat_tensor): x = self.conv1(x) x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) + for i,conv2 in enumerate(self.conv2): + x = conv2(x) return x @@ -410,9 +414,9 @@ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): ) in_channels = out_channels - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) + def forward(self, x:torch.Tensor, concat_tensors:List[torch.Tensor]): + for i,layer in enumerate(self.layers): + x = layer(x, concat_tensors[-1 - i]) return x @@ -440,7 +444,7 @@ def __init__( self.encoder.out_channel, en_de_layers, kernel_size, n_blocks ) - def forward(self, x): + def forward(self, x:torch.Tensor)->torch.Tensor: x, concat_tensors = self.encoder(x) x = self.intermediate(x) x = self.decoder(x, concat_tensors) @@ -612,8 +616,9 @@ def get_jit_model(): if reload: ckpt = jit.rmvpe_jit_export( - model_path, - "assets/rmvpe/rmvpe_inputs.pth", + model_path=model_path, + mode="script", + inputs_path=None, save_path=jit_model_path, device=device, is_half=is_half, @@ -662,7 +667,7 @@ def mel2hidden(self, mel): input_feed={onnx_input_name: mel.cpu().numpy()}, )[0] else: - # mel = mel.half() if self.is_half else mel.float() + mel = mel.half() if self.is_half else mel.float() hidden = self.model(mel) return hidden[:, :n_frames] diff --git a/infer/modules/train/train.py b/infer/modules/train/train.py index 763ad06..11f3e97 100644 --- a/infer/modules/train/train.py +++ b/infer/modules/train/train.py @@ -104,6 +104,7 @@ def main(): os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) children = [] + logger = utils.get_logger(hps.model_dir) for i in range(n_gpus): subproc = mp.Process( target=run, @@ -111,6 +112,7 @@ def main(): i, n_gpus, hps, + logger ), ) children.append(subproc) @@ -120,10 +122,10 @@ def main(): children[i].join() -def run(rank, n_gpus, hps): +def run(rank, n_gpus, hps, logger:logging.Logger): global global_step if rank == 0: - logger = utils.get_logger(hps.model_dir) + # logger = utils.get_logger(hps.model_dir) logger.info(hps) # 
utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) diff --git a/tools/rvc_for_realtime.py b/tools/rvc_for_realtime.py index 7da5169..f827d4e 100644 --- a/tools/rvc_for_realtime.py +++ b/tools/rvc_for_realtime.py @@ -155,7 +155,8 @@ def set_jit_model(): if reload: cpt = jit.synthesizer_jit_export( self.pth_path, - "assets\Synthesizer_inputs.pth", + "script", + None, device=self.device, is_half=self.is_half, ) From 1b7aa52703b64424cc76fc070e64dd68934fb79a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 23:22:19 +0900 Subject: [PATCH 10/26] Format code (#1298) Co-authored-by: github-actions[bot] --- gui_v1.py | 32 ++-- infer/lib/infer_pack/attentions.py | 113 +++++++------ infer/lib/infer_pack/commons.py | 5 +- infer/lib/infer_pack/models.py | 257 ++++++++++++++++++----------- infer/lib/infer_pack/modules.py | 109 ++++++++---- infer/lib/jit/__init__.py | 166 ++++++++++++------- infer/lib/jit/get_hubert.py | 110 ++++++------ infer/lib/jit/get_rmvpe.py | 7 +- infer/lib/jit/get_synthesizer.py | 30 ++-- infer/lib/rmvpe.py | 18 +- infer/modules/train/train.py | 9 +- 11 files changed, 515 insertions(+), 341 deletions(-) diff --git a/gui_v1.py b/gui_v1.py index f245e70..2767d2c 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -16,6 +16,7 @@ logger = logging.getLogger(__name__) stream_latency = -1 + class Harvest(multiprocessing.Process): def __init__(self, inp_q, opt_q): multiprocessing.Process.__init__(self) @@ -100,7 +101,7 @@ class GUI: def __init__(self) -> None: self.config = GUIConfig() self.flag_vc = False - self.function = 'vc' + self.function = "vc" self.delay_time = 0 self.launcher() @@ -116,7 +117,7 @@ def load(self): if data["sg_input_device"] not in input_devices: data["sg_input_device"] = input_devices[sd.default.device[0]] if data["sg_output_device"] not in output_devices: - data["sg_output_device"] = output_devices[sd.default.device[1]] + data["sg_output_device"] = output_devices[sd.default.device[1]] except: with open("configs/config.json", "w") as j: data = { @@ -364,7 +365,7 @@ def launcher(self): key="im", default=False, enable_events=True, - ), + ), sg.Radio( i18n("输出变声"), "function", @@ -439,7 +440,12 @@ def event_handler(self): global stream_latency while stream_latency < 0: time.sleep(0.01) - self.delay_time = stream_latency + values["block_time"] + values["crossfade_length"] + 0.01 + self.delay_time = ( + stream_latency + + values["block_time"] + + values["crossfade_length"] + + 0.01 + ) if values["I_noise_reduce"]: self.delay_time += values["crossfade_length"] self.window["delay_time"].update(int(self.delay_time * 1000)) @@ -464,7 +470,9 @@ def event_handler(self): elif event == "I_noise_reduce": self.config.I_noise_reduce = values["I_noise_reduce"] if stream_latency > 0: - self.delay_time += (1 if values["I_noise_reduce"] else -1) * values["crossfade_length"] + self.delay_time += ( + 1 if values["I_noise_reduce"] else -1 + ) * values["crossfade_length"] self.window["delay_time"].update(int(self.delay_time * 1000)) elif event == "O_noise_reduce": self.config.O_noise_reduce = values["O_noise_reduce"] @@ -646,7 +654,7 @@ def audio_callback( self.block_frame_16k : ].clone() # input noise reduction and resampling - if self.config.I_noise_reduce and self.function == 'vc': + if self.config.I_noise_reduce and self.function == "vc": input_wav = self.input_wav[ -self.crossfade_frame - self.block_frame - 2 * self.zc : ] @@ -670,10 +678,12 @@ def audio_callback( 
self.input_wav[-self.block_frame - 2 * self.zc :] )[160:] # infer - if self.function == 'vc': + if self.function == "vc": f0_extractor_frame = self.block_frame_16k + 800 if self.config.f0method == "rmvpe": - f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160 + f0_extractor_frame = ( + 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160 + ) infer_wav = self.rvc.infer( self.input_wav_res, self.input_wav_res[-f0_extractor_frame:].cpu().numpy(), @@ -691,7 +701,9 @@ def audio_callback( -self.crossfade_frame - self.sola_search_frame - self.block_frame : ].clone() # output noise reduction - if (self.config.O_noise_reduce and self.function == 'vc') or (self.config.I_noise_reduce and self.function == 'im'): + if (self.config.O_noise_reduce and self.function == "vc") or ( + self.config.I_noise_reduce and self.function == "im" + ): self.output_buffer[: -self.block_frame] = self.output_buffer[ self.block_frame : ].clone() @@ -700,7 +712,7 @@ def audio_callback( infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0) ).squeeze(0) # volume envelop mixing - if self.config.rms_mix_rate < 1 and self.function == 'vc': + if self.config.rms_mix_rate < 1 and self.function == "vc": rms1 = librosa.feature.rms( y=self.input_wav_res[-160 * infer_wav.shape[0] // self.zc :] .cpu() diff --git a/infer/lib/infer_pack/attentions.py b/infer/lib/infer_pack/attentions.py index b24cb7c..2cc745a 100644 --- a/infer/lib/infer_pack/attentions.py +++ b/infer/lib/infer_pack/attentions.py @@ -23,7 +23,7 @@ def __init__( window_size=10, **kwargs ): - super(Encoder,self).__init__() + super(Encoder, self).__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads @@ -62,9 +62,10 @@ def __init__( def forward(self, x, x_mask): attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask - zippep=zip(self.attn_layers,self.norm_layers_1,\ - self.ffn_layers,self.norm_layers_2) - for attn_layers,norm_layers_1,ffn_layers,norm_layers_2 in zippep: + zippep = zip( + self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2 + ) + for attn_layers, norm_layers_1, ffn_layers, norm_layers_2 in zippep: y = attn_layers(x, x, attn_mask) y = self.drop(y) x = norm_layers_1(x + y) @@ -89,7 +90,7 @@ def __init__( proximal_init=True, **kwargs ): - super(Decoder,self).__init__() + super(Decoder, self).__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads @@ -175,7 +176,7 @@ def __init__( proximal_bias=False, proximal_init=False, ): - super(MultiHeadAttention,self).__init__() + super(MultiHeadAttention, self).__init__() assert channels % n_heads == 0 self.channels = channels @@ -216,20 +217,28 @@ def __init__( self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) - def forward(self, x:torch.Tensor, c:torch.Tensor, attn_mask:Optional[torch.Tensor]=None): + def forward( + self, x: torch.Tensor, c: torch.Tensor, attn_mask: Optional[torch.Tensor] = None + ): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) - x, _= self.attention(q, k, v,mask=attn_mask) + x, _ = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x - def attention(self, query:torch.Tensor, key:torch.Tensor, value:torch.Tensor, mask:Optional[torch.Tensor]=None): + def attention( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ): # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s= key.size() - t_t=query.size(2) + b, d, t_s 
= key.size() + t_t = query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) @@ -296,17 +305,17 @@ def _matmul_with_relative_keys(self, x, y): ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret - def _get_relative_embeddings(self, relative_embeddings, length:int): + def _get_relative_embeddings(self, relative_embeddings, length: int): max_relative_position = 2 * self.window_size + 1 # Pad first before slice to avoid using cond ops. - pad_length:int = max(length - (self.window_size + 1), 0) + pad_length: int = max(length - (self.window_size + 1), 0) slice_start_position = max((self.window_size + 1) - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad( relative_embeddings, # commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - [0, 0, pad_length, pad_length,0,0] + [0, 0, pad_length, pad_length, 0, 0], ) else: padded_relative_embeddings = relative_embeddings @@ -322,17 +331,18 @@ def _relative_position_to_absolute_position(self, x): """ batch, heads, length, _ = x.size() # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, - # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]) - [0,1,0,0,0,0,0,0] - ) + x = F.pad( + x, + # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]) + [0, 1, 0, 0, 0, 0, 0, 0], + ) # Concat extra elements so to add up to shape (len+1, 2*len-1). x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad( - x_flat, + x_flat, # commons.convert_pad_shape([[0, 0], [0, 0], [0, int(length) - 1]]) - [0, int(length) - 1, 0,0,0,0] + [0, int(length) - 1, 0, 0, 0, 0], ) # Reshape and slice out the padded elements. @@ -349,20 +359,21 @@ def _absolute_position_to_relative_position(self, x): batch, heads, length, _ = x.size() # padd along column x = F.pad( - x, + x, # commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, int(length) - 1]]) - [0, int(length) - 1,0,0,0,0,0,0] + [0, int(length) - 1, 0, 0, 0, 0, 0, 0], ) x_flat = x.view([batch, heads, int(length**2) + int(length * (length - 1))]) # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, - # commons.convert_pad_shape([[0, 0], [0, 0], [int(length), 0]]) - [length,0,0,0,0,0] - ) + x_flat = F.pad( + x_flat, + # commons.convert_pad_shape([[0, 0], [0, 0], [int(length), 0]]) + [length, 0, 0, 0, 0, 0], + ) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final - def _attention_bias_proximal(self, length:int): + def _attention_bias_proximal(self, length: int): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. 
@@ -382,10 +393,10 @@ def __init__( filter_channels, kernel_size, p_dropout=0.0, - activation:str=None, + activation: str = None, causal=False, ): - super(FFN,self).__init__() + super(FFN, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.filter_channels = filter_channels @@ -393,7 +404,7 @@ def __init__( self.p_dropout = p_dropout self.activation = activation self.causal = causal - self.is_activation = True if activation=="gelu" else False + self.is_activation = True if activation == "gelu" else False # if causal: # self.padding = self._causal_padding # else: @@ -402,45 +413,47 @@ def __init__( self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) self.drop = nn.Dropout(p_dropout) - - def padding(self,x:torch.Tensor,x_mask:torch.Tensor)->torch.Tensor: + + def padding(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor: if self.causal: - padding=self._causal_padding(x * x_mask) + padding = self._causal_padding(x * x_mask) else: - padding=self._same_padding(x * x_mask) + padding = self._same_padding(x * x_mask) return padding - - def forward(self, x:torch.Tensor, x_mask:torch.Tensor): - x = self.conv_1(self.padding(x,x_mask)) + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor): + x = self.conv_1(self.padding(x, x_mask)) if self.is_activation: x = x * torch.sigmoid(1.702 * x) else: x = torch.relu(x) x = self.drop(x) - x = self.conv_2(self.padding(x,x_mask)) + x = self.conv_2(self.padding(x, x_mask)) return x * x_mask def _causal_padding(self, x): if self.kernel_size == 1: return x - pad_l:int = self.kernel_size - 1 - pad_r:int = 0 + pad_l: int = self.kernel_size - 1 + pad_r: int = 0 # padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, - # commons.convert_pad_shape(padding) - [pad_l, pad_r,0,0,0,0] - ) + x = F.pad( + x, + # commons.convert_pad_shape(padding) + [pad_l, pad_r, 0, 0, 0, 0], + ) return x def _same_padding(self, x): if self.kernel_size == 1: return x - pad_l:int = (self.kernel_size - 1) // 2 - pad_r:int = self.kernel_size // 2 + pad_l: int = (self.kernel_size - 1) // 2 + pad_r: int = self.kernel_size // 2 # padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, - # commons.convert_pad_shape(padding) - [pad_l, pad_r,0,0,0,0] - ) + x = F.pad( + x, + # commons.convert_pad_shape(padding) + [pad_l, pad_r, 0, 0, 0, 0], + ) return x diff --git a/infer/lib/infer_pack/commons.py b/infer/lib/infer_pack/commons.py index 9b2c93f..4ec6c24 100644 --- a/infer/lib/infer_pack/commons.py +++ b/infer/lib/infer_pack/commons.py @@ -119,7 +119,8 @@ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): # pad_shape = [item for sublist in l for item in sublist] # return pad_shape -def convert_pad_shape(pad_shape:List[List[int]])->List[int]: + +def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]: return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist() @@ -128,7 +129,7 @@ def shift_1d(x): return x -def sequence_mask(length:torch.Tensor, max_length:Optional[int]=None): +def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None): if max_length is None: max_length = length.max() x = torch.arange(max_length, dtype=length.dtype, device=length.device) diff --git a/infer/lib/infer_pack/models.py b/infer/lib/infer_pack/models.py index f861b98..a60ced6 100644 --- a/infer/lib/infer_pack/models.py +++ b/infer/lib/infer_pack/models.py @@ -29,7 +29,7 @@ def __init__( p_dropout, f0=True, ): - super(TextEncoder256,self).__init__() 
+ super(TextEncoder256, self).__init__() self.out_channels = out_channels self.hidden_channels = hidden_channels self.filter_channels = filter_channels @@ -42,11 +42,18 @@ def __init__( if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, float(p_dropout) + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + float(p_dropout), ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, phone:torch.Tensor, pitch:Optional[torch.Tensor], lengths:torch.Tensor): + def forward( + self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor + ): if pitch is None: x = self.emb_phone(phone) else: @@ -76,7 +83,7 @@ def __init__( p_dropout, f0=True, ): - super(TextEncoder768,self).__init__() + super(TextEncoder768, self).__init__() self.out_channels = out_channels self.hidden_channels = hidden_channels self.filter_channels = filter_channels @@ -89,11 +96,16 @@ def __init__( if f0 == True: self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, float(p_dropout) + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + float(p_dropout), ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, phone:torch.Tensor, pitch:torch.Tensor, lengths:torch.Tensor): + def forward(self, phone: torch.Tensor, pitch: torch.Tensor, lengths: torch.Tensor): if pitch is None: x = self.emb_phone(phone) else: @@ -122,7 +134,7 @@ def __init__( n_flows=4, gin_channels=0, ): - super(ResidualCouplingBlock,self).__init__() + super(ResidualCouplingBlock, self).__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size @@ -146,13 +158,19 @@ def __init__( ) self.flows.append(modules.Flip()) - def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse:bool=False): + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ): if not reverse: for flow in self.flows: x, _ = flow(x, x_mask, g=g, reverse=reverse) else: for flow in self.flows[::-1]: - x,_ = flow.forward(x, x_mask, g=g, reverse=reverse) + x, _ = flow.forward(x, x_mask, g=g, reverse=reverse) return x def remove_weight_norm(self): @@ -162,8 +180,10 @@ def remove_weight_norm(self): def __prepare_scriptable__(self): for i in range(self.n_flows): for hook in self.flows[i * 2]._forward_pre_hooks.values(): - if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": - + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): torch.nn.utils.remove_weight_norm(self.flows[i * 2]) return self @@ -180,7 +200,7 @@ def __init__( n_layers, gin_channels=0, ): - super(PosteriorEncoder,self).__init__() + super(PosteriorEncoder, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.hidden_channels = hidden_channels @@ -199,7 +219,9 @@ def __init__( ) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - def forward(self, x:torch.Tensor, x_lengths:torch.Tensor, g:Optional[torch.Tensor]=None): + def forward( + self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None + ): x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( x.dtype 
) @@ -213,16 +235,16 @@ def forward(self, x:torch.Tensor, x_lengths:torch.Tensor, g:Optional[torch.Tenso def remove_weight_norm(self): self.enc.remove_weight_norm() - def __prepare_scriptable__(self): for hook in self.enc._forward_pre_hooks.values(): - if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": - + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): torch.nn.utils.remove_weight_norm(self.enc) return self - class Generator(torch.nn.Module): def __init__( self, @@ -271,7 +293,7 @@ def __init__( if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - def forward(self, x:torch.Tensor, g:Optional[torch.Tensor]=None): + def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None): x = self.conv_pre(x) if g is not None: x = x + self.cond(g) @@ -291,7 +313,7 @@ def forward(self, x:torch.Tensor, g:Optional[torch.Tensor]=None): x = torch.tanh(x) return x - + def __prepare_scriptable__(self): for l in self.ups: for hook in l._forward_pre_hooks.values(): @@ -299,18 +321,21 @@ def __prepare_scriptable__(self): # normally we would do `if isinstance(...)` but this class is not accessible # because of shadowing, so we check the module name directly. # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3 - if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": - + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): torch.nn.utils.remove_weight_norm(l) - for l in self.resblocks: + for l in self.resblocks: for hook in l._forward_pre_hooks.values(): - if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": - + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): torch.nn.utils.remove_weight_norm(l) return self - def remove_weight_norm(self): for l in self.ups: remove_weight_norm(l) @@ -359,7 +384,7 @@ def _f02uv(self, f0): uv = uv.float() return uv - def forward(self, f0:torch.Tensor, upp:int): + def forward(self, f0: torch.Tensor, upp: int): """sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 @@ -454,7 +479,7 @@ def __init__( self.l_tanh = torch.nn.Tanh() # self.ddtype:int = -1 - def forward(self, x:torch.Tensor, upp:int=1): + def forward(self, x: torch.Tensor, upp: int = 1): # if self.ddtype ==-1: # self.ddtype = self.l_linear.weight.dtype sine_wavs, uv, _ = self.l_sin_gen(x, upp) @@ -543,7 +568,7 @@ def __init__( self.lrelu_slope = modules.LRELU_SLOPE - def forward(self, x, f0, g:Optional[torch.Tensor]=None): + def forward(self, x, f0, g: Optional[torch.Tensor] = None): har_source, noi_source, uv = self.m_source(f0, self.upp) har_source = har_source.transpose(1, 2) x = self.conv_pre(x) @@ -551,14 +576,14 @@ def forward(self, x, f0, g:Optional[torch.Tensor]=None): x = x + self.cond(g) # torch.jit.script() does not support direct indexing of torch modules # That's why I wrote this - for i,(ups,noise_convs) in enumerate(zip(self.ups,self.noise_convs)): - if iTuple[torch.Tensor,Optional[torch.Tensor]]: + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if not reverse: y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask logdet = torch.sum(-y, 
[1, 2]) @@ -421,18 +441,24 @@ class Flip(nn.Module): # torch.jit.script() Compiled functions \ # can't take variable number of arguments or \ # use keyword-only arguments with defaults - def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse:bool=False)->Tuple[torch.Tensor,Optional[torch.Tensor]]: + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: x = torch.flip(x, [1]) if not reverse: logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) return x, logdet else: - return x, torch.zeros([1],device=x.device) + return x, torch.zeros([1], device=x.device) class ElementwiseAffine(nn.Module): def __init__(self, channels): - super(ElementwiseAffine,self).__init__() + super(ElementwiseAffine, self).__init__() self.channels = channels self.m = nn.Parameter(torch.zeros(channels, 1)) self.logs = nn.Parameter(torch.zeros(channels, 1)) @@ -461,7 +487,7 @@ def __init__( mean_only=False, ): assert channels % 2 == 0, "channels should be divisible by 2" - super(ResidualCouplingLayer,self).__init__() + super(ResidualCouplingLayer, self).__init__() self.channels = channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size @@ -483,7 +509,13 @@ def __init__( self.post.weight.data.zero_() self.post.bias.data.zero_() - def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse:bool=False): + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) * x_mask h = self.enc(h, x_mask, g=g) @@ -502,16 +534,17 @@ def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]= else: x1 = (x1 - m) * torch.exp(-logs) * x_mask x = torch.cat([x0, x1], 1) - return x,torch.zeros([1]) + return x, torch.zeros([1]) def remove_weight_norm(self): self.enc.remove_weight_norm() - def __prepare_scriptable__(self): for hook in self.enc._forward_pre_hooks.values(): - if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm": - + if ( + hook.__module__ == "torch.nn.utils.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): torch.nn.utils.remove_weight_norm(self.enc) return self @@ -526,7 +559,7 @@ def __init__( num_bins=10, tail_bound=5.0, ): - super(ConvFlow,self).__init__() + super(ConvFlow, self).__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size @@ -543,7 +576,13 @@ def __init__( self.proj.weight.data.zero_() self.proj.bias.data.zero_() - def forward(self, x:torch.Tensor, x_mask:torch.Tensor, g:Optional[torch.Tensor]=None, reverse=False): + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse=False, + ): x0, x1 = torch.split(x, [self.half_channels] * 2, 1) h = self.pre(x0) h = self.convs(h, x_mask, g=g) diff --git a/infer/lib/jit/__init__.py b/infer/lib/jit/__init__.py index 943fd16..d7f41dd 100644 --- a/infer/lib/jit/__init__.py +++ b/infer/lib/jit/__init__.py @@ -5,117 +5,159 @@ from tqdm import tqdm from collections import OrderedDict -def load_inputs(path,device,is_half=False): - parm=torch.load(path,map_location=torch.device("cpu")) + +def load_inputs(path, device, is_half=False): + parm = torch.load(path, map_location=torch.device("cpu")) for key in parm.keys(): parm[key] = 
parm[key].to(device) if is_half and parm[key].dtype == torch.float32: - parm[key] = parm[key].half() + parm[key] = parm[key].half() elif not is_half and parm[key].dtype == torch.float16: - parm[key] = parm[key].float() + parm[key] = parm[key].float() return parm -def benchmark(model,inputs_path,device=torch.device("cpu"),epoch=1000,is_half=False): - parm=load_inputs(inputs_path,device,is_half) + +def benchmark( + model, inputs_path, device=torch.device("cpu"), epoch=1000, is_half=False +): + parm = load_inputs(inputs_path, device, is_half) total_ts = 0.0 - bar=tqdm(range(epoch)) + bar = tqdm(range(epoch)) for i in bar: - start_time=time.perf_counter() - o=model(**parm) - total_ts+=time.perf_counter()-start_time + start_time = time.perf_counter() + o = model(**parm) + total_ts += time.perf_counter() - start_time print(f"num_epoch: {epoch} | avg time(ms): {(total_ts*1000)/epoch}") -def jit_warm_up(model,inputs_path,device=torch.device("cpu"),epoch=5,is_half=False): - benchmark(model,inputs_path,device,epoch=epoch,is_half=is_half) -def to_jit_model(model_path,model_type:str,mode:str="trace",inputs_path:str=None,device=torch.device("cpu"),is_half=False): - model=None - if model_type.lower()=="synthesizer": +def jit_warm_up(model, inputs_path, device=torch.device("cpu"), epoch=5, is_half=False): + benchmark(model, inputs_path, device, epoch=epoch, is_half=is_half) + + +def to_jit_model( + model_path, + model_type: str, + mode: str = "trace", + inputs_path: str = None, + device=torch.device("cpu"), + is_half=False, +): + model = None + if model_type.lower() == "synthesizer": from .get_synthesizer import get_synthesizer - model,_=get_synthesizer(model_path,device) + + model, _ = get_synthesizer(model_path, device) model.forward = model.infer - elif model_type.lower()=="rmvpe": + elif model_type.lower() == "rmvpe": from .get_rmvpe import get_rmvpe - model=get_rmvpe(model_path,device) - elif model_type.lower()=="hubert": + + model = get_rmvpe(model_path, device) + elif model_type.lower() == "hubert": from .get_hubert import get_hubert_model - model=get_hubert_model(model_path,device) + + model = get_hubert_model(model_path, device) model.forward = model.infer else: raise ValueError(f"No model type named {model_type}") model = model.eval() model = model.half() if is_half else model.float() - if mode=="trace": + if mode == "trace": assert not inputs_path - inputs = load_inputs(inputs_path,device,is_half) - model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) - elif mode=="script": - model_jit=torch.jit.script(model) + inputs = load_inputs(inputs_path, device, is_half) + model_jit = torch.jit.trace(model, example_kwarg_inputs=inputs) + elif mode == "script": + model_jit = torch.jit.script(model) model_jit.to(device) model_jit = model_jit.half() if is_half else model_jit.float() # model = model.half() if is_half else model.float() - return (model,model_jit) + return (model, model_jit) -def export(model:torch.nn.Module,mode:str="trace",inputs:dict=None,device=torch.device("cpu"),is_half:bool=False)->dict: + +def export( + model: torch.nn.Module, + mode: str = "trace", + inputs: dict = None, + device=torch.device("cpu"), + is_half: bool = False, +) -> dict: model = model.half() if is_half else model.float() model.eval() - if mode=="trace": + if mode == "trace": assert inputs is not None - model_jit=torch.jit.trace(model,example_kwarg_inputs=inputs) - elif mode=="script": - model_jit=torch.jit.script(model) + model_jit = torch.jit.trace(model, example_kwarg_inputs=inputs) + elif mode == "script": 
+ model_jit = torch.jit.script(model) model_jit.to(device) model_jit = model_jit.half() if is_half else model_jit.float() buffer = BytesIO() # model_jit=model_jit.cpu() - torch.jit.save(model_jit,buffer) + torch.jit.save(model_jit, buffer) del model_jit - cpt=OrderedDict() - cpt["model"]=buffer.getvalue() - cpt["is_half"]=is_half + cpt = OrderedDict() + cpt["model"] = buffer.getvalue() + cpt["is_half"] = is_half return cpt -def load(path:str): - with open(path,"rb") as f: + +def load(path: str): + with open(path, "rb") as f: return pickle.load(f) -def save(ckpt:dict, save_path:str): - with open(save_path,"wb") as f: - pickle.dump(ckpt,f) + +def save(ckpt: dict, save_path: str): + with open(save_path, "wb") as f: + pickle.dump(ckpt, f) -def rmvpe_jit_export(model_path:str,mode:str="script",inputs_path:str=None,save_path:str=None,device=torch.device("cpu"),is_half=False): +def rmvpe_jit_export( + model_path: str, + mode: str = "script", + inputs_path: str = None, + save_path: str = None, + device=torch.device("cpu"), + is_half=False, +): if not save_path: - save_path=model_path.rstrip(".pth") - save_path+=".half.jit" if is_half else ".jit" + save_path = model_path.rstrip(".pth") + save_path += ".half.jit" if is_half else ".jit" if "cuda" in str(device) and ":" not in str(device): - device = torch.device("cuda:0" ) + device = torch.device("cuda:0") from .get_rmvpe import get_rmvpe - model = get_rmvpe(model_path,device) - inputs =None - if mode=="trace": - inputs =load_inputs(inputs_path,device,is_half) - ckpt = export(model,mode,inputs,device,is_half) - ckpt["device"]=str(device) - save(ckpt,save_path) + + model = get_rmvpe(model_path, device) + inputs = None + if mode == "trace": + inputs = load_inputs(inputs_path, device, is_half) + ckpt = export(model, mode, inputs, device, is_half) + ckpt["device"] = str(device) + save(ckpt, save_path) return ckpt -def synthesizer_jit_export(model_path:str,mode:str="script",inputs_path:str=None,save_path:str=None,device=torch.device("cpu"),is_half=False): + +def synthesizer_jit_export( + model_path: str, + mode: str = "script", + inputs_path: str = None, + save_path: str = None, + device=torch.device("cpu"), + is_half=False, +): if not save_path: - save_path=model_path.rstrip(".pth") - save_path+=".half.jit" if is_half else ".jit" + save_path = model_path.rstrip(".pth") + save_path += ".half.jit" if is_half else ".jit" if "cuda" in str(device) and ":" not in str(device): - device = torch.device("cuda:0" ) + device = torch.device("cuda:0") from .get_synthesizer import get_synthesizer - model,cpt=get_synthesizer(model_path,device) - assert isinstance(cpt,dict) + + model, cpt = get_synthesizer(model_path, device) + assert isinstance(cpt, dict) model.forward = model.infer - inputs =None - if mode=="trace": - inputs =load_inputs(inputs_path,device,is_half) - ckpt = export(model,mode,inputs,device,is_half) + inputs = None + if mode == "trace": + inputs = load_inputs(inputs_path, device, is_half) + ckpt = export(model, mode, inputs, device, is_half) cpt.pop("weight") cpt["model"] = ckpt["model"] cpt["device"] = device - save(cpt,save_path) + save(cpt, save_path) return cpt diff --git a/infer/lib/jit/get_hubert.py b/infer/lib/jit/get_hubert.py index 1194be4..aec7132 100644 --- a/infer/lib/jit/get_hubert.py +++ b/infer/lib/jit/get_hubert.py @@ -1,12 +1,15 @@ import math import random -from typing import Optional,Tuple +from typing import Optional, Tuple from fairseq.checkpoint_utils import load_model_ensemble_and_task import numpy as np import torch import 
torch.nn.functional as F + # from fairseq.data.data_utils import compute_mask_indices from fairseq.utils import index_put + + # @torch.jit.script def pad_to_multiple(x, multiple, dim=-1, value=0): # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41 @@ -15,7 +18,7 @@ def pad_to_multiple(x, multiple, dim=-1, value=0): tsz = x.size(dim) m = tsz / multiple remainder = math.ceil(m) * multiple - tsz - if int(tsz % multiple)==0: + if int(tsz % multiple) == 0: return x, 0 pad_offset = (0,) * (-1 - dim) * 2 @@ -29,7 +32,6 @@ def extract_features( tgt_layer=None, min_layer=0, ): - if padding_mask is not None: x = index_put(x, padding_mask, 0) @@ -41,9 +43,7 @@ def extract_features( x = self.layer_norm(x) # pad to the sequence length dimension - x, pad_length = pad_to_multiple( - x, self.required_seq_len_multiple, dim=-2, value=0 - ) + x, pad_length = pad_to_multiple(x, self.required_seq_len_multiple, dim=-2, value=0) if pad_length > 0 and padding_mask is None: padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool) padding_mask[:, -pad_length:] = True @@ -91,6 +91,7 @@ def undo_pad(a, b, c): return x, layer_results + def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], @@ -141,10 +142,7 @@ def compute_mask_indices( for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() - num_mask = int( - mask_prob * sz / float(mask_length) - + np.random.rand() - ) + num_mask = int(mask_prob * sz / float(mask_length) + np.random.rand()) num_mask = max(min_masks, num_mask) else: sz = all_sz @@ -153,7 +151,7 @@ def compute_mask_indices( if mask_type == "static": lengths = torch.full([num_mask], mask_length) elif mask_type == "uniform": - lengths = torch.randint(mask_other, mask_length*2+1, size=[num_mask]) + lengths = torch.randint(mask_other, mask_length * 2 + 1, size=[num_mask]) elif mask_type == "normal": lengths = torch.normal(mask_length, mask_other, size=[num_mask]) lengths = [max(1, int(round(x))) for x in lengths] @@ -167,7 +165,7 @@ def compute_mask_indices( mask_idc = [] def arrange(s, e, length, keep_length): - span_start = torch.randint(low=s, high=e - length,size=[1]).item() + span_start = torch.randint(low=s, high=e - length, size=[1]).item() mask_idc.extend(span_start + i for i in range(length)) new_parts = [] @@ -176,17 +174,17 @@ def arrange(s, e, length, keep_length): if e - span_start - length - min_space > keep_length: new_parts.append((span_start + length + min_space, e)) return new_parts - + parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): - t=[e - s if e - s >= length + min_space else 0 for s, e in parts] - lens=torch.asarray(t,dtype=torch.int) + t = [e - s if e - s >= length + min_space else 0 for s, e in parts] + lens = torch.asarray(t, dtype=torch.int) l_sum = torch.sum(lens) if l_sum == 0: break probs = lens / torch.sum(lens) - c = torch.multinomial(probs.float(),len(parts)).item() + c = torch.multinomial(probs.float(), len(parts)).item() s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = torch.asarray(mask_idc) @@ -194,7 +192,9 @@ def arrange(s, e, length, keep_length): min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 - mask_idc=torch.asarray(random.sample([i for i in range(sz - min_len)],num_mask)) + mask_idc = torch.asarray( + random.sample([i for i in range(sz - min_len)], num_mask) + ) mask_idc = torch.asarray( [ mask_idc[j] + offset @@ 
-207,18 +207,23 @@ def arrange(s, e, length, keep_length): min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): - if isinstance(mask_idc,torch.Tensor): - mask_idc=torch.asarray(mask_idc,dtype=torch.float) + if isinstance(mask_idc, torch.Tensor): + mask_idc = torch.asarray(mask_idc, dtype=torch.float) if len(mask_idc) > min_len and require_same_masks: - mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],min_len)) + mask_idc = torch.asarray( + random.sample([i for i in range(mask_idc)], min_len) + ) if mask_dropout > 0: num_holes = int(round(len(mask_idc) * mask_dropout)) - mask_idc=torch.asarray(random.sample([i for i in range(mask_idc)],len(mask_idc) - num_holes)) + mask_idc = torch.asarray( + random.sample([i for i in range(mask_idc)], len(mask_idc) - num_holes) + ) mask[i, mask_idc.int()] = True return mask + def apply_mask(self, x, padding_mask, target_list): B, T, C = x.shape torch.zeros_like(x) @@ -251,40 +256,45 @@ def apply_mask(self, x, padding_mask, target_list): min_space=self.mask_channel_min_space, ) mask_channel_indices = ( - mask_channel_indices - .to(x.device) - .unsqueeze(1) - .expand(-1, T, -1) + mask_channel_indices.to(x.device).unsqueeze(1).expand(-1, T, -1) ) x[mask_channel_indices] = 0 return x, mask_indices -def get_hubert_model(model_path="assets/hubert/hubert_base.pt",device=torch.device("cpu")): +def get_hubert_model( + model_path="assets/hubert/hubert_base.pt", device=torch.device("cpu") +): models, _, _ = load_model_ensemble_and_task( - [model_path], - suffix="", - ) + [model_path], + suffix="", + ) hubert_model = models[0] hubert_model = hubert_model.to(device) + def _apply_mask(x, padding_mask, target_list): - return apply_mask(hubert_model,x, padding_mask, target_list) + return apply_mask(hubert_model, x, padding_mask, target_list) + hubert_model.apply_mask = _apply_mask - def _extract_features(x, + def _extract_features( + x, padding_mask=None, tgt_layer=None, min_layer=0, ): - return extract_features(hubert_model.encoder,x, - padding_mask=padding_mask, - tgt_layer=tgt_layer, - min_layer=min_layer, - ) + return extract_features( + hubert_model.encoder, + x, + padding_mask=padding_mask, + tgt_layer=tgt_layer, + min_layer=min_layer, + ) + hubert_model.encoder.extract_features = _extract_features - hubert_model._forward=hubert_model.forward + hubert_model._forward = hubert_model.forward def hubert_extract_features( self, @@ -303,7 +313,7 @@ def hubert_extract_features( ) feature = res["features"] if ret_conv else res["x"] return feature, res["padding_mask"] - + def _hubert_extract_features( source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, @@ -311,20 +321,22 @@ def _hubert_extract_features( ret_conv: bool = False, output_layer: Optional[int] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: - return hubert_extract_features(hubert_model,source,padding_mask,mask,ret_conv,output_layer) + return hubert_extract_features( + hubert_model, source, padding_mask, mask, ret_conv, output_layer + ) + hubert_model.extract_features = _hubert_extract_features - - def infer(source,padding_mask,output_layer:torch.Tensor): - output_layer=output_layer.item() - logits = hubert_model.extract_features(source=source,padding_mask=padding_mask,output_layer=output_layer) - feats = ( - hubert_model.final_proj(logits[0]) if output_layer == 9 else logits[0] - ) + + def infer(source, padding_mask, output_layer: torch.Tensor): + output_layer = output_layer.item() + logits = hubert_model.extract_features( + source=source, 
padding_mask=padding_mask, output_layer=output_layer + ) + feats = hubert_model.final_proj(logits[0]) if output_layer == 9 else logits[0] return feats - - hubert_model.infer=infer + hubert_model.infer = infer # hubert_model.forward=infer # hubert_model.forward - return hubert_model \ No newline at end of file + return hubert_model diff --git a/infer/lib/jit/get_rmvpe.py b/infer/lib/jit/get_rmvpe.py index 3c2ed7e..e71c39f 100644 --- a/infer/lib/jit/get_rmvpe.py +++ b/infer/lib/jit/get_rmvpe.py @@ -1,13 +1,12 @@ - import torch - -def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt",device=torch.device("cpu")): +def get_rmvpe(model_path="assets/rmvpe/rmvpe.pt", device=torch.device("cpu")): from infer.lib.rmvpe import E2E + model = E2E(4, 1, (2, 2)) ckpt = torch.load(model_path, map_location=device) model.load_state_dict(ckpt) model.eval() model = model.to(device) - return model \ No newline at end of file + return model diff --git a/infer/lib/jit/get_synthesizer.py b/infer/lib/jit/get_synthesizer.py index 1498b64..17c99ee0 100644 --- a/infer/lib/jit/get_synthesizer.py +++ b/infer/lib/jit/get_synthesizer.py @@ -1,8 +1,14 @@ import torch -def get_synthesizer(pth_path,device=torch.device("cpu")): - from infer.lib.infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono +def get_synthesizer(pth_path, device=torch.device("cpu")): + from infer.lib.infer_pack.models import ( + SynthesizerTrnMs256NSFsid, + SynthesizerTrnMs256NSFsid_nono, + SynthesizerTrnMs768NSFsid, + SynthesizerTrnMs768NSFsid_nono, + ) + cpt = torch.load(pth_path, map_location=torch.device("cpu")) tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] @@ -10,26 +16,22 @@ def get_synthesizer(pth_path,device=torch.device("cpu")): version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=False - ) + net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=False) else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) elif version == "v2": if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=False - ) + net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=False) else: net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) del net_g.enc_q # net_g.forward = net_g.infer - ckpt={} - ckpt["config"]=cpt["config"] + ckpt = {} + ckpt["config"] = cpt["config"] ckpt["f0"] = if_f0 - ckpt["version"]=version - ckpt["info"]=cpt.get("info","0epoch") + ckpt["version"] = version + ckpt["info"] = cpt.get("info", "0epoch") net_g.load_state_dict(cpt["weight"], strict=False) - net_g=net_g.float() + net_g = net_g.float() net_g.eval().to(device) - return net_g,cpt \ No newline at end of file + return net_g, cpt diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index 5d1565b..044abf2 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -284,8 +284,8 @@ def __init__(self, in_channels, out_channels, momentum=0.01): if in_channels != out_channels: self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - def forward(self, x:torch.Tensor): - if not hasattr(self,"shortcut"): + def forward(self, x: torch.Tensor): + if not hasattr(self, "shortcut"): return self.conv(x) + x else: return self.conv(x) + self.shortcut(x) @@ -320,8 +320,8 @@ def __init__( self.out_size = in_size self.out_channel = out_channels - def forward(self, x:torch.Tensor): - concat_tensors:List[torch.Tensor]= [] + def forward(self, x: torch.Tensor): + 
concat_tensors: List[torch.Tensor] = [] x = self.bn(x) for i, layer in enumerate(self.layers): t, x = layer(x) @@ -344,7 +344,7 @@ def __init__( self.pool = nn.AvgPool2d(kernel_size=kernel_size) def forward(self, x): - for i,conv in enumerate(self.conv): + for i, conv in enumerate(self.conv): x = conv(x) if self.kernel_size is not None: return x, self.pool(x) @@ -397,7 +397,7 @@ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01) def forward(self, x, concat_tensor): x = self.conv1(x) x = torch.cat((x, concat_tensor), dim=1) - for i,conv2 in enumerate(self.conv2): + for i, conv2 in enumerate(self.conv2): x = conv2(x) return x @@ -414,8 +414,8 @@ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): ) in_channels = out_channels - def forward(self, x:torch.Tensor, concat_tensors:List[torch.Tensor]): - for i,layer in enumerate(self.layers): + def forward(self, x: torch.Tensor, concat_tensors: List[torch.Tensor]): + for i, layer in enumerate(self.layers): x = layer(x, concat_tensors[-1 - i]) return x @@ -444,7 +444,7 @@ def __init__( self.encoder.out_channel, en_de_layers, kernel_size, n_blocks ) - def forward(self, x:torch.Tensor)->torch.Tensor: + def forward(self, x: torch.Tensor) -> torch.Tensor: x, concat_tensors = self.encoder(x) x = self.intermediate(x) x = self.decoder(x, concat_tensors) diff --git a/infer/modules/train/train.py b/infer/modules/train/train.py index 11f3e97..d58d768 100644 --- a/infer/modules/train/train.py +++ b/infer/modules/train/train.py @@ -108,12 +108,7 @@ def main(): for i in range(n_gpus): subproc = mp.Process( target=run, - args=( - i, - n_gpus, - hps, - logger - ), + args=(i, n_gpus, hps, logger), ) children.append(subproc) subproc.start() @@ -122,7 +117,7 @@ def main(): children[i].join() -def run(rank, n_gpus, hps, logger:logging.Logger): +def run(rank, n_gpus, hps, logger: logging.Logger): global global_step if rank == 0: # logger = utils.get_logger(hps.model_dir) From 66c27217e5987a2ee09b429032cf7316509c3df5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 23:22:58 +0900 Subject: [PATCH 11/26] =?UTF-8?q?=F0=9F=8E=A8=20=E5=90=8C=E6=AD=A5=20local?= =?UTF-8?q?e=20(#1299)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: github-actions[bot] --- i18n/locale/en_US.json | 3 +++ i18n/locale/es_ES.json | 3 +++ i18n/locale/fr_FR.json | 3 +++ i18n/locale/it_IT.json | 3 +++ i18n/locale/ja_JP.json | 3 +++ i18n/locale/ru_RU.json | 3 +++ i18n/locale/tr_TR.json | 3 +++ i18n/locale/zh_CN.json | 3 +++ i18n/locale/zh_HK.json | 3 +++ i18n/locale/zh_SG.json | 3 +++ i18n/locale/zh_TW.json | 3 +++ 11 files changed, 33 insertions(+) diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index 0fb158b..9fa744b 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. 
If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.", "目标采样率": "Target sample rate:", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown:", "融合": "Fusion", "要改的模型信息": "Model information to be modified:", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):", "输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume:", + "输入监听": "输入监听", "输入训练文件夹路径": "Enter the path of the training folder:", "输入设备": "Input device", "输入降噪": "Input noise reduction", "输出信息": "Output information", + "输出变声": "输出变声", "输出设备": "Output device", "输出降噪": "Output noise reduction", "输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)", diff --git a/i18n/locale/es_ES.json b/i18n/locale/es_ES.json index 74c30e5..961cb3a 100644 --- a/i18n/locale/es_ES.json +++ b/i18n/locale/es_ES.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo de la biblioteca de características, si está vacío, se utilizará el resultado de la selección desplegable", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tecla +12 recomendada para conversión de voz de hombre a mujer, tecla -12 para conversión de voz de mujer a hombre. Si el rango de tono es demasiado amplio y causa distorsión, ajústelo usted mismo a un rango adecuado.", "目标采样率": "Tasa de muestreo objetivo", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "Detección automática de la ruta del índice, selección desplegable (dropdown)", "融合": "Fusión", "要改的模型信息": "Información del modelo a modificar", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta a la carpeta de audio que se procesará (simplemente cópiela desde la barra de direcciones del administrador de archivos)", "输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo del audio que se procesará (el formato predeterminado es el ejemplo correcto)", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Proporción de fusión para reemplazar el sobre de volumen de entrada con el sobre de volumen de salida, cuanto más cerca de 1, más se utiliza el sobre de salida", + "输入监听": "输入监听", "输入训练文件夹路径": "Introduzca la ruta de la carpeta de entrenamiento", "输入设备": "Dispositivo de entrada", "输入降噪": "Reducción de ruido de entrada", "输出信息": "Información de salida", + "输出变声": "输出变声", "输出设备": "Dispositivo de salida", "输出降噪": "Reducción de ruido de salida", "输出音频(右下角三个点,点了可以下载)": "Salida de audio (haga clic en los tres puntos en la esquina inferior derecha para descargar)", diff --git a/i18n/locale/fr_FR.json b/i18n/locale/fr_FR.json index 1caeb0d..d12078e 100644 --- a/i18n/locale/fr_FR.json +++ b/i18n/locale/fr_FR.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Chemin d'accès au fichier d'index des caractéristiques. Laisser vide pour utiliser le résultat sélectionné dans la liste déroulante :", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Il est recommandé d'utiliser la clé +12 pour la conversion homme-femme et la clé -12 pour la conversion femme-homme. 
Si la plage sonore est trop large et que la voix est déformée, vous pouvez également l'ajuster vous-même à la plage appropriée.", "目标采样率": "Taux d'échantillonnage cible :", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "Détecter automatiquement le chemin d'accès à l'index et le sélectionner dans la liste déroulante :", "融合": "Fusion", "要改的模型信息": "Informations sur le modèle à modifier :", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers) :", "输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin d'accès du fichier audio à traiter (par défaut, l'exemple de format correct) :", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ajustez l'échelle de l'enveloppe de volume. Plus il est proche de 0, plus il imite le volume des voix originales. Cela peut aider à masquer les bruits et à rendre le volume plus naturel lorsqu'il est réglé relativement bas. Plus le volume est proche de 1, plus le volume sera fort et constant :", + "输入监听": "输入监听", "输入训练文件夹路径": "Indiquez le chemin d'accès au dossier d'entraînement :", "输入设备": "Dispositif d'entrée", "输入降噪": "Réduction du bruit d'entrée", "输出信息": "Informations sur la sortie", + "输出变声": "输出变声", "输出设备": "Dispositif de sortie", "输出降噪": "Réduction du bruit de sortie", "输出音频(右下角三个点,点了可以下载)": "Exporter l'audio (cliquer sur les trois points dans le coin inférieur droit pour télécharger)", diff --git a/i18n/locale/it_IT.json b/i18n/locale/it_IT.json index a77e82a..38fdef8 100644 --- a/i18n/locale/it_IT.json +++ b/i18n/locale/it_IT.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Percorso del file di indice delle caratteristiche. ", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tonalità +12 consigliata per la conversione da maschio a femmina e tonalità -12 per la conversione da femmina a maschio. ", "目标采样率": "Frequenza di campionamento target:", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "Rileva automaticamente il percorso dell'indice e seleziona dal menu a tendina:", "融合": "Fusione", "要改的模型信息": "Informazioni sul modello da modificare:", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Immettere il percorso della cartella audio da elaborare (copiarlo dalla barra degli indirizzi del file manager):", "输入待处理音频文件路径(默认是正确格式示例)": "Immettere il percorso del file audio da elaborare (l'impostazione predefinita è l'esempio di formato corretto):", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Regola il ridimensionamento dell'inviluppo del volume. ", + "输入监听": "输入监听", "输入训练文件夹路径": "Inserisci il percorso della cartella di addestramento:", "输入设备": "Dispositivo di input", "输入降噪": "Riduzione del rumore in ingresso", "输出信息": "Informazioni sull'uscita", + "输出变声": "输出变声", "输出设备": "Dispositivo di uscita", "输出降噪": "Riduzione del rumore in uscita", "输出音频(右下角三个点,点了可以下载)": "Esporta audio (clicca sui tre puntini in basso a destra per scaricarlo)", diff --git a/i18n/locale/ja_JP.json b/i18n/locale/ja_JP.json index 9f0f242..903ed87 100644 --- a/i18n/locale/ja_JP.json +++ b/i18n/locale/ja_JP.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "特徴検索ライブラリへのパス 空の場合はドロップダウンで選択", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. 
": "男性から女性へは+12キーをお勧めします。女性から男性へは-12キーをお勧めします。音域が広すぎて音質が劣化した場合は、適切な音域に自分で調整してください。", "目标采样率": "目標サンプリングレート", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "インデックスパスの自動検出 ドロップダウンで選択", "融合": "マージ", "要改的模型信息": "変更するモデル情報", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "処理対象音声フォルダーのパスを入力してください(エクスプローラーのアドレスバーからコピーしてください)", "输入待处理音频文件路径(默认是正确格式示例)": "処理対象音声ファイルのパスを入力してください(デフォルトは正しいフォーマットの例です)", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "入力ソースの音量エンベロープと出力音量エンベロープの融合率 1に近づくほど、出力音量エンベロープの割合が高くなる", + "输入监听": "输入监听", "输入训练文件夹路径": "トレーニング用フォルダのパスを入力してください", "输入设备": "入力デバイス", "输入降噪": "入力ノイズの低減", "输出信息": "出力情報", + "输出变声": "输出变声", "输出设备": "出力デバイス", "输出降噪": "出力ノイズの低減", "输出音频(右下角三个点,点了可以下载)": "出力音声(右下の三点をクリックしてダウンロードできます)", diff --git a/i18n/locale/ru_RU.json b/i18n/locale/ru_RU.json index 26ba6dc..b6530ac 100644 --- a/i18n/locale/ru_RU.json +++ b/i18n/locale/ru_RU.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Путь к файлу индекса черт. Оставьте пустым, чтобы использовать выбранный вариант из списка ниже:", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Рекомендуется выбрать +12 для конвертирования мужского голоса в женский и -12 для конвертирования женского в мужской. Если диапазон голоса слишком велик, и голос искажается, можно выбрать значение на свой вкус.", "目标采样率": "Частота дискретизации аудио:", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "Автоматически найденные файлы индексов черт (выберите вариант из списка):", "融合": "Запустить слияние", "要改的模型信息": "Информация, которая будет изменена:", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Путь к папке с аудиофайлами для переработки (можно скопировать путь из адресной строки файлового менеджера):", "输入待处理音频文件路径(默认是正确格式示例)": "Путь к аудиофайлу, который хотите обработать (ниже указан пример пути к файлу):", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Использовать громкость входного файла для замены или перемешивания с громкостью выходного файла. Чем ближе соотношение к 1, тем больше используется звука из выходного файла:", + "输入监听": "输入监听", "输入训练文件夹路径": "Путь к папке с аудиозаписями, на которых будет обучаться модель:", "输入设备": "Входное устройство", "输入降噪": "Уменьшение входного шума", "输出信息": "Статистика", + "输出变声": "输出变声", "输出设备": "Выходное устройство", "输出降噪": "Уменьшение выходного шума", "输出音频(右下角三个点,点了可以下载)": "Аудиофайл (чтобы скачать, нажмите на три точки справа в плеере)", diff --git a/i18n/locale/tr_TR.json b/i18n/locale/tr_TR.json index e290c93..efd921b 100644 --- a/i18n/locale/tr_TR.json +++ b/i18n/locale/tr_TR.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Özellik indeksi dosyasının yolunu belirtin. Seçilen sonucu kullanmak için boş bırakın veya açılır menüden seçim yapın.", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Erkekten kadına çevirmek için +12 tuş önerilir, kadından erkeğe çevirmek için ise -12 tuş önerilir. Eğer ses aralığı çok fazla genişler ve ses bozulursa, isteğe bağlı olarak uygun aralığa kendiniz de ayarlayabilirsiniz.", "目标采样率": "Hedef örnekleme oranı:", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "İndeks yolunu otomatik olarak tespit et ve açılır menüden seçim yap.", "融合": "Birleştir", "要改的模型信息": "Düzenlenecek model bilgileri:", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "İşlenecek ses klasörünün yolunu girin (dosya yöneticisinin adres çubuğundan kopyalayın):", "输入待处理音频文件路径(默认是正确格式示例)": "İşlenecek ses dosyasının yolunu girin (varsayılan doğru format örneğidir):", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Sesin hacim zarfını ayarlayın. 
0'a yakın değerler, sesin orijinal vokallerin hacmine benzer olmasını sağlar. Düşük bir değerle ses gürültüsünü maskeleyebilir ve hacmi daha doğal bir şekilde duyulabilir hale getirebilirsiniz. 1'e yaklaştıkça sürekli bir yüksek ses seviyesi elde edilir:", + "输入监听": "输入监听", "输入训练文件夹路径": "Eğitim klasörünün yolunu girin:", "输入设备": "Giriş cihazı", "输入降噪": "Giriş gürültü azaltma", "输出信息": "Çıkış bilgisi", + "输出变声": "输出变声", "输出设备": "Çıkış cihazı", "输出降噪": "Çıkış gürültü azaltma", "输出音频(右下角三个点,点了可以下载)": "Ses dosyasını dışa aktar (indirmek için sağ alt köşedeki üç noktaya tıklayın)", diff --git a/i18n/locale/zh_CN.json b/i18n/locale/zh_CN.json index a65cc47..b14e5f0 100644 --- a/i18n/locale/zh_CN.json +++ b/i18n/locale/zh_CN.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "特征检索库文件路径,为空则使用下拉的选择结果", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ", "目标采样率": "目标采样率", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "自动检测index路径,下拉式选择(dropdown)", "融合": "融合", "要改的模型信息": "要改的模型信息", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)", "输入待处理音频文件路径(默认是正确格式示例)": "输入待处理音频文件路径(默认是正确格式示例)", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络", + "输入监听": "输入监听", "输入训练文件夹路径": "输入训练文件夹路径", "输入设备": "输入设备", "输入降噪": "输入降噪", "输出信息": "输出信息", + "输出变声": "输出变声", "输出设备": "输出设备", "输出降噪": "输出降噪", "输出音频(右下角三个点,点了可以下载)": "输出音频(右下角三个点,点了可以下载)", diff --git a/i18n/locale/zh_HK.json b/i18n/locale/zh_HK.json index 47ed97c..fa2fbad 100644 --- a/i18n/locale/zh_HK.json +++ b/i18n/locale/zh_HK.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。", "目标采样率": "目標取樣率", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)", "融合": "融合", "要改的模型信息": "要改的模型資訊", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)", "输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡", + "输入监听": "输入监听", "输入训练文件夹路径": "輸入訓練檔案夾路徑", "输入设备": "輸入設備", "输入降噪": "輸入降噪", "输出信息": "輸出訊息", + "输出变声": "输出变声", "输出设备": "輸出設備", "输出降噪": "輸出降噪", "输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)", diff --git a/i18n/locale/zh_SG.json b/i18n/locale/zh_SG.json index 47ed97c..fa2fbad 100644 --- a/i18n/locale/zh_SG.json +++ b/i18n/locale/zh_SG.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。", "目标采样率": "目標取樣率", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)", "融合": "融合", "要改的模型信息": "要改的模型資訊", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)", "输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡", + "输入监听": "输入监听", "输入训练文件夹路径": "輸入訓練檔案夾路徑", "输入设备": "輸入設備", "输入降噪": "輸入降噪", "输出信息": "輸出訊息", + "输出变声": "输出变声", "输出设备": "輸出設備", "输出降噪": "輸出降噪", "输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)", diff --git a/i18n/locale/zh_TW.json b/i18n/locale/zh_TW.json index 47ed97c..fa2fbad 100644 --- a/i18n/locale/zh_TW.json +++ b/i18n/locale/zh_TW.json @@ -86,6 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. 
": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。", "目标采样率": "目標取樣率", + "算法延迟(ms):": "算法延迟(ms):", "自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)", "融合": "融合", "要改的模型信息": "要改的模型資訊", @@ -104,10 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)", "输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡", + "输入监听": "输入监听", "输入训练文件夹路径": "輸入訓練檔案夾路徑", "输入设备": "輸入設備", "输入降噪": "輸入降噪", "输出信息": "輸出訊息", + "输出变声": "输出变声", "输出设备": "輸出設備", "输出降噪": "輸出降噪", "输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)", From 1fdda815a741bace93ad4a9a474b62fbb119563f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Thu, 21 Sep 2023 23:47:31 +0900 Subject: [PATCH 12/26] feat: optimize actions --- .github/workflows/genlocale.yml | 19 ++++++++++++------- .github/workflows/pull_format.yml | 13 +++---------- .github/workflows/push_format.yml | 19 +++++++------------ 3 files changed, 22 insertions(+), 29 deletions(-) diff --git a/.github/workflows/genlocale.yml b/.github/workflows/genlocale.yml index 7a7bf0f..7ec5ada 100644 --- a/.github/workflows/genlocale.yml +++ b/.github/workflows/genlocale.yml @@ -6,11 +6,9 @@ on: - dev jobs: genlocale: - name: genlocale runs-on: ubuntu-latest steps: - - name: Check out - uses: actions/checkout@master + - uses: actions/checkout@master - name: Run locale generation run: | @@ -20,15 +18,22 @@ jobs: - name: Commit back if: ${{ !github.head_ref }} + id: commitback continue-on-error: true run: | git config --local user.name 'github-actions[bot]' git config --local user.email 'github-actions[bot]@users.noreply.github.com' git add --all - git commit -m "🎨 同步 locale" + git commit -m "chore(i18n): sync locale" - name: Create Pull Request - if: ${{ !github.head_ref }} + if: steps.commitback.outcome == 'success' continue-on-error: true - uses: peter-evans/create-pull-request@v4 - + uses: peter-evans/create-pull-request@v5 + with: + delete-branch: true + body: "Automatically sync i18n translation jsons" + title: "chore(i18n): sync locale" + commit-message: "chore(i18n): sync locale" + branch: genlocale-${{github.ref_name}} + branch-suffix: short-commit-hash diff --git a/.github/workflows/pull_format.yml b/.github/workflows/pull_format.yml index e0d5ddf..d669de5 100644 --- a/.github/workflows/pull_format.yml +++ b/.github/workflows/pull_format.yml @@ -6,8 +6,7 @@ on: jobs: # This workflow closes invalid PR - close-pr: - name: closepr + close_pr: # The type of runner that the job will run on runs-on: ubuntu-latest permissions: write-all @@ -22,20 +21,14 @@ jobs: comment: "Invalid PR to non-dev branch ${{ github.event.pull_request.base.ref }}." 
pull_format: - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest permissions: contents: write - strategy: - matrix: - python-version: ["3.10"] - os: [ubuntu-latest] - fail-fast: false - continue-on-error: true steps: - - name: checkout + - name: Checkout continue-on-error: true uses: actions/checkout@v3 with: diff --git a/.github/workflows/push_format.yml b/.github/workflows/push_format.yml index 19011d3..a95c3bf 100644 --- a/.github/workflows/push_format.yml +++ b/.github/workflows/push_format.yml @@ -12,14 +12,7 @@ permissions: jobs: push_format: - runs-on: ${{ matrix.os }} - - strategy: - matrix: - python-version: ["3.10"] - os: [ubuntu-latest] - fail-fast: false - + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: @@ -44,7 +37,7 @@ jobs: git config --local user.email "github-actions[bot]@users.noreply.github.com" git config --local user.name "github-actions[bot]" git add --all - git commit -m "Format code" + git commit -m "chore(format): run black" - name: Create Pull Request if: steps.commitback.outcome == 'success' @@ -52,6 +45,8 @@ jobs: uses: peter-evans/create-pull-request@v5 with: delete-branch: true - body: Apply Code Formatter Change - title: Apply Code Formatter Change - commit-message: Automatic code format + body: "Automatically apply code formatter change" + title: "chore(format): run black" + commit-message: "chore(format): run black" + branch: formatter-${{github.ref_name}} + branch-suffix: short-commit-hash From 5dc35d30212788ca98bcdca781e706b07e2e733d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Fri, 22 Sep 2023 00:00:06 +0900 Subject: [PATCH 13/26] feat(workflow): add sync dev --- .github/workflows/push_format.yml | 9 +++++---- .github/workflows/sync_dev.yml | 24 ++++++++++++++++++++++++ .github/workflows/unitest.yml | 2 +- 3 files changed, 30 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/sync_dev.yml diff --git a/.github/workflows/push_format.yml b/.github/workflows/push_format.yml index a95c3bf..481bc02 100644 --- a/.github/workflows/push_format.yml +++ b/.github/workflows/push_format.yml @@ -6,13 +6,14 @@ on: - main - dev -permissions: - contents: write - pull-requests: write - jobs: push_format: runs-on: ubuntu-latest + + permissions: + contents: write + pull-requests: write + steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/sync_dev.yml b/.github/workflows/sync_dev.yml new file mode 100644 index 0000000..436c377 --- /dev/null +++ b/.github/workflows/sync_dev.yml @@ -0,0 +1,24 @@ +name: sync dev + +on: + workflow_dispatch: + +jobs: + sync_dev: + runs-on: ubuntu-latest + + permissions: + contents: write + pull-requests: write + + steps: + - uses: actions/checkout@v3 + with: + ref: main + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v5 + with: + delete-branch: false + base: dev + title: "chore(sync): merge dev into main" diff --git a/.github/workflows/unitest.yml b/.github/workflows/unitest.yml index 1ab65f6..12b64a3 100644 --- a/.github/workflows/unitest.yml +++ b/.github/workflows/unitest.yml @@ -7,7 +7,7 @@ jobs: matrix: python-version: ["3.8", "3.9", "3.10"] os: [ubuntu-latest] - fail-fast: false + fail-fast: true steps: - uses: actions/checkout@master From 93a12443cc85f0732da8aff3b0f2cc7b6704e9b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Fri, 22 Sep 2023 00:04:09 +0900 Subject: [PATCH 14/26] feat: optimize actions --- 
.github/workflows/docker.yml | 2 +- .github/workflows/genlocale.yml | 2 +- .github/workflows/pull_format.yml | 2 +- .github/workflows/push_format.yml | 2 +- .github/workflows/sync_dev.yml | 2 +- .github/workflows/unitest.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 68f74fa..e983560 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,4 @@ -name: Build And Push Docker Image +name: Build and Push Docker Image on: workflow_dispatch: diff --git a/.github/workflows/genlocale.yml b/.github/workflows/genlocale.yml index 7ec5ada..081158e 100644 --- a/.github/workflows/genlocale.yml +++ b/.github/workflows/genlocale.yml @@ -1,4 +1,4 @@ -name: genlocale +name: Generate and Sync Locale on: push: branches: diff --git a/.github/workflows/pull_format.yml b/.github/workflows/pull_format.yml index d669de5..334158b 100644 --- a/.github/workflows/pull_format.yml +++ b/.github/workflows/pull_format.yml @@ -1,4 +1,4 @@ -name: pull format +name: Check Pull Format on: pull_request_target: diff --git a/.github/workflows/push_format.yml b/.github/workflows/push_format.yml index 481bc02..605b27d 100644 --- a/.github/workflows/push_format.yml +++ b/.github/workflows/push_format.yml @@ -1,4 +1,4 @@ -name: push format +name: Standardize Code Format on: push: diff --git a/.github/workflows/sync_dev.yml b/.github/workflows/sync_dev.yml index 436c377..acc17e5 100644 --- a/.github/workflows/sync_dev.yml +++ b/.github/workflows/sync_dev.yml @@ -1,4 +1,4 @@ -name: sync dev +name: Merge dev into main on: workflow_dispatch: diff --git a/.github/workflows/unitest.yml b/.github/workflows/unitest.yml index 12b64a3..904195c 100644 --- a/.github/workflows/unitest.yml +++ b/.github/workflows/unitest.yml @@ -1,4 +1,4 @@ -name: unitest +name: Unit Test on: [ push, pull_request ] jobs: build: From 7041bb364e543d40610200e235c5f246cdd3ec49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Fri, 22 Sep 2023 00:06:06 +0900 Subject: [PATCH 15/26] feat: optimize actions --- .github/workflows/pull_format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull_format.yml b/.github/workflows/pull_format.yml index 334158b..e21dc2d 100644 --- a/.github/workflows/pull_format.yml +++ b/.github/workflows/pull_format.yml @@ -2,7 +2,7 @@ name: Check Pull Format on: pull_request_target: - types: [assigned, opened, synchronize, reopened] + types: [opened, reopened] jobs: # This workflow closes invalid PR From 527bfa605ed5596bd67ea7e2c1d136d4bd9b9038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Fri, 22 Sep 2023 00:16:49 +0900 Subject: [PATCH 16/26] feat: optimize actions --- .github/workflows/sync_dev.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/sync_dev.yml b/.github/workflows/sync_dev.yml index acc17e5..669bc1e 100644 --- a/.github/workflows/sync_dev.yml +++ b/.github/workflows/sync_dev.yml @@ -17,8 +17,5 @@ jobs: ref: main - name: Create Pull Request - uses: peter-evans/create-pull-request@v5 - with: - delete-branch: false - base: dev - title: "chore(sync): merge dev into main" + run: | + gh pr create --title "chore(sync): merge dev into main" --body "Merge dev to main" --base main --head dev From c6c310c71d04aebeb54d4c23070b0a05ef49b515 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Fri, 22 Sep 2023 00:18:10 +0900 Subject: [PATCH 17/26] feat: optimize actions --- .github/workflows/sync_dev.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/sync_dev.yml b/.github/workflows/sync_dev.yml index 669bc1e..4696511 100644 --- a/.github/workflows/sync_dev.yml +++ b/.github/workflows/sync_dev.yml @@ -19,3 +19,5 @@ jobs: - name: Create Pull Request run: | gh pr create --title "chore(sync): merge dev into main" --body "Merge dev to main" --base main --head dev + env: + GH_TOKEN: ${{ github.token }} From 430ca8676b0e115413409235032013c5cdef32c8 Mon Sep 17 00:00:00 2001 From: Chengjia Jiang <46401978+ChasonJiang@users.noreply.github.com> Date: Fri, 22 Sep 2023 16:55:42 +0800 Subject: [PATCH 18/26] feat: add jit options (#1303) Delete useless code: infer/lib/jit/get_synthesizer.py Optimized code: tools/rvc_for_realtime.py --- gui_v1.py | 144 ++++++++++++++++------------- infer/lib/jit/get_synthesizer.py | 12 +-- tools/rvc_for_realtime.py | 154 +++++++++++++++---------------- 3 files changed, 158 insertions(+), 152 deletions(-) diff --git a/gui_v1.py b/gui_v1.py index 2767d2c..f29f4ac 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -62,9 +62,10 @@ def run(self): import tools.rvc_for_realtime as rvc_for_realtime from i18n.i18n import I18nAuto - + from configs.config import Config i18n = I18nAuto() - device = rvc_for_realtime.config.device + + # device = rvc_for_realtime.config.device # device = torch.device( # "cuda" # if torch.cuda.is_available() @@ -99,7 +100,8 @@ def __init__(self) -> None: class GUI: def __init__(self) -> None: - self.config = GUIConfig() + self.gui_config = GUIConfig() + self.config = Config() self.flag_vc = False self.function = "vc" self.delay_time = 0 @@ -133,6 +135,7 @@ def load(self): "crossfade_length": "0.04", "extra_time": "2", "f0method": "rmvpe", + "use_jit":False } data["pm"] = data["f0method"] == "pm" data["harvest"] = data["f0method"] == "harvest" @@ -142,6 +145,7 @@ def load(self): def launcher(self): data = self.load() + self.config.use_jit=data.get("use_jit",self.config.use_jit) sg.theme("LightBlue3") input_devices, output_devices, _, _ = self.get_devices() layout = [ @@ -313,7 +317,7 @@ def launcher(self): resolution=1, orientation="h", default_value=data.get( - "n_cpu", min(self.config.n_cpu, n_cpu) + "n_cpu", min(self.gui_config.n_cpu, n_cpu) ), enable_events=True, ), @@ -351,7 +355,16 @@ def launcher(self): key="O_noise_reduce", enable_events=True, ), + sg.Checkbox( + "JIT加速", + default=self.config.use_jit, + key="use_jit", + enable_events=True, + ), ], + [ + sg.Text("注:首次使用JIT加速时,会出现卡顿,\n 并伴随一些噪音,但这是正常现象!") + ] ], title=i18n("性能设置"), ), @@ -393,20 +406,20 @@ def event_handler(self): prev_output = self.window["sg_output_device"].get() input_devices, output_devices, _, _ = self.get_devices(update=True) if prev_input not in input_devices: - self.config.sg_input_device = input_devices[0] + self.gui_config.sg_input_device = input_devices[0] else: - self.config.sg_input_device = prev_input + self.gui_config.sg_input_device = prev_input self.window["sg_input_device"].Update(values=input_devices) self.window["sg_input_device"].Update( - value=self.config.sg_input_device + value=self.gui_config.sg_input_device ) if prev_output not in output_devices: - self.config.sg_output_device = output_devices[0] + self.gui_config.sg_output_device = output_devices[0] else: - self.config.sg_output_device = prev_output + self.gui_config.sg_output_device = 
prev_output self.window["sg_output_device"].Update(values=output_devices) self.window["sg_output_device"].Update( - value=self.config.sg_output_device + value=self.gui_config.sg_output_device ) if event == "start_vc" and self.flag_vc == False: if self.set_values(values) == True: @@ -426,6 +439,7 @@ def event_handler(self): "crossfade_length": values["crossfade_length"], "extra_time": values["extra_time"], "n_cpu": values["n_cpu"], + "use_jit":values["use_jit"], "f0method": ["pm", "harvest", "crepe", "rmvpe"][ [ values["pm"], @@ -454,28 +468,30 @@ def event_handler(self): stream_latency = -1 # Parameter hot update if event == "threhold": - self.config.threhold = values["threhold"] + self.gui_config.threhold = values["threhold"] elif event == "pitch": - self.config.pitch = values["pitch"] + self.gui_config.pitch = values["pitch"] if hasattr(self, "rvc"): self.rvc.change_key(values["pitch"]) elif event == "index_rate": - self.config.index_rate = values["index_rate"] + self.gui_config.index_rate = values["index_rate"] if hasattr(self, "rvc"): self.rvc.change_index_rate(values["index_rate"]) elif event == "rms_mix_rate": - self.config.rms_mix_rate = values["rms_mix_rate"] + self.gui_config.rms_mix_rate = values["rms_mix_rate"] elif event in ["pm", "harvest", "crepe", "rmvpe"]: - self.config.f0method = event + self.gui_config.f0method = event elif event == "I_noise_reduce": - self.config.I_noise_reduce = values["I_noise_reduce"] + self.gui_config.I_noise_reduce = values["I_noise_reduce"] if stream_latency > 0: self.delay_time += ( 1 if values["I_noise_reduce"] else -1 ) * values["crossfade_length"] self.window["delay_time"].update(int(self.delay_time * 1000)) elif event == "O_noise_reduce": - self.config.O_noise_reduce = values["O_noise_reduce"] + self.gui_config.O_noise_reduce = values["O_noise_reduce"] + elif event == "use_jit": + self.config.use_jit=values["use_jit"] elif event in ["vc", "im"]: self.function = event elif event != "start_vc" and self.flag_vc == True: @@ -499,19 +515,19 @@ def set_values(self, values): return False self.set_devices(values["sg_input_device"], values["sg_output_device"]) # self.device_latency = values["device_latency"] - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.rms_mix_rate = values["rms_mix_rate"] - self.config.index_rate = values["index_rate"] - self.config.n_cpu = values["n_cpu"] - self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][ + self.gui_config.pth_path = values["pth_path"] + self.gui_config.index_path = values["index_path"] + self.gui_config.threhold = values["threhold"] + self.gui_config.pitch = values["pitch"] + self.gui_config.block_time = values["block_time"] + self.gui_config.crossfade_time = values["crossfade_length"] + self.gui_config.extra_time = values["extra_time"] + self.gui_config.I_noise_reduce = values["I_noise_reduce"] + self.gui_config.O_noise_reduce = values["O_noise_reduce"] + self.gui_config.rms_mix_rate = values["rms_mix_rate"] + self.gui_config.index_rate = values["index_rate"] + self.gui_config.n_cpu = values["n_cpu"] + self.gui_config.f0method = ["pm", "harvest", "crepe", "rmvpe"][ [ values["pm"], values["harvest"], @@ 
-525,34 +541,34 @@ def start_vc(self): torch.cuda.empty_cache() self.flag_vc = True self.rvc = rvc_for_realtime.RVC( - self.config.pitch, - self.config.pth_path, - self.config.index_path, - self.config.index_rate, - self.config.n_cpu, + self.gui_config.pitch, + self.gui_config.pth_path, + self.gui_config.index_path, + self.gui_config.index_rate, + self.gui_config.n_cpu, inp_q, opt_q, - device, + self.config, self.rvc if hasattr(self, "rvc") else None, ) - self.config.samplerate = self.rvc.tgt_sr + self.gui_config.samplerate = self.rvc.tgt_sr self.zc = self.rvc.tgt_sr // 100 self.block_frame = ( - int(np.round(self.config.block_time * self.config.samplerate / self.zc)) + int(np.round(self.gui_config.block_time * self.gui_config.samplerate / self.zc)) * self.zc ) self.block_frame_16k = 160 * self.block_frame // self.zc self.crossfade_frame = ( int( np.round( - self.config.crossfade_time * self.config.samplerate / self.zc + self.gui_config.crossfade_time * self.gui_config.samplerate / self.zc ) ) * self.zc ) self.sola_search_frame = self.zc self.extra_frame = ( - int(np.round(self.config.extra_time * self.config.samplerate / self.zc)) + int(np.round(self.gui_config.extra_time * self.gui_config.samplerate / self.zc)) * self.zc ) self.input_wav: torch.Tensor = torch.zeros( @@ -560,12 +576,12 @@ def start_vc(self): + self.crossfade_frame + self.sola_search_frame + self.block_frame, - device=device, + device=self.config.device, dtype=torch.float32, ) self.input_wav_res: torch.Tensor = torch.zeros( 160 * self.input_wav.shape[0] // self.zc, - device=device, + device=self.config.device, dtype=torch.float32, ) self.pitch: np.ndarray = np.zeros( @@ -577,12 +593,12 @@ def start_vc(self): dtype="float64", ) self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 + self.crossfade_frame, device=self.config.device, dtype=torch.float32 ) self.nr_buffer: torch.Tensor = self.sola_buffer.clone() self.output_buffer: torch.Tensor = self.input_wav.clone() self.res_buffer: torch.Tensor = torch.zeros( - 2 * self.zc, device=device, dtype=torch.float32 + 2 * self.zc, device=self.config.device, dtype=torch.float32 ) self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0] self.fade_in_window: torch.Tensor = ( @@ -593,7 +609,7 @@ def start_vc(self): 0.0, 1.0, steps=self.crossfade_frame, - device=device, + device=self.config.device, dtype=torch.float32, ) ) @@ -601,11 +617,11 @@ def start_vc(self): ) self.fade_out_window: torch.Tensor = 1 - self.fade_in_window self.resampler = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ).to(device) + orig_freq=self.gui_config.samplerate, new_freq=16000, dtype=torch.float32 + ).to(self.config.device) self.tg = TorchGate( - sr=self.config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9 - ).to(device) + sr=self.gui_config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9 + ).to(self.config.device) thread_vc = threading.Thread(target=self.soundinput) thread_vc.start() @@ -618,13 +634,13 @@ def soundinput(self): channels=channels, callback=self.audio_callback, blocksize=self.block_frame, - samplerate=self.config.samplerate, + samplerate=self.gui_config.samplerate, dtype="float32", ) as stream: global stream_latency stream_latency = stream.latency[-1] while self.flag_vc: - time.sleep(self.config.block_time) + time.sleep(self.gui_config.block_time) logger.debug("Audio block passed.") logger.debug("ENDing VC") @@ -636,12 +652,12 @@ def audio_callback( """ start_time = time.perf_counter() 
indata = librosa.to_mono(indata.T) - if self.config.threhold > -60: + if self.gui_config.threhold > -60: rms = librosa.feature.rms( y=indata, frame_length=4 * self.zc, hop_length=self.zc ) db_threhold = ( - librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold + librosa.amplitude_to_db(rms, ref=1.0)[0] < self.gui_config.threhold ) for i in range(db_threhold.shape[0]): if db_threhold[i]: @@ -649,12 +665,12 @@ def audio_callback( self.input_wav[: -self.block_frame] = self.input_wav[ self.block_frame : ].clone() - self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to(device) + self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to(self.config.device) self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[ self.block_frame_16k : ].clone() # input noise reduction and resampling - if self.config.I_noise_reduce and self.function == "vc": + if self.gui_config.I_noise_reduce and self.function == "vc": input_wav = self.input_wav[ -self.crossfade_frame - self.block_frame - 2 * self.zc : ] @@ -680,7 +696,7 @@ def audio_callback( # infer if self.function == "vc": f0_extractor_frame = self.block_frame_16k + 800 - if self.config.f0method == "rmvpe": + if self.gui_config.f0method == "rmvpe": f0_extractor_frame = ( 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160 ) @@ -691,7 +707,7 @@ def audio_callback( self.valid_rate, self.pitch, self.pitchf, - self.config.f0method, + self.gui_config.f0method, ) infer_wav = infer_wav[ -self.crossfade_frame - self.sola_search_frame - self.block_frame : @@ -701,8 +717,8 @@ def audio_callback( -self.crossfade_frame - self.sola_search_frame - self.block_frame : ].clone() # output noise reduction - if (self.config.O_noise_reduce and self.function == "vc") or ( - self.config.I_noise_reduce and self.function == "im" + if (self.gui_config.O_noise_reduce and self.function == "vc") or ( + self.gui_config.I_noise_reduce and self.function == "im" ): self.output_buffer[: -self.block_frame] = self.output_buffer[ self.block_frame : @@ -712,7 +728,7 @@ def audio_callback( infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0) ).squeeze(0) # volume envelop mixing - if self.config.rms_mix_rate < 1 and self.function == "vc": + if self.gui_config.rms_mix_rate < 1 and self.function == "vc": rms1 = librosa.feature.rms( y=self.input_wav_res[-160 * infer_wav.shape[0] // self.zc :] .cpu() @@ -720,7 +736,7 @@ def audio_callback( frame_length=640, hop_length=160, ) - rms1 = torch.from_numpy(rms1).to(device) + rms1 = torch.from_numpy(rms1).to(self.config.device) rms1 = F.interpolate( rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, @@ -732,7 +748,7 @@ def audio_callback( frame_length=4 * self.zc, hop_length=self.zc, ) - rms2 = torch.from_numpy(rms2).to(device) + rms2 = torch.from_numpy(rms2).to(self.config.device) rms2 = F.interpolate( rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, @@ -741,7 +757,7 @@ def audio_callback( )[0, 0, :-1] rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3) infer_wav *= torch.pow( - rms1 / rms2, torch.tensor(1 - self.config.rms_mix_rate) + rms1 / rms2, torch.tensor(1 - self.gui_config.rms_mix_rate) ) # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC conv_input = infer_wav[ @@ -751,7 +767,7 @@ def audio_callback( cor_den = torch.sqrt( F.conv1d( conv_input**2, - torch.ones(1, 1, self.crossfade_frame, device=device), + torch.ones(1, 1, self.crossfade_frame, device=self.config.device), ) + 1e-8 ) diff --git a/infer/lib/jit/get_synthesizer.py b/infer/lib/jit/get_synthesizer.py index 17c99ee0..ef5fe58 100644 --- 
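The gui_v1.py hunk above ends at the SOLA (synchronized overlap-add) crossfade search borrowed from DDSP-SVC. For orientation, here is a minimal sketch of what that search computes: a normalized cross-correlation that picks the offset at which the newly inferred audio lines up best with the tail of the previous output block. The tensor and function names below are illustrative, not the exact variables used in `audio_callback`.

```python
import torch
import torch.nn.functional as F

def sola_offset(infer_wav: torch.Tensor, sola_buffer: torch.Tensor, search_frames: int) -> int:
    """Return the shift in [0, search_frames] that best aligns infer_wav with sola_buffer."""
    frame = sola_buffer.shape[0]
    conv_input = infer_wav[None, None, : frame + search_frames]
    # Correlation of every candidate window with the previous output tail.
    cor_nom = F.conv1d(conv_input, sola_buffer[None, None, :])
    # Energy of every candidate window, used to normalize the correlation.
    cor_den = torch.sqrt(F.conv1d(conv_input ** 2, torch.ones(1, 1, frame)) + 1e-8)
    return int(torch.argmax(cor_nom[0, 0] / cor_den[0, 0]))
```

Once the offset is found, the first crossfade-length samples of the shifted output are blended with `sola_buffer` using the `fade_in_window`/`fade_out_window` tensors prepared in `start_vc`, which is what keeps consecutive real-time blocks click-free.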
a/infer/lib/jit/get_synthesizer.py +++ b/infer/lib/jit/get_synthesizer.py @@ -10,7 +10,7 @@ def get_synthesizer(pth_path, device=torch.device("cpu")): ) cpt = torch.load(pth_path, map_location=torch.device("cpu")) - tgt_sr = cpt["config"][-1] + # tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") @@ -26,11 +26,11 @@ def get_synthesizer(pth_path, device=torch.device("cpu")): net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) del net_g.enc_q # net_g.forward = net_g.infer - ckpt = {} - ckpt["config"] = cpt["config"] - ckpt["f0"] = if_f0 - ckpt["version"] = version - ckpt["info"] = cpt.get("info", "0epoch") + # ckpt = {} + # ckpt["config"] = cpt["config"] + # ckpt["f0"] = if_f0 + # ckpt["version"] = version + # ckpt["info"] = cpt.get("info", "0epoch") net_g.load_state_dict(cpt["weight"], strict=False) net_g = net_g.float() net_g.eval().to(device) diff --git a/tools/rvc_for_realtime.py b/tools/rvc_for_realtime.py index f827d4e..c5e7933 100644 --- a/tools/rvc_for_realtime.py +++ b/tools/rvc_for_realtime.py @@ -6,6 +6,7 @@ import logging from infer.lib import jit +from infer.lib.jit.get_synthesizer import get_synthesizer logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -35,17 +36,10 @@ from configs.config import Config -config = Config() +# config = Config() mm = M() -if config.dml == True: - def forward_dml(ctx, x, scale): - ctx.scale = scale - res = x.clone().detach() - return res - - fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml # config.device=torch.device("cpu")########强制cpu测试 @@ -60,18 +54,27 @@ def __init__( n_cpu, inp_q, opt_q, - device, + config:Config, last_rvc=None, ) -> None: """ 初始化 """ try: - global config + if config.dml == True: + + def forward_dml(ctx, x, scale): + ctx.scale = scale + res = x.clone().detach() + return res + + fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml + # global config + self.config=config self.inp_q = inp_q self.opt_q = opt_q # device="cpu"########强制cpu测试 - self.device = device + self.device = config.device self.f0_up_key = key self.time_step = 160 / 16000 * 1000 self.f0_min = 50 @@ -81,6 +84,9 @@ def __init__( self.sr = 16000 self.window = 160 self.n_cpu = n_cpu + self.use_jit = self.config.use_jit + self.is_half = config.is_half + if index_rate != 0: self.index = faiss.read_index(index_path) self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) @@ -95,8 +101,8 @@ def __init__( suffix="", ) hubert_model = models[0] - hubert_model = hubert_model.to(device) - if config.is_half: + hubert_model = hubert_model.to(self.device) + if self.is_half: hubert_model = hubert_model.half() else: hubert_model = hubert_model.float() @@ -105,72 +111,52 @@ def __init__( else: self.model = last_rvc.model - if last_rvc is None or last_rvc.pth_path != self.pth_path: - self.is_half = config.is_half - - def set_default_model(): - cpt = torch.load(self.pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = 
SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - logger.debug( - self.net_g.load_state_dict(cpt["weight"], strict=False) - ) - self.net_g.eval().to(device) - if self.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - - def set_jit_model(): - jit_pth_path = self.pth_path.rstrip(".pth") - jit_pth_path += ".half.jit" if self.is_half else ".jit" - reload = False - if str(self.device) == "cuda": - self.device = torch.device("cuda:0") - if os.path.exists(jit_pth_path): - cpt = jit.load(jit_pth_path) - model_device = cpt["device"] - if model_device != str(self.device): - reload = True - else: + self.net_g:nn.Module=None + def set_default_model(): + self.net_g, cpt=get_synthesizer(self.pth_path,self.device) + self.tgt_sr = cpt["config"][-1] + cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] + self.if_f0 = cpt.get("f0", 1) + self.version = cpt.get("version", "v1") + if self.is_half: + self.net_g = self.net_g.half() + else: + self.net_g = self.net_g.float() + + def set_jit_model(): + jit_pth_path = self.pth_path.rstrip(".pth") + jit_pth_path += ".half.jit" if self.is_half else ".jit" + reload = False + if str(self.device) == "cuda": + self.device = torch.device("cuda:0") + if os.path.exists(jit_pth_path): + cpt = jit.load(jit_pth_path) + model_device = cpt["device"] + if model_device != str(self.device): reload = True - - if reload: - cpt = jit.synthesizer_jit_export( - self.pth_path, - "script", - None, - device=self.device, - is_half=self.is_half, - ) - - self.tgt_sr = cpt["config"][-1] - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - self.net_g = torch.jit.load( - BytesIO(cpt["model"]), map_location=device + else: + reload = True + + if reload: + cpt = jit.synthesizer_jit_export( + self.pth_path, + "script", + None, + device=self.device, + is_half=self.is_half, ) - self.net_g.infer = self.net_g.forward - self.net_g.eval().to(device) - if config.use_jit and not config.dml: + self.tgt_sr = cpt["config"][-1] + self.if_f0 = cpt.get("f0", 1) + self.version = cpt.get("version", "v1") + self.net_g = torch.jit.load( + BytesIO(cpt["model"]), map_location=self.device + ) + self.net_g.infer = self.net_g.forward + self.net_g.eval().to(self.device) + + def set_synthesizer(): + if self.use_jit and not config.dml: if self.is_half and "cpu" in str(self.device): logger.warning( "Use default Synthesizer model. 
\ @@ -182,14 +168,18 @@ def set_jit_model(): else: set_default_model() - # print(2333333333,device,config.device,self.device)#net_g是device,hubert是config.device - + if last_rvc is None or last_rvc.pth_path != self.pth_path: + set_synthesizer() else: self.tgt_sr = last_rvc.tgt_sr self.if_f0 = last_rvc.if_f0 self.version = last_rvc.version - self.net_g = last_rvc.net_g self.is_half = last_rvc.is_half + if last_rvc.use_jit != self.use_jit: + set_synthesizer() + else: + self.net_g = last_rvc.net_g + if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"): self.model_rmvpe = last_rvc.model_rmvpe @@ -329,7 +319,7 @@ def get_f0_rmvpe(self, x, f0_up_key): "assets/rmvpe/rmvpe.pt", is_half=self.is_half, device=self.device, ####正常逻辑 - use_jit=config.use_jit, + use_jit=self.config.use_jit, ) # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device) f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) @@ -347,7 +337,7 @@ def infer( f0method, ) -> np.ndarray: feats = feats.view(1, -1) - if config.is_half: + if self.config.is_half: feats = feats.half() else: feats = feats.float() @@ -374,7 +364,7 @@ def infer( weight = np.square(1 / score) weight /= weight.sum(axis=1, keepdims=True) npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if config.is_half: + if self.config.is_half: npy = npy.astype("float16") feats[0][-leng_replace_head:] = ( torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate From 3848c7b887a85cb8a515bbf3cff275161c3a7984 Mon Sep 17 00:00:00 2001 From: Blaise <133521603+blaise-tk@users.noreply.github.com> Date: Sat, 23 Sep 2023 06:47:16 +0200 Subject: [PATCH 19/26] Code refactor + re-design inference ui (#1304) * Code refacor + re-design inference ui * Fix tabname * i18n jp --------- Co-authored-by: Ftps --- i18n/locale/en_US.json | 4 +- i18n/locale/es_ES.json | 4 +- i18n/locale/fr_FR.json | 4 +- i18n/locale/it_IT.json | 4 +- i18n/locale/ja_JP.json | 4 +- i18n/locale/ru_RU.json | 4 +- i18n/locale/zh_CN.json | 4 +- infer-web.py | 493 ++++++++++++++++++++++------------------- 8 files changed, 288 insertions(+), 233 deletions(-) diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index 9fa744b..5059127 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -124,5 +124,7 @@ "音调设置": "Pitch settings", "音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)", "音高算法": "pitch detection algorithm", - "额外推理时长": "Extra inference time" + "额外推理时长": "Extra inference time", + "单人": "Single", + "批次": "Batch" } diff --git a/i18n/locale/es_ES.json b/i18n/locale/es_ES.json index 961cb3a..7c57327 100644 --- a/i18n/locale/es_ES.json +++ b/i18n/locale/es_ES.json @@ -124,5 +124,7 @@ "音调设置": "Ajuste de tono", "音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice el mismo tipo de controlador)", "音高算法": "Algoritmo de tono", - "额外推理时长": "Tiempo de inferencia adicional" + "额外推理时长": "Tiempo de inferencia adicional", + "单人": "Individual", + "批次": "Lote" } diff --git a/i18n/locale/fr_FR.json b/i18n/locale/fr_FR.json index d12078e..55c9a98 100644 --- a/i18n/locale/fr_FR.json +++ b/i18n/locale/fr_FR.json @@ -124,5 +124,7 @@ "音调设置": "Réglages de la hauteur", "音频设备(请使用同种类驱动)": "Périphérique audio (veuillez utiliser le même type de pilote)", "音高算法": "algorithme de détection de la hauteur", - "额外推理时长": "Temps d'inférence supplémentaire" + "额外推理时长": "Temps d'inférence supplémentaire", + "单人" : "Individu", + "批次" : "Lote" } diff --git a/i18n/locale/it_IT.json b/i18n/locale/it_IT.json index 38fdef8..6b82f9a 100644 --- 
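Stepping back to the `tools/rvc_for_realtime.py` changes in this patch: synthesizer loading is now split into an eager path (`set_default_model`, built on `get_synthesizer`) and a TorchScript path (`set_jit_model`), selected by the new `use_jit` flag. A condensed sketch of that selection logic follows; `build_eager` is a hypothetical stand-in for the repository's `SynthesizerTrnMs*` constructors, and the cache naming simply mirrors the `.half.jit`/`.jit` suffixes seen above.

```python
import os
import torch

def load_synthesizer(pth_path: str, device: torch.device, is_half: bool, use_jit: bool, build_eager):
    """Prefer a cached TorchScript export; otherwise rebuild the eager module.

    build_eager: callable taking cpt["config"] and returning an uninitialized synthesizer.
    """
    jit_path = pth_path[: -len(".pth")] + (".half.jit" if is_half else ".jit")
    if use_jit and os.path.exists(jit_path):
        net_g = torch.jit.load(jit_path, map_location=device)
    else:
        cpt = torch.load(pth_path, map_location="cpu")
        net_g = build_eager(cpt["config"])
        net_g.load_state_dict(cpt["weight"], strict=False)
        net_g = net_g.half() if is_half else net_g.float()
        net_g = net_g.to(device)
    return net_g.eval()
```

The real implementation also re-exports the script when the cached file was produced for a different device, and falls back to the eager model for half precision on CPU (as the warning in the hunk above shows); that bookkeeping is omitted in this sketch.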
a/i18n/locale/it_IT.json +++ b/i18n/locale/it_IT.json @@ -124,5 +124,7 @@ "音调设置": "Impostazioni del tono", "音频设备(请使用同种类驱动)": "Dispositivo audio (utilizzare lo stesso tipo di driver)", "音高算法": "音高算法", - "额外推理时长": "Tempo di inferenza extra" + "额外推理时长": "Tempo di inferenza extra", + "单人": "Individuale", + "批次": "Lote" } diff --git a/i18n/locale/ja_JP.json b/i18n/locale/ja_JP.json index 903ed87..40f224d 100644 --- a/i18n/locale/ja_JP.json +++ b/i18n/locale/ja_JP.json @@ -124,5 +124,7 @@ "音调设置": "音程設定", "音频设备(请使用同种类驱动)": "オーディオデバイス(同じ種類のドライバーを使用してください)", "音高算法": "ピッチアルゴリズム", - "额外推理时长": "追加推論時間" + "额外推理时长": "追加推論時間", + "单人": "個人", + "批次": "バッチ" } diff --git a/i18n/locale/ru_RU.json b/i18n/locale/ru_RU.json index b6530ac..638ee7b 100644 --- a/i18n/locale/ru_RU.json +++ b/i18n/locale/ru_RU.json @@ -124,5 +124,7 @@ "音调设置": "Настройка высоты звука", "音频设备(请使用同种类驱动)": "Аудиоустройство (пожалуйста, используйте такой же тип драйвера)", "音高算法": "Алгоритм оценки высоты звука", - "额外推理时长": "Доп. время переработки" + "额外推理时长": "Доп. время переработки", + "单人": "Одиночный", + "批次": "Партия" } diff --git a/i18n/locale/zh_CN.json b/i18n/locale/zh_CN.json index b14e5f0..345e27f 100644 --- a/i18n/locale/zh_CN.json +++ b/i18n/locale/zh_CN.json @@ -124,5 +124,7 @@ "音调设置": "音调设置", "音频设备(请使用同种类驱动)": "音频设备(请使用同种类驱动)", "音高算法": "音高算法", - "额外推理时长": "额外推理时长" + "额外推理时长": "额外推理时长", + "单人": "单人", + "批次": "批次" } diff --git a/infer-web.py b/infer-web.py index e3565d8..e995aae 100644 --- a/infer-web.py +++ b/infer-web.py @@ -1,36 +1,36 @@ -import os, sys - -now_dir = os.getcwd() -sys.path.append(now_dir) -import logging -import shutil -import threading -import traceback -import warnings -from random import shuffle -from subprocess import Popen -from time import sleep -import json -import pathlib - -import fairseq -import faiss -import gradio as gr -import numpy as np -import torch -from dotenv import load_dotenv -from sklearn.cluster import MiniBatchKMeans - -from configs.config import Config -from i18n.i18n import I18nAuto +from infer.modules.vc.modules import VC +from infer.modules.uvr5.modules import uvr from infer.lib.train.process_ckpt import ( change_info, extract_small_model, merge, show_info, ) -from infer.modules.uvr5.modules import uvr -from infer.modules.vc.modules import VC +from i18n.i18n import I18nAuto +from configs.config import Config +from sklearn.cluster import MiniBatchKMeans +from dotenv import load_dotenv +import torch +import numpy as np +import gradio as gr +import faiss +import fairseq +import pathlib +import json +from time import sleep +from subprocess import Popen +from random import shuffle +import warnings +import traceback +import threading +import shutil +import logging +import os +import sys + +now_dir = os.getcwd() +sys.path.append(now_dir) + logging.getLogger("numba").setLevel(logging.WARNING) @@ -38,8 +38,10 @@ tmp = os.path.join(now_dir, "TEMP") shutil.rmtree(tmp, ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True) +shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % + (now_dir), ignore_errors=True) +shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % + (now_dir), ignore_errors=True) os.makedirs(tmp, exist_ok=True) os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True) os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True) @@ -219,8 +221,9 @@ def preprocess_dataset(trainset_dir, exp_dir, sr, n_p): per, ) 
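A side note on the `preprocess_dataset` hunk that continues below: the reflowed Chinese comment there says that Gradio only receives `Popen` output in one lump after the process finishes, so progress is instead written to a log file that a watcher thread polls and yields back to the UI. A rough sketch of that pattern, using illustrative names (`run_and_tail`, the polling interval) rather than the repository's exact helpers:

```python
import os
import threading
import time
from subprocess import Popen

def run_and_tail(cmd: str, log_path: str, poll_s: float = 1.0):
    """Start cmd via the shell and yield new log-file content until it exits."""
    done = [False]
    proc = Popen(cmd, shell=True)
    # Flip the flag as soon as the subprocess terminates.
    threading.Thread(target=lambda: (proc.wait(), done.__setitem__(0, True))).start()
    pos = 0
    while True:
        if os.path.exists(log_path):
            with open(log_path, "r") as f:
                f.seek(pos)
                chunk = f.read()
                pos = f.tell()
            if chunk:
                yield chunk  # Gradio re-renders the textbox on every yielded string.
        if done[0]:
            break
        time.sleep(poll_s)
```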
logger.info(cmd) - p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir + p = Popen(cmd, shell=True) + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done, @@ -263,7 +266,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp p = Popen( cmd, shell=True, cwd=now_dir ) # , stdin=PIPE, stdout=PIPE,stderr=PIPE - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done, @@ -295,7 +298,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp cmd, shell=True, cwd=now_dir ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done_multi, # @@ -331,7 +334,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp log = f.read() logger.info(log) yield log - ####对不同part分别开多进程 + # 对不同part分别开多进程 """ n_part=int(sys.argv[1]) i_part=int(sys.argv[2]) @@ -360,7 +363,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvp cmd, shell=True, cwd=now_dir ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 + # 煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 done = [False] threading.Thread( target=if_done_multi, @@ -618,7 +621,8 @@ def train_index(exp_dir1, version19): np.random.shuffle(big_npy_idx) big_npy = big_npy[big_npy_idx] if big_npy.shape[0] > 2e5: - infos.append("Trying doing kmeans %s shape to 10k centers." % big_npy.shape[0]) + infos.append("Trying doing kmeans %s shape to 10k centers." 
% + big_npy.shape[0]) yield "\n".join(infos) try: big_npy = ( @@ -642,7 +646,8 @@ def train_index(exp_dir1, version19): n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) infos.append("%s,%s" % (big_npy.shape, n_ivf)) yield "\n".join(infos) - index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) + index = faiss.index_factory( + 256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf) infos.append("training") yield "\n".join(infos) @@ -659,7 +664,7 @@ def train_index(exp_dir1, version19): yield "\n".join(infos) batch_size_add = 8192 for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i : i + batch_size_add]) + index.add(big_npy[i: i + batch_size_add]) faiss.write_index( index, "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" @@ -701,11 +706,12 @@ def get_info_str(strr): infos.append(strr) return "\n".join(infos) - ####### step1:处理数据 + # step1:处理数据 yield get_info_str(i18n("step1:正在处理数据")) - [get_info_str(_) for _ in preprocess_dataset(trainset_dir4, exp_dir1, sr2, np7)] + [get_info_str(_) for _ in preprocess_dataset( + trainset_dir4, exp_dir1, sr2, np7)] - ####### step2a:提取音高 + # step2a:提取音高 yield get_info_str(i18n("step2:正在提取音高&正在提取特征")) [ get_info_str(_) @@ -714,7 +720,7 @@ def get_info_str(strr): ) ] - ####### step3a:训练模型 + # step3a:训练模型 yield get_info_str(i18n("step3a:正在训练模型")) click_train( exp_dir1, @@ -734,7 +740,7 @@ def get_info_str(strr): ) yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log")) - ####### step3b:训练索引 + # step3b:训练索引 [get_info_str(_) for _ in train_index(exp_dir1, version19)] yield get_info_str(i18n("全流程结束!")) @@ -749,7 +755,8 @@ def change_info_(ckpt_path): ) as f: info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1]) sr, f0 = info["sample_rate"], info["if_f0"] - version = "v2" if ("version" in info and info["version"] == "v2") else "v1" + version = "v2" if ( + "version" in info and info["version"] == "v2") else "v1" return sr, str(f0), version except: traceback.print_exc() @@ -768,6 +775,7 @@ def change_f0_method(f0method8): with gr.Blocks(title="RVC WebUI") as app: + gr.Markdown("## RVC WebUI") gr.Markdown( value=i18n( "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." @@ -775,143 +783,167 @@ def change_f0_method(f0method8): ) with gr.Tabs(): with gr.TabItem(i18n("模型推理")): - with gr.Row(): - sid0 = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names)) - refresh_button = gr.Button(i18n("刷新音色列表和索引路径"), variant="primary") - clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary") - spk_item = gr.Slider( - minimum=0, - maximum=2333, - step=1, - label=i18n("请选择说话人id"), - value=0, - visible=False, - interactive=True, - ) - clean_button.click( - fn=clean, inputs=[], outputs=[sid0], api_name="infer_clean" - ) - with gr.Group(): + with gr.TabItem(i18n("单人")): + with gr.Row(): + sid0 = gr.Dropdown(label=i18n("推理音色"), + choices=sorted(names)) + file_index2 = gr.Dropdown( + label=i18n("自动检测index路径,下拉式选择(dropdown)"), + choices=sorted(index_paths), + interactive=True, + ) + with gr.Column(): + refresh_button = gr.Button( + i18n("刷新音色列表和索引路径"), variant="primary" + ) + clean_button = gr.Button( + i18n("卸载音色省显存"), variant="primary") + spk_item = gr.Slider( + minimum=0, + maximum=2333, + step=1, + label=i18n("请选择说话人id"), + value=0, + visible=False, + interactive=True, + ) + clean_button.click( + fn=clean, inputs=[], outputs=[sid0], api_name="infer_clean" + ) + with gr.Group(): + with gr.Row(): + with gr.Column(): + vc_transform0 = gr.Number( + label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 + ) + input_audio0 = gr.Textbox( + label=i18n("输入待处理音频文件路径(默认是正确格式示例)"), + placeholder="C:\\User\\Desktop\\audio_example.wav", + ) + file_index1 = gr.Textbox( + label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), + placeholder="C:\\User\\Desktop\\model_example.index", + interactive=True, + ) + f0method0 = gr.Radio( + label=i18n( + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU" + ), + choices=["pm", "harvest", "crepe", "rmvpe"] + if config.dml == False + else ["pm", "harvest", "rmvpe"], + value="rmvpe", + interactive=True, + ) + # f0_file = gr.File( + # label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调") + # ) + + refresh_button.click( + fn=change_choices, + inputs=[], + outputs=[sid0, file_index2], + api_name="infer_refresh", + ) + # file_big_npy1 = gr.Textbox( + # label=i18n("特征文件路径"), + # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", + # interactive=True, + # ) + + with gr.Column(): + resample_sr0 = gr.Slider( + minimum=0, + maximum=48000, + label=i18n("后处理重采样至最终采样率,0为不进行重采样"), + value=0, + step=1, + interactive=True, + ) + rms_mix_rate0 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), + value=0.25, + interactive=True, + ) + protect0 = gr.Slider( + minimum=0, + maximum=0.5, + label=i18n( + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果" + ), + value=0.33, + step=0.01, + interactive=True, + ) + filter_radius0 = gr.Slider( + minimum=0, + maximum=7, + label=i18n( + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音" + ), + value=3, + step=1, + interactive=True, + ) + index_rate1 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("检索特征占比"), + value=0.75, + interactive=True, + ) + with gr.Group(): + with gr.Column(): + but0 = gr.Button(i18n("转换"), variant="primary") + with gr.Row(): + vc_output1 = gr.Textbox(label=i18n("输出信息")) + vc_output2 = gr.Audio( + label=i18n("输出音频(右下角三个点,点了可以下载)")) + + but0.click( + vc.vc_single, + [ + spk_item, + input_audio0, + vc_transform0, + # f0_file, + f0method0, + file_index1, + file_index2, + # file_big_npy1, + index_rate1, + filter_radius0, + resample_sr0, + rms_mix_rate0, + protect0, + ], + [vc_output1, vc_output2], + 
api_name="infer_convert", + ) + with gr.TabItem(i18n("批次")): gr.Markdown( - value=i18n("男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ") + value=i18n( + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ") ) with gr.Row(): with gr.Column(): - vc_transform0 = gr.Number( + vc_transform1 = gr.Number( label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 ) - input_audio0 = gr.Textbox( - label=i18n("输入待处理音频文件路径(默认是正确格式示例)"), - value="E:\\codes\\py39\\test-20230416b\\todo-songs\\冬之花clip1.wav", - ) - f0method0 = gr.Radio( - label=i18n( - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU" - ), - choices=["pm", "harvest", "crepe", "rmvpe"] - if config.dml == False - else ["pm", "harvest", "rmvpe"], - value="pm", - interactive=True, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - with gr.Column(): - file_index1 = gr.Textbox( + opt_input = gr.Textbox( + label=i18n("指定输出文件夹"), value="opt") + file_index3 = gr.Textbox( label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), value="", interactive=True, ) - file_index2 = gr.Dropdown( + file_index4 = gr.Dropdown( label=i18n("自动检测index路径,下拉式选择(dropdown)"), choices=sorted(index_paths), interactive=True, ) - refresh_button.click( - fn=change_choices, - inputs=[], - outputs=[sid0, file_index2], - api_name="infer_refresh", - ) - # file_big_npy1 = gr.Textbox( - # label=i18n("特征文件路径"), - # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", - # interactive=True, - # ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=0.75, - interactive=True, - ) - with gr.Column(): - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("后处理重采样至最终采样率,0为不进行重采样"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), - value=0.25, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果" - ), - value=0.33, - step=0.01, - interactive=True, - ) - f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调")) - but0 = gr.Button(i18n("转换"), variant="primary") - with gr.Row(): - vc_output1 = gr.Textbox(label=i18n("输出信息")) - vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)")) - but0.click( - vc.vc_single, - [ - spk_item, - input_audio0, - vc_transform0, - f0_file, - f0method0, - file_index1, - file_index2, - # file_big_npy1, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - ], - [vc_output1, vc_output2], - api_name="infer_convert", - ) - with gr.Group(): - gr.Markdown( - value=i18n("批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. 
") - ) - with gr.Row(): - with gr.Column(): - vc_transform1 = gr.Number( - label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 - ) - opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt") f0method1 = gr.Radio( label=i18n( "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU" @@ -919,28 +951,16 @@ def change_f0_method(f0method8): choices=["pm", "harvest", "crepe", "rmvpe"] if config.dml == False else ["pm", "harvest", "rmvpe"], - value="pm", - interactive=True, - ) - filter_radius1 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, + value="rmvpe", interactive=True, ) - with gr.Column(): - file_index3 = gr.Textbox( - label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), - value="", - interactive=True, - ) - file_index4 = gr.Dropdown( - label=i18n("自动检测index路径,下拉式选择(dropdown)"), - choices=sorted(index_paths), + format1 = gr.Radio( + label=i18n("导出文件格式"), + choices=["wav", "flac", "mp3", "m4a"], + value="wav", interactive=True, ) + refresh_button.click( fn=lambda: change_choices()[1], inputs=[], @@ -952,13 +972,7 @@ def change_f0_method(f0method8): # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", # interactive=True, # ) - index_rate2 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=1, - interactive=True, - ) + with gr.Column(): resample_sr1 = gr.Slider( minimum=0, @@ -985,23 +999,35 @@ def change_f0_method(f0method8): step=0.01, interactive=True, ) - with gr.Column(): - dir_input = gr.Textbox( - label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"), - value="E:\codes\py39\\test-20230416b\\todo-songs", - ) - inputs = gr.File( - file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") + filter_radius1 = gr.Slider( + minimum=0, + maximum=7, + label=i18n( + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), + value=3, + step=1, + interactive=True, ) - with gr.Row(): - format1 = gr.Radio( - label=i18n("导出文件格式"), - choices=["wav", "flac", "mp3", "m4a"], - value="flac", + index_rate2 = gr.Slider( + minimum=0, + maximum=1, + label=i18n("检索特征占比"), + value=1, interactive=True, ) - but1 = gr.Button(i18n("转换"), variant="primary") - vc_output3 = gr.Textbox(label=i18n("输出信息")) + with gr.Row(): + dir_input = gr.Textbox( + label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"), + placeholder="C:\\User\\Desktop\\model_example.index", + ) + inputs = gr.File( + file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") + ) + + with gr.Row(): + but1 = gr.Button(i18n("转换"), variant="primary") + vc_output3 = gr.Textbox(label=i18n("输出信息")) + but1.click( vc.vc_multi, [ @@ -1024,12 +1050,13 @@ def change_f0_method(f0method8): [vc_output3], api_name="infer_convert_batch", ) - sid0.change( - fn=vc.get_vc, - inputs=[sid0, protect0, protect1], - outputs=[spk_item, protect0, protect1, file_index2, file_index4], - api_name="infer_change_voice", - ) + sid0.change( + fn=vc.get_vc, + inputs=[sid0, protect0, protect1], + outputs=[spk_item, protect0, protect1, + file_index2, file_index4], + api_name="infer_change_voice", + ) with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): with gr.Group(): gr.Markdown( @@ -1047,7 +1074,8 @@ def change_f0_method(f0method8): file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") ) with gr.Column(): - model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names) + model_choose = gr.Dropdown( + label=i18n("模型"), choices=uvr5_names) agg = gr.Slider( minimum=0, maximum=20, @@ -1147,7 +1175,8 @@ def change_f0_method(f0method8): api_name="train_preprocess", ) with gr.Group(): - 
gr.Markdown(value=i18n("step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)")) + gr.Markdown(value=i18n( + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)")) with gr.Row(): with gr.Column(): gpus6 = gr.Textbox( @@ -1164,7 +1193,8 @@ def change_f0_method(f0method8): label=i18n( "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU" ), - choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"], + choices=["pm", "harvest", "dio", + "rmvpe", "rmvpe_gpu"], value="rmvpe_gpu", interactive=True, ) @@ -1177,7 +1207,8 @@ def change_f0_method(f0method8): visible=F0GPUVisible, ) but2 = gr.Button(i18n("特征提取"), variant="primary") - info2 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) + info2 = gr.Textbox(label=i18n("输出信息"), + value="", max_lines=8) f0method8.change( fn=change_f0_method, inputs=[f0method8], @@ -1278,7 +1309,8 @@ def change_f0_method(f0method8): but3 = gr.Button(i18n("训练模型"), variant="primary") but4 = gr.Button(i18n("训练特征索引"), variant="primary") but5 = gr.Button(i18n("一键训练"), variant="primary") - info3 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=10) + info3 = gr.Textbox(label=i18n("输出信息"), + value="", max_lines=10) but3.click( click_train, [ @@ -1331,8 +1363,10 @@ def change_f0_method(f0method8): with gr.Group(): gr.Markdown(value=i18n("模型融合, 可用于测试音色融合")) with gr.Row(): - ckpt_a = gr.Textbox(label=i18n("A模型路径"), value="", interactive=True) - ckpt_b = gr.Textbox(label=i18n("B模型路径"), value="", interactive=True) + ckpt_a = gr.Textbox(label=i18n( + "A模型路径"), value="", interactive=True) + ckpt_b = gr.Textbox(label=i18n( + "B模型路径"), value="", interactive=True) alpha_a = gr.Slider( minimum=0, maximum=1, @@ -1370,7 +1404,8 @@ def change_f0_method(f0method8): ) with gr.Row(): but6 = gr.Button(i18n("融合"), variant="primary") - info4 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) + info4 = gr.Textbox(label=i18n("输出信息"), + value="", max_lines=8) but6.click( merge, [ @@ -1403,7 +1438,8 @@ def change_f0_method(f0method8): ) with gr.Row(): but7 = gr.Button(i18n("修改"), variant="primary") - info5 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) + info5 = gr.Textbox(label=i18n("输出信息"), + value="", max_lines=8) but7.click( change_info, [ckpt_path0, info_, name_to_save1], @@ -1417,8 +1453,10 @@ def change_f0_method(f0method8): label=i18n("模型路径"), value="", interactive=True ) but8 = gr.Button(i18n("查看"), variant="primary") - info6 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) - but8.click(show_info, [ckpt_path1], info6, api_name="ckpt_show") + info6 = gr.Textbox(label=i18n("输出信息"), + value="", max_lines=8) + but8.click(show_info, [ckpt_path1], + info6, api_name="ckpt_show") with gr.Group(): gr.Markdown( value=i18n( @@ -1456,7 +1494,8 @@ def change_f0_method(f0method8): label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True ) but9 = gr.Button(i18n("提取"), variant="primary") - info7 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) + info7 = gr.Textbox(label=i18n("输出信息"), + value="", max_lines=8) ckpt_path2.change( change_info_, [ckpt_path2], [sr__, if_f0__, version_1] ) @@ -1469,7 +1508,8 @@ def change_f0_method(f0method8): with gr.TabItem(i18n("Onnx导出")): with gr.Row(): - ckpt_dir = gr.Textbox(label=i18n("RVC模型路径"), value="", interactive=True) + ckpt_dir = gr.Textbox(label=i18n( + "RVC模型路径"), value="", interactive=True) with gr.Row(): onnx_dir = gr.Textbox( label=i18n("Onnx输出路径"), value="", interactive=True @@ -1479,7 +1519,8 @@ def change_f0_method(f0method8): with gr.Row(): butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary") butOnnx.click( 
- export_onnx, [ckpt_dir, onnx_dir], infoOnnx, api_name="export_onnx" + export_onnx, [ + ckpt_dir, onnx_dir], infoOnnx, api_name="export_onnx" ) tab_faq = i18n("常见问题解答") From d69b0263849fee1528ac346e6b26afa432eb3b47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Sat, 23 Sep 2023 13:57:39 +0900 Subject: [PATCH 20/26] feat: optimize actions --- .github/workflows/genlocale.yml | 6 +++--- .github/workflows/pull_format.yml | 2 +- .github/workflows/push_format.yml | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/genlocale.yml b/.github/workflows/genlocale.yml index 081158e..bac9cc3 100644 --- a/.github/workflows/genlocale.yml +++ b/.github/workflows/genlocale.yml @@ -24,7 +24,7 @@ jobs: git config --local user.name 'github-actions[bot]' git config --local user.email 'github-actions[bot]@users.noreply.github.com' git add --all - git commit -m "chore(i18n): sync locale" + git commit -m "chore(i18n): sync locale on ${{github.ref_name}}" - name: Create Pull Request if: steps.commitback.outcome == 'success' @@ -33,7 +33,7 @@ jobs: with: delete-branch: true body: "Automatically sync i18n translation jsons" - title: "chore(i18n): sync locale" - commit-message: "chore(i18n): sync locale" + title: "chore(i18n): sync locale on ${{github.ref_name}}" + commit-message: "chore(i18n): sync locale on ${{github.ref_name}}" branch: genlocale-${{github.ref_name}} branch-suffix: short-commit-hash diff --git a/.github/workflows/pull_format.yml b/.github/workflows/pull_format.yml index e21dc2d..484a214 100644 --- a/.github/workflows/pull_format.yml +++ b/.github/workflows/pull_format.yml @@ -18,7 +18,7 @@ jobs: uses: superbrothers/close-pull-request@v3 with: # Optional. Post a issue comment just before closing a pull request. - comment: "Invalid PR to non-dev branch ${{ github.event.pull_request.base.ref }}." + comment: "Invalid PR to `non-dev` branch `${{ github.event.pull_request.base.ref }}`." 
pull_format: runs-on: ubuntu-latest diff --git a/.github/workflows/push_format.yml b/.github/workflows/push_format.yml index 605b27d..81f8735 100644 --- a/.github/workflows/push_format.yml +++ b/.github/workflows/push_format.yml @@ -38,7 +38,7 @@ jobs: git config --local user.email "github-actions[bot]@users.noreply.github.com" git config --local user.name "github-actions[bot]" git add --all - git commit -m "chore(format): run black" + git commit -m "chore(format): run black on ${{github.ref_name}}" - name: Create Pull Request if: steps.commitback.outcome == 'success' @@ -47,7 +47,7 @@ jobs: with: delete-branch: true body: "Automatically apply code formatter change" - title: "chore(format): run black" - commit-message: "chore(format): run black" + title: "chore(format): run black on ${{github.ref_name}}" + commit-message: "chore(format): run black on ${{github.ref_name}}" branch: formatter-${{github.ref_name}} branch-suffix: short-commit-hash From 8f7a3805eb7b38412155cab2d76c6abdc0a94a3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=90=E6=96=87=E9=9B=A8?= <41315874+fumiama@users.noreply.github.com> Date: Sat, 23 Sep 2023 14:01:39 +0900 Subject: [PATCH 21/26] feat: optimize actions --- .github/workflows/genlocale.yml | 1 - .github/workflows/push_format.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/genlocale.yml b/.github/workflows/genlocale.yml index bac9cc3..c56f7d0 100644 --- a/.github/workflows/genlocale.yml +++ b/.github/workflows/genlocale.yml @@ -36,4 +36,3 @@ jobs: title: "chore(i18n): sync locale on ${{github.ref_name}}" commit-message: "chore(i18n): sync locale on ${{github.ref_name}}" branch: genlocale-${{github.ref_name}} - branch-suffix: short-commit-hash diff --git a/.github/workflows/push_format.yml b/.github/workflows/push_format.yml index 81f8735..7278a81 100644 --- a/.github/workflows/push_format.yml +++ b/.github/workflows/push_format.yml @@ -50,4 +50,3 @@ jobs: title: "chore(format): run black on ${{github.ref_name}}" commit-message: "chore(format): run black on ${{github.ref_name}}" branch: formatter-${{github.ref_name}} - branch-suffix: short-commit-hash From df08977b2bdef81999f447d37d6466fecb1971b2 Mon Sep 17 00:00:00 2001 From: Rice Cake Date: Sat, 23 Sep 2023 14:38:20 +0800 Subject: [PATCH 22/26] Update README & en_US locale file (#1309) --- README.md | 35 +++++++++++++++++++++++++++++++---- i18n/locale/en_US.json | 10 +++++----- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index f221877..5385a04 100644 --- a/README.md +++ b/README.md @@ -68,12 +68,16 @@ poetry install 你也可以通过 pip 来安装依赖: ```bash N卡: - -pip install -r requirements.txt + pip install -r requirements.txt A卡/I卡: -pip install -r requirements-dml.txt + pip install -r requirements-dml.txt + +A卡Rocm(Linux): + pip install -r requirements-amd.txt +I卡IPEX(Linux): + pip install -r requirements-ipex.txt ``` ------ @@ -122,11 +126,34 @@ https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/rmvpe.pt ```bash python infer-web.py ``` - 如果你正在使用Windows 或 macOS,你可以直接下载并解压`RVC-beta.7z`,前者可以运行`go-web.bat`以启动WebUI,后者则运行命令`sh ./run.sh`以启动WebUI。 +对于需要使用IPEX技术的I卡用户,请先在终端执行`source /opt/intel/oneapi/setvars.sh`(仅Linux)。 + 仓库内还有一份`小白简易教程.doc`以供参考。 +## AMD显卡Rocm相关(仅Linux) +如果你想基于AMD的Rocm技术在Linux系统上运行RVC,请先在[这里](https://rocm.docs.amd.com/en/latest/deploy/linux/os-native/install.html)安装所需的驱动。 + +若你使用的是Arch Linux,可以使用pacman来安装所需驱动: +```` +pacman -S rocm-hip-sdk rocm-opencl-sdk +```` +对于某些型号的显卡,你可能需要额外配置如下的环境变量(如:RX6700XT): +```` +export ROCM_PATH=/opt/rocm 
+export HSA_OVERRIDE_GFX_VERSION=10.3.0 +```` +同时确保你的当前用户处于`render`与`video`用户组内: +```` +sudo usermod -aG render $USERNAME +sudo usermod -aG video $USERNAME +```` +之后运行WebUI: +```bash +python infer-web.py +``` + ## 参考项目 + [ContentVec](https://github.com/auspicious3000/contentvec/) + [VITS](https://github.com/jaywalnut310/vits) diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index 5059127..eae9f87 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -86,7 +86,7 @@ "特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:", "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.", "目标采样率": "Target sample rate:", - "算法延迟(ms):": "算法延迟(ms):", + "算法延迟(ms):": "Algorithmic delays(ms):", "自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown:", "融合": "Fusion", "要改的模型信息": "Model information to be modified:", @@ -96,8 +96,8 @@ "训练特征索引": "Train feature index", "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.", "请指定说话人id": "Please specify the speaker/singer ID:", - "请选择index文件": "请选择index文件", - "请选择pth文件": "请选择pth文件", + "请选择index文件": "Please choose the .index file", + "请选择pth文件": "Please choose the .pth file", "请选择说话人id": "Select Speaker/Singer ID:", "转换": "Convert", "输入实验名": "Enter the experiment name:", @@ -105,12 +105,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):", "输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. 
Closer to 1 will be more of a consistently loud volume:", - "输入监听": "输入监听", + "输入监听": "Input voice monitor", "输入训练文件夹路径": "Enter the path of the training folder:", "输入设备": "Input device", "输入降噪": "Input noise reduction", "输出信息": "Output information", - "输出变声": "输出变声", + "输出变声": "Output converted voice", "输出设备": "Output device", "输出降噪": "Output noise reduction", "输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)", From 81922b07f88fec4b9364952a5cb1f3dbb2dc25fe Mon Sep 17 00:00:00 2001 From: yxlllc <33565655+yxlllc@users.noreply.github.com> Date: Sun, 24 Sep 2023 14:24:08 +0800 Subject: [PATCH 23/26] critical: some bug fixes (#1322) * JIT acceleration switch does not support hot update * fix padding bug of rmvpe in torch-directml * fix padding bug of rmvpe in torch-directml --- gui_v1.py | 3 +-- infer/lib/rmvpe.py | 8 +++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/gui_v1.py b/gui_v1.py index f29f4ac..213b748 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -490,8 +490,6 @@ def event_handler(self): self.window["delay_time"].update(int(self.delay_time * 1000)) elif event == "O_noise_reduce": self.gui_config.O_noise_reduce = values["O_noise_reduce"] - elif event == "use_jit": - self.config.use_jit=values["use_jit"] elif event in ["vc", "im"]: self.function = event elif event != "start_vc" and self.flag_vc == True: @@ -514,6 +512,7 @@ def set_values(self, values): sg.popup(i18n("index文件路径不可包含中文")) return False self.set_devices(values["sg_input_device"], values["sg_output_device"]) + self.config.use_jit = values["use_jit"] # self.device_latency = values["device_latency"] self.gui_config.pth_path = values["pth_path"] self.gui_config.index_path = values["index_path"] diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index 044abf2..ca1eed9 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -656,9 +656,11 @@ def get_default_model(): def mel2hidden(self, mel): with torch.no_grad(): n_frames = mel.shape[-1] - mel = F.pad( - mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="constant" - ) + n_pad = 32 * ((n_frames - 1) // 32 + 1) - n_frames + if n_pad > 0: + mel = F.pad( + mel, (0, n_pad), mode="constant" + ) if "privateuseone" in str(self.device): onnx_input_name = self.model.get_inputs()[0].name onnx_outputs_names = self.model.get_outputs()[0].name From c31d909e367210481c307cd804631c67e973ab38 Mon Sep 17 00:00:00 2001 From: yxlllc <33565655+yxlllc@users.noreply.github.com> Date: Mon, 25 Sep 2023 00:07:52 +0800 Subject: [PATCH 24/26] Fix STFT under torch_directml (#1330) --- infer/lib/rmvpe.py | 184 +++++++++-------------------------- tools/torchgate/torchgate.py | 87 +++++++++++------ 2 files changed, 100 insertions(+), 171 deletions(-) diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index ca1eed9..a65fd7e 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -1,7 +1,6 @@ from io import BytesIO import os from typing import List, Optional, Tuple - import numpy as np import torch @@ -27,58 +26,6 @@ logger = logging.getLogger(__name__) -###stft codes from https://github.com/pseeth/torch-stft/blob/master/torch_stft/util.py -def window_sumsquare( - window, - n_frames, - hop_length=200, - win_length=800, - n_fft=800, - dtype=np.float32, - norm=None, -): - """ - # from librosa 0.6 - Compute the sum-square envelope of a window function at a given hop length. - This is used to estimate modulation effects induced by windowing - observations in short-time fourier transforms. 
- Parameters - ---------- - window : string, tuple, number, callable, or list-like - Window specification, as in `get_window` - n_frames : int > 0 - The number of analysis frames - hop_length : int > 0 - The number of samples to advance between frames - win_length : [optional] - The length of the window function. By default, this matches `n_fft`. - n_fft : int > 0 - The length of each analysis frame. - dtype : np.dtype - The data type of the output - Returns - ------- - wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` - The sum-squared envelope of the window function - """ - if win_length is None: - win_length = n_fft - - n = n_fft + hop_length * (n_frames - 1) - x = np.zeros(n, dtype=dtype) - - # Compute the squared window at the desired length - win_sq = get_window(window, win_length, fftbins=True) - win_sq = normalize(win_sq, norm=norm) ** 2 - win_sq = pad_center(win_sq, n_fft) - - # Fill the envelope - for i in range(n_frames): - sample = i * hop_length - x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))] - return x - - class STFT(torch.nn.Module): def __init__( self, filter_length=1024, hop_length=512, win_length=None, window="hann" @@ -105,16 +52,15 @@ def __init__( self.window = window self.forward_transform = None self.pad_amount = int(self.filter_length / 2) - scale = self.filter_length / self.hop_length fourier_basis = np.fft.fft(np.eye(self.filter_length)) cutoff = int((self.filter_length / 2 + 1)) fourier_basis = np.vstack( [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] ) - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) + forward_basis = torch.FloatTensor(fourier_basis) inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :] + np.linalg.pinv(fourier_basis) ) assert filter_length >= self.win_length @@ -125,12 +71,13 @@ def __init__( # window the bases forward_basis *= fft_window - inverse_basis *= fft_window + inverse_basis = (inverse_basis.T * fft_window).T self.register_buffer("forward_basis", forward_basis.float()) self.register_buffer("inverse_basis", inverse_basis.float()) + self.register_buffer("fft_window", fft_window.float()) - def transform(self, input_data): + def transform(self, input_data, return_phase=False): """Take input data (audio) to STFT domain. 
Arguments: @@ -142,33 +89,22 @@ def transform(self, input_data): phase {tensor} -- Phase of STFT with shape (num_batch, num_frequencies, num_frames) """ - num_batches = input_data.shape[0] - num_samples = input_data.shape[-1] - - self.num_samples = num_samples - - # similar to librosa, reflect-pad the input - input_data = input_data.view(num_batches, 1, num_samples) - # print(1234,input_data.shape) input_data = F.pad( - input_data.unsqueeze(1), - (self.pad_amount, self.pad_amount, 0, 0, 0, 0), + input_data, + (self.pad_amount, self.pad_amount), mode="reflect", - ).squeeze(1) - # print(2333,input_data.shape,self.forward_basis.shape,self.hop_length) - # pdb.set_trace() - forward_transform = F.conv1d( - input_data, self.forward_basis, stride=self.hop_length, padding=0 ) - + forward_transform = input_data.unfold(1, self.filter_length, self.hop_length).permute(0, 2, 1) + forward_transform = torch.matmul(self.forward_basis, forward_transform) cutoff = int((self.filter_length / 2) + 1) real_part = forward_transform[:, :cutoff, :] imag_part = forward_transform[:, cutoff:, :] - magnitude = torch.sqrt(real_part**2 + imag_part**2) - # phase = torch.atan2(imag_part.data, real_part.data) - - return magnitude # , phase + if return_phase: + phase = torch.atan2(imag_part.data, real_part.data) + return magnitude, phase + else: + return magnitude def inverse(self, magnitude, phase): """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced @@ -184,42 +120,18 @@ def inverse(self, magnitude, phase): inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of shape (num_batch, num_samples) """ - recombine_magnitude_phase = torch.cat( + cat = torch.cat( [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 ) - - inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - self.inverse_basis, - stride=self.hop_length, - padding=0, - ) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, - magnitude.size(-1), - hop_length=self.hop_length, - win_length=self.win_length, - n_fft=self.filter_length, - dtype=np.float32, - ) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > tiny(window_sum))[0] - ) - window_sum = torch.from_numpy(window_sum).to(inverse_transform.device) - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ - approx_nonzero_indices - ] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[..., self.pad_amount :] - inverse_transform = inverse_transform[..., : self.num_samples] - inverse_transform = inverse_transform.squeeze(1) - + fold = torch.nn.Fold( + output_size=(1, (cat.size(-1) - 1) * self.hop_length + self.filter_length), + kernel_size=(1, self.filter_length), + stride=(1, self.hop_length)) + inverse_transform = torch.matmul(self.inverse_basis, cat) + inverse_transform = fold(inverse_transform)[:, 0, 0, self.pad_amount : -self.pad_amount] + window_square_sum = self.fft_window.pow(2).repeat(cat.size(-1), 1).T.unsqueeze(0) + window_square_sum = fold(window_square_sum)[:, 0, 0, self.pad_amount : -self.pad_amount] + inverse_transform /= window_square_sum return inverse_transform def forward(self, input_data): @@ -232,7 +144,7 @@ def forward(self, input_data): reconstruction {tensor} -- Reconstructed audio given magnitude and phase. 
Of shape (num_batch, num_samples) """ - self.magnitude, self.phase = self.transform(input_data) + self.magnitude, self.phase = self.transform(input_data, return_phase=True) reconstruction = self.inverse(self.magnitude, self.phase) return reconstruction @@ -538,33 +450,28 @@ def forward(self, audio, keyshift=0, speed=1, center=True): keyshift_key = str(keyshift) + "_" + str(audio.device) if keyshift_key not in self.hann_window: self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - # "cpu"if(audio.device.type=="privateuseone") else audio.device audio.device ) - # fft = torch.stft(#doesn't support pytorch_dml - # # audio.cpu() if(audio.device.type=="privateuseone")else audio, - # audio, - # n_fft=n_fft_new, - # hop_length=hop_length_new, - # win_length=win_length_new, - # window=self.hann_window[keyshift_key], - # center=center, - # return_complex=True, - # ) - # magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - # print(1111111111) - # print(222222222222222,audio.device,self.is_half) - if hasattr(self, "stft") == False: - # print(n_fft_new,hop_length_new,win_length_new,audio.shape) - self.stft = STFT( - filter_length=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window="hann", - ).to(audio.device) - magnitude = self.stft.transform(audio) # phase - # if (audio.device.type == "privateuseone"): - # magnitude=magnitude.to(audio.device) + if "privateuseone" in str(audio.device): + if not hasattr(self, "stft"): + self.stft = STFT( + filter_length=n_fft_new, + hop_length=hop_length_new, + win_length=win_length_new, + window="hann", + ).to(audio.device) + magnitude = self.stft.transform(audio) + else: + fft = torch.stft( + audio, + n_fft=n_fft_new, + hop_length=hop_length_new, + win_length=win_length_new, + window=self.hann_window[keyshift_key], + center=center, + return_complex=True, + ) + magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) if keyshift != 0: size = self.n_fft // 2 + 1 resize = magnitude.size(1) @@ -575,7 +482,6 @@ def forward(self, audio, keyshift=0, speed=1, center=True): if self.is_half == True: mel_output = mel_output.half() log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - # print(log_mel_spec.device.type) return log_mel_spec diff --git a/tools/torchgate/torchgate.py b/tools/torchgate/torchgate.py index f95ffef..86cf3a3 100644 --- a/tools/torchgate/torchgate.py +++ b/tools/torchgate/torchgate.py @@ -1,4 +1,5 @@ import torch +from infer.lib.rmvpe import STFT from torch.nn.functional import conv1d, conv2d from typing import Union, Optional from .utils import linspace, temperature_sigmoid, amp_to_db @@ -139,17 +140,26 @@ def _stationary_mask( are set to 1, and the rest are set to 0. 
""" if xn is not None: - XN = torch.stft( - xn, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(xn.device), - ) - + if "privateuseone" in str(xn.device): + if not hasattr(self, "stft"): + self.stft = STFT( + filter_length=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + window="hann", + ).to(xn.device) + XN = self.stft.transform(xn) + else: + XN = torch.stft( + xn, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + return_complex=True, + pad_mode="constant", + center=True, + window=torch.hann_window(self.win_length).to(xn.device), + ) XN_db = amp_to_db(XN).to(dtype=X_db.dtype) else: XN_db = X_db @@ -213,17 +223,27 @@ def forward( """ # Compute short-time Fourier transform (STFT) - X = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(x.device), - ) - + if "privateuseone" in str(x.device): + if not hasattr(self, "stft"): + self.stft = STFT( + filter_length=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + window="hann", + ).to(x.device) + X, phase = self.stft.transform(x, return_phase=True) + else: + X = torch.stft( + x, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + return_complex=True, + pad_mode="constant", + center=True, + window=torch.hann_window(self.win_length).to(x.device), + ) + # Compute signal mask based on stationary or nonstationary assumptions if self.nonstationary: sig_mask = self._nonstationary_mask(X.abs()) @@ -231,7 +251,7 @@ def forward( sig_mask = self._stationary_mask(amp_to_db(X), xn) # Propagate decrease in signal power - sig_mask = self.prop_decrease * (sig_mask * 1.0 - 1.0) + 1.0 + sig_mask = self.prop_decrease * (sig_mask.float() - 1.0) + 1.0 # Smooth signal mask with 2D convolution if self.smoothing_filter is not None: @@ -245,13 +265,16 @@ def forward( Y = X * sig_mask.squeeze(1) # Inverse STFT to obtain time-domain signal - y = torch.istft( - Y, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - center=True, - window=torch.hann_window(self.win_length).to(Y.device), - ) - + if "privateuseone" in str(Y.device): + y = self.stft.inverse(Y, phase) + else: + y = torch.istft( + Y, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + center=True, + window=torch.hann_window(self.win_length).to(Y.device), + ) + return y.to(dtype=x.dtype) From ad3617bb9360928093618129d75d0df184fc8baf Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 15:52:54 +0900 Subject: [PATCH 25/26] chore(format): run black on dev (#1318) Co-authored-by: github-actions[bot] --- gui_v1.py | 41 ++++++++++++------ infer-web.py | 83 ++++++++++++------------------------ infer/lib/rmvpe.py | 47 +++++++++++--------- tools/rvc_for_realtime.py | 11 +++-- tools/torchgate/torchgate.py | 54 +++++++++++------------ 5 files changed, 115 insertions(+), 121 deletions(-) diff --git a/gui_v1.py b/gui_v1.py index 213b748..f804148 100644 --- a/gui_v1.py +++ b/gui_v1.py @@ -63,8 +63,9 @@ def run(self): import tools.rvc_for_realtime as rvc_for_realtime from i18n.i18n import I18nAuto from configs.config import Config + i18n = I18nAuto() - + # device = rvc_for_realtime.config.device # 
device = torch.device( # "cuda" @@ -135,7 +136,7 @@ def load(self): "crossfade_length": "0.04", "extra_time": "2", "f0method": "rmvpe", - "use_jit":False + "use_jit": False, } data["pm"] = data["f0method"] == "pm" data["harvest"] = data["f0method"] == "harvest" @@ -145,7 +146,7 @@ def load(self): def launcher(self): data = self.load() - self.config.use_jit=data.get("use_jit",self.config.use_jit) + self.config.use_jit = data.get("use_jit", self.config.use_jit) sg.theme("LightBlue3") input_devices, output_devices, _, _ = self.get_devices() layout = [ @@ -362,9 +363,7 @@ def launcher(self): enable_events=True, ), ], - [ - sg.Text("注:首次使用JIT加速时,会出现卡顿,\n 并伴随一些噪音,但这是正常现象!") - ] + [sg.Text("注:首次使用JIT加速时,会出现卡顿,\n 并伴随一些噪音,但这是正常现象!")], ], title=i18n("性能设置"), ), @@ -439,7 +438,7 @@ def event_handler(self): "crossfade_length": values["crossfade_length"], "extra_time": values["extra_time"], "n_cpu": values["n_cpu"], - "use_jit":values["use_jit"], + "use_jit": values["use_jit"], "f0method": ["pm", "harvest", "crepe", "rmvpe"][ [ values["pm"], @@ -553,21 +552,35 @@ def start_vc(self): self.gui_config.samplerate = self.rvc.tgt_sr self.zc = self.rvc.tgt_sr // 100 self.block_frame = ( - int(np.round(self.gui_config.block_time * self.gui_config.samplerate / self.zc)) + int( + np.round( + self.gui_config.block_time + * self.gui_config.samplerate + / self.zc + ) + ) * self.zc ) self.block_frame_16k = 160 * self.block_frame // self.zc self.crossfade_frame = ( int( np.round( - self.gui_config.crossfade_time * self.gui_config.samplerate / self.zc + self.gui_config.crossfade_time + * self.gui_config.samplerate + / self.zc ) ) * self.zc ) self.sola_search_frame = self.zc self.extra_frame = ( - int(np.round(self.gui_config.extra_time * self.gui_config.samplerate / self.zc)) + int( + np.round( + self.gui_config.extra_time + * self.gui_config.samplerate + / self.zc + ) + ) * self.zc ) self.input_wav: torch.Tensor = torch.zeros( @@ -616,7 +629,9 @@ def start_vc(self): ) self.fade_out_window: torch.Tensor = 1 - self.fade_in_window self.resampler = tat.Resample( - orig_freq=self.gui_config.samplerate, new_freq=16000, dtype=torch.float32 + orig_freq=self.gui_config.samplerate, + new_freq=16000, + dtype=torch.float32, ).to(self.config.device) self.tg = TorchGate( sr=self.gui_config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9 @@ -664,7 +679,9 @@ def audio_callback( self.input_wav[: -self.block_frame] = self.input_wav[ self.block_frame : ].clone() - self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to(self.config.device) + self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to( + self.config.device + ) self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[ self.block_frame_16k : ].clone() diff --git a/infer-web.py b/infer-web.py index e995aae..859114e 100644 --- a/infer-web.py +++ b/infer-web.py @@ -38,10 +38,8 @@ tmp = os.path.join(now_dir, "TEMP") shutil.rmtree(tmp, ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % - (now_dir), ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % - (now_dir), ignore_errors=True) +shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True) +shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True) os.makedirs(tmp, exist_ok=True) os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True) os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True) @@ -621,8 +619,7 @@ def train_index(exp_dir1, version19): np.random.shuffle(big_npy_idx) 
big_npy = big_npy[big_npy_idx] if big_npy.shape[0] > 2e5: - infos.append("Trying doing kmeans %s shape to 10k centers." % - big_npy.shape[0]) + infos.append("Trying doing kmeans %s shape to 10k centers." % big_npy.shape[0]) yield "\n".join(infos) try: big_npy = ( @@ -646,8 +643,7 @@ def train_index(exp_dir1, version19): n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) infos.append("%s,%s" % (big_npy.shape, n_ivf)) yield "\n".join(infos) - index = faiss.index_factory( - 256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) + index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf) infos.append("training") yield "\n".join(infos) @@ -664,7 +660,7 @@ def train_index(exp_dir1, version19): yield "\n".join(infos) batch_size_add = 8192 for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i: i + batch_size_add]) + index.add(big_npy[i : i + batch_size_add]) faiss.write_index( index, "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" @@ -708,8 +704,7 @@ def get_info_str(strr): # step1:处理数据 yield get_info_str(i18n("step1:正在处理数据")) - [get_info_str(_) for _ in preprocess_dataset( - trainset_dir4, exp_dir1, sr2, np7)] + [get_info_str(_) for _ in preprocess_dataset(trainset_dir4, exp_dir1, sr2, np7)] # step2a:提取音高 yield get_info_str(i18n("step2:正在提取音高&正在提取特征")) @@ -755,8 +750,7 @@ def change_info_(ckpt_path): ) as f: info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1]) sr, f0 = info["sample_rate"], info["if_f0"] - version = "v2" if ( - "version" in info and info["version"] == "v2") else "v1" + version = "v2" if ("version" in info and info["version"] == "v2") else "v1" return sr, str(f0), version except: traceback.print_exc() @@ -785,8 +779,7 @@ def change_f0_method(f0method8): with gr.TabItem(i18n("模型推理")): with gr.TabItem(i18n("单人")): with gr.Row(): - sid0 = gr.Dropdown(label=i18n("推理音色"), - choices=sorted(names)) + sid0 = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names)) file_index2 = gr.Dropdown( label=i18n("自动检测index路径,下拉式选择(dropdown)"), choices=sorted(index_paths), @@ -796,8 +789,7 @@ def change_f0_method(f0method8): refresh_button = gr.Button( i18n("刷新音色列表和索引路径"), variant="primary" ) - clean_button = gr.Button( - i18n("卸载音色省显存"), variant="primary") + clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary") spk_item = gr.Slider( minimum=0, maximum=2333, @@ -899,8 +891,7 @@ def change_f0_method(f0method8): but0 = gr.Button(i18n("转换"), variant="primary") with gr.Row(): vc_output1 = gr.Textbox(label=i18n("输出信息")) - vc_output2 = gr.Audio( - label=i18n("输出音频(右下角三个点,点了可以下载)")) + vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)")) but0.click( vc.vc_single, @@ -924,16 +915,14 @@ def change_f0_method(f0method8): ) with gr.TabItem(i18n("批次")): gr.Markdown( - value=i18n( - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ") + value=i18n("批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. 
") ) with gr.Row(): with gr.Column(): vc_transform1 = gr.Number( label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 ) - opt_input = gr.Textbox( - label=i18n("指定输出文件夹"), value="opt") + opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt") file_index3 = gr.Textbox( label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), value="", @@ -1002,8 +991,7 @@ def change_f0_method(f0method8): filter_radius1 = gr.Slider( minimum=0, maximum=7, - label=i18n( - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), + label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), value=3, step=1, interactive=True, @@ -1053,8 +1041,7 @@ def change_f0_method(f0method8): sid0.change( fn=vc.get_vc, inputs=[sid0, protect0, protect1], - outputs=[spk_item, protect0, protect1, - file_index2, file_index4], + outputs=[spk_item, protect0, protect1, file_index2, file_index4], api_name="infer_change_voice", ) with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): @@ -1074,8 +1061,7 @@ def change_f0_method(f0method8): file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") ) with gr.Column(): - model_choose = gr.Dropdown( - label=i18n("模型"), choices=uvr5_names) + model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names) agg = gr.Slider( minimum=0, maximum=20, @@ -1175,8 +1161,7 @@ def change_f0_method(f0method8): api_name="train_preprocess", ) with gr.Group(): - gr.Markdown(value=i18n( - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)")) + gr.Markdown(value=i18n("step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)")) with gr.Row(): with gr.Column(): gpus6 = gr.Textbox( @@ -1193,8 +1178,7 @@ def change_f0_method(f0method8): label=i18n( "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU" ), - choices=["pm", "harvest", "dio", - "rmvpe", "rmvpe_gpu"], + choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"], value="rmvpe_gpu", interactive=True, ) @@ -1207,8 +1191,7 @@ def change_f0_method(f0method8): visible=F0GPUVisible, ) but2 = gr.Button(i18n("特征提取"), variant="primary") - info2 = gr.Textbox(label=i18n("输出信息"), - value="", max_lines=8) + info2 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) f0method8.change( fn=change_f0_method, inputs=[f0method8], @@ -1309,8 +1292,7 @@ def change_f0_method(f0method8): but3 = gr.Button(i18n("训练模型"), variant="primary") but4 = gr.Button(i18n("训练特征索引"), variant="primary") but5 = gr.Button(i18n("一键训练"), variant="primary") - info3 = gr.Textbox(label=i18n("输出信息"), - value="", max_lines=10) + info3 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=10) but3.click( click_train, [ @@ -1363,10 +1345,8 @@ def change_f0_method(f0method8): with gr.Group(): gr.Markdown(value=i18n("模型融合, 可用于测试音色融合")) with gr.Row(): - ckpt_a = gr.Textbox(label=i18n( - "A模型路径"), value="", interactive=True) - ckpt_b = gr.Textbox(label=i18n( - "B模型路径"), value="", interactive=True) + ckpt_a = gr.Textbox(label=i18n("A模型路径"), value="", interactive=True) + ckpt_b = gr.Textbox(label=i18n("B模型路径"), value="", interactive=True) alpha_a = gr.Slider( minimum=0, maximum=1, @@ -1404,8 +1384,7 @@ def change_f0_method(f0method8): ) with gr.Row(): but6 = gr.Button(i18n("融合"), variant="primary") - info4 = gr.Textbox(label=i18n("输出信息"), - value="", max_lines=8) + info4 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) but6.click( merge, [ @@ -1438,8 +1417,7 @@ def change_f0_method(f0method8): ) with gr.Row(): but7 = gr.Button(i18n("修改"), variant="primary") - info5 = gr.Textbox(label=i18n("输出信息"), - value="", max_lines=8) + info5 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) but7.click( change_info, [ckpt_path0, info_, 
name_to_save1], @@ -1453,10 +1431,8 @@ def change_f0_method(f0method8): label=i18n("模型路径"), value="", interactive=True ) but8 = gr.Button(i18n("查看"), variant="primary") - info6 = gr.Textbox(label=i18n("输出信息"), - value="", max_lines=8) - but8.click(show_info, [ckpt_path1], - info6, api_name="ckpt_show") + info6 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) + but8.click(show_info, [ckpt_path1], info6, api_name="ckpt_show") with gr.Group(): gr.Markdown( value=i18n( @@ -1494,8 +1470,7 @@ def change_f0_method(f0method8): label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True ) but9 = gr.Button(i18n("提取"), variant="primary") - info7 = gr.Textbox(label=i18n("输出信息"), - value="", max_lines=8) + info7 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) ckpt_path2.change( change_info_, [ckpt_path2], [sr__, if_f0__, version_1] ) @@ -1508,8 +1483,7 @@ def change_f0_method(f0method8): with gr.TabItem(i18n("Onnx导出")): with gr.Row(): - ckpt_dir = gr.Textbox(label=i18n( - "RVC模型路径"), value="", interactive=True) + ckpt_dir = gr.Textbox(label=i18n("RVC模型路径"), value="", interactive=True) with gr.Row(): onnx_dir = gr.Textbox( label=i18n("Onnx输出路径"), value="", interactive=True @@ -1519,8 +1493,7 @@ def change_f0_method(f0method8): with gr.Row(): butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary") butOnnx.click( - export_onnx, [ - ckpt_dir, onnx_dir], infoOnnx, api_name="export_onnx" + export_onnx, [ckpt_dir, onnx_dir], infoOnnx, api_name="export_onnx" ) tab_faq = i18n("常见问题解答") diff --git a/infer/lib/rmvpe.py b/infer/lib/rmvpe.py index a65fd7e..eb64a6e 100644 --- a/infer/lib/rmvpe.py +++ b/infer/lib/rmvpe.py @@ -59,9 +59,7 @@ def __init__( [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] ) forward_basis = torch.FloatTensor(fourier_basis) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(fourier_basis) - ) + inverse_basis = torch.FloatTensor(np.linalg.pinv(fourier_basis)) assert filter_length >= self.win_length # get window and zero center pad it to filter_length @@ -94,7 +92,9 @@ def transform(self, input_data, return_phase=False): (self.pad_amount, self.pad_amount), mode="reflect", ) - forward_transform = input_data.unfold(1, self.filter_length, self.hop_length).permute(0, 2, 1) + forward_transform = input_data.unfold( + 1, self.filter_length, self.hop_length + ).permute(0, 2, 1) forward_transform = torch.matmul(self.forward_basis, forward_transform) cutoff = int((self.filter_length / 2) + 1) real_part = forward_transform[:, :cutoff, :] @@ -124,13 +124,20 @@ def inverse(self, magnitude, phase): [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 ) fold = torch.nn.Fold( - output_size=(1, (cat.size(-1) - 1) * self.hop_length + self.filter_length), - kernel_size=(1, self.filter_length), - stride=(1, self.hop_length)) + output_size=(1, (cat.size(-1) - 1) * self.hop_length + self.filter_length), + kernel_size=(1, self.filter_length), + stride=(1, self.hop_length), + ) inverse_transform = torch.matmul(self.inverse_basis, cat) - inverse_transform = fold(inverse_transform)[:, 0, 0, self.pad_amount : -self.pad_amount] - window_square_sum = self.fft_window.pow(2).repeat(cat.size(-1), 1).T.unsqueeze(0) - window_square_sum = fold(window_square_sum)[:, 0, 0, self.pad_amount : -self.pad_amount] + inverse_transform = fold(inverse_transform)[ + :, 0, 0, self.pad_amount : -self.pad_amount + ] + window_square_sum = ( + self.fft_window.pow(2).repeat(cat.size(-1), 1).T.unsqueeze(0) + ) + window_square_sum = fold(window_square_sum)[ + :, 0, 0, self.pad_amount 
: -self.pad_amount + ] inverse_transform /= window_square_sum return inverse_transform @@ -463,14 +470,14 @@ def forward(self, audio, keyshift=0, speed=1, center=True): magnitude = self.stft.transform(audio) else: fft = torch.stft( - audio, - n_fft=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window=self.hann_window[keyshift_key], - center=center, - return_complex=True, - ) + audio, + n_fft=n_fft_new, + hop_length=hop_length_new, + win_length=win_length_new, + window=self.hann_window[keyshift_key], + center=center, + return_complex=True, + ) magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) if keyshift != 0: size = self.n_fft // 2 + 1 @@ -564,9 +571,7 @@ def mel2hidden(self, mel): n_frames = mel.shape[-1] n_pad = 32 * ((n_frames - 1) // 32 + 1) - n_frames if n_pad > 0: - mel = F.pad( - mel, (0, n_pad), mode="constant" - ) + mel = F.pad(mel, (0, n_pad), mode="constant") if "privateuseone" in str(self.device): onnx_input_name = self.model.get_inputs()[0].name onnx_outputs_names = self.model.get_outputs()[0].name diff --git a/tools/rvc_for_realtime.py b/tools/rvc_for_realtime.py index c5e7933..ca2a4f9 100644 --- a/tools/rvc_for_realtime.py +++ b/tools/rvc_for_realtime.py @@ -41,7 +41,6 @@ mm = M() - # config.device=torch.device("cpu")########强制cpu测试 # config.is_half=False########强制cpu测试 class RVC: @@ -54,7 +53,7 @@ def __init__( n_cpu, inp_q, opt_q, - config:Config, + config: Config, last_rvc=None, ) -> None: """ @@ -70,7 +69,7 @@ def forward_dml(ctx, x, scale): fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml # global config - self.config=config + self.config = config self.inp_q = inp_q self.opt_q = opt_q # device="cpu"########强制cpu测试 @@ -111,9 +110,10 @@ def forward_dml(ctx, x, scale): else: self.model = last_rvc.model - self.net_g:nn.Module=None + self.net_g: nn.Module = None + def set_default_model(): - self.net_g, cpt=get_synthesizer(self.pth_path,self.device) + self.net_g, cpt = get_synthesizer(self.pth_path, self.device) self.tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] self.if_f0 = cpt.get("f0", 1) @@ -180,7 +180,6 @@ def set_synthesizer(): else: self.net_g = last_rvc.net_g - if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"): self.model_rmvpe = last_rvc.model_rmvpe except: diff --git a/tools/torchgate/torchgate.py b/tools/torchgate/torchgate.py index 86cf3a3..e4b80c4 100644 --- a/tools/torchgate/torchgate.py +++ b/tools/torchgate/torchgate.py @@ -151,15 +151,15 @@ def _stationary_mask( XN = self.stft.transform(xn) else: XN = torch.stft( - xn, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(xn.device), - ) + xn, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + return_complex=True, + pad_mode="constant", + center=True, + window=torch.hann_window(self.win_length).to(xn.device), + ) XN_db = amp_to_db(XN).to(dtype=X_db.dtype) else: XN_db = X_db @@ -234,16 +234,16 @@ def forward( X, phase = self.stft.transform(x, return_phase=True) else: X = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - return_complex=True, - pad_mode="constant", - center=True, - window=torch.hann_window(self.win_length).to(x.device), - ) - + x, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + return_complex=True, + pad_mode="constant", + center=True, + 
window=torch.hann_window(self.win_length).to(x.device), + ) + # Compute signal mask based on stationary or nonstationary assumptions if self.nonstationary: sig_mask = self._nonstationary_mask(X.abs()) @@ -269,12 +269,12 @@ def forward( y = self.stft.inverse(Y, phase) else: y = torch.istft( - Y, - n_fft=self.n_fft, - hop_length=self.hop_length, - win_length=self.win_length, - center=True, - window=torch.hann_window(self.win_length).to(Y.device), - ) - + Y, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + center=True, + window=torch.hann_window(self.win_length).to(Y.device), + ) + return y.to(dtype=x.dtype) From 3b2786950be8a82af7be2074fdae67cd50a92a27 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 28 Sep 2023 06:53:08 +0000 Subject: [PATCH 26/26] chore(i18n): sync locale on dev --- i18n/locale/en_US.json | 6 +++--- i18n/locale/es_ES.json | 6 +++--- i18n/locale/fr_FR.json | 6 +++--- i18n/locale/it_IT.json | 6 +++--- i18n/locale/ja_JP.json | 6 +++--- i18n/locale/ru_RU.json | 6 +++--- i18n/locale/tr_TR.json | 2 ++ i18n/locale/zh_CN.json | 6 +++--- i18n/locale/zh_HK.json | 2 ++ i18n/locale/zh_SG.json | 2 ++ i18n/locale/zh_TW.json | 2 ++ 11 files changed, 29 insertions(+), 21 deletions(-) diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index eae9f87..884674d 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -38,6 +38,7 @@ "加载模型": "Load model", "加载预训练底模D路径": "Load pre-trained base model D path:", "加载预训练底模G路径": "Load pre-trained base model G path:", + "单人": "Single", "卸载音色省显存": "Unload voice to save GPU memory:", "变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):", "后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.", "性能设置": "Performance settings", "总训练轮数total_epoch": "Total training epochs (total_epoch):", + "批次": "Batch", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. 
The converted audio will be output in the specified folder (default: 'opt').", "指定输出主人声文件夹": "Specify the output folder for vocals:", "指定输出文件夹": "Specify output folder:", @@ -124,7 +126,5 @@ "音调设置": "Pitch settings", "音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)", "音高算法": "pitch detection algorithm", - "额外推理时长": "Extra inference time", - "单人": "Single", - "批次": "Batch" + "额外推理时长": "Extra inference time" } diff --git a/i18n/locale/es_ES.json b/i18n/locale/es_ES.json index 7c57327..fcd5f83 100644 --- a/i18n/locale/es_ES.json +++ b/i18n/locale/es_ES.json @@ -38,6 +38,7 @@ "加载模型": "Cargar modelo", "加载预训练底模D路径": "Cargue la ruta del modelo D base pre-entrenada.", "加载预训练底模G路径": "Cargue la ruta del modelo G base pre-entrenada.", + "单人": "Individual", "卸载音色省显存": "Descargue la voz para ahorrar memoria GPU", "变调(整数, 半音数量, 升八度12降八度-12)": "Cambio de tono (entero, número de semitonos, subir una octava +12 o bajar una octava -12)", "后处理重采样至最终采样率,0为不进行重采样": "Remuestreo posterior al proceso a la tasa de muestreo final, 0 significa no remuestrear", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica adecuada para soportar su entrenamiento", "性能设置": "Configuración de rendimiento", "总训练轮数total_epoch": "Total de épocas de entrenamiento (total_epoch)", + "批次": "Lote", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta que contiene los archivos de audio para convertir o cargue varios archivos de audio. El audio convertido se emitirá en la carpeta especificada (opción predeterminada).", "指定输出主人声文件夹": "Especifique la carpeta de salida para la voz principal", "指定输出文件夹": "Especificar carpeta de salida", @@ -124,7 +126,5 @@ "音调设置": "Ajuste de tono", "音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice el mismo tipo de controlador)", "音高算法": "Algoritmo de tono", - "额外推理时长": "Tiempo de inferencia adicional", - "单人": "Individual", - "批次": "Lote" + "额外推理时长": "Tiempo de inferencia adicional" } diff --git a/i18n/locale/fr_FR.json b/i18n/locale/fr_FR.json index 55c9a98..d2fa618 100644 --- a/i18n/locale/fr_FR.json +++ b/i18n/locale/fr_FR.json @@ -38,6 +38,7 @@ "加载模型": "Charger le modèle.", "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D :", "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G :", + "单人": "Individu", "卸载音色省显存": "Décharger la voix pour économiser la mémoire GPU.", "变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :", "后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. Réglez sur 0 pour ne pas effectuer de rééchantillonnage :", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, il n'y a pas de GPU compatible disponible pour prendre en charge votre entrainement.", "性能设置": "Paramètres de performance", "总训练轮数total_epoch": "Nombre total d'époques d'entraînement (total_epoch) :", + "批次": "Lote", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot. Entrez le dossier contenant les fichiers audio à convertir ou téléchargez plusieurs fichiers audio. 
Les fichiers audio convertis seront enregistrés dans le dossier spécifié (par défaut : 'opt').", "指定输出主人声文件夹": "Spécifiez le dossier de sortie pour les fichiers de voix :", "指定输出文件夹": "Spécifiez le dossier de sortie :", @@ -124,7 +126,5 @@ "音调设置": "Réglages de la hauteur", "音频设备(请使用同种类驱动)": "Périphérique audio (veuillez utiliser le même type de pilote)", "音高算法": "algorithme de détection de la hauteur", - "额外推理时长": "Temps d'inférence supplémentaire", - "单人" : "Individu", - "批次" : "Lote" + "额外推理时长": "Temps d'inférence supplémentaire" } diff --git a/i18n/locale/it_IT.json b/i18n/locale/it_IT.json index 6b82f9a..00dc3b8 100644 --- a/i18n/locale/it_IT.json +++ b/i18n/locale/it_IT.json @@ -38,6 +38,7 @@ "加载模型": "Carica modello", "加载预训练底模D路径": "Carica il percorso D del modello base pre-addestrato:", "加载预训练底模G路径": "Carica il percorso G del modello base pre-addestrato:", + "单人": "Individuale", "卸载音色省显存": "Scarica la voce per risparmiare memoria della GPU:", "变调(整数, 半音数量, 升八度12降八度-12)": "Trasposizione (numero intero, numero di semitoni, alza di un'ottava: 12, abbassa di un'ottava: -12):", "后处理重采样至最终采样率,0为不进行重采样": "Ricampiona l'audio di output in post-elaborazione alla frequenza di campionamento finale. ", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Sfortunatamente, non è disponibile alcuna GPU compatibile per supportare l'addestramento.", "性能设置": "Impostazioni delle prestazioni", "总训练轮数total_epoch": "Epoch totali di addestramento (total_epoch):", + "批次": "Lote", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione massiva. Inserisci il percorso della cartella che contiene i file da convertire o carica più file audio. I file convertiti finiranno nella cartella specificata. (default: opt) ", "指定输出主人声文件夹": "Specifica la cartella di output per le voci:", "指定输出文件夹": "Specifica la cartella di output:", @@ -124,7 +126,5 @@ "音调设置": "Impostazioni del tono", "音频设备(请使用同种类驱动)": "Dispositivo audio (utilizzare lo stesso tipo di driver)", "音高算法": "音高算法", - "额外推理时长": "Tempo di inferenza extra", - "单人": "Individuale", - "批次": "Lote" + "额外推理时长": "Tempo di inferenza extra" } diff --git a/i18n/locale/ja_JP.json b/i18n/locale/ja_JP.json index 40f224d..9f3214b 100644 --- a/i18n/locale/ja_JP.json +++ b/i18n/locale/ja_JP.json @@ -38,6 +38,7 @@ "加载模型": "モデルをロード", "加载预训练底模D路径": "事前学習済みのDモデルのパス", "加载预训练底模G路径": "事前学習済みのGモデルのパス", + "单人": "個人", "卸载音色省显存": "音源を削除してメモリを節約", "变调(整数, 半音数量, 升八度12降八度-12)": "ピッチ変更(整数、半音数、上下オクターブ12-12)", "后处理重采样至最终采样率,0为不进行重采样": "最終的なサンプリングレートへのポストプロセッシングのリサンプリング リサンプリングしない場合は0", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "トレーニングに対応したGPUが動作しないのは残念です。", "性能设置": "パフォーマンス設定", "总训练轮数total_epoch": "総エポック数", + "批次": "バッチ", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. 
": "一括変換、変換する音声フォルダを入力、または複数の音声ファイルをアップロードし、指定したフォルダ(デフォルトのopt)に変換した音声を出力します。", "指定输出主人声文件夹": "マスターの出力音声フォルダーを指定する", "指定输出文件夹": "出力フォルダを指定してください", @@ -124,7 +126,5 @@ "音调设置": "音程設定", "音频设备(请使用同种类驱动)": "オーディオデバイス(同じ種類のドライバーを使用してください)", "音高算法": "ピッチアルゴリズム", - "额外推理时长": "追加推論時間", - "单人": "個人", - "批次": "バッチ" + "额外推理时长": "追加推論時間" } diff --git a/i18n/locale/ru_RU.json b/i18n/locale/ru_RU.json index 638ee7b..10df9cb 100644 --- a/i18n/locale/ru_RU.json +++ b/i18n/locale/ru_RU.json @@ -38,6 +38,7 @@ "加载模型": "Загрузить модель", "加载预训练底模D路径": "Путь к предварительно обученной базовой модели D:", "加载预训练底模G路径": "Путь к предварительно обученной базовой модели G:", + "单人": "Одиночный", "卸载音色省显存": "Выгрузить модель из памяти GPU для освобождения ресурсов", "变调(整数, 半音数量, 升八度12降八度-12)": "Изменить высоту голоса (укажите количество полутонов; чтобы поднять голос на октаву, выберите 12, понизить на октаву — -12):", "后处理重采样至最终采样率,0为不进行重采样": "Изменить частоту дискретизации в выходном файле на финальную. Поставьте 0, чтобы ничего не изменялось:", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "К сожалению, у вас нету графического процессора, который поддерживает обучение моделей.", "性能设置": "Настройки быстроты", "总训练轮数total_epoch": "Полное количество эпох (total_epoch):", + "批次": "Партия", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Массовое преобразование. Введите путь к папке, в которой находятся файлы для преобразования голоса или выгрузите несколько аудиофайлов. Сконвертированные файлы будут сохранены в указанной папке (по умолчанию: 'opt').", "指定输出主人声文件夹": "Путь к папке для сохранения вокала:", "指定输出文件夹": "Папка для результатов:", @@ -124,7 +126,5 @@ "音调设置": "Настройка высоты звука", "音频设备(请使用同种类驱动)": "Аудиоустройство (пожалуйста, используйте такой же тип драйвера)", "音高算法": "Алгоритм оценки высоты звука", - "额外推理时长": "Доп. время переработки", - "单人": "Одиночный", - "批次": "Партия" + "额外推理时长": "Доп. время переработки" } diff --git a/i18n/locale/tr_TR.json b/i18n/locale/tr_TR.json index efd921b..b8f7fa4 100644 --- a/i18n/locale/tr_TR.json +++ b/i18n/locale/tr_TR.json @@ -38,6 +38,7 @@ "加载模型": "Model yükle", "加载预训练底模D路径": "Önceden eğitilmiş temel D modelini yükleme yolu:", "加载预训练底模G路径": "Önceden eğitilmiş temel G modelini yükleme yolu:", + "单人": "单人", "卸载音色省显存": "GPU bellek kullanımını azaltmak için sesi kaldır", "变调(整数, 半音数量, 升八度12降八度-12)": "Transpoze et (tamsayı, yarıton sayısıyla; bir oktav yükseltmek için: 12, bir oktav düşürmek için: -12):", "后处理重采样至最终采样率,0为不进行重采样": "Son işleme aşamasında çıktı sesini son örnekleme hızına yeniden örnekle. 0 değeri için yeniden örnekleme yapılmaz:", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "Maalesef, eğitiminizi desteklemek için uyumlu bir GPU bulunmamaktadır.", "性能设置": "Performans ayarları", "总训练轮数total_epoch": "Toplam eğitim turu (total_epoch):", + "批次": "批次", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Toplu dönüştür. Dönüştürülecek ses dosyalarının bulunduğu klasörü girin veya birden çok ses dosyasını yükleyin. 
Dönüştürülen ses dosyaları belirtilen klasöre ('opt' varsayılan olarak) dönüştürülecektir", "指定输出主人声文件夹": "Vokal için çıkış klasörünü belirtin:", "指定输出文件夹": "Çıkış klasörünü belirt:", diff --git a/i18n/locale/zh_CN.json b/i18n/locale/zh_CN.json index 345e27f..8f2c67a 100644 --- a/i18n/locale/zh_CN.json +++ b/i18n/locale/zh_CN.json @@ -38,6 +38,7 @@ "加载模型": "加载模型", "加载预训练底模D路径": "加载预训练底模D路径", "加载预训练底模G路径": "加载预训练底模G路径", + "单人": "单人", "卸载音色省显存": "卸载音色省显存", "变调(整数, 半音数量, 升八度12降八度-12)": "变调(整数, 半音数量, 升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "后处理重采样至最终采样率,0为不进行重采样", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "性能设置", "总训练轮数total_epoch": "总训练轮数total_epoch", + "批次": "批次", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定输出文件夹", @@ -124,7 +126,5 @@ "音调设置": "音调设置", "音频设备(请使用同种类驱动)": "音频设备(请使用同种类驱动)", "音高算法": "音高算法", - "额外推理时长": "额外推理时长", - "单人": "单人", - "批次": "批次" + "额外推理时长": "额外推理时长" } diff --git a/i18n/locale/zh_HK.json b/i18n/locale/zh_HK.json index fa2fbad..a0c010a 100644 --- a/i18n/locale/zh_HK.json +++ b/i18n/locale/zh_HK.json @@ -38,6 +38,7 @@ "加载模型": "載入模型", "加载预训练底模D路径": "加載預訓練底模D路徑", "加载预训练底模G路径": "加載預訓練底模G路徑", + "单人": "单人", "卸载音色省显存": "卸載音色節省 VRAM", "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "效能設定", "总训练轮数total_epoch": "總訓練輪數total_epoch", + "批次": "批次", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定輸出資料夾", diff --git a/i18n/locale/zh_SG.json b/i18n/locale/zh_SG.json index fa2fbad..a0c010a 100644 --- a/i18n/locale/zh_SG.json +++ b/i18n/locale/zh_SG.json @@ -38,6 +38,7 @@ "加载模型": "載入模型", "加载预训练底模D路径": "加載預訓練底模D路徑", "加载预训练底模G路径": "加載預訓練底模G路徑", + "单人": "单人", "卸载音色省显存": "卸載音色節省 VRAM", "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "效能設定", "总训练轮数total_epoch": "總訓練輪數total_epoch", + "批次": "批次", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定輸出資料夾", diff --git a/i18n/locale/zh_TW.json b/i18n/locale/zh_TW.json index fa2fbad..a0c010a 100644 --- a/i18n/locale/zh_TW.json +++ b/i18n/locale/zh_TW.json @@ -38,6 +38,7 @@ "加载模型": "載入模型", "加载预训练底模D路径": "加載預訓練底模D路徑", "加载预训练底模G路径": "加載預訓練底模G路徑", + "单人": "单人", "卸载音色省显存": "卸載音色節省 VRAM", "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", @@ -53,6 +54,7 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "性能设置": "效能設定", "总训练轮数total_epoch": "總訓練輪數total_epoch", + "批次": "批次", "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", "指定输出主人声文件夹": "指定输出主人声文件夹", "指定输出文件夹": "指定輸出資料夾",