From 8d6a6d338f9186863ec7c0c155449e7f3f244cdb Mon Sep 17 00:00:00 2001
From: Erol444
Date: Mon, 17 Jan 2022 17:31:32 -0800
Subject: [PATCH 1/2] Added NN examples

---
 .../images/examples/normalize_model.png       | Bin 0 -> 13956 bytes
 .../NeuralNetwork/concat_multi_input.rst      |  38 +++++++++
 .../normalization_multi_input.rst             |  46 +++++++++++
 examples/NeuralNetwork/concat_multi_input.py  |  73 ++++++++++++++++
 .../normalization_multi_input.py              |  78 ++++++++++++++++++
 examples/models/concat-model/model.yml        |  25 ++++++
 examples/models/normalization-model/model.yml |  25 ++++++
 7 files changed, 285 insertions(+)
 create mode 100644 docs/source/_static/images/examples/normalize_model.png
 create mode 100644 docs/source/samples/NeuralNetwork/concat_multi_input.rst
 create mode 100644 docs/source/samples/NeuralNetwork/normalization_multi_input.rst
 create mode 100644 examples/NeuralNetwork/concat_multi_input.py
 create mode 100755 examples/NeuralNetwork/normalization_multi_input.py
 create mode 100644 examples/models/concat-model/model.yml
 create mode 100644 examples/models/normalization-model/model.yml

diff --git a/docs/source/_static/images/examples/normalize_model.png b/docs/source/_static/images/examples/normalize_model.png
new file mode 100644
index 0000000000000000000000000000000000000000..be861497ab159d0282f002d1b08abee40a566d03
GIT binary patch
literal 13956
[... base85-encoded PNG data omitted ...]
zVg+PoPy+=J2f3f@Q#3Ri8=Ln*_$|6+FVQ=adF{YVd&~-j@&gmo5GVu!PUptX&dy*4 zM|M9S`UWv80q_Vck$?RTgMoeri2hXi@&yV(CM8Yv@j+2v1;qa`>%`V*HWu8Nv#zqz z_D@$hGGfd0v{D+4l%PI{X}BG$o?o<(=*83%YP@ zc7H@dLIQ+(JUqO4ZscYx7(`I9P_@Q#O+r-EXXzea3^r2ytqq7T>$|(J(#R2*z$8#s zS6=`Nq^z`b%GeexkCQFQDoSjBz{rIuhNq_;Mzf?q(H9~C1o9H*cM5b7mEy0D0^55A z2Oo}4ay{NrN1mOY{#jcqQmaEDr+E35`jex{Ty=Q2HkhzzH2hGwBsDubdt|>Z5R{a- zDG`N$sVWENA#h^B=D}-aAp7p!J75D^T9&2>yvO`ERGHb?ZkLDOFT!ofwbluc z(n?BT%?rHbGqB2MW|ZqiiwX;jZbP!Y*@wg*DVkDCeaF!1|GUsmhju)5Zbs_HRKd_R}0Kl#k zxhj&3jAzM#HSMu5n@HnftIR#ku{aDjAUx9d;5MwzwQ=Wd$3eY@?~m3^5!+$=Nr*5a zk*E9T;t%qxW^>inbUgMOrKoMMVpMYpEADpBhhbjYPR!LI-(L1gp~5;FmM#jeD+M4tRJ>aeb8kY@I0XtDYcDM<>#OeB{m~} zxE%O|A{ixtCI%fM$9Ig5Q1X*Si3@9bd$N`_ys{8pP~VwH>80JZsylV+h-G)g>cOlc z{{Yhyo?SaYK zW~6An?b&d5%<2R*W%44E9exo)a@>Gnb4-n*BkJJE$pKZnK+mBnnd<(xjRGz@Q|E%e z>v@vTv12tKsgnr_qE+`{R}TE4Ebj&?5f#;7N@nW$=@mP@=d|U;r0k8BZO3hwKeuCn zM9#%iglbRZM~kcKXJc}Q#%Fsz0g*TmnkK^^{aZ3|l!vwMo8KLv2nY{gl7|D$9-i|a zb1pqI?8ljMf7#)!3Kmm)=v{_Ler?YKbK$KX51j&FthN}Mx2#{>?#`^j> zzkb!a+}JufIX!xY?1i49M6(ALqwB-n1&vG!;w#<`DI@Q;{V5@(*OJN z)Bl&2X*UhQSBtM_zc+}(DB7L-exWgQn+Qba?3xZG4R^SR!6Ir)BK5>jZcbE+iVDp+ z$a`_<)^TiMjYdrkRDHfQcb(|zv|(b}jI;5)qdkd>|D*a~#&)*lf=tLA!JOsS;(lCp z^=9+(+IzPoEY%@hGYa1{&Nqjus&knQt0nes2k!YOpt^4Ey%Arwh(#x99e$mQw!zS; zc*~VZWtGQook&$gcwt97@rA6o2A9p_;8=aC7q@eh85lEAoENnbarMUA0hg7tLV+v@ zJZ8WBu=x4;i8s5@3G+g3H3jYz{+pSOxoE5RALIXi)aXooy))%xq5F&0bQ=X(L$EEn zQ*^?0rGfaT(9x$;!HY}}_dIIpXA`l{@O>I)ci;p>-BEk#6t*qV$Av zF{W+@N}(ne9+12`E&CDQr8=%kn<0vE*^x*sr}UMI>$sU2${tE=LxYo0 zhpB4G!_M)BUSIoY#UF>cf}LxIK6$J2qOK{BCsDWR>~^I^WA|h`5^knPJ&aLXhAACa zTT~o=Fpjtg@`s1e2>#b^>=OrrCqnYSC^0*n6N3KyA4W3r`V01`KaWH!rP(Kwd0&5E zGIi_Y^@1vSdql@Xah}o9X<@&9{RvV~N$K^J`IWLm?nS{z;NQdC#3FqHVYpB~-Dwfpi)s&d(4Kf0=BFqBbH5s^chh1h_|cnxK>JN|xsRvrhL zsc_jYNl|~_*d+FjqPxqIo5*X3tn(ZFzj$670k<=}%f(@9it7@;ty|LFZ>g;^`kAwpeQZ{^Q4wzx7rqC;$J7_5Tws z-KBvYMr@2Mhlq`A4gdP}tFG>PEMt6p{P*wQxucN|pIjI|NnToBUf$g$!p3gBVTHl| zl5v2@e^=1D(^R)XH;MH0Hlm`Q16-fN=Y;qCIjG9ueDX95l-{LT0bA;QFv0lkYHuv_ zUxS+A$rAdykl+S4FE8(#H%IC>*Vk}ou{Yc(kCG>x@e!Yp0V3S0qpi(?9|Vf?S5{U` zRd)eRb2{HO(9#+W?tT6G_1M@L_+4IJzT44`_#M>veX-`@BO`Nf*Ch7>`raW^w#zHL z9RP75{Qp*x)`SEY6~q}TU%q}d|q=;>F~#1!f1=@a7PZ4RhlLwkVF_5=Osx6yX_GoUy2_4c;aJsrXmgQ5b; z>}!WQ|4vr+=;~y{Uh&Zkn2-P7*wBA*;d0*Kwfi(&2?I(F#z5_H=f+?P>6N5*43aS9 zKd*F*WYN<+`jRsC!MMk#y`X~snL>_4Vp}|ji23B@Dlp=RGl~pe>(7d5t|qZZB;0T!XJi5_oEC7 zc`ABSzFcUWuW9kd1Uah56Ep}`MFLM8!0rTdIgDMgh$DW|nr3G@)dSyK z(^F?lPgr%j+YRWEy~fnCrsn#GzJ)w$v4Zn7| zOd{UEA<@)(i=-Wu1Z48(wALrxRTB&{!Ta9Okob_Fl-)7uPnvj2FBS@{I;WKlVh73e1pnTdO_LA4psgW`GPZ5H z_V_?#uj^coNRLZJ#hz%~xv9<@pub}5v%n0%f&$eGCZ?uLL$b$+ouTRR@AdZ0p?_%W zd83E7tc5TTYmwK5U>}8q9xYH=6F&9!utWW~baa7cf^DUk0PkvU znKWrk;y>=dMZJ9P)2<0#vcYfd926!5$B&MW=@}UT;BK!AE99~K1NkoK{)FETNX+6| z+S=vj%dOyz9Uwt318E3o8sTAKP<(s{m*EQwb4F1%P`1NEMWwSnmJ6`gip``BULc4b zg`kMV$Xz>79gSZOnN$g{hW}cu@ zbb}rCq4NV>UCnhd=XC!H3_!&TFkxD|1FJ#WM>xd%TS0vbm`__, `tutorial here `__). +It uses multiple input :ref:`NeuralNetwork` feature and links all 3 camera streams directly to the NN node. + +Demo +#### + +.. image:: https://user-images.githubusercontent.com/18037362/134209980-09c6e2f9-8a26-45d5-a6ad-c31d9e2816e1.png + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/NeuralNetwork/concat_multi_input.py + :language: python + :linenos: + + .. tab:: C++ + + Also `available on GitHub `__ + + .. 
diff --git a/docs/source/samples/NeuralNetwork/normalization_multi_input.rst b/docs/source/samples/NeuralNetwork/normalization_multi_input.rst
new file mode 100644
index 000000000..d2d6f2926
--- /dev/null
+++ b/docs/source/samples/NeuralNetwork/normalization_multi_input.rst
@@ -0,0 +1,46 @@
+Multi-Input Frame Normalization
+===============================
+
+This example shows how you can normalize a frame before sending it to another NN. Many neural network models
+require frames with RGB values (pixels) in the range :code:`-0.5` to :code:`0.5`. :ref:`ColorCamera`'s preview outputs
+values between :code:`0` and :code:`255`. Simple custom NN, created with PyTorch (`link here `__, `tutorial here `__),
+allows users to specify mean and scale factors that will apply for all frame values (pixels).
+
+.. math::
+
+    output = (input - mean) / scale
+
+.. image:: /_static/images/examples/normalize_model.png
+
+On the host, values are converted back to :code:`0`-:code:`255`, so they can be displayed by OpenCV.
+
+.. note::
+    This is just a demo, for normalization you should look into OpenVINO's `model optimizer `__ arguments :code:`--mean_values` and :code:`--scale_values`.
+
+Setup
+#####
+
+.. include:: /includes/install_from_pypi.rst
+
+Source code
+###########
+
+.. tabs::
+
+    .. tab:: Python
+
+        Also `available on GitHub `__
+
+        .. literalinclude:: ../../../../examples/NeuralNetwork/normalization_multi_input.py
+            :language: python
+            :linenos:
+
+    .. tab:: C++
+
+        Also `available on GitHub `__
+
+        .. literalinclude:: ../../../../depthai-core/examples/src/normalization_multi_input.cpp
+            :language: cpp
+            :linenos:
+
+.. include:: /includes/footer-short.rst
diff --git a/examples/NeuralNetwork/concat_multi_input.py b/examples/NeuralNetwork/concat_multi_input.py
new file mode 100644
index 000000000..b451b72c6
--- /dev/null
+++ b/examples/NeuralNetwork/concat_multi_input.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+
+from pathlib import Path
+import sys
+import numpy as np
+import cv2
+import depthai as dai
+SHAPE = 300
+
+# Get argument first
+nnPath = str((Path(__file__).parent / Path('../models/concat_openvino_2021.4_6shave.blob')).resolve().absolute())
+if len(sys.argv) > 1:
+    nnPath = sys.argv[1]
+
+if not Path(nnPath).exists():
+    import sys
+    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
+
+p = dai.Pipeline()
+p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)
+
+camRgb = p.createColorCamera()
+camRgb.setPreviewSize(SHAPE, SHAPE)
+camRgb.setInterleaved(False)
+camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
+
+left = p.create(dai.node.MonoCamera)
+left.setBoardSocket(dai.CameraBoardSocket.LEFT)
+left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+
+# ImageManip for resizing (the NN model expects a 300x300 input) and changing the frame type
+manipLeft = p.create(dai.node.ImageManip)
+manipLeft.initialConfig.setResize(300, 300)
+manipLeft.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
+left.out.link(manipLeft.inputImage)
+
+right = p.create(dai.node.MonoCamera)
+right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
+right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+
+# ImageManip for resizing (the NN model expects a 300x300 input) and changing the frame type
+manipRight = p.create(dai.node.ImageManip)
+manipRight.initialConfig.setResize(300, 300)
+manipRight.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
+right.out.link(manipRight.inputImage)
+
+# NN that concatenates all 3 input frames
+nn = p.createNeuralNetwork()
+nn.setBlobPath(nnPath)
+nn.setNumInferenceThreads(2)
+
+manipLeft.out.link(nn.inputs['img1'])
+camRgb.preview.link(nn.inputs['img2'])
+manipRight.out.link(nn.inputs['img3'])
+
+# Send the concatenated frame from the NN to the host via XLink
+nn_xout = p.createXLinkOut()
+nn_xout.setStreamName("nn")
+nn.out.link(nn_xout.input)
+
+# Pipeline is defined, now we can connect to the device
+with dai.Device(p) as device:
+    qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
+    shape = (3, SHAPE, SHAPE * 3)
+
+    while True:
+        inNn = np.array(qNn.get().getData())
+        frame = inNn.view(np.float16).reshape(shape).transpose(1, 2, 0).astype(np.uint8).copy()
+
+        cv2.imshow("Concat", frame)
+
+        if cv2.waitKey(1) == ord('q'):
+            break
diff --git a/examples/NeuralNetwork/normalization_multi_input.py b/examples/NeuralNetwork/normalization_multi_input.py
new file mode 100755
index 000000000..4a757dfaf
--- /dev/null
+++ b/examples/NeuralNetwork/normalization_multi_input.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+
+from pathlib import Path
+import sys
+import numpy as np
+import cv2
+import depthai as dai
+SHAPE = 300
+
+# Get argument first
+nnPath = str((Path(__file__).parent / Path('../models/normalize_openvino_2021.4_4shave.blob')).resolve().absolute())
+if len(sys.argv) > 1:
+    nnPath = sys.argv[1]
+
+if not Path(nnPath).exists():
+    import sys
+    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
+
+p = dai.Pipeline()
+p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)
+
+camRgb = p.createColorCamera()
+
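+# Note: with FP16 enabled, the preview pixel values stay in the 0..255 range,
+# they are just emitted as float16 instead of uint8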
+# Model expects values in FP16, as we have compiled it with `-ip FP16`
+camRgb.setFp16(True)
+camRgb.setInterleaved(False)
+camRgb.setPreviewSize(SHAPE, SHAPE)
+
+nn = p.createNeuralNetwork()
+nn.setBlobPath(nnPath)
+nn.setNumInferenceThreads(2)
+
+script = p.create(dai.node.Script)
+script.setScript("""
+# Run script only once
+# Model formula:
+# output = (input - mean) / scale
+
+# This configuration will subtract 127.5 from all frame values (pixels)
+# 0.0 .. 255.0 -> -127.5 .. 127.5
+data = NNData(2)
+data.setLayer("mean", [127.5])
+node.io['mean'].send(data)
+
+# This configuration will divide all frame values (pixels) by 255.0
+# -127.5 .. 127.5 -> -0.5 .. 0.5
+data = NNData(2)
+data.setLayer("scale", [255.0])
+node.io['scale'].send(data)
+""")
+
+# Re-use the initial values for multiplier/addend
+script.outputs['mean'].link(nn.inputs['mean'])
+nn.inputs['mean'].setWaitForMessage(False)
+
+script.outputs['scale'].link(nn.inputs['scale'])
+nn.inputs['scale'].setWaitForMessage(False)
+# Always wait for the new frame before starting inference
+camRgb.preview.link(nn.inputs['frame'])
+
+# Send normalized frame values to host
+nn_xout = p.createXLinkOut()
+nn_xout.setStreamName("nn")
+nn.out.link(nn_xout.input)
+
+# Pipeline is defined, now we can connect to the device
+with dai.Device(p) as device:
+    qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
+    shape = (3, SHAPE, SHAPE)
+    while True:
+        inNn = np.array(qNn.get().getData())
+        # Get back the frame. It's currently normalized to -1.0 - 1.0
+        frame = inNn.view(np.float16).reshape(shape).transpose(1, 2, 0)
+        # To get the original frame back (0-255), we multiply all frame values (pixels) by 255 and then add 127.5 to them.
+        frame = (frame * 255.0 + 127.5).astype(np.uint8)
+        cv2.imshow("Original frame", frame)
+
+        if cv2.waitKey(1) == ord('q'):
+            break
diff --git a/examples/models/concat-model/model.yml b/examples/models/concat-model/model.yml
new file mode 100644
index 000000000..3f26f962c
--- /dev/null
+++ b/examples/models/concat-model/model.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
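+#
+# Downloader manifest: the entry under `files` pins the blob by name, size and
+# sha256 so the model downloader can verify the artifact fetched from `source`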
+
+description: >-
+  concat-model
+task_type: image_inpainting # Arbitrary task type, so that it's supported by the model downloader
+files:
+  - name: concat_openvino_2021.4_6shave.blob
+    size: 1024
+    sha256: 6ac3023ea8dac9b7501ead0f9b2c2a4495d2791a58b7049de065246455cf87be
+    source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/concat_openvino_2021.4_6shave.blob
+
+framework: dldt
+license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE
diff --git a/examples/models/normalization-model/model.yml b/examples/models/normalization-model/model.yml
new file mode 100644
index 000000000..ce6c65ed5
--- /dev/null
+++ b/examples/models/normalization-model/model.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+description: >-
+  normalization-model
+task_type: image_inpainting # Arbitrary task type, so that it's supported by the model downloader
+files:
+  - name: normalize_openvino_2021.4_4shave.blob
+    size: 1216
+    sha256: 9b5dc4b375ed9218c2910284556f8152cbfcc0130e7b7a42d9a9991eae8be23a
+    source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/normalize_openvino_2021.4_4shave.blob
+
+framework: dldt
+license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE

From 600e6a10e2039d90ef450af24184d5c2ab61c304 Mon Sep 17 00:00:00 2001
From: Erol444
Date: Mon, 17 Jan 2022 19:41:16 -0800
Subject: [PATCH 2/2] Update depthai-core

---
 depthai-core                                  |  2 +-
 .../NeuralNetwork/concat_multi_input.rst      |  7 ++--
 .../normalization_multi_input.rst             | 16 ++++-----
 docs/source/tutorials/code_samples.rst        |  6 ++++
 examples/NeuralNetwork/concat_multi_input.py  | 34 +++++++------------
 .../normalization_multi_input.py              |  7 ++--
 6 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/depthai-core b/depthai-core
index 0eaab54a7..eaf90f98b 160000
--- a/depthai-core
+++ b/depthai-core
@@ -1 +1 @@
-Subproject commit 0eaab54a7e3415ec135d315dbd33d931475c0097
+Subproject commit eaf90f98b369785b66a3a3fa6807baf19e0f0fe6
diff --git a/docs/source/samples/NeuralNetwork/concat_multi_input.rst b/docs/source/samples/NeuralNetwork/concat_multi_input.rst
index f9717e363..5b5f93952 100644
--- a/docs/source/samples/NeuralNetwork/concat_multi_input.rst
+++ b/docs/source/samples/NeuralNetwork/concat_multi_input.rst
@@ -1,8 +1,9 @@
 Multi-Input Frame Concatenation
 ================================
-Example concatenates all 3 inputs with a simple custom NN created with PyTorch (`link here `__, `tutorial here `__).
-It uses multiple input :ref:`NeuralNetwork` feature and links all 3 camera streams directly to the NN node.
+Example concatenates all 3 inputs with a simple custom model created with PyTorch (`link here `__,
+`tutorial here `__).
+It uses :ref:`NeuralNetwork`'s multiple input feature and links all 3 camera streams directly to the NeuralNetwork node.
 
 Demo
 ####
@@ -31,7 +32,7 @@ Source code
 
         Also `available on GitHub `__
 
-        .. literalinclude:: ../../../../depthai-core/examples/src/concat_multi_input.cpp
+        .. literalinclude:: ../../../../depthai-core/examples/NeuralNetwork/concat_multi_input.cpp
             :language: cpp
             :linenos:
 
diff --git a/docs/source/samples/NeuralNetwork/normalization_multi_input.rst b/docs/source/samples/NeuralNetwork/normalization_multi_input.rst
index d2d6f2926..75007c617 100644
--- a/docs/source/samples/NeuralNetwork/normalization_multi_input.rst
+++ b/docs/source/samples/NeuralNetwork/normalization_multi_input.rst
@@ -1,21 +1,21 @@
-Multi-Input Frame Normalization
-===============================
+Frame Normalization
+===================
 
-This example shows how you can normalize a frame before sending it to another NN. Many neural network models
+This example shows how you can normalize a frame before sending it to another neural network. Many neural network models
 require frames with RGB values (pixels) in the range :code:`-0.5` to :code:`0.5`. :ref:`ColorCamera`'s preview outputs
 values between :code:`0` and :code:`255`. Simple custom NN, created with PyTorch (`link here `__, `tutorial here `__),
-allows users to specify mean and scale factors that will apply for all frame values (pixels).
+allows users to specify mean and scale factors that will be applied to all frame values (pixels).
 
 .. math::
 
     output = (input - mean) / scale
 
 .. image:: /_static/images/examples/normalize_model.png
 
 On the host, values are converted back to :code:`0`-:code:`255`, so they can be displayed by OpenCV.
 
 .. note::
-    This is just a demo, for normalization you should look into OpenVINO's `model optimizer `__ arguments :code:`--mean_values` and :code:`--scale_values`.
+    This is just a demo, for normalization you should use OpenVINO's `model optimizer `__ arguments :code:`--mean_values` and :code:`--scale_values`.
 
 Setup
 #####
@@ -39,7 +39,7 @@ Source code
 
         Also `available on GitHub `__
 
-        .. literalinclude:: ../../../../depthai-core/examples/src/normalization_multi_input.cpp
+        .. literalinclude:: ../../../../depthai-core/examples/NeuralNetwork/normalization_multi_input.cpp
             :language: cpp
             :linenos:
 
diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst
index 29e82a814..f01f849a4 100644
--- a/docs/source/tutorials/code_samples.rst
+++ b/docs/source/tutorials/code_samples.rst
@@ -16,6 +16,7 @@ Code Samples
    ../samples/mixed/*
    ../samples/MobileNet/*
    ../samples/MonoCamera/*
+   ../samples/NeuralNetwork/*
    ../samples/ObjectTracker/*
    ../samples/Script/*
    ../samples/SpatialDetection/*
@@ -95,6 +96,11 @@ are presented with code.
 - :ref:`Mono Camera Control` - Demonstrates how to control the mono camera (crop, exposure, sensitivity) from the host
 - :ref:`Mono Full Resolution Saver` - Saves mono (720P) images to the host (:code:`.png`)
 
+.. rubric:: NeuralNetwork
+
+- :ref:`Multi-Input Frame Concat ` - Concat mono/rgb streams on the device with a custom model
+- :ref:`Frame Normalization` - Normalize the frame on the device with a custom model
+
 .. rubric:: ObjectTracker
 
 - :ref:`Object tracker on video` - Performs object tracking from the video
diff --git a/examples/NeuralNetwork/concat_multi_input.py b/examples/NeuralNetwork/concat_multi_input.py
index b451b72c6..bdc901ca9 100644
--- a/examples/NeuralNetwork/concat_multi_input.py
+++ b/examples/NeuralNetwork/concat_multi_input.py
@@ -24,34 +24,26 @@
 camRgb.setInterleaved(False)
 camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
 
-left = p.create(dai.node.MonoCamera)
-left.setBoardSocket(dai.CameraBoardSocket.LEFT)
-left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
-
-# ImageManip for resizing (the NN model expects a 300x300 input) and changing the frame type
-manipLeft = p.create(dai.node.ImageManip)
-manipLeft.initialConfig.setResize(300, 300)
-manipLeft.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
-left.out.link(manipLeft.inputImage)
-
-right = p.create(dai.node.MonoCamera)
-right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
-right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
-
-# ImageManip for resizing (the NN model expects a 300x300 input) and changing the frame type
-manipRight = p.create(dai.node.ImageManip)
-manipRight.initialConfig.setResize(300, 300)
-manipRight.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
-right.out.link(manipRight.inputImage)
+def create_mono(p, socket):
+    mono = p.create(dai.node.MonoCamera)
+    mono.setBoardSocket(socket)
+    mono.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+
+    # ImageManip for resizing (the NN model expects a 300x300 input) and changing the frame type
+    manip = p.create(dai.node.ImageManip)
+    manip.initialConfig.setResize(300, 300)
+    manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
+    mono.out.link(manip.inputImage)
+    return manip.out
 
 # NN that concatenates all 3 input frames
 nn = p.createNeuralNetwork()
 nn.setBlobPath(nnPath)
 nn.setNumInferenceThreads(2)
 
-manipLeft.out.link(nn.inputs['img1'])
 camRgb.preview.link(nn.inputs['img2'])
-manipRight.out.link(nn.inputs['img3'])
+create_mono(p, dai.CameraBoardSocket.LEFT).link(nn.inputs['img1'])
+create_mono(p, dai.CameraBoardSocket.RIGHT).link(nn.inputs['img3'])
 
 # Send the concatenated frame from the NN to the host via XLink
 nn_xout = p.createXLinkOut()
diff --git a/examples/NeuralNetwork/normalization_multi_input.py b/examples/NeuralNetwork/normalization_multi_input.py
index 4a757dfaf..62594c8ef 100755
--- a/examples/NeuralNetwork/normalization_multi_input.py
+++ b/examples/NeuralNetwork/normalization_multi_input.py
@@ -31,7 +31,7 @@
 
 script = p.create(dai.node.Script)
 script.setScript("""
-# Run script only once
+# Run script only once. We could also send these values from the host.
 # Model formula:
 # output = (input - mean) / scale
 
@@ -68,10 +68,11 @@
     shape = (3, SHAPE, SHAPE)
     while True:
         inNn = np.array(qNn.get().getData())
-        # Get back the frame. It's currently normalized to -1.0 - 1.0
+        # Get back the frame. It's currently normalized to -0.5 - 0.5
         frame = inNn.view(np.float16).reshape(shape).transpose(1, 2, 0)
-        # To get the original frame back (0-255), we multiply all frame values (pixels) by 255 and then add 127.5 to them.
+        # To get the original frame back (0-255), we multiply all frame values (pixels) by 255 and then add 127.5 to them
         frame = (frame * 255.0 + 127.5).astype(np.uint8)
+        # Show the initial frame
         cv2.imshow("Original frame", frame)
 
         if cv2.waitKey(1) == ord('q'):
             break
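
The two custom models above ship in this patch series only as precompiled blobs; their PyTorch sources are linked from the .rst pages. For reference, a minimal sketch of what they could look like. The module bodies and input names ('img1'/'img2'/'img3' and 'frame'/'mean'/'scale') mirror the example pipelines above; the ONNX export step and file names are illustrative assumptions, not taken from this patch.

#!/usr/bin/env python3
# Illustrative sketch only - the actual model sources are linked from the docs
import torch
from torch import nn

class Concat(nn.Module):
    # Concatenates three NCHW frames along the width axis; this matches the
    # host-side reshape to (3, SHAPE, SHAPE * 3) in concat_multi_input.py
    def forward(self, img1, img2, img3):
        return torch.cat((img1, img2, img3), dim=3)

class Normalize(nn.Module):
    # output = (input - mean) / scale, the formula from the docs above
    def forward(self, frame, mean, scale):
        return (frame - mean) / scale

X = torch.ones((1, 3, 300, 300), dtype=torch.float32)
torch.onnx.export(Concat(), (X, X, X), 'concat.onnx',
                  input_names=['img1', 'img2', 'img3'], output_names=['output'])
torch.onnx.export(Normalize(), (X, torch.tensor([0.0]), torch.tensor([1.0])), 'normalize.onnx',
                  input_names=['frame', 'mean', 'scale'], output_names=['output'])
# The .onnx files would then be compiled to .blob for the device, e.g. with
# blobconverter (using -ip FP16, as the normalization example expects)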