From 08f03316220c60818b590a46269e304c1988b854 Mon Sep 17 00:00:00 2001
From: Eugenio Culurciello
Date: Thu, 16 May 2013 09:33:00 -0400
Subject: [PATCH] added profiling

---
 .DS_Store                                 |  Bin 15364 -> 15364 bytes
 CMakeLists.txt                            |   20 +
 README.html                               |   37 +
 README.md                                 |   77 +-
 dok/graphicalmodels/index.dok             |    9 +
 dok/gui/index.dok                         |    5 +
 dok/supervised/convnet.png                |  Bin 0 -> 71613 bytes
 dok/supervised/index.dok                  | 1134 ++++++++
 dok/supervised/linear_regression.png      |  Bin 0 -> 685 bytes
 dok/supervised/logistic_argmax.png        |  Bin 0 -> 1551 bytes
 dok/supervised/logistic_regression.png    |  Bin 0 -> 2750 bytes
 dok/supervised/loss.png                   |  Bin 0 -> 993 bytes
 dok/supervised/mse_loss.png               |  Bin 0 -> 1430 bytes
 dok/supervised/nll_loss.png               |  Bin 0 -> 1634 bytes
 dok/unsupervised/auto_decoder.png         |  Bin 0 -> 693 bytes
 dok/unsupervised/auto_encoder.png         |  Bin 0 -> 837 bytes
 dok/unsupervised/index.dok                |  344 +++
 dok/unsupervised/mse_loss.png             |  Bin 0 -> 1636 bytes
 dok/unsupervised/psd_encoder.png          |  Bin 0 -> 1398 bytes
 dok/unsupervised/psd_loss.png             |  Bin 0 -> 2444 bytes
 dok/unsupervised/sparse_coding.png        |  Bin 0 -> 1031 bytes
 dok/unsupervised/sparse_coding_optim.png  |  Bin 0 -> 1569 bytes
 face-detector/PyramidPacker.lua           |    0
 face-detector/PyramidUnPacker.lua         |    0
 face-detector/face.net                    |  Bin
 face-detector/face.net.ascii              | 2461 +++++++++++++++++
 face-detector/g.ui                        |    0
 face-detector/run.lua                     |    7 +-
 face-detector/slideshow.html              |    0
 filter-bank/g.ui                          |    0
 filter-bank/run.lua                       |    4 +-
 gabor-layer-demo/GaborLayer.lua           |    5 +-
 .../example-linear-regression.lua         |  235 ++
 load-data/load-images.lua                 |   79 +
 .../example-logistic-regression.csv       |  736 +++++
 .../example-logistic-regression.lua       |  532 ++++
 neuflow-demos/.DS_Store                   |  Bin 0 -> 6148 bytes
 profiling/.DS_Store                       |  Bin 0 -> 6148 bytes
 profiling/conv-cpu.lua                    |   49 +
 profiling/conv-gpu.lua                    |   54 +
 profiling/conv_model.lua                  |   82 +
 profiling/linear-cpu.lua                  |   41 +
 profiling/linear-gpu.lua                  |   52 +
 profiling/results.rtf                     |   51 +
 simple-frame-grabber/g.ui                 |    0
 tensors/slicing.lua                       |  157 ++
 .../{dataset.lua => dataset-mnist.lua}    |    0
 train-a-digit-classifier/train-on-mnist.lua |  284 +-
 train-autoencoder/autoencoder-data.lua    |  201 ++
 train-autoencoder/train-autoencoder.lua   |  394 +++
 train-on-cifar/train-on-cifar.lua         |  107 +-
 .../train-on-housenumbers.lua             |  413 +++
 52 files changed, 7329 insertions(+), 241 deletions(-)
 create mode 100644 CMakeLists.txt
 create mode 100644 README.html
 mode change 100755 => 100644 README.md
 create mode 100644 dok/graphicalmodels/index.dok
 create mode 100644 dok/gui/index.dok
 create mode 100644 dok/supervised/convnet.png
 create mode 100644 dok/supervised/index.dok
 create mode 100644 dok/supervised/linear_regression.png
 create mode 100644 dok/supervised/logistic_argmax.png
 create mode 100644 dok/supervised/logistic_regression.png
 create mode 100644 dok/supervised/loss.png
 create mode 100644 dok/supervised/mse_loss.png
 create mode 100644 dok/supervised/nll_loss.png
 create mode 100644 dok/unsupervised/auto_decoder.png
 create mode 100644 dok/unsupervised/auto_encoder.png
 create mode 100644 dok/unsupervised/index.dok
 create mode 100644 dok/unsupervised/mse_loss.png
 create mode 100644 dok/unsupervised/psd_encoder.png
 create mode 100644 dok/unsupervised/psd_loss.png
 create mode 100644 dok/unsupervised/sparse_coding.png
 create mode 100644 dok/unsupervised/sparse_coding_optim.png
 mode change 100755 => 100644 face-detector/PyramidPacker.lua
 mode change 100755 => 100644 face-detector/PyramidUnPacker.lua
 mode change 100755 => 100644 face-detector/face.net
 create mode 100644 face-detector/face.net.ascii
 mode change 100755 => 100644 face-detector/g.ui
 mode change 100755 => 100644 face-detector/slideshow.html
 mode change 100755 => 100644 filter-bank/g.ui
 create mode 100644 linear-regression/example-linear-regression.lua
 create mode 100644 load-data/load-images.lua
 create mode 100644 logistic-regression/example-logistic-regression.csv
 create mode 100644 logistic-regression/example-logistic-regression.lua
 create mode 100644 neuflow-demos/.DS_Store
 create mode 100644 profiling/.DS_Store
 create mode 100644 profiling/conv-cpu.lua
 create mode 100644 profiling/conv-gpu.lua
 create mode 100644 profiling/conv_model.lua
 create mode 100644 profiling/linear-cpu.lua
 create mode 100644 profiling/linear-gpu.lua
 create mode 100644 profiling/results.rtf
 mode change 100755 => 100644 simple-frame-grabber/g.ui
 create mode 100644 tensors/slicing.lua
 rename train-a-digit-classifier/{dataset.lua => dataset-mnist.lua} (100%)
 mode change 100755 => 100644
 mode change 100755 => 100644 train-a-digit-classifier/train-on-mnist.lua
 create mode 100644 train-autoencoder/autoencoder-data.lua
 create mode 100644 train-autoencoder/train-autoencoder.lua
 mode change 100755 => 100644 train-on-cifar/train-on-cifar.lua
 create mode 100644 train-on-housenumbers/train-on-housenumbers.lua

diff --git a/.DS_Store b/.DS_Store
index 2072f2f5a9f4e6abafe1b98ae8415b198e1d5f40..ae36da075cbfdb0055a1577b5b98931747e9f427 100644
GIT binary patch
delta 164
[binary delta data omitted]

diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..595eb01
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,20 @@
+
+CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR)
+CMAKE_POLICY(VERSION 2.6)
+FIND_PACKAGE(Torch REQUIRED)
+
+ADD_TORCH_DOK(dok/supervised tutorials_supervised "Tutorials" "Supervised Learning" 1.0)
+ADD_TORCH_DOK(dok/unsupervised tutorials_unsupervised "Tutorials" "Unsupervised Learning" 1.1)
+ADD_TORCH_DOK(dok/graphicalmodels tutorials_graphical "Tutorials" "Graphical Models" 1.2)
+ADD_TORCH_DOK(dok/gui tutorials_gui "Tutorials" "Graphical User Interfaces" 1.3)
+
+SET(luasrc linear-regression/example-linear-regression.lua
+           logistic-regression/example-logistic-regression.lua
+           logistic-regression/example-logistic-regression.csv
+           train-a-digit-classifier/train-on-mnist.lua
+           train-a-digit-classifier/dataset-mnist.lua
+           train-on-cifar/train-on-cifar.lua
+           train-on-housenumbers/train-on-housenumbers.lua
+           train-autoencoder/train-autoencoder.lua)
+
+ADD_TORCH_PACKAGE(tutorials "" "${luasrc}" "Tutorials")

diff --git a/README.html b/README.html
new file mode 100644
index 0000000..d6144e5
--- /dev/null
+++ b/README.html
@@ -0,0 +1,37 @@
Torch7 Packages

Demos & Tutorials for Torch7.

All the demos/tutorials provided in this repo require Torch7 to be installed, as well as some extra (3rd-party) packages.

Install

Torch7

Follow instructions on: Torch7's homepage.

3rd-party packages

Different demos/tutorials rely on different 3rd-party packages. If a demo crashes because it can't find a package, simply try to install it using torch-pkg:

$ torch-pkg install image    # an image library for Torch7
$ torch-pkg install nnx      # lots of extra neural-net modules
$ torch-pkg install camera   # a camera interface for Linux/MacOS
$ torch-pkg install ffmpeg   # a video decoder for most formats
$ ...

A complete list of packages can be obtained by doing:

$ torch-pkg list

or by checking out this page.
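
As an illustration, a demo's preamble is usually just a series of require calls for these packages, so a missing package shows up immediately as a failed require. This is a minimal sketch using the package names listed above, not code from any particular demo in this repo:

``` lua
-- minimal sketch: load the 3rd-party packages a demo might depend on;
-- if one of them is not installed, the corresponding require() call
-- fails with a "module not found" error, which is the crash described above
require 'torch'    -- core Torch7
require 'image'    -- image loading/saving
require 'nnx'      -- extra neural-net modules
require 'camera'   -- camera interface (Linux/MacOS)
print('all 3rd-party packages found')
```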

Documentation

Torch's main documentation gets installed when you install Torch, and should be available here, or wherever you installed Torch.

This repo also comes with documentation, in the form of tutorials. To make them available, simply issue the following command in this directory:

$ torch-pkg deploy

After that, a list of tutorials will be available in the documentation.

Tutorials

Each directory provides a tutorial or a demo, with no particular order. It's a good idea to have the local Torch documentation open on the side, for reference. As Torch is based on the Lua language, it's also a good idea to go through the Lua 5.1 book.

Credits

These demos were slowly put together by: Clement Farabet & Roy Lowrance.

diff --git a/README.md b/README.md
old mode 100755
new mode 100644
index 2c107f1..d654f61
--- a/README.md
+++ b/README.md
@@ -1,67 +1,60 @@
 # Demos & Tutorials for Torch7.
 
-All the demos/tutorials provided in this package
-require the following dependencies to be installed, in
-order to work.
+All the demos/tutorials provided in this repo require Torch7 to be installed,
+as well as some extra (3rd-party) packages.
 
-## Install dependencies on Linux (Ubuntu > 9.04):
+## Install
 
-1/ Basic tools
+### Torch7
 
-``` sh
-$ apt-get install gcc g++ git libreadline-dev cmake wget
-```
+Follow instructions on: [Torch7's homepage](http://www.torch.ch/).
+
+### 3rd-party packages
 
-2/ QT4 (at least 4.4)
+Different demos/tutorials rely on different 3rd-party packages. If a demo
+crashes because it can't find a package, simply try to install it using
+torch-pkg:
 
 ``` sh
-$ apt-get install libqt4-core libqt4-gui libqt4-dev
+$ torch-pkg install image    # an image library for Torch7
+$ torch-pkg install nnx      # lots of extra neural-net modules
+$ torch-pkg install camera   # a camera interface for Linux/MacOS
+$ torch-pkg install ffmpeg   # a video decoder for most formats
+$ ...
 ```
 
-3/ Extras
+A complete list of packages can be obtained by doing:
 
 ``` sh
-$ apt-get install ffmpeg gnuplot
+$ torch-pkg list
 ```
 
-## Install dependencies on Mac OS X > 10.5:
-
-0/ Install the dev tools (gcc/g++ from Apple),
-   and we highly recommend to get Homebrew
-   (http://mxcl.github.com/homebrew/) as a replacement
-   for MacPorts.
+or checking out [this page](http://data.neuflow.org/torch).
 
-1/ Basic tools, using Homebrew:
+### Documentation
 
-``` sh
-$ brew install git readline cmake wget
-```
+Torch's main documentation gets installed when you install Torch, and should
+be available [here](file:///usr/local/share/torch/html/index.html), or wherever
+you installed Torch.
 
-2/ Install QT4 (at least 4.4)
+This repo also comes with documentation, in the form of tutorials. To make
+them available, simply issue the following command in this directory:
 
 ``` sh
-$ brew install qt
+$ torch-pkg deploy
 ```
 
-3/ Extras
+After that, a list of tutorials will be available in the documentation.
 
-``` sh
-$ brew install ffmpeg gnuplot
-```
+## Tutorials
 
-## Install Torch7 (full instructions on torch.ch) and extra packages
+Each directory provides a tutorial or a demo, with no particular order.
+It's a good idea to have the local
+[Torch documentation](file:///usr/local/share/torch/html/index.html) open
+on the side, for reference. As Torch is based on the Lua language,
+it's also a good idea to go through the [Lua 5.1](http://www.lua.org/manual/5.1/)
+book.
 
-``` sh
-$ git clone git://github.com/andresy/torch.git
-$ cd torch
-$ mkdir build; cd build
-$ cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local
-$ make install
-```
+## Credits
 
-``` sh
-$ torch-pkg install image    # an image library for Torch7
-$ torch-pkg install nnx      # lots of extra neural-net modules
-$ torch-pkg install camera   # a camera interface for Linux/MacOS
-$ torch-pkg install ffmpeg   # a video decoder for most formats
-```
+These demos were slowly put together by: Clement Farabet & Roy Lowrance.
diff --git a/dok/graphicalmodels/index.dok b/dok/graphicalmodels/index.dok
new file mode 100644
index 0000000..426b4ca
--- /dev/null
+++ b/dok/graphicalmodels/index.dok
@@ -0,0 +1,9 @@
+====== Graphical models ======
+{{anchor:tutorials.graphical}}
+
+In this tutorial, we're going to learn how to define a graphical model,
+using the gm package, and do several things with the model:
+
+  * inference
+  * decoding
+  * training
diff --git a/dok/gui/index.dok b/dok/gui/index.dok
new file mode 100644
index 0000000..a3cb16a
--- /dev/null
+++ b/dok/gui/index.dok
@@ -0,0 +1,5 @@
+====== Graphical User Interface ======
+{{anchor:tutorials.gui}}
+
+In this tutorial, we're going to learn how to construct GUIs
+(Graphical User Interfaces).
diff --git a/dok/supervised/convnet.png b/dok/supervised/convnet.png
new file mode 100644
index 0000000000000000000000000000000000000000..4ecfe327e719b85c42669d4f88788eedf62f9e96
GIT binary patch
literal 71613
[binary image data omitted]
zK0G74=>?jJaOP%^w~HGjop`N>zya00-JlLUD{U>+C7$Ab2ul06`s_d#yBL|1ldoC| zs!D1i4c@Z8pSn2OcN`|bAG3qgkkp{b8}@5HX=;|%*vT|a7-og@9L#3(zlJRcIX>U< z5_8vxiZ+NcI+G!UCqHvVfu_<#w0yhbS#d=>=Y6zAEA^cOx>LXZ**+9Fn!p?PyPF4?d76$6F^T;c&JD>c8n$(`|o<=px!hQrzS6A z;RGTqZb1y;GALiZ=d3~UOI5%jY`Bg(X^r0g4Q`J<*s;Gw<2@X=LTxEu+o$UA)iEIc z)?0U5VLdwG>7I|fd?Uyrp1Jet*Gd)V`-{j&a)cL;u7pip7_hQVd_?V}bl-^ZEkAgi zZX-c(*Ac=nRQ-biE2puCK8I9_>cGl#wtu1??Ri=9_wCA8YO^DYpT7YlGKZOei|LNo z9EpOx6V3Lj{TQ+?d<}S;u>=*mFBF+0Aa>TU`~AOGvzLLWt}fc&G^4cJE~OKFCM#C# zTvDU2G&-w)C%44`I<{NLb;Iv&jugOf8}~IB8@nn~7nDiRk*Q4W--AhC{l2xZ$Nx1` z)QYL=xFL{5?^i}=qELtB0?%Cx%Y0oqS*;20`7?;H7>a@;dK84-hFHO_@ zBCW?Gs7HGdkj3pQq`W;^ghc<0OWI$bTM;j`FDQ%=?L;?a6nLO0zz@-7C0c{ngIAd~y^G%7(- zVWq$pthq9)jyRvUb%9s9*zA(cMDE3;&^?yohkZQsEKA3MGQAQU9DEWr*Bp2gthEv* zs5sEQ5#m`weZbQ9rfLt5i4i+qek`!Eent6=9-YDe+{MOMKN3dpR%9)DAOnSvC*-2r z18Z}dOEOumn~XapK6u%hN2fA6Mserc8NOk1F^a#}v49V$sh)BP#At1T36GSbj3S_i zaj%u-&t(8Rep)lCFi`^(tS)Mt7-D4!Iui#A9jACtFWhPKOs4LkhAW6u=K%VwwRGJJ zdYh4)dVHD`lEeYsiOR9Zhy7~m{L-3&o>3e$)= zVvHX%7@C^)^&_|Yj03X7pH{>F#f`7!1U|_|ea}6oYs%xUW;tl4pkjp-XzW$nv0e@` zu?WxJt}>^*X-Bn=A50*=&QjO|D!N|k|8W96-~+zP0k$r2H^LX^1urP{L2@fTi?Fx5etD3EYJqu?{jGQ<9(WA&Sv^Q#S)A?6Kb>V|4j zvzF1iRK!U_aik~|CJxA--R)%rUdCjaueTX%+qT@THZ|rN!?cZ)->Dh3zp)}Wp0_b7 z;ffi;{-=P$vH74c?FthvskT}56K;MgI2nRT-)8A*%jm8%*vo;xHiwkHSaQe&F4q}s z{0x~n+;yfkmO~!NHNDG*fMn(xmj}fw=wLT9QQ%Tmy5lRnoCje`-N_ zK*T{p&*2F$8{L)X@mtE|ZH~r5^Oe}obbRz=(Zkn=O;Ir((m%>-naeEhg8RS`<9H4Q zL@0s0ly-7VGf>Vyw1L?-U21AR>|usl^Bg;~DMmBj3Lg}zi|TJGAIa?OEp?d-BJUZ% zbN@skN+Kc3g-H*nz&MEIxYzh8&xU}HSP~Ztj#e2%4%L3a=1+bE|2}4%bS3M|UFh8L zcoXXh7iQz(BfOPKGYtkx2AB3?3npG2)XrorIRhE+W#LO*{HuISCM7y62b+%Co%=b` zRQY3by2~8{q*?#jUWg-VYqYTa7$jg5OxW@4w&2iu`?;t~z8@T?M5~>9IZntZ`^v03 zWM`R@w^wKz9H%GRheYJ3_R3jV*=j4*Lo4MPFT++H?FuRW@)%;i6%Taa%C>V^mGsGb z6+y(Ug+-VvrBLizh;6f1AoieulUn5}Gz7+9^@Y%*Mhrx$L~x5Etw-1V7JKJ1pjPd;ChFPdnAd z+i5A;EIfO)PJQqK2sPhk|6%r;*vD+o`ySH_yT5A)$#jF!UZ*cQU7<%WJxt*{2h3lF z4pp-e8vUMJtb1GhVi<{K+@0W#GQ!n%syXnH3R=+;sh-o!Hz|M4wJ7U$J?XvVT$0j! z0Ghf1S|z5UA^Ed0RKmLov}Z+8Fp>ZlZn=im>f47zBVt4UW&~Q|>j8x@yF{E``e5LJ zTWWlv=IDA)BYS4C-=yT6YHAvw9_C*6>2%6xVd|}O&V@t2@V##;svF)yTliC}bRE>= zbdst%(3P}itxa#G3|~w-Al*G<*N0)0vPgfwhhNpoKg$o+(V6kZJBW-2-TG7#{&26P>2A zL|A-^ztzs=A3q-wIaF0paCl<%F=@{Ci2RRM;r=@Dw-e#WX#p^^UrK`Rf8b|W`pZ#n z8t(U0uKN#MXIorSxCkRIb2pIo9X%9KJ03-CF$isc>mXlWCy$)M$O?CcHJtETcnq*Q zJQ!|}Ewh&A$K2HUPigXqKH->eGGA$rEtpBXlA`{Qg`c_9Q z)iIGRo=VVJ$rZLoIbw*RJ9+}mFHFTREKPQ3 +$ torch-pkg install tutorials image nnx optim + + + +===== Linear Regression ===== + +This first example provides a very simple step-by-step example +of linear regression, using ''Torch7'' 's neural network ([[..:nn:index|nn]]) package, +and the optimization package ([[..:optim:index|optim]]). + +The code associated to this example can be found in INSTALL_PREFIX/share/torch/lua/tutorials/: +example-linear-regression.lua + +In this example, we consider a very simple regression problem, where we want to +predict the amount of corn produced by a farm, given the amount of fertilizer and +intesticide used. In other words, we have two input variables, and one output variable. + +==== Definition of the model ==== + +Linear regression is the simplest type of model. It is parametrized by a weight matrix ''W'', +and a bias vector ''b''. Mathematically, it can be written as: + +{{linear_regression.png}} + +To implement this model, we will use a [[..:nn:index#nn.Linear|Linear]] module, +which takes two inputs (fertilizer and insecticide) and produces one output +(corn). 
+ +Note that this linear model has 3 trainable parameters: + * 1 for the weight assigned to fertilizer + * 1 for the weight assigned to insecticide + * 1 for the weight assigned to the bias term + + +require 'nn' +ninputs = 2 +noutputs = 1 +model = nn.Linear(ninputs, noutputs) + + +In the ''nn'' package, all the modules are self-contained building blocks, which might contain +a set of trainable parameters, and if so, a set of similarily-sized matrices that are used to +hold the gradients. + +At this stage, we can make predictions: + + +input = torch.randn(2) +output = model:forward(input) + + +Also, the gradients wrt to all the parameters in the model, as well as wrt to the input of the model, +can be computed this way: + + +input = torch.randn(2) +grad_wrt_output = torch.randn(1) +grad_wrt_input = model:backward(input, grad_wrt_output) + + +Given arbitrary trainable (nn) models, trainable parameters, and gradients can be obtained +this way: + + +parameters,gradients = model:getParameters() + + +We will come back to this later, when we start training the model. + +==== Definition of a loss function ==== + +Now that we have a model, we need to define a loss function to be minimized. In this +regression example, what we want to do is minimize the mean-square error between +the predictions (outputs of the model), and the groundtruth labels, across the entire +dataset, which is defined as: + +{{loss.png}} + +With the per-sample loss defined as: + +{{mse_loss.png}} + +Torch7 provides a couple of standard loss functions, we will use +[[..:nn:index#nn.MSECriterion|nn.MSECriterion]], which provides a readily-usable mean-square +loss: + + +criterion = nn.MSECriterion() + + +Given a loss function and a model, we can now completely estimate the loss for a given +sample/target pair, and also compute the gradients of this loss function wrt to the internal +parameters of the model: + + +input = torch.randn(2) -- a random input +target = torch.randn(1) -- a random target +output = model:forward(input) -- estimate prediction +loss = criterion:forward(output, target) -- estimate loss + +grad_wrt_output = criterion:backward(output, target) -- estimate gradient wrt to model's output +model:backward(input, grad_wrt_output) -- estimate gradients wrt to models' parameters + + +==== Creating the training data ==== + +In all regression problems, some training data needs to be +provided. In a realistic scenario, data comes from some database +or file system, and needs to be loaded from disk. In that +tutorial, we create the data source as a Lua table. + +In general, the data can be stored in arbitrary forms, and using +Lua's flexible table data structure is usually a good idea. +Here we store the data as a 2D Tensor, where each +row represents a training sample, and each column a variable. The +first column is the target variable, and the others are the +input variables. + +The data are from an example in Schaum's Outline: +Dominick Salvator and Derrick Reagle +Shaum's Outline of Theory and Problems of Statistics and Economics +2nd edition +McGraw-Hill +2002 + +The data relate the amount of corn produced, given certain amounts +of fertilizer and insecticide. See p 157 of the text. + +In this example, we want to be able to predict the amount of +corn produced, given the amount of fertilizer and intesticide used. +In other words: fertilizer & insecticide are our two input variables, +and corn is our target value. + +The data entries have this format: ''{corn, fertilizer, insecticide}''. 
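+Each row of the Tensor defined below is therefore one training sample, with the target (corn)
+in the first column and the two inputs (fertilizer, insecticide) in the last two columns.
+Once the Tensor exists, a row can be sliced into a target/input pair like this (an illustrative
+snippet; the same slicing is reused in the training closure further down):
+
+sample = data[1]
+target = sample[{ {1} }]    -- corn
+inputs = sample[{ {2,3} }]  -- fertilizer, insecticide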
+ +Here's how to initialize a Tensor with the data: + + +data = torch.Tensor{ + {40, 6, 4}, + {44, 10, 4}, + {46, 12, 5}, + {48, 14, 7}, + {52, 16, 9}, + {58, 18, 12}, + {60, 22, 14}, + {68, 24, 20}, + {74, 26, 21}, + {80, 32, 24} +} + + +==== Training the model ==== + +To minimize the loss function defined above, using the linear model defined +in ''model'', we follow a stochastic gradient descent procedure (SGD). + +SGD is a good optimization algorithm when the amount of training data +is large, and estimating the gradient of the loss function over the +entire training set is too costly (which is not the case in this example, +but we'll be dealing with online problems across the entire tutorial, +so better to start with the right tools). + +Given an arbitrarily complex model, we can retrieve its trainable +parameters, and the gradients of our loss function wrt these +parameters by doing: + + +x, dl_dx = model:getParameters() + + +We then need to define a closure, ''feval'', which computes +the value of the loss function at a given point ''x'', and the gradient of +that function with respect to ''x''. ''x'' is the vector of trainable weights, +which, in this example, are all the weights of the linear matrix of +our model, plus one bias. The form of this closure is imposed by the ''optim'' +package, in which all optimization methods require the ability to evaluate +the value of a function and its derivatives wrt to the trainable ''x'', at +any point ''x''. + + +feval = function(x_new) + -- set x to x_new, if different + -- (in this simple example, x_new will typically always point to x, + -- so this copy is never happening) + if x ~= x_new then + x:copy(x_new) + end + + -- select a new training sample + _nidx_ = (_nidx_ or 0) + 1 + if _nidx_ > (#data)[1] then _nidx_ = 1 end + + local sample = data[_nidx_] + local target = sample[{ {1} }] -- this funny looking syntax allows + local inputs = sample[{ {2,3} }] -- slicing of arrays. + + -- reset gradients (gradients are always accumulated, to accomodate + -- batch methods) + dl_dx:zero() + + -- evaluate the loss function and its derivative wrt x, for that sample + local loss_x = criterion:forward(model:forward(inputs), target) + model:backward(inputs, criterion:backward(model.output, target)) + + -- return loss(x) and dloss/dx + return loss_x, dl_dx +end + + +Given the function above, we can now easily train the model using SGD. +For that, we need to define four key parameters: + * a learning rate: the size of the step taken at each stochastic estimate of the gradient + * a weight decay, to regularize the solution (L2 regularization) + * a momentum term, to average steps over time + * a learning rate decay, to let the algorithm converge more precisely + + +sgd_params = { + learningRate = 1e-3, + learningRateDecay = 1e-4, + weightDecay = 0, + momentum = 0 +} + + +We're now good to go... all we have left to do is run over the dataset +for a certain number of iterations, and perform a stochastic update +at each iteration. The number of iterations is found empirically here, +but should typically be determinined using cross-validation. + + +-- we cycle 1e4 times over our training data +for i = 1,1e4 do + + -- this variable is used to estimate the average loss + current_loss = 0 + + -- an epoch is a full loop over our training data + for i = 1,(#data)[1] do + + -- optim contains several optimization algorithms. 
+ -- All of these algorithms assume the same parameters: + -- + a closure that computes the loss, and its gradient wrt to x, + -- given a point x + -- + a point x + -- + some parameters, which are algorithm-specific + + _,fs = optim.sgd(feval,x,sgd_params) + + -- Functions in optim all return two things: + -- + the new x, found by the optimization method (here SGD) + -- + the value of the loss functions at all points that were used by + -- the algorithm. SGD only estimates the function once, so + -- that list just contains one value. + + current_loss = current_loss + fs[1] + end + + -- report average error on epoch + current_loss = current_loss / (#data)[1] + print('current loss = ' .. current_loss) + +end + + +Running this produces the following output: + + +current loss = 446.52843167045 +current loss = 209.72667362749 +current loss = 190.12634223999 +current loss = 174.55385751715 +current loss = 161.66828948384 +current loss = 150.98450667711 +current loss = 142.1093935021 +current loss = 134.72182736708 +current loss = 128.55920125807 +current loss = 123.40652701895 +current loss = 119.08760675586 +current loss = 115.45787300296 +current loss = 112.39857668915 +current loss = 109.81206467758 +current loss = 107.61793893366 +current loss = 105.74992971952 +current loss = 104.15334760711 +current loss = 102.78300513789 +current loss = 101.60151990016 + +... + +current loss = 1.5817980707772 +current loss = 1.5817750296182 +current loss = 1.5817519936254 +current loss = 1.5817289627972 +current loss = 1.5817059371316 +current loss = 1.5816829166269 +current loss = 1.5816599012812 + + +==== Testing the model ==== + +Now that the model is trained, one can test it by evaluating it +on new samples. + +The text solves the model exactly using matrix techniques and determines +that ''corn = 31.98 + 0.65 * fertilizer + 1.11 * insecticides'' + +We compare our approximate results with the text's results. + + +text = {40.32, 42.92, 45.33, 48.85, 52.37, 57, 61.82, 69.78, 72.19, 79.42} + +print('id approx text') +for i = 1,(#data)[1] do + local myPrediction = model:forward(data[i][{{2,3}}]) + print(string.format("%2d %6.2f %6.2f", i, myPrediction[1], text[i])) +end + + +Running this produces the following output: + + +id approx text + 1 40.10 40.32 + 2 42.77 42.92 + 3 45.22 45.33 + 4 48.78 48.85 + 5 52.34 52.37 + 6 57.02 57.00 + 7 61.92 61.82 + 8 69.95 69.78 + 9 72.40 72.19 +10 79.74 79.42 + + + + + + +===== Logistic Regression ===== + +The first example provided a step-by-step introduction to training a linear regression model. +Linear regression is very limited, and is typically rarely used in practice. A slightly +more powerful/interesting model is the logistic regression model. + +The code associated to this example can be found in INSTALL_PREFIX/share/torch/lua/tutorials/: +example-logistic-regression.lua . + +In this new example, the problem we try to solve is the following: + * there are 3 brands and 2 explanatory variables + * the variables are coded this way: + * brand: 1, 2 or 3 + * female: 1 if the person is a female, 0 if a male + * age: a positive integer + * the goal is to predict the brand, given the variables ''female'' and ''age'' + +==== Definition of the model ==== + +Logistic regression +is a probabilistic, linear classifier. As its linear counterpart, it is parametrized by a weight +matrix ''W'', and a bias vector ''b''. The outputs of the linear layer are then fed to +a ''softmax'' layer, which produces a properly normalized probability distribution. 
Mathematically,
+it can be defined as:
+
+{{logistic_regression.png}}
+
+If we're interested in classification, the final prediction is typically obtained by taking
+the ''argmax'' of this distribution:
+
+{{logistic_argmax.png}}
+
+in which case the output ''y'' is a scalar.
+
+In Torch, we construct such a model by using a container to stack two layers: a
+[[..:nn:index#nn.Linear|Linear]] module, and a [[..:nn:index#nn.LogSoftMax|LogSoftMax]] module.
+Note that we use a LogSoftMax instead of a SoftMax, for numerical reasons. As we will
+see below, the loss function works with log-probabilities, so we never really need
+regular probabilities.
+
+In this example, we will have 2 input variables, and 3 output variables.
+
+
+model = nn.Sequential()
+model:add( nn.Linear(2,3) )
+model:add( nn.LogSoftMax() )
+
+
+==== Definition of the loss function ====
+
+We want to maximize the likelihood of the correct (target) class, for each sample in the dataset.
+This is equivalent to minimizing the negative log-likelihood (NLL), or minimizing the
+cross-entropy between the predictions of our model and the targets (training data). Mathematically,
+the per-sample loss can be defined as:
+
+{{nll_loss.png}}
+
+Given that the model already produces log-probabilities, the loss is quite straightforward
+to estimate. In Torch, we use the [[..:nn:index#nn.ClassNLLCriterion|ClassNLLCriterion]], which
+expects its ''input'' to be a vector of log-probabilities, and its ''target'' to be
+an integer pointing to the correct class.
+
+
+criterion = nn.ClassNLLCriterion()
+
+
+==== Creating the training data ====
+
+In this example, the data come from a tutorial on using R from UCLA, which can be
+found [[http://www.ats.ucla.edu/stat/r/dae/mlogit.htm|here]].
+
+The model is one of brand preference, where there are 3 brands and 2
+explanatory variables. The variables are coded this way:
+ brand: 1, 2 or 3
+ female: 1 if the person is a female, 0 if a male
+ age: a positive integer
+
+The data are stored in a csv file 'example-logistic-regression.csv'
+and will be read using a 3rd-party package called ''csv''. Like all Torch packages,
+it can be installed like this (at the command line):
+
+
+$ torch-pkg install csv
+
+
+Then loaded as any regular Lua package:
+
+
+require 'csv'
+
+
+To construct our dataset, we first need to load it from the csv file:
+
+
+data = csv.load('example-logistic-regression.csv')
+
+
+The loaded ''data'' contains one list per variable in the original CSV file. We
+can easily turn each of these lists into tensors, to be able to access the
+data more efficiently (by using slicing) during training:
+
+
+-- first convert each variable list to a tensor:
+brands = torch.Tensor(data.brand)
+females = torch.Tensor(data.female)
+ages = torch.Tensor(data.age)
+
+-- copy all the input variables into a single tensor:
+dataset_inputs = torch.Tensor( (#brands)[1],2 )
+dataset_inputs[{ {},1 }] = females
+dataset_inputs[{ {},2 }] = ages
+
+-- the outputs are just the brands
+dataset_outputs = brands
+
+
+At this stage, we have two arrays:
+ * ''dataset_inputs'', an ''Nx2''-dim array, with ''N'' the number of training examples, and ''2'' the number of input variables;
+ * ''dataset_outputs'', an ''N''-dim array of indices, pointing to the groundtruth class for each data point. Indices are in ''{1,2,3}''.
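+As a quick sanity check (this snippet is only illustrative, it is not part of
+example-logistic-regression.lua), we can already push one sample through the model
+and the loss, exactly as we did for linear regression:
+
+input = dataset_inputs[1]        -- a 2-dim vector: {female, age}
+target = dataset_outputs[1]      -- an integer class index in {1,2,3}
+output = model:forward(input)    -- 3 log-probabilities, one per brand
+loss = criterion:forward(output, target)
+print(loss)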
+ +==== Training the model ==== + +For this example, we present two methods to learn the model's parameters: + * Stochastic Gradient Descent (SGD): an online method, which typically scales well with large numbers of training samples + * L-BFGS: a second-order, batch-based optimization algorithm, which has been shown to converge extremely quickly in convex, deterministic optimization problems + +=== Stochastic Gradient Descent (SGD) === + +As for the linear regression example, we now define a closure to estimate +the value of the loss function at a given point ''x'', for a randomly +sampled training pair ''{input,target}''. + + +-- params/gradients +x, dl_dx = model:getParameters() + +-- closure +feval = function(x_new) + -- set x to x_new, if differnt + -- (in this simple example, x_new will typically always point to x, + -- so the copy is really useless) + if x ~= x_new then + x:copy(x_new) + end + + -- select a new training sample + _nidx_ = (_nidx_ or 0) + 1 + if _nidx_ > (#dataset_inputs)[1] then _nidx_ = 1 end + local inputs = dataset_inputs[_nidx_] + local target = dataset_outputs[_nidx_] + + -- reset gradients (gradients are always accumulated, to accomodate + -- batch methods) + dl_dx:zero() + + -- evaluate the loss function and its derivative wrt x, for that sample + local loss_x = criterion:forward(model:forward(inputs), target) + model:backward(inputs, criterion:backward(model.output, target)) + + -- return loss(x) and dloss/dx + return loss_x, dl_dx +end + + +Then we go on by chosing the SGD parameters, and doing N epochs over +the training data: + + +-- parameters: +sgd_params = { + learningRate = 1e-3, + learningRateDecay = 1e-4, + weightDecay = 0, + momentum = 0 +} + +-- epochs +epochs = 1e2 +for i = 1,epochs do + + -- this variable is used to estimate the average loss + current_loss = 0 + + -- an epoch is a full loop over our training data + for i = 1,(#dataset_inputs)[1] do + + -- optim contains several optimization algorithms. + -- All of these algorithms assume the same parameters: + -- + a closure that computes the loss, and its gradient wrt to x, + -- given a point x + -- + a point x + -- + some parameters, which are algorithm-specific + + _,fs = optim.sgd(feval,x,sgd_params) + + -- Functions in optim all return two things: + -- + the new x, found by the optimization method (here SGD) + -- + the value of the loss functions at all points that were used by + -- the algorithm. SGD only estimates the function once, so + -- that list just contains one value. + + current_loss = current_loss + fs[1] + end + + -- report average error on epoch + current_loss = current_loss / (#dataset_inputs)[1] + print('epoch = ' .. i .. + ' of ' .. epochs .. + ' current loss = ' .. current_loss) + +end + + +=== Batch, Second-order Optimization (L-BFGS) === + +Now that we know how to train a model using simple SGD, we can +use more complex optimization heuristics. In the following, we +use a second-order method: L-BFGS, which typically yields +more accurate results (for linear models), but can be significantly +slower. For very large datasets, SGD is typically much faster +to converge, and L-FBGS can be used to refine the results. + +All we need to do is re-define the eval closure such that it evaluates +the loss function and the gradients on the full dataset, to compute +the true loss, and the true gradients (reminder: SGD computes a very +noisy estimate of the loss function and gradients). 
Here's the code: + + +-- we start again, and reset the trained parameter vector: + +model:reset() + +-- next we re-define the closure that evaluates f and df/dx, so that +-- it estimates the true f, and true (exact) df/dx, over the entire +-- dataset. This is a full batch approach. + +feval = function(x_new) + -- set x to x_new, if differnt + -- (in this simple example, x_new will typically always point to x, + -- so the copy is really useless) + if x ~= x_new then + x:copy(x_new) + end + + -- reset gradients (gradients are always accumulated, to accomodate + -- batch methods) + dl_dx:zero() + + -- and batch over the whole training dataset: + local loss_x = 0 + for i = 1,(#dataset_inputs)[1] do + -- select a new training sample + _nidx_ = (_nidx_ or 0) + 1 + if _nidx_ > (#dataset_inputs)[1] then _nidx_ = 1 end + + local inputs = dataset_inputs[_nidx_] + local target = dataset_outputs[_nidx_] + + -- evaluate the loss function and its derivative wrt x, for that sample + loss_x = loss_x + criterion:forward(model:forward(inputs), target) + model:backward(inputs, criterion:backward(model.output, target)) + end + + -- normalize with batch size + loss_x = loss_x / (#dataset_inputs)[1] + dl_dx = dl_dx:div( (#dataset_inputs)[1] ) + + -- return loss(x) and dloss/dx + return loss_x, dl_dx +end + + +Doing the optimization is now really simple, we simply have to configure +L-BFGS, and call it only once! + + +-- L-BFGS parameters are different than SGD: +-- + a line search: we provide a line search, which aims at +-- finding the point that minimizes the loss locally +-- + max nb of iterations: the maximum number of iterations for the batch, +-- which is equivalent to the number of epochs +-- on the given batch. In that example, it's simple +-- because the batch is the full dataset, but in +-- some cases, the batch can be a small subset +-- of the full dataset, in which case maxIter +-- becomes a more subtle parameter. + +lbfgs_params = { + lineSearch = optim.lswolfe, + maxIter = epochs, + verbose = true +} + +print('Training with L-BFGS') +_,fs = optim.lbfgs(feval,x,lbfgs_params) + + + + + + +===== Convolutional Neural Networks (ConvNets) ===== + +Convolutional Networks are trainable architectures composed of multiple stages. The input and output of each stage are sets of arrays called feature maps. For example, if the input is a color image, each feature map would be a 2D array containing a color channel of the input image (for an audio input each feature map would be a 1D array, and for a video or volumetric image, it would be a 3D array). At the output, each feature map represents a particular feature extracted at all locations on the input. Each stage is composed of three layers: a filter bank layer, a non-linearity layer, and a feature pooling layer. A typical ConvNet is composed of one, two or three such 3-layer stages, followed by a classification module. Each layer type is now described for the case of image recognition. + +{{convnet.png?600}} + +Trainable hierarchical vision models, and more generally image processing algorithms are usually expressed as sequences of operations or transformations. They can be well described by a modular approach, in which each module processes an input image bank and produces a new bank. The figure above is a nice graphical illustration of this approach. Each module requires the previous bank to be fully (or at least partially) available before computing its output. This causality prevents simple parallelism to be implemented across modules. 
However, parallelism can easily be introduced within a module, and at several levels, depending on the kind of underlying operations. These forms of parallelism are exploited in Torch7.
+
+Typical ConvNets rely on a few basic modules:
+
+ * Filter bank layer: the input is a 3D array with n1 2D feature maps of size n2 x n3. Each component is denoted x_ijk, and each feature map is denoted x_i. The output is also a 3D array, y, composed of m1 feature maps of size m2 x m3. A trainable filter (kernel) k_ij in the filter bank has size l1 x l2 and connects input feature map x_i to output feature map y_j. The module computes y_j = b_j + sum_i (k_ij * x_i), where * is the 2D discrete convolution operator and b_j is a trainable bias parameter. Each filter detects a particular feature at every location on the input. Hence spatially translating the input of a feature detection layer will translate the output but leave it otherwise unchanged.
+
+ * Non-Linearity Layer: In traditional ConvNets this simply consists of a pointwise tanh() sigmoid function applied to each site (ijk). However, recent implementations have used more sophisticated non-linearities. A useful one for natural image recognition is the rectified sigmoid Rabs: abs(g_i.tanh()), where g_i is a trainable gain parameter. The rectified sigmoid is sometimes followed by a subtractive and divisive local normalization N, which enforces local competition between adjacent features in a feature map, and between features at the same spatial location.
+
+ * Feature Pooling Layer: This layer treats each feature map separately. In its simplest instance, it computes the average values over a neighborhood in each feature map. Recent work has shown that more selective poolings, based on the LP-norm, tend to work best, with P=2, or P=inf (also known as max pooling). The neighborhoods are stepped by a stride larger than 1 (but smaller than or equal to the pooling neighborhood). This results in a reduced-resolution output feature map which is robust to small variations in the location of features in the previous layer. The average operation is sometimes replaced by a max PM. Traditional ConvNets use a pointwise tanh() after the pooling layer, but more recent models do not. Some ConvNets dispense with the separate pooling layer entirely, but use strides larger than one in the filter bank layer to reduce the resolution. In some recent versions of ConvNets, the pooling also pools similar features at the same location, in addition to the same feature at nearby locations.
+
+In this tutorial, we're going to train a variety of convolutional networks on 3 different datasets:
+
+ * The [[http://yann.lecun.com/exdb/mnist/|MNIST]] dataset: a handwritten digit recognition dataset,
+ * [[http://www.cs.toronto.edu/~kriz/cifar.html|CIFAR-10]]: a tiny image dataset, with 10 different classes of objects,
+ * The [[http://ufldl.stanford.edu/housenumbers/|SVHN]] (House Numbers) dataset, a dataset that looks a lot like MNIST, but with digits sampled from Street View images.
+
+For each dataset, we provide a script that basically does everything. You will find these scripts
+installed in INSTALL_PREFIX/share/torch/lua/tutorials/:
+
+ * ''train-on-mnist.lua'' (MNIST)
+ * ''train-on-cifar.lua'' (CIFAR-10)
+ * ''train-on-housenumbers.lua'' (SVHN)
+
+Each script provides an interface to load the training data; we don't detail this in the tutorial (it's just mechanics and not really interesting).
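+To make the three layer types above concrete, here is how each one maps onto an ''nn'' module
+(an illustrative snippet only, using the same parameters as the first stage of the MNIST model
+shown below; the local normalization and LP-pooling variants appear in the later models):
+
+-- filter bank layer: 16 trainable 5x5 convolution kernels over 1 input map
+conv = nn.SpatialConvolution(1, 16, 5, 5)
+-- non-linearity layer: pointwise squashing
+nonlin = nn.Tanh()
+-- feature pooling layer: 2x2 max pooling, with a 2x2 stride
+pool = nn.SpatialMaxPooling(2, 2, 2, 2)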
+ +In this tutorial, we will discuss 2 things, that are common to all 3 datasets: + + * describing a trainable ConvNet for each task, which includes pre-processing of the data + * training the model according to different optimization procedures + +Note that each script also provides flags to replace the ConvNet by a simple MLP +(2-layer neural network), and logistic regression, for the purpose of comparison. + +==== Describing the trainable ConvNet ==== + +Using the 'nn' package, describing ConvNets, MLPs and other forms of sequential trainable +models is really easy. All we have to do is create a top-level wrapper, which, as for +the logistic regression, is going to be a sequential module, and then append modules into +it. + +For all 3 datasets, the input is ''32x32''. For MNIST, it's grayscale, for the two others, +each pixel is encoded as an RGB triplet. For the next few paragraphs, we will assume that +we're using the MNIST data, in which inputs are therefore 1024-dimensional. + +Let's start with a non-convolutional architecture, a simple 2-layer neural network. We +arbitrarily set the number of hidden units as being twice as much as the inputs: + + +model = nn.Sequential() +model:add(nn.Reshape(1024)) +model:add(nn.Linear(1024, 2048)) +model:add(nn.Tanh()) +model:add(nn.Linear(2048,10)) + + +That model is self-explanatory. Moving on to a convolutional version of this, the simplest +form of it would be: + + +model = nn.Sequential() +model:add(nn.Reshape(1,32,32)) +-- layer 1: +model:add(nn.SpatialConvolution(1, 16, 5, 5)) +model:add(nn.Tanh()) +model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) +-- layer 2: +model:add(nn.SpatialConvolution(16, 128, 5, 5)) +model:add(nn.Tanh()) +model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) +-- layer 3, a simple 2-layer neural net: +model:add(nn.Reshape(128*5*5)) +model:add(nn.Linear(128*5*5, 200)) +model:add(nn.Tanh()) +model:add(nn.Linear(200,10)) +model:add(nn.LogSoftMax()) + + +The basic building blocks of this simple ConvNet were all presented at the beginning of +this section. For details on the basic modules used for this ConvNet, check out +this [[..:nn:index#SpatialConvolution|section]]. This model should still be fairly +self-explanatory, except for a couple of subtle details about the geometry (receptive +field sizes) of each layer. Here's more info about this: + + * the input has 1024 variables, and the first layer reshapes that vector into a 3D array of dimensions 1x32x32. It is the convention for all ''nn.Spatial*'' layers to work on 3D arrays, with the first dimension indexing different features, and the next two dimensions indexing the height and width of the image/map. For the MNIST case, there's only 1 input feature: the grayscale value. + * the fist layer applies 16 filters to the input map, each being ''5x5''. The receptive field of this first layer is ''5x5'', and the maps produced by it are therefore ''16x28x28''. This linear transform is then followed by a non-linearity (''tanh''), and a max-pooling function, which pools regions of size ''2x2'', and uses a stride of ''2x2''. The result of that operation is a ''16x14x14'' array, which represents a ''14x14'' map of 16-dimensional feature vectors. The receptive field of each unit at this stage is ''7x7''. + * the second layer is very much analogous to the first, excepet that now the 16-dim feature maps are projected into 128-dim maps, with a fully-connected connection table: each unit in the output array is influenced by a 16x5x5 neighborhood of features in the previous layer. 
That layer therefore has ''16x128x5x5'' trainable kernel weights (and 128 biases). The result of the complete layer (conv+pooling) is a ''128x5x5'' array.
+ * at this stage, the ''5x5'' array of 128-dimensional feature vectors is flattened into a 3200-dimensional vector, which we feed to a two-layer neural net. The final prediction (10-dimensional distribution over classes) is influenced by a 32x32 neighborhood of input variables (pixels).
+
+This model is quite basic, but typically yields excellent results on binary/sparse images such as the MNIST dataset. For natural images, it is necessary to apply some whitening transformation. Empirically, it has been shown that using local normalization operators helps generalization a lot
+([[http://cs.nyu.edu/~koray/publis/jarrett-iccv-09.pdf|Jarret et al.]]).
+One commonly used (biologically-inspired) type of local normalization is contrastive normalization, which imposes zero-mean and unit-norm in local neighborhoods of fairly small size (typically between ''5x5'' and ''15x15''). For color images, it is also a good idea to help the model by separating the color information from the luminance information. While the latter can be locally normalized, it's better to leave color untouched (or simply normalize it globally, at the level of the dataset). Here's an example of code that can be used to preprocess the entire dataset:
+
+
+-- we assume that the data was loaded into two tables: trainData and testData, which have
+-- this form:
+--   trainData = {data=Tensor(trsize,3,32,32), labels=Tensor(trsize)}
+--   testData  = {data=Tensor(tesize,3,32,32), labels=Tensor(tesize)}
+
+-- preprocess trainSet
+normalization = nn.SpatialContrastiveNormalization(1, image.gaussian1D(7))
+for i = 1,trainData:size() do
+   -- rgb -> yuv
+   local rgb = trainData.data[i]
+   local yuv = image.rgb2yuv(rgb)
+   -- normalize y locally:
+   yuv[{1}] = normalization(yuv[{{1}}])
+   trainData.data[i] = yuv
+end
+-- normalize u globally:
+mean_u = trainData.data[{ {},2,{},{} }]:mean()
+std_u = trainData.data[{ {},2,{},{} }]:std()
+trainData.data[{ {},2,{},{} }]:add(-mean_u)
+trainData.data[{ {},2,{},{} }]:div(std_u)
+-- normalize v globally:
+mean_v = trainData.data[{ {},3,{},{} }]:mean()
+std_v = trainData.data[{ {},3,{},{} }]:std()
+trainData.data[{ {},3,{},{} }]:add(-mean_v)
+trainData.data[{ {},3,{},{} }]:div(std_v)
+
+-- preprocess testSet (reusing the training-set statistics for u and v)
+for i = 1,testData:size() do
+   -- rgb -> yuv
+   local rgb = testData.data[i]
+   local yuv = image.rgb2yuv(rgb)
+   -- normalize y locally:
+   yuv[{1}] = normalization(yuv[{{1}}])
+   testData.data[i] = yuv
+end
+-- normalize u globally:
+testData.data[{ {},2,{},{} }]:add(-mean_u)
+testData.data[{ {},2,{},{} }]:div(std_u)
+-- normalize v globally:
+testData.data[{ {},3,{},{} }]:add(-mean_v)
+testData.data[{ {},3,{},{} }]:div(std_v)
+
+
+One other remark: it is typically not a good idea to use fully connected layers, as was done above, in internal layers. In general, favoring large numbers of features (over-completeness) over density of connections helps achieve better results (empirical evidence of this was reported in several papers, as in
+[[http://yann.lecun.com/exdb/publis/pdf/hadsell-iros-08.pdf|Hadsell et al.]]).
+The [[..:nn:index#SpatialConvolutionMap|SpatialConvolutionMap]] module accepts tables of connectivities (maps) that allow one to create arbitrarily sparse connections between two layers. A couple of standard maps/tables are provided in ''nn.tables''.
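+For instance (an illustrative snippet, using the same signature as in the models below),
+''nn.tables.random(nin, nout, nto)'' builds a connection table in which each of the ''nout''
+output maps is connected to ''nto'' input maps chosen at random among the ''nin'' available ones:
+
+-- each of the 32 output feature maps is connected to 4 of the 16 input maps:
+conn = nn.tables.random(16, 32, 4)
+conv = nn.SpatialConvolutionMap(conn, 5, 5)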
+Integrating all these remarks, here is a new model that we will use for the CIFAR-10 dataset:
+
+
+model = nn.Sequential()
+-- stage 1 : filter bank -> squashing -> max pooling
+-- (the mean+std normalization is done once, in the preprocessing step described above)
+local conntable = torch.Tensor{ {1,1},{1,2},{1,3},{1,4},{1,5},{1,6},{1,7},{1,8},{2,9},{2,10},{3,11},{3,12} }
+model:add(nn.SpatialConvolutionMap(conntable, 5, 5))
+model:add(nn.Tanh())
+model:add(nn.SpatialMaxPooling(2, 2, 2, 2))
+-- stage 2 : filter bank -> squashing -> max pooling
+model:add(nn.SpatialConvolutionMap(nn.tables.random(12, 32, 4), 5, 5))
+model:add(nn.Tanh())
+model:add(nn.SpatialMaxPooling(2, 2, 2, 2))
+-- stage 3 : standard 2-layer neural network
+model:add(nn.Reshape(32*5*5))
+model:add(nn.Linear(32*5*5, 128))
+model:add(nn.Tanh())
+model:add(nn.Linear(128,10))
+model:add(nn.LogSoftMax())
+
+
+Finally, recent work ([[http://cs.nyu.edu/~koray/publis/jarrett-iccv-09.pdf|Jarret et al.]]) has
+demonstrated the advantage of locally normalizing sets of internal features, at each stage
+of the model. The use of smoother pooling functions, such as the L2 norm for instance instead
+of the harsher max-pooling, has also been shown to yield better generalization
+([[http://arxiv.org/pdf/1204.3968v1.pdf|Sermanet et al.]]). We will use these two ingredients
+for the House Numbers dataset. This gives us the new model:
+
+
+model = nn.Sequential()
+-- stage 1 : filter bank -> squashing -> L2 pooling
+model:add(nn.SpatialConvolutionMap(nn.tables.random(3,16,1), 5, 5))
+model:add(nn.Tanh())
+model:add(nn.SpatialLPPooling(16,2,2,2,2,2))
+-- stage 2 : local normalization -> filter bank -> squashing -> L2 pooling
+model:add(nn.SpatialSubtractiveNormalization(16, image.gaussian1D(7)))
+model:add(nn.SpatialConvolutionMap(nn.tables.random(16, 256, 4), 5, 5))
+model:add(nn.Tanh())
+model:add(nn.SpatialLPPooling(256,2,2,2,2,2))
+-- stage 3 : local normalization -> standard 2-layer neural network
+model:add(nn.SpatialSubtractiveNormalization(256, image.gaussian1D(7)))
+model:add(nn.Reshape(256*5*5))
+model:add(nn.Linear(256*5*5, 128))
+model:add(nn.Tanh())
+model:add(nn.Linear(128,10))   -- 10 output classes (the digits 0-9)
+model:add(nn.LogSoftMax())
+
+
+==== Training the model ====
+
+Once the model has been defined, the training procedure is pretty much the same
+as for the linear/logistic regression examples. One big difference of course is the fact
+that the model is not linear anymore, and therefore the optimization problem is
+not convex. This reinforces the need for a stochastic estimation of gradients, which
+has been shown to produce much better generalization results for several different problems.
+
+As before, we minimize the negative log-likelihood:
+
+
+criterion = nn.ClassNLLCriterion()
+
+
+We also use a confusion matrix, to monitor the progress of our learner:
+
+
+confusion = optim.ConfusionMatrix(classes)   -- 'classes' is the list of class labels defined in each script
+
+
+Now that we have all the building blocks, we just have to define our training function,
+as well as a test function. In the three scripts provided (MNIST, CIFAR, SVHN), the
+optimization algorithm can be set to either L-BFGS, CG, SGD or ASGD. In practice, it's
+very important to start with a few epochs of pure SGD, before switching to L-BFGS or
+ASGD. The intuition for that is related to the non-convex nature of the problem: at the
+very beginning of training (random initialization), the landscape might be highly non-convex,
+and no assumption should be made about the shape of the energy function. Often, SGD
+is the best we can do. Later on, batch methods (L-BFGS, CG) can be used more safely.
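+The training function below reads its settings from an ''opt'' table holding the script's
+command-line options. Here is a sketch of the fields it uses (the values shown are only
+examples; the actual defaults are defined in each script):
+
+opt = {
+   batchSize = 1,          -- mini-batch size (1 = pure stochastic)
+   optimization = 'SGD',   -- 'SGD' | 'ASGD' | 'CG' | 'LBFGS'
+   learningRate = 1e-3,    -- learning rate (SGD/ASGD)
+   weightDecay = 0,        -- L2 regularization (SGD)
+   momentum = 0,           -- momentum (SGD)
+   maxIter = 2,            -- max nb of iterations per batch (CG/L-BFGS)
+   t0 = 1,                 -- start averaging after t0*trsize samples (ASGD)
+}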
+ +Here is our full training function, which demonstrates that you can switch the optimization +you're using at runtime (if you want to), and also modify the batch size you're using +at run time. You can do all these things because we create the evaluation closure +each time we create a new batch. If the batch size is 1, then the method is purely +stochastic. If the batch size is set to the complete dataset, then the method is +a pure batch method. + + +-- retrieve parameters and gradients +parameters,gradParameters = model:getParameters() + +-- training function: +function train(dataset) + -- epoch tracker + epoch = epoch or 1 + + -- local vars + local time = sys.clock() + + -- shuffle at each epoch + shuffle = torch.randperm(trsize) + + -- do one epoch + print(' on training set:') + print(" online epoch # " .. epoch .. ' [batchSize = ' .. opt.batchSize .. ']') + for t = 1,dataset:size(),opt.batchSize do + -- disp progress + xlua.progress(t, dataset:size()) + + -- create mini batch + local inputs = {} + local targets = {} + for i = t,math.min(t+opt.batchSize-1,dataset:size()) do + -- load new sample + local input = dataset.data[shuffle[i]]:double() + local target = dataset.labels[shuffle[i]] + table.insert(inputs, input) + table.insert(targets, target) + end + + -- create closure to evaluate f(X) and df/dX + local feval = function(x) + -- get new parameters + if x ~= parameters then + parameters:copy(x) + end + + -- reset gradients + gradParameters:zero() + + -- f is the average of all criterions + local f = 0 + + -- evaluate function for complete mini batch + for i = 1,#inputs do + -- estimate f + local output = model:forward(inputs[i]) + local err = criterion:forward(output, targets[i]) + f = f + err + + -- estimate df/dW + local df_do = criterion:backward(output, targets[i]) + model:backward(inputs[i], df_do) + + -- update confusion + confusion:add(output, targets[i]) + end + + -- normalize gradients and f(X) + gradParameters:div(#inputs) + f = f/#inputs + + -- return f and df/dX + return f,gradParameters + end + + -- optimize on current mini-batch + if opt.optimization == 'CG' then + config = config or {maxIter = opt.maxIter} + optim.cg(feval, parameters, config) + + elseif opt.optimization == 'LBFGS' then + config = config or {learningRate = opt.learningRate, + maxIter = opt.maxIter, + nCorrection = 10} + optim.lbfgs(feval, parameters, config) + + elseif opt.optimization == 'SGD' then + config = config or {learningRate = opt.learningRate, + weightDecay = opt.weightDecay, + momentum = opt.momentum, + learningRateDecay = 5e-7} + optim.sgd(feval, parameters, config) + + elseif opt.optimization == 'ASGD' then + config = config or {eta0 = opt.learningRate, + t0 = trsize * opt.t0} + _,_,average = optim.asgd(feval, parameters, config) + + else + error('unknown optimization method') + end + end + + -- time taken + time = sys.clock() - time + time = time / dataset:size() + print(" time to learn 1 sample = " .. (time*1000) .. 
'ms') + + -- print confusion matrix + print(confusion) + confusion:zero() + + -- next epoch + epoch = epoch + 1 +end + + +The test function is simpler: + + +-- test function +function test(dataset) + -- local vars + local time = sys.clock() + + -- averaged param use (ASGD) + if average then + cachedparams = parameters:clone() + parameters:copy(average) + end + + -- test over given dataset + print(' on testing Set:') + for t = 1,dataset:size() do + -- disp progress + xlua.progress(t, dataset:size()) + + -- get new sample + local input = dataset.data[t]:double() + local target = dataset.labels[t] + + -- test sample + local pred = model:forward(input) + confusion:add(pred, target) + end + + -- timing + time = sys.clock() - time + time = time / dataset:size() + print(" time to test 1 sample = " .. (time*1000) .. 'ms') + + -- print confusion matrix + print(confusion) + confusion:zero() + + -- averaged param use? + if average then + -- restore parameters + parameters:copy(cachedparams) + end +end + + +We can now train and test like this (please refer to the actual code to look at +how the trainData/testData are created): + + +while true do + train(trainData) + test(testData) +end + + +This produces an output that looks like this: + + + on training set: + online epoch # 1 [batchSize = 1] + [=============================== 2000/2000 ==================================>] + time to learn 1 sample = 11.309485500002ms +ConfusionMatrix: +[[ 175 0 0 4 1 5 4 0 2 0] 91.623% [class: 1] + [ 1 208 1 3 0 1 0 2 1 3] 94.545% [class: 2] + [ 6 9 159 5 2 1 2 4 6 4] 80.303% [class: 3] + [ 2 4 6 158 1 10 2 2 1 5] 82.723% [class: 4] + [ 1 4 5 1 178 1 4 2 0 18] 83.178% [class: 5] + [ 2 5 2 13 8 130 7 0 7 6] 72.222% [class: 6] + [ 7 3 3 4 3 3 176 0 1 0] 88.000% [class: 7] + [ 1 7 2 3 4 0 1 194 1 11] 86.607% [class: 8] + [ 0 12 5 6 1 8 3 3 129 5] 75.000% [class: 9] + [ 5 2 0 4 14 2 0 16 3 164]] 78.095% [class: 10] + + average row correct: 83.229621052742% + + average rowUcol correct (VOC measure): 71.643643975258% + + global correct: 83.55% + saving network to /Users/clement/work/github/xrocks/demos/train-a-digit-classifier/train-on-mnist/mnist.net + on testing Set: + [=============================== 1000/1000 ==================================>] + time to test 1 sample = 2.5694309999963ms +ConfusionMatrix: +[[ 82 0 0 2 0 0 1 0 0 0] 96.471% [class: 1] + [ 0 125 1 0 0 0 0 0 0 0] 99.206% [class: 2] + [ 1 0 104 0 0 0 1 4 6 0] 89.655% [class: 3] + [ 0 0 4 95 0 6 0 0 2 0] 88.785% [class: 4] + [ 0 1 2 0 99 1 1 0 0 6] 90.000% [class: 5] + [ 0 0 1 3 0 80 0 0 2 1] 91.954% [class: 6] + [ 3 0 4 0 1 2 77 0 0 0] 88.506% [class: 7] + [ 0 0 9 0 0 0 0 88 0 2] 88.889% [class: 8] + [ 0 0 1 3 1 1 0 3 78 2] 87.640% [class: 9] + [ 0 0 1 2 2 0 0 7 3 79]] 84.043% [class: 10] + + average row correct: 90.51488161087% + + average rowUcol correct (VOC measure): 83.103343844414% + + global correct: 90.7% + on training set: + online epoch # 2 [batchSize = 1] + [=============================== 2000/2000 ==================================>] + time to learn 1 sample = 11.362733499998ms +ConfusionMatrix: +[[ 187 1 0 0 0 1 1 1 0 0] 97.906% [class: 1] + [ 0 214 1 2 1 1 0 1 0 0] 97.273% [class: 2] + [ 2 2 186 2 1 0 0 2 1 2] 93.939% [class: 3] + [ 0 0 4 176 0 6 1 2 1 1] 92.147% [class: 4] + [ 0 1 1 0 204 0 3 0 0 5] 95.327% [class: 5] + [ 0 1 2 5 2 164 1 0 1 4] 91.111% [class: 6] + [ 1 1 0 0 0 2 195 0 1 0] 97.500% [class: 7] + [ 0 1 2 1 1 0 0 212 0 7] 94.643% [class: 8] + [ 0 0 1 0 1 4 1 1 160 4] 93.023% [class: 9] + [ 4 0 0 1 5 2 0 7 1 190]] 90.476% [class: 
10]
+
+ average row correct: 94.334501028061%
+
+ average rowUcol correct (VOC measure): 89.440071582794%
+
+ global correct: 94.4%
+ saving network to /Users/clement/work/github/xrocks/demos/train-a-digit-classifier/train-on-mnist/mnist.net
+ on testing Set:
+ [=============================== 1000/1000 ==================================>]
+ time to test 1 sample = 2.6758019999979ms
+ConfusionMatrix:
+[[ 83 0 1 0 0 0 1 0 0 0] 97.647% [class: 1]
+ [ 0 124 1 1 0 0 0 0 0 0] 98.413% [class: 2]
+ [ 0 0 108 0 0 0 1 2 4 1] 93.103% [class: 3]
+ [ 0 0 3 99 0 5 0 0 0 0] 92.523% [class: 4]
+ [ 0 1 0 0 102 0 1 0 0 6] 92.727% [class: 5]
+ [ 0 0 1 0 0 85 0 0 1 0] 97.701% [class: 6]
+ [ 3 0 0 0 0 0 84 0 0 0] 96.552% [class: 7]
+ [ 0 0 10 0 0 0 0 88 0 1] 88.889% [class: 8]
+ [ 0 0 2 3 0 1 0 2 81 0] 91.011% [class: 9]
+ [ 0 0 1 2 1 0 0 2 3 85]] 90.426% [class: 10]
+
+ average row correct: 93.899238109589%
+
+ average rowUcol correct (VOC measure): 88.683768510818%
+
+ global correct: 93.9%
+
+
+Yippee! That's it for the supervised training examples.

[GIT binary patch data for the dok/supervised PNG figures omitted]
zu6A`j+T3#pCp*(f+nn82W3)MR04TzAD2_Z5v-b#bf${qRcVk}fccx$klR5u>`E;2j z`iIibL<*0ZmA~MbH!sjjuvAZzt?YB7$dPyVIB8x!Q^EU3al7oR?qE|S`GwY)41oby+nb2Rt|y)&p%(X(a(T%uWqTfk%;<7>FnkgP_}!S z5>fIcx%S|J zix2%Vv!hps<$QzU^uk`HF02CPoCBcC2e#&_4IR}R^FrxYE@E9DhfCfg`_(Af*qi=o zH&jU7!-BLg#}iJc-9We&T<|tYfAPNXwcDrno=u;q2#>P9`oH| zz}^M%jFNdrmGaK)Z*;XY#fvu7Rj+2 zLt&eT2YL1+ehJADS2X;K=)6edvQJZen*H&apqbC9$;izXHx2q6%sbbF&ySqRFDJ7o z{uO|0j69U-e%8HLxD7n@uFI)u^L|8sUAKjf^~YIgRsSR)ls8YhKz<$GJa%BX&h7FF z3csd2;5+;0+RA_yb#`YC<^1I*k|C0#Z1WMXYAQ7$k|4+G@4@PA0RT>Sbvk1D%x?xx z7};~$LE}npQd^*X%^;_hUw_tO>!oYuohFWs1Pvd^P=pQrc0-Lq0rDvJ`9^Wxauj(d zMhP%^AGKem=WisS96V4NV6Bqj&UGM}gHOZwEL?otmh`l!#!~*yZ8>`GisLuZ?;g** zAcbcc&Zizy@M7_U*>*$VZ4-nG@h$#i?nCrm`>f9CG!sJ z4nHF3XJ}o0%b0v+)KtYVuN~%hJ5yNJ#ay6_l=SQC+eQH*o((b^Pa@w^#)1(bF%}+N zyy3+H*chMpiQCQxOqRmk&}4pQCO!ALV0Eg|oGG_7d0|bu(7Wslcnlt2VpYD!WZv+7 z=-R{E*ql_3a^+LI{|L!K;#&3ABZR6%+~Ltv;towN&^04ao|6@Tz^MS?Wt@L^`{-F` zgl1vld}$0t)=n>qADG-5$kjP=t99r$oEAVlXRP+IQ~@4OHA#X|*~CKuEtj$~ucpZ~LUBD&z~2C=KQE=y|5au7D4%F?{8+iL0vM*Hy(G-w!% zLO=ByxGRF1O{AR`ka&g)e{-tP9LjW5zU_rk3I-I&a&?Q0j8?Mxb|gbM$MYx3#Kl5T z=}^XHbqF?wJ{3!oNwFx%92zm4h0Ji~cf4%P>K+dQ2PPRtv6O8a$7AoAZEw3gf}oed z?zd{}k_-tVuHwU=YtNMNxf2Y9EV$87{%a?!5+B+?U2DbKXVU&HBAjyL>ME{%_FAz* zoHT=fU{jGX^>UHxgixFuvSz;H@ClvJ$#mRV~duse@Yks-sf*t<{)2VH3Y4B7Dr zr!ySOxT}P@%HbeUr!7?nS;ON{yMiiCbi6}^#b>ZeGhWZkW2WWFrWd=1TU*YPYFKU{ zk$3a>F;Ik-AdRY;RoxR4{{cF8_)iN=71RvvyZ-hkZ$XIE^zN8Zif?LVA!c=?Jul6G z4rR13&ZvSO^)E~oDLUJUX)aaR>N`Z~8u~^Xc8(om*V#f0)!AI3qgF&cXT309ARF;P z;-_a4*!+fSMr*>#17)XaF@jB9HTC^85k1$9vSW(E+sm>%KSb|b*Pc8K_wLU6OgT)t l)S;`^o$Ch$D8yp|CVl+=e~JTnzfr(GZwE7>R$6?<&3LQ zz5i}D%$b_?cFG;;i2poS7n*N)+H2|_J&C`E)5SHa?0eM)&G7D2W&4Ya2YBXWM=_o- z@Q``=-5}|X5EJ`NBbJ52&zHo@%uL(rvtFU-=7jE`@I|+m%rHDq%+TWJbLd`=i`8~k zxi!q1z6H!jS@W`TFDfZYoZ-mW+8|uoc4PC-li|}V{f@kwB@-xHtC*G>`fjD%{JqB) z*k{jpDN}s4wQuT7&y~i{y$c$9uS!f^HRZy&K4Gp9vvn`$Tu@@@Po4Rrfr?BmQ$QJgbV0FQiTe9&-^M2^5WUR|k3Z6X2-`M?R zPTy^oH5#s)Kdy2JlTEMqGBZ7dHa&=p;^7v4GApRYYkw2lEjGDB z>4~RHb(ba>u2X6DZ7%Z2?iGvL%psf}^=RM1cGbyy$~5jN zr%uP_DoG}untm@uV0PV0xlJZl4=bK^EnVMwt?AR0SF%i7btj}RU-f%wb-=Rs6R)*J zW$>e*F%^G~(d^jD)&1hBo>jm3d25xzeUmHpJlGU= zp=WxNIo~_3)GhMwUKGCA<{H(he)EpaylkQSx7y`j$ef-Po5hh>b3E(P{sTJZDumE(YaNPgg&ebxsLQ E0EXVUwg3PC literal 0 HcmV?d00001 diff --git a/dok/supervised/mse_loss.png b/dok/supervised/mse_loss.png new file mode 100644 index 0000000000000000000000000000000000000000..d0ec347ab608afef7f987a54977e868796c3ebad GIT binary patch literal 1430 zcmV;H1!?+;P)IIdz(^c91(b6IaSQ*Vc>0KHib z)z|daa;|Ge#tg4OwoJNfU%}`s52~+NoJVtO6+6mNerL>p+ca6MJX7zTUz6!8CQz7N zJtjTa*))0fH!_iS5~BB}lrck$Sv*N87RkH7K4ms^#{jx+wsS{37;?=-hTzc@mweCYw$fF>*%6Q|;64XJ%_3b?g})EVI5S&S2slp!BIB zdUZo=59^fZLDwM|W0#=7EM%&C`T&vO(O_}D<#>open}Xu1MC(@p?XhOC?*;=(dXLq z&H#8yJ%4~`P}pPe2z50*(uX|D=!459{)*VrPIoFdCAsKUV;@D#I>0yyY()~lT)t~{!S25|5OXZ&rd%U%8eO*Z91n0Jp750RPiLERca z3F@|>OAOyJrk!c@Y0G6Z7ntpYl`&d(y#`7$rB9}41Zw+i!K1N+ZKA|OWNviyqYkhE z%Ub8dF?_p_I3^=!U=fW#nF(Q}oT2K?hNs;%8d4EwbZmEox{?dwQ+NuFUjhWL4gM4! z(~J_W(>EHS;Kv%mTPTp~4mTS4B{M$)cZ8LhI*F%P$?X&Zn8H_tQU^*8uYWW`!HqS9 zv?bm75$9;R$H3~7cnUe(PGNw_9!aM02SmtW1+&c?$P8vjCiaeto!Iul_@yit|6k&1 z5<~YJj#S5x+bIk%*(34y{bYm!TTJG39J8Y{XL@F3Yb*v27v_b8ZKn`51_<`>VU`}l zy?v4q3U1yId03+Gcp`oW{s3n16s1oAFD6%D_+kTeDDX6GA(*P_dkG++CIDq%v(>9cHca>? 
zCpP8Lv8`mp;~^O(p|NHzoF&a}TqB-k(bcO|TH}5LKtjE-!gTTC=iP|dBM`azveM4o zkJO+Co892e%GS=|z8~&K8y+Pd+5Nl%#}5-~9$>g{y8CEyCCsq8522REr0jR=GQu+N zm8YY6@ToZ*$2lH<9b|cPgP6KKDNDsSRTmT zQA(vpc_bxpIP${(iwOAKqNCblSDYCr`sG?dM;A;pn+i=8a`>HSCSy{umm1 zZ2B>;KL&pm_^buKb<_JS)K^F+k4>*&cyk`)vFYYmUjh$5Hob)5&6%Lbrkf*u4ZQua k={5ZB&SigWdOOnp0E?O=f>PeoYXATM07*qoM6N<$f}z)&egFUf literal 0 HcmV?d00001 diff --git a/dok/supervised/nll_loss.png b/dok/supervised/nll_loss.png new file mode 100644 index 0000000000000000000000000000000000000000..fa200881c7353bcca4e178d99c7b23982c7042f0 GIT binary patch literal 1634 zcmV-o2A%ndP)(4Yxnu?z`V=gc*k_T%QCIz95<_uSV!a!f|o_1yLcONFD-O*8j6LfLbg z+%>(*XL^@);m~KlmO5~|V+|);^)=b1-O8}tzE>tjS82MpGjpgEhwa4ey#3RL$ij1_OixNPlYr2HGaJJ=U0)S=JIDeJ%vE{#B&>rV^kJ)&8XFBm-i<| z5A3LPTZ|uxKd|xZ%9cuj$T^bD?A4FR#jP-hjp;VJ1twJJd>+G?J!p;?HE=ctxJ9N^ zG)q((bw1+}YuGiT#gx?56u;C|>2`gs^mtPUN&(A-iM~@eqhh_$wIat>x2Rtl*Fd)Ii#_x z>Ka`WqG4mesPQ>VFkU>Xr1?flQ{fq*H`q5u?UWL1Ve+lAAW2p?7%M22O!QJOdS39y zBH1^M!6A)-l607A{&Ec%HE2#RMSs0zmw!z>DQH>ikKo zC2P(TB4#h5Mv16o3$`#ioGv82h_3b|3&-0w0bY8f6rdEMz0kj3?zQdiO`eMWcoU}S z++mwhUhf>=^JK5m>~=?tUMpgv)DzCj1z0k$#ZWk)k~F%360fY;Ab8)^uChIT3pO)4 zo-T}rJir!GB|*@i6}BINYrj%}QtH*oG$C6P1ch?0os-PDBRz7H)ChR8!#*cpRSM2M zxJ8V53h;o1{P@6g`f&mW?&k=LVn4iYLbK)-zf1jcd*k0>6Qe&)mzcygiOlA8L-=P( zkeD;Xj-IgKS!Jb2BAu8#XeVBV@d%?eR&pWQH-n)Xs0ch zd9Lovqesn}(GW3@(n8K~^jxAIvu2LZE8-n?V)U-kp+Le5e`D=aX;e(b% zw#(^>qBe9&C@Jgos7VAz&PfznwV}lWZDe#2hCD>>^u}6GMhTAYMpM_Mfv&tmNGfP_ z#&5t@Mu*b{CKoE%D(zVC{sBL3H~_ko5|HqQFsikLf)Pb6y%>Bh*Ch<2@_()M;grgR z86Y#1Dtetp6)51^?uQcBuJtsh3N|s?nXb+jPV|v8i%J+aDJkK9h&^X&J%k87<89sx zZa1h8lslA7^b#fc?Vw~IM%&#O7@JlTV^m)9{()CEVO8gobJ}&*Y1Hggf00HTu#wTu zbkl4BlB@%jh6?wE_wkkkhEf0$))|E#A4R6mv|rouD$fS6g*j!h&0rwmnmN(~Lw@q4 zwZJzcEbcGAOdD+KVH8Rk5jUuEGTJ~GojuA=0mMG|Gl^v!_0g5@Wn;aSkdgbuNe(|hfS-# zH(i~r(|V#3$H+?IF2gOrnU6g!Xmw&WHu~YkJo-};mnL3RH~Vn<8t(q^ml*BY4rU80 z{kBM}Z%^XYe2{F7b4#At=%1)RBEA5cc!E76K1nRo%YE<%ArqWh} z!|l7j5qkE)_wMrkcxAs)O(Lr*Vytia(Kn4C20$NzZi z4x!Fv+xKpskr;S|p}m~(pQft$VaaX={`XOr=2^ z{{8ph(<%N#igg9w*PYugaLipLLfoExBL6j)hlM64_b#h!40!dgPilpz-_l<*W4>Chx7S+2!*fO>5`Z>WezI-;H&5*7Pg<(UT1~Dmed&MXsqk?)x;? z?@Gp6$|o~l3Ai(sTXVdRfAM6m{iLgHo@GA2H(5EDDrF?-KhRSCc;U+GBMYuLA2PKF z`ety4dH#W5JxjYP>0K;${(lz8xLsps)U@zMOL*hi6Gw7m?3S9j#WCBwIud4kMl~{? zr|~>r_w_^4KW;v{dSgq2i07xQhVJyEyG1AZ$8#$7w1jtr9B<#Kdc^#}oTSgIl0(1lU@SR#zjXIm^=MH)@jG)L z_C`LIoAzA7vj0`2R{6`dCEn{72F`wO{%l8NW|I{AjWCCEs_X2O+7>=``|#;|Vt4a& zxt!;Df6S}>b2yjlsbJZp0^7+#a?rY26E|3lGn=beCfNJa~?{b@j+l_}R&97XV znw0N+RDK$dm0940RrB;sHR`YIU+$r4^6*otk-`7fp%d3lU)g+i2fMm;KmW8palJE@ z>O{i&WA# zUfx@o`G>>!fU=B7L%H%k+{_bIbWaBISYR!lMuGS2b(bs-B&&l0|`M;^Ya}Otq}Pr!83VV$Ifb?cWX_uPb@Qy=>;m z2U!*}j$Pa#H#OO1`D;XHPMok!vq^5XNdlM2Jh$*ei^QM)y7;u}@K3)t)>(;$B0P~( z)m3s66g`xTnI`#*c!oEhdsZVf=d4Uh+^>Hg9VyFfF0`~e&y(iKpPDM^`sKh^-L4pi zQw_0ssz)u1Bis@-=0$~@BBrt%PYDaL*$Ik9O-z=4>(*Jzwue{O+PO+Z`Hsq( z<7O_NOHX`RyQ%$%pg;(dpeoy&%K?!*oq}`c{5+)UD>aMx%8n(I5|@SFbbiFaq2=iz z<5Aq>+9`6z&}#Ox>0P&XN(vrb7#ibYy?DhW2YtiqdXvKfUNUvADfg;1W?o}q8@cpr z)#>+h%ev-#yEa`ZoJUW*^Z8-1>Cs18tbZn-;MBUN<{iPH_C-=dV!7m3)uoP04d<@W zlXp8Bv0=-**zy%ZUksI>-YB^zp8WLxBF(h-n-BYSUOv~e%RDv7ZswFyd%I*E&;0+2 zvEo|NXWDYNNxXicIyGgAL;Cc&k)Qtx9{PTC-NrtzRjO^wv;K$AJ@r&=!v48w`T0x% z@4WBErCJHAEK5D~Wh+a(=<%tSdG6ht-jF|a@$Elp(^4OHMcWE(fA)FuvklkI#NF50 y7=QomvgT7~6K?!^_f)<-U+1~?`}O-aUzhha5fKaC{NM^GCwRL0xvX +$ torch-pkg install tutorials unsup optim image + + +All the examples in this section are based on: +''PREFIX/share/torch/lua/tutorials/train-autoencoder.lua'' +If you are comfortable enough with autoencoders, and Torch, you might +want to jump right into the code. 
+ +===== Basic Autoencoder ===== + +An autoencoder is a model that takes a vector input ''y'', maps it into +a hidden representation ''z'' (code) using an encoder which typically has +this form: + +{{auto_encoder.png}} + +where ''s'' is a non-linear activation function (the ''tanh'' function +is a common choice), ''W_e'' the encoding matrix and ''b_e'' a vector +of bias parameters. + +The hidden representation ''z'', often called code, is then mapped back +into the space of ''y'', using a decoder of this form: + +{{auto_decoder.png}} + +where ''W_d'' is the decoding matrix and ''b_d'' a vector of bias +parameters. + +The goal of the autoencoder is to minimize the reconstruction error, +which is represented by a distance between ''y'' and ''y~''. The +most common type of distance is the mean squared error: + +{{mse_loss.png}} + +The code ''z'' typically has less dimensions than ''y'', which forces +the autoencoder to learn a good representation of the data. In its +simplest form (linear), an autoencoder learns to project the data +onto its first principal components. If the code ''z'' has as many +components as ''y'', then no compression is required, and the model +could typically end up learning the identity function. Now if +the encoder has a non-linear form (using a ''tanh'', or using a +multi-layered model), then the autoencoder can learn a potentially +more powerful representation of the data. + +==== Model description ==== + +To describe the model, we use the ''unsup'' package, which provides +templates to build autoencoders. + +The first step is to describe an encoder, which we can do by using +any of the modules available in [[..:nn:index|nn]]: + + +encoder = nn.Sequential() +encoder:add(nn.Linear(inputSize,outputSize)) +encoder:add(nn.Tanh()) + + +The second step is to describe the decoder, a simple linear module: + + +decoder = nn.Sequential() +decoder:add(nn.Linear(outputSize,inputSize)) + + +Finally, we use the built-in AutoEncoder class from unsup, which +automatically provides a mean-square error loss: + + +module = unsup.AutoEncoder(encoder, decoder, params.beta) + + +At this stage, estimating the loss (reconstruction error) can be +done like this, for arbitrary inputs: + + +input = torch.randn(inputSize) +loss = module:updateOutput(input,input) + + +Note that we need to provide the input, and a target that we wish +to reconstruct. In this case the target is the input, but in some +cases, we might want to provide a noisy version of the input, and +force the autoencoder to predict the correct input (this is what +denoising autoencoders do). + +As for any ''nn'' module, gradients can be estimated this way: + + +-- get parameters and gradient pointers +x,dl_dx = module:getParameters() + +-- compute loss +loss = module:updateOutput(inputs[i], targets[i]) + +-- compute gradients wrt input and weights +dl_dx:zero() +module:updateGradInput(input, input) +module:accGradParameters(input, input) + +-- at this stage, dl_dx contains the gradients of the loss wrt +-- the trainable parameters x + + +==== Training ==== + +If you've read the tutorial on supervised learning, training a model +unsupervised is basically equivalent. We first define a closure +that computes the loss, and the gradients of that loss wrt the trainable +parameters, and then pass this closure to one of the optimizers in +''optim''. 
As usual, we use SGD to train autoencoders on large +amounts of data: + + +-- some parameters +local minibatchsize = 50 + +-- parameters +x,dl_dx = module:getParameters() + +-- SGD config +sgdconf = {learningRate = 1e-3} + +-- assuming a table trainData with the form: +-- trainData = { +-- [1] = sample1, +-- [2] = sample2, +-- [3] ... +-- } +for i = 1,#trainData,minibatchsize do + + -- create minibatch of training samples + samples = torch.Tensor(minibatchsize,inputSize) + for i = 1,minibatchsize do + samples[i] = trainData[i] + end + + -- define closure + local feval = function() + -- reset gradient/f + local f = 0 + dl_dx:zero() + + -- estimate f and gradients, for minibatch + for i = 1,minibatchsize do + -- f + f = f + module:updateOutput(samples[i], samples[i]) + + -- gradients + module:updateGradInput(samples[i], samples[i]) + module:accGradParameters(samples[i], samples[i]) + end + + -- normalize + dl_dx:div(minibatchsize) + f = f/minibatchsize + + -- return f and df/dx + return f,dl_dx + end + + -- do SGD step + optim.sgd(feval, x, sgdconf) + +end + + +Ok, that's it, given some training data, this code will loop +over all samples, and minimize the reconstruction error, using +stochastic gradient descent. + +One big shortcoming of basic autoencoders is that it's usually +hard to train them, and hard to avoid getting to close to learning +the identity function. In practice, using a code ''y'' that is smaller +than ''x'' is enough to avoid learning the identity, but it remains +hard to do much better than PCA. + + +Using codes that are overcomplete +(i.e. with more components than the input) makes the problem even +worse. There are different ways that an autoencoder with an overcomplete +code may still discover interesting representations. One common way +is the addition of sparsity: by forcing units of the hidden representation +to be mostly 0s, the autoencoder has to learn a nice distributed representation +of the data. + +In the following section, we present a method to impose sparsity on the code, +which typically allows codes that are overcomplete, sparse, and very useful for +tasks like classification/recognition. + +===== Predictive Sparse Decomposition (PSD) Autoencoder ===== + +Adaptive sparse coding methods learn a possibly overcomplete set of basis +functions, such that natural image patches can be reconstructed by linearly +combining a small subset of these bases. The applicability of these methods +to visual object recognition tasks has been limited because of the prohibitive +cost of the optimization algorithms required to compute the sparse representation. + +In this tutorial we propose a simple and efficient algorithm to learn overcomplete +basis functions, by introducing a particular form of autoencoder. After training, +the model also provides a fast and smooth approximator to the optimal representation, +achieving even better accuracy than exact sparse coding algorithms on visual object +recognition tasks. + +==== Sparse Coding ==== + +Finding a representation ''z'' in ''R^m'' for a given signal ''y'' in ''R^n'' +by linear combination of an overcomplete set of basis vectors, columns +of matrix ''B'' with m > n, has infinitely many solutions. In +optimal sparse coding, the problem is formulated as: + +{{sparse_coding.png}} + +where the l0 norm is defined as the number of non-zero elements in a given +vector. 
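+(Since that formula only appears as an image here, a rough plain-text
+rendering of it is: find the code z that minimizes ||y - B*z||^2 subject
+to ||z||_0 <= k, i.e. reconstruct y using at most k of the basis vectors;
+the exact form shown in the figure may differ slightly.)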
This is a combinatorial problem, and a common approximation of it +is the following optimization problem: + +{{sparse_coding_optim.png}} + +This particular formulation, called Basis Pursuit Denoising, can be seen as +minimizing an objective that penalizes the reconstruction error using a +linear basis set and the sparsity of the corresponding representation. While +this formulation is nice, inference requires running some sort of +iterative minimization algorithm that is always computationally +expensive. In the following we present a predictive version of this algorithm, +based on an autoencoder formulation, which yields fixed-time, and +fast inference. + +==== Linear PSD ==== + +In order to make inference efficient, we train a non-linear regressor that +maps input signals ''y'' to sparse representations ''z''. We consider the +following nonlinear mapping: + +{{psd_encoder.png}} + +where ''W'' is a weight trainable matrix, ''d'' a trainable vector of +biases, and ''g'' a vector of gains. We want to train this nonlinear mapping +as a predictor for the optimal solution to the sparse coding algorithm +presented in the previsous section. + +The following loss function, called predictive sparse decomposition, can +help us achieve such a goal: + +{{psd_loss.png}} + +The first two terms are the basic sparse coding presented above, while the +3rd term is our predictive sparse regressor. Minimizing this loss yields +an encoder that produces sparse decompositions of the input signal. + +With the ''unsup'' package, this can be implemented very simply. + +We define an encoder first: + + +encoder = nn.Sequential() +encoder:add(nn.Linear(inputSize,outputSize)) +encoder:add(nn.Tanh()) +encoder:add(nn.Diag(outputSize)) + + +Then the decoder is the L1 solution presented above: + + +decoder = unsup.LinearFistaL1(inputSize, outputSize, params.lambda) + + +Under the hood, this decoder relies on FISTA to find the optimal sparse +code. FISTA is available in the ''optim'' package. + +Finally, both modules can be packaged together into an autoencoder. We +can't use the basic AutoEncoder class to do this, because the +LinearFistaL1 decoder is a bit peculiar. Insted, we use a special-purpose +PSD container: + + +module = unsup.PSD(encoder, decoder) + + +==== Convolutional PSD ==== + +For vision/image applications, fully connected linear autoencoders +are often overkill, in their number of trainable parameters. Using +convolutional filters, inspired by convolutional networks (see supervised +learning tutorial on ConvNets) can help learn much better filters for +vision. + +A convolutional version of the PSD autoencoder can be derived by simply +replacing the encoder and decoder by convolutional counterparts: + + +-- connection table: +conntable = nn.tables.full(1, 32) + +-- decoder's table: +local decodertable = conntable:clone() +decodertable[{ {},1 }] = conntable[{ {},2 }] +decodertable[{ {},2 }] = conntable[{ {},1 }] +local outputSize = conntable[{ {},2 }]:max() + +-- encoder: +encoder = nn.Sequential() +encoder:add(nn.SpatialConvolutionMap(conntable, 5, 5)) +encoder:add(nn.Tanh()) +encoder:add(nn.Diag(outputSize)) + +-- decoder is L1 solution: +decoder = unsup.SpatialConvFistaL1(decodertable, 5, 5, 25, 25) + + +==== Training ==== + +Training is done with the exact same procedure as presented above, for +the basic autoencoder. 
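+For reference, a minimal sketch of that loop, applied to the PSD module
+defined above, could look like this (it simply reuses the closure-based SGD
+recipe from the basic autoencoder section; ''trainData'' and
+''minibatchsize'' are assumed to be defined as before, with each sample
+shaped to match the encoder's input):
+
+-- flattened parameters of the PSD module (encoder + decoder)
+x,dl_dx = module:getParameters()
+
+-- SGD config
+sgdconf = {learningRate = 1e-3}
+
+for i = 1,#trainData-minibatchsize+1,minibatchsize do
+
+   -- closure: average PSD loss and gradients over one minibatch
+   local feval = function()
+      local f = 0
+      dl_dx:zero()
+      for j = 0,minibatchsize-1 do
+         local sample = trainData[i+j]
+         -- updateOutput returns the PSD loss for this sample
+         -- (reconstruction + sparsity + prediction terms)
+         f = f + module:updateOutput(sample, sample)
+         module:updateGradInput(sample, sample)
+         module:accGradParameters(sample, sample)
+      end
+      dl_dx:div(minibatchsize)
+      return f/minibatchsize, dl_dx
+   end
+
+   -- one SGD step on this minibatch
+   optim.sgd(feval, x, sgdconf)
+end
+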
+ + + + + + + + + + + + + + + + diff --git a/dok/unsupervised/mse_loss.png b/dok/unsupervised/mse_loss.png new file mode 100644 index 0000000000000000000000000000000000000000..09a1b1be1251de500d9275b1dbb36b03d23c18ee GIT binary patch literal 1636 zcmV-q2AlbbP)xKL&pd<+n|tGWw#@6)`eB#;X)XKkQ@DG+%D; zV_X4IYu}PIjhPES)hjljWtV%NoyIGDSJf2i`rY>dLZ{MA`~H2n^$xFBVTb6^USSlt zv3EluVw%e`z2;8}duVV*n=yJ$8)87o_F%A>GPMGQuU{VaV5oloq}Nos`PNpYZi17M zeUKjI6-6=TL7DQrwo=pzG2%HhJXX31C8aVWomVwwKx92w?Qeq@{FMRyjzZn@+zM3Q zuGOwWyeig^9`TB!$b%C0mJA);Z9$3|9bj9XWxMwd&#haGZ&R-VK!R5)Z%z4f)gK)5 zkskI+qR4|XmO9Sej*0E&VKXdnhEZZ=O4eddS_-eUoPc9Dh3Z~=Y|dQ9mOS$ZIIwf7B6^zq zTyEHz9_1CP#6cPIc&@>};e}liMdvD$zw`Q0PR3zN$GbvSAf~2KwTl`0uWUIC6&#F2 z`<_eV(y}Z=x2El%`zxYfAMZkORLS(zQQ*G$PMb~`T_1@yOQP&dLy?WogMQ>V!e&ZY z_SsYnie(K1W}Y^sjy{YxS_)y^%%MV0A{wRyl^L{+k%|BdJJ7C4(k0Q|9+&@+lR=h) zk&6T#96E8|+!N98?UIN$rpAHRhPQViOCfH9>stuoI4q+;G!;xvXJm7eKE4EXuN1CQL4=W1k_EXn_+g2Q*6fMycHR` zCK_T(Dnu@xvT|3<6LceQqR5Byw5)i_;)l5He0F5lC z@eF?R2wrQs5<0zl50mf5mA+T5RM>Y~uD*{hqey={OdVu*Tv2Ytp*5ZK7lJiav?O>V zTcD(=Q6lT;%3Od4HNk5w2SDk%{xeB%Kd2Es;#D0*+!ez*%OoPgbtnHRcIkwe(X!_v z8YQxPWqvLQZv?NkT#C4V_Mc&ChUpQn4x*r~uaws977XkoakT#Lzu@w(JlLrF2xE{{L%l z61Ji`)4AsyYt}?VlWmhwN?nS-XnsNQCq^4wzv-j0rH3K88g=9I=a1GaJ^tKGYhjuy z&F=wk@ftfs_-?B_etH;|5X_+zq?$umZ2gwmccnI0D}VfSEWwmU1UO(o_TZE+b?YtO z;iZC*V=ITyFMh|A_%hIt`STH3y01g5`9{G6N& z{4<;r!woqV-pS3uIiYw=aE4Lx^1#ZYi2Px6y51wy9@v}9^7PF$Ke|amn7TaMu(Av5 zf^m|;Hs@Xf%)8g92ltAIj5+H7o?!r1f>RDCBkYiK!K?ke_~b86{Gk0xCS-k0fvZxq zp*?w)Ac2Y&1e4>pEYB9I&17$s5T*^SSe4e7rFf-qVSSjrr-<~D&-u@8tD#xkR!Q!r ztYgLY;gkb9yfE0)IfD#5`y_hahcJZz_+{DC8(?j#V1?Of!EjeAh zF+DR@_|P(}6b~50E}LvHTNbY-^FmWI*#DoCIuat=vKkhy`@AvZ9aGBpo!PUC{fq2I`knH(~6M-G7B z!RZa?LQWTN&d1861^w!0vFV*eKaoiQoa9HwhkDO=w1#8x|SRQUgOoaTJQ1WRu*ZV>5?4m%P~^m zrIo$b$6w`e(HedKjIZ*3uX;e}bN5v)?TnXFLU}nAuSGa*=Y^_cRQQ$l47(GxTuQ>C zru^@07*qoM6N<$ Eg2r`=LI3~& literal 0 HcmV?d00001 diff --git a/dok/unsupervised/psd_loss.png b/dok/unsupervised/psd_loss.png new file mode 100644 index 0000000000000000000000000000000000000000..2d4c9d3a95124a979724befbc55f7236411dffe3 GIT binary patch literal 2444 zcmb7GS5y-U5{(T*5=xXVPauGXjz9#FoTxz9mdJ@`1qKc{$IBB5Ch>B2u3*AI06hV+H~O-N{Bi9cQ#b(Kc3aw z2m=6s|F%FGJKa9B?mkVeQH1nX!Kj*SZ!n&p?oo zUf3i2e&=8dUx65a-DO|U@FULam5-ctkYZ$sHgU)twjrLAxBV#s zQ%yWjEJq=GPe2XIUNsFb)@B~+nEeSLXiN3uIL|C5LNC{U0_&8x>_)Ixr_gX~7A!@@ z)rOt8u=pe|OE2Gg@o1bWAl^?LzP>IK20X(Qkpn?1xSU$`cG=6XY;*(OM#rknZCNBW z+{0~rnINOGn>EF|KcCDZ=4)|QM?;-JVDD2p*`=bH4+ibvV$sR3t9=-5JGZh{`~R@0 z?jMj5!g@S5N0k$Azf?%ex`r-(T`0agssBi}-Ip-~Qm~kCN(jUpX!@zcf22XgY=El_ zJkK&BjB z0-ZdUqS4B}X?J`%GN@SdmG>`iq+0wV&n=3QxezRsDDS5% zZ%MPV%wTtViX5LXIUPw=(f;@>Bzz!p%lPt>dRfpmKS4ocW{%a8ClgJ+?)2_`8J3Y= zsp4fp=sp@rKTk6S*0j18Pip7bXKD9*9*fx!&Fz_HMd_9`9~^8NB>2hn!~fDRH%@-h zAcGgQ|K;!hsU{49MT*iKOAJ0V8TIkcH~it(F?qJ`!{DJ~)xM$}FIv(Hn90is&NCxI>kJz)GAb9_@26rEtu>tT)^g@FMeal4 zRA8p;A*?gV2CSqlA&I^7{y_qA;1sJ!lq`|+0JCphLt;4wq+FYVbUOyyr0#E1@jVG z6^q$`yC~j14;GysTdw^2ONP`8%e_Jhbu*Wd)o2x z?YT$k2vsP4y0I^orrM)$hMEzbd@3qYJp}mfQjd~Lyu80~q5C|p%u*dZa3eSJh^Qn3Oa0*QM%K<4V?krROx5?}B4bsoQ+<}qdAgiSS+YX*YuWcPlZO){$vc}A}gml;V zc>oPzq#XrT0GYN1^UGJKUZFs(%c`Tvsr0b4jYki?Jv6Cr;a?-{lZ(Rt2kGARQuIf7 z@pb(&rQQ>*0~LOji8Z&ZP9h9H4;?Kz+{up2{Zp~?{5lsyp6% zYi>YLaa{~mA?nSYvL-o{mPvSFtZhMHIm{Na9un1Ec*pU9r&BjI9PD{&r$Yl%o@HCYxRh3%Rv7V zQ4|%C=0Ise6p2(Kz7^_w)~3{s5`)S^Q^B?oCk?11sBJsluhJ&zD9PDEiFc4;A*uOf z?Mgbqg$gWyv@3lNLe)t(3VI)YvQt-#=9~XzxH-{vp7bT5@$i)Ek$V3wp-kIfY9OBZ zR!@Rw5+asoe-1V4#6=gt(AUd8)Tce~tcOw*AGip})0Tg_Vx~O5Hir(}HoWSflNJHJ z8;j!AbZ6b9fa~4HJgU8~dBU&sin!9sV#;Gj$OGPT8qfr!Ow||`Y_5#L8fs0jb{5wD zoqG368h%jw+tH0gDM`lBfHUrsm1=h 
zgT3wU=A<_w`+Je!f5A#bhS`nF?{JAL?0uiuAgRyhon-`Nw(n9l$!)wx-E&pcyidrC z^EJ1NPB61@w%x?B+ z-W6@+2_mbYU0(M#w}Ia?T2LBA^s}X>_4r`i8?FnwWbTf_c4^uD-E<(21Kp={R*!{Y z`4g87koNC3C1|8*$z#g1k4HO9O4HI=6@R`%XXaklL`LqV0;hGvLgFG{&P^?rZ>7JMD1Fw!tRZh zX^Gv&QT>2`0=E2KNsRSPP0oRBZhJNF{X*nJg$ig2|H~49Y2wsywz{E zFL$m?Vq{{Zi~S&>Jw<#YY=Zqmy#DWB+ZYqKE}xf%S*NXH$uF8#|NBr`nA)J~Ol~Ir E2XE_cJpcdz literal 0 HcmV?d00001 diff --git a/dok/unsupervised/sparse_coding.png b/dok/unsupervised/sparse_coding.png new file mode 100644 index 0000000000000000000000000000000000000000..7c43888bfc23c7337d7b45a4f0d27a1c741e212d GIT binary patch literal 1031 zcmeAS@N?(olHy`uVBq!ia0vp^7l2rjgBeJc&ke8wQpN#3A+G=b{|7Qj0v{WEcz_xN zN`m}?8Tjhr`u+0c?%U1Ze_rqX`eWu7{xL8x7kj!mhD30ljkw*kSb>MtW#7Ko)y&A+;;~IROrkeBsaWx-zzh~;_xZZk zoJ!)a$~X>*FP$*2CgnM|y!px(h2N)Ecd+eUA-P6gTS!r~l(&3^a%@$rnOLgOij=6C z>Nf)v{#_7`3=8%vx6si!bLmxZgGY0=dEwy%CcRdE8By_1cbHupl#U$VcxWS6lgbxOn#CCYCRC`xlx18AP^>^3w z`wIO%q-a#RBw(-m_t-eaLXRH_6^@!q>!L-DWy(qMv}Uz?%x~J|7Witt+y1z%@u!%q zN+!9b7|mDjn*VIV9;e{CK0dpe{Z}4eY*=cuLQ2_4wA7B@s+IX}u7S|SR=X7btIQ&g zGU2v5MHBCgam#UA!+FIoxxKCIZY zYO9{>n~)C~#_u*LPhHP@uJN32m*i>3|1N(D?OP8$)4ZzWf=@S>{o%$oDsyfeZv$<>t ztudLa@!hL9x71{d#L7g63-_H@Yzopr0CE__^8f$< literal 0 HcmV?d00001 diff --git a/dok/unsupervised/sparse_coding_optim.png b/dok/unsupervised/sparse_coding_optim.png new file mode 100644 index 0000000000000000000000000000000000000000..0d147375adf37951cac4bf9618249a0331639e59 GIT binary patch literal 1569 zcmV++2HyFJP)1-2E`4v^8Qcz zhb;+0FsPr!al;5y5l&u+}FBXFJLGk&(tqiU}k_eVKCf|(wKXiW1*A-HVTM=+CS z7NQ?78+Vc6y&KJW?i90i1vA_K>ef!uW)QgmZdV3zbFK!rA<#E1IC_* z2j?cCCAI6_)4_Witvoa9J+Ih|eOsMF?hGGaQ(DBI?#xrgY!Q>z0%U3EHI+!1!WNti z#$A^gsDs<7cDrT^&rBQdU*KWR-(Hd->UJKh&lI!8BuxTjZHk$!#8Sf>Q^d_gRxIqD zgLM^goUc0XLMm*G=T@)yYTeGNJ|yOf@d53xSqu!GtibDPo7`A*)NLuKA)QUZZRg1y z1@@rkV|)O?BGU0U7qW`p7N5yRo*Ov((VSVdqrBwK6$`YFNh{f^n9U0-Ltj3_zkXn5+>PBF3yT$2TPnJv==_ zI}em+!V1OkJlKcQG8Xr>irK2D^3Aw+WX33rr-wGSs0lzay%oq}ktB3D0-yoQIOXQ|k|QfD9Sy3#)@G1K<$(qTzL+i}l$9zBP*jV~r_9Kc-RZ!5#u@OkFS!Sw(KWO@Vg2Tc0uIUSR!y zu9(?Jaa8M{<(TQsR{L{B#gmjEgHBH+J0VJ#@|5T38@G>36M6qE$1Ls%cSqcNGLuH1FIioP zqPyfbC~NouX6O1E-7pS@^*71RRQVA~`_D)ktjYUlwZ29oD&fW_oSIcVX zk7oA(6dER{`e&`pmFq&cEV3w7o=ZRXouonDiFx)ME3D$&#KfryX2SJphepit_9$lb zUB-{ei)dkXu1^aZU`+Aq!0k+w7vj$m*#64?*;5gWo2}*46foEkheniqfa z_W*y%ZwNv`YW%sUYI!OF1>eENg0Vt}}CgUOZjt3=p%8Lq96N>cmGc ze4?~k|Lmze pixel period = period * size diff --git a/linear-regression/example-linear-regression.lua b/linear-regression/example-linear-regression.lua new file mode 100644 index 0000000..31e25c4 --- /dev/null +++ b/linear-regression/example-linear-regression.lua @@ -0,0 +1,235 @@ +---------------------------------------------------------------------- +-- example-linear-regression.lua +-- +-- This script provides a very simple step-by-step example of +-- linear regression, using Torch7's neural network (nn) package, +-- and the optimization package (optim). +-- + +-- note: to run this script, simply do: +-- torch script.lua + +-- to run the script, and get an interactive shell once it terminates: +-- torch -i script.lua + +-- we first require the necessary packages. +-- note: optim is a 3rd-party package, and needs to be installed +-- separately. This can be easily done using Torch7's package manager: +-- torch-pkg install optim + +require 'torch' +require 'optim' +require 'nn' + + +---------------------------------------------------------------------- +-- 1. Create the training data + +-- In all regression problems, some training data needs to be +-- provided. 
In a realistic scenarios, data comes from some database +-- or file system, and needs to be loaded from disk. In that +-- tutorial, we create the data source as a Lua table. + +-- In general, the data can be stored in arbitrary forms, and using +-- Lua's flexible table data structure is usually a good idea. +-- Here we store the data as a Torch Tensor (2D Array), where each +-- row represents a training sample, and each column a variable. The +-- first column is the target variable, and the others are the +-- input variables. + +-- The data are from an example in Schaum's Outline: +-- Dominick Salvator and Derrick Reagle +-- Shaum's Outline of Theory and Problems of Statistics and Economics +-- 2nd edition +-- McGraw-Hill +-- 2002 + +-- The data relate the amount of corn produced, given certain amounts +-- of fertilizer and insecticide. See p 157 of the text. + +-- In this example, we want to be able to predict the amount of +-- corn produced, given the amount of fertilizer and intesticide used. +-- In other words: fertilizer & insecticide are our two input variables, +-- and corn is our target value. + +-- {corn, fertilizer, insecticide} +data = torch.Tensor{ + {40, 6, 4}, + {44, 10, 4}, + {46, 12, 5}, + {48, 14, 7}, + {52, 16, 9}, + {58, 18, 12}, + {60, 22, 14}, + {68, 24, 20}, + {74, 26, 21}, + {80, 32, 24} +} + + +---------------------------------------------------------------------- +-- 2. Define the model (predictor) + +-- The model will have one layer (called a module), which takes the +-- 2 inputs (fertilizer and insecticide) and produces the 1 output +-- (corn). + +-- Note that the Linear model specified below has 3 parameters: +-- 1 for the weight assigned to fertilizer +-- 1 for the weight assigned to insecticide +-- 1 for the weight assigned to the bias term + +-- In some other model specification schemes, one needs to augment the +-- training data to include a constant value of 1, but this isn't done +-- with the linear model. + +-- The linear model must be held in a container. A sequential container +-- is appropriate since the outputs of each module become the inputs of +-- the subsequent module in the model. In this case, there is only one +-- module. In more complex cases, multiple modules can be stacked using +-- the sequential container. + +-- The modules are all defined in the neural network package, which is +-- named 'nn'. + +model = nn.Sequential() -- define the container +ninputs = 2; noutputs = 1 +model:add(nn.Linear(ninputs, noutputs)) -- define the only module + + +---------------------------------------------------------------------- +-- 3. Define a loss function, to be minimized. + +-- In that example, we minimize the Mean Square Error (MSE) between +-- the predictions of our linear model and the groundtruth available +-- in the dataset. + +-- Torch provides many common criterions to train neural networks. + +criterion = nn.MSECriterion() + + +---------------------------------------------------------------------- +-- 4. Train the model + +-- To minimize the loss defined above, using the linear model defined +-- in 'model', we follow a stochastic gradient descent procedure (SGD). + +-- SGD is a good optimization algorithm when the amount of training data +-- is large, and estimating the gradient of the loss function over the +-- entire training set is too costly. 
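+-- (Illustration only: on this tiny 10-sample dataset, the full-batch
+-- average loss of the still-untrained model can of course be computed
+-- directly; this is the quantity that SGD approximates one sample at
+-- a time.)
+untrained_loss = 0
+for i = 1,(#data)[1] do
+   local prediction = model:forward(data[i][{ {2,3} }])
+   untrained_loss = untrained_loss + criterion:forward(prediction, data[i][{ {1} }])
+end
+print('average loss before training = ' .. untrained_loss / (#data)[1])
+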
+ +-- Given an arbitrarily complex model, we can retrieve its trainable +-- parameters, and the gradients of our loss function wrt these +-- parameters by doing so: + +x, dl_dx = model:getParameters() + +-- In the following code, we define a closure, feval, which computes +-- the value of the loss function at a given point x, and the gradient of +-- that function with respect to x. x is the vector of trainable weights, +-- which, in this example, are all the weights of the linear matrix of +-- our model, plus one bias. + +feval = function(x_new) + -- set x to x_new, if differnt + -- (in this simple example, x_new will typically always point to x, + -- so the copy is really useless) + if x ~= x_new then + x:copy(x_new) + end + + -- select a new training sample + _nidx_ = (_nidx_ or 0) + 1 + if _nidx_ > (#data)[1] then _nidx_ = 1 end + + local sample = data[_nidx_] + local target = sample[{ {1} }] -- this funny looking syntax allows + local inputs = sample[{ {2,3} }] -- slicing of arrays. + + -- reset gradients (gradients are always accumulated, to accomodate + -- batch methods) + dl_dx:zero() + + -- evaluate the loss function and its derivative wrt x, for that sample + local loss_x = criterion:forward(model:forward(inputs), target) + model:backward(inputs, criterion:backward(model.output, target)) + + -- return loss(x) and dloss/dx + return loss_x, dl_dx +end + +-- Given the function above, we can now easily train the model using SGD. +-- For that, we need to define four key parameters: +-- + a learning rate: the size of the step taken at each stochastic +-- estimate of the gradient +-- + a weight decay, to regularize the solution (L2 regularization) +-- + a momentum term, to average steps over time +-- + a learning rate decay, to let the algorithm converge more precisely + +sgd_params = { + learningRate = 1e-3, + learningRateDecay = 1e-4, + weightDecay = 0, + momentum = 0 +} + +-- We're now good to go... all we have left to do is run over the dataset +-- for a certain number of iterations, and perform a stochastic update +-- at each iteration. The number of iterations is found empirically here, +-- but should typically be determinined using cross-validation. + +-- we cycle 1e4 times over our training data +for i = 1,1e4 do + + -- this variable is used to estimate the average loss + current_loss = 0 + + -- an epoch is a full loop over our training data + for i = 1,(#data)[1] do + + -- optim contains several optimization algorithms. + -- All of these algorithms assume the same parameters: + -- + a closure that computes the loss, and its gradient wrt to x, + -- given a point x + -- + a point x + -- + some parameters, which are algorithm-specific + + _,fs = optim.sgd(feval,x,sgd_params) + + -- Functions in optim all return two things: + -- + the new x, found by the optimization method (here SGD) + -- + the value of the loss functions at all points that were used by + -- the algorithm. SGD only estimates the function once, so + -- that list just contains one value. + + current_loss = current_loss + fs[1] + end + + -- report average error on epoch + current_loss = current_loss / (#data)[1] + print('current loss = ' .. current_loss) + +end + + +---------------------------------------------------------------------- +-- 5. Test the trained model. + +-- Now that the model is trained, one can test it by evaluating it +-- on new samples. 
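+-- For instance, a prediction for a made-up new field that received 20 units
+-- of fertilizer and 10 of insecticide (the input order must match training:
+-- fertilizer first, then insecticide) can be obtained with:
+newInput = torch.Tensor{20, 10}
+print('predicted corn for {fertilizer=20, insecticide=10}:')
+print(model:forward(newInput))
+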
+ +-- The text solves the model exactly using matrix techniques and determines +-- that +-- corn = 31.98 + 0.65 * fertilizer + 1.11 * insecticides + +-- We compare our approximate results with the text's results. + +text = {40.32, 42.92, 45.33, 48.85, 52.37, 57, 61.82, 69.78, 72.19, 79.42} + +print('id approx text') +for i = 1,(#data)[1] do + local myPrediction = model:forward(data[i][{{2,3}}]) + print(string.format("%2d %6.2f %6.2f", i, myPrediction[1], text[i])) +end + diff --git a/load-data/load-images.lua b/load-data/load-images.lua new file mode 100644 index 0000000..99f6315 --- /dev/null +++ b/load-data/load-images.lua @@ -0,0 +1,79 @@ +---------------------------------------------------------------------- +-- load-images.lua +-- +-- This script shows how to load images from a directory, and sort +-- the files according to their name + +-- This mostly demonstrates how to use Lua's table data structure, +-- and interact with the file system. + +-- note: to run this script, simply do: +-- torch load-images.lua + +-- By default, the script loads jpeg images. You can change that +-- by specifying another extension: +-- torch load-images.lua --ext png + +require 'torch' +require 'xlua' +require 'image' + +---------------------------------------------------------------------- +-- 1. Parse command-line arguments + +op = xlua.OptionParser('load-images.lua [options]') +op:option{'-d', '--dir', action='store', dest='dir', help='directory to load', req=true} +op:option{'-e', '--ext', action='store', dest='ext', help='only load files of this extension', default='jpg'} +opt = op:parse() +op:summarize() + +---------------------------------------------------------------------- +-- 2. Load all files in directory + +-- We process all files in the given dir, and add their full path +-- to a Lua table. + +-- Create empty table to store file names: +files = {} + +-- Go over all files in directory. We use an iterator, paths.files(). +for file in paths.files(opt.dir) do + -- We only load files that match the extension + if file:find(opt.ext .. '$') then + -- and insert the ones we care about in our table + table.insert(files, paths.concat(opt.dir,file)) + end +end + +-- Check files +if #files == 0 then + error('given directory doesnt contain any files of type: ' .. opt.ext) +end + +---------------------------------------------------------------------- +-- 3. Sort file names + +-- We sort files alphabetically, it's quite simple with table.sort() + +table.sort(files, function (a,b) return a < b end) + +print('Found files:') +print(files) + +---------------------------------------------------------------------- +-- 4. 
Finally we load images + +-- Go over the file list: +images = {} +for i,file in ipairs(files) do + -- load each image + table.insert(images, image.load(file)) +end + +print('Loaded images:') +print(images) + +-- Display a of few them +for i = 1,math.min(#files,10) do + image.display{image=images[i], legend=files[i]} +end diff --git a/logistic-regression/example-logistic-regression.csv b/logistic-regression/example-logistic-regression.csv new file mode 100644 index 0000000..e5432f7 --- /dev/null +++ b/logistic-regression/example-logistic-regression.csv @@ -0,0 +1,736 @@ +"num","brand","female","age" +"1","1",0,24 +"2","1",0,26 +"3","1",0,26 +"4","1",1,27 +"5","1",1,27 +"6","3",1,27 +"7","1",0,27 +"8","1",0,27 +"9","1",1,27 +"10","1",0,27 +"11","1",0,27 +"12","1",1,27 +"13","2",1,28 +"14","3",1,28 +"15","1",1,28 +"16","1",0,28 +"17","1",0,28 +"18","2",1,28 +"19","1",0,28 +"20","1",0,28 +"21","1",1,28 +"22","1",1,28 +"23","3",0,28 +"24","1",1,28 +"25","3",0,28 +"26","1",1,28 +"27","1",1,28 +"28","1",1,29 +"29","1",1,29 +"30","1",1,29 +"31","2",1,29 +"32","1",1,29 +"33","2",1,29 +"34","2",0,29 +"35","2",1,29 +"36","1",1,29 +"37","1",0,29 +"38","1",0,29 +"39","1",0,29 +"40","1",1,29 +"41","2",1,29 +"42","1",0,29 +"43","1",0,29 +"44","1",1,29 +"45","1",0,29 +"46","1",0,29 +"47","2",1,30 +"48","3",0,30 +"49","3",1,30 +"50","1",0,30 +"51","3",1,30 +"52","2",1,30 +"53","1",0,30 +"54","1",1,30 +"55","1",1,30 +"56","1",1,30 +"57","1",1,30 +"58","3",1,30 +"59","1",1,30 +"60","1",1,30 +"61","2",1,30 +"62","1",1,30 +"63","2",0,30 +"64","2",0,30 +"65","2",1,30 +"66","1",1,30 +"67","1",1,30 +"68","1",1,30 +"69","1",0,30 +"70","1",1,31 +"71","1",0,31 +"72","3",1,31 +"73","2",1,31 +"74","2",0,31 +"75","2",1,31 +"76","3",1,31 +"77","1",0,31 +"78","2",1,31 +"79","2",1,31 +"80","1",0,31 +"81","1",0,31 +"82","1",1,31 +"83","2",0,31 +"84","3",1,31 +"85","3",0,31 +"86","1",0,31 +"87","1",1,31 +"88","1",1,31 +"89","3",0,31 +"90","2",0,31 +"91","1",0,31 +"92","2",1,31 +"93","1",1,31 +"94","3",1,31 +"95","1",0,31 +"96","1",1,31 +"97","2",1,31 +"98","1",0,31 +"99","3",1,31 +"100","1",0,31 +"101","2",1,31 +"102","2",0,31 +"103","1",0,31 +"104","2",1,31 +"105","1",1,31 +"106","2",1,31 +"107","2",0,31 +"108","1",1,31 +"109","1",1,31 +"110","2",0,32 +"111","2",1,32 +"112","1",1,32 +"113","2",0,32 +"114","2",0,32 +"115","2",1,32 +"116","2",1,32 +"117","1",1,32 +"118","1",0,32 +"119","2",1,32 +"120","2",1,32 +"121","3",1,32 +"122","2",1,32 +"123","2",0,32 +"124","2",1,32 +"125","1",1,32 +"126","1",1,32 +"127","1",1,32 +"128","1",1,32 +"129","3",1,32 +"130","2",1,32 +"131","1",1,32 +"132","2",1,32 +"133","1",1,32 +"134","2",1,32 +"135","2",0,32 +"136","1",1,32 +"137","3",1,32 +"138","2",1,32 +"139","3",1,32 +"140","1",1,32 +"141","2",1,32 +"142","1",1,32 +"143","1",0,32 +"144","2",1,32 +"145","2",0,32 +"146","2",1,32 +"147","3",1,32 +"148","3",1,32 +"149","2",1,32 +"150","3",0,32 +"151","1",0,32 +"152","2",1,32 +"153","1",1,32 +"154","2",1,32 +"155","2",1,32 +"156","2",1,32 +"157","2",1,32 +"158","1",1,32 +"159","1",0,32 +"160","2",0,32 +"161","2",0,32 +"162","2",0,32 +"163","1",1,32 +"164","3",0,32 +"165","2",0,32 +"166","1",1,32 +"167","3",1,32 +"168","2",1,32 +"169","1",1,32 +"170","3",1,32 +"171","1",1,32 +"172","3",1,32 +"173","1",0,32 +"174","2",1,32 +"175","2",1,32 +"176","1",1,32 +"177","2",0,32 +"178","2",1,32 +"179","2",0,32 +"180","3",1,32 +"181","2",1,32 +"182","2",1,32 +"183","3",1,32 +"184","2",1,32 +"185","2",1,32 +"186","3",0,32 +"187","2",1,32 +"188","2",1,32 +"189","1",1,32 +"190","2",1,32 +"191","2",1,32 
+"192","1",1,32 +"193","1",0,32 +"194","1",1,32 +"195","1",0,32 +"196","3",1,32 +"197","2",1,32 +"198","3",1,32 +"199","3",1,32 +"200","1",1,32 +"201","2",1,32 +"202","1",1,32 +"203","1",1,32 +"204","2",0,32 +"205","2",1,32 +"206","1",1,32 +"207","2",0,32 +"208","2",1,32 +"209","1",0,32 +"210","2",0,32 +"211","3",1,32 +"212","1",0,32 +"213","1",0,32 +"214","1",1,32 +"215","1",1,32 +"216","2",0,32 +"217","3",1,32 +"218","1",0,32 +"219","2",1,32 +"220","2",1,32 +"221","2",1,32 +"222","2",0,32 +"223","2",0,32 +"224","3",0,32 +"225","1",1,32 +"226","2",1,32 +"227","2",0,32 +"228","2",0,32 +"229","2",1,32 +"230","1",0,32 +"231","2",1,32 +"232","2",1,32 +"233","1",1,32 +"234","2",0,32 +"235","2",0,32 +"236","3",1,32 +"237","1",0,32 +"238","3",1,32 +"239","3",0,32 +"240","2",1,32 +"241","1",1,32 +"242","2",1,32 +"243","2",1,32 +"244","2",0,32 +"245","2",0,32 +"246","3",0,32 +"247","1",1,32 +"248","1",1,32 +"249","1",1,32 +"250","3",0,32 +"251","2",0,32 +"252","1",1,32 +"253","2",1,32 +"254","1",0,32 +"255","1",1,32 +"256","2",1,32 +"257","2",0,32 +"258","2",1,32 +"259","2",1,32 +"260","2",1,32 +"261","1",0,32 +"262","1",0,32 +"263","2",0,32 +"264","2",1,32 +"265","2",1,32 +"266","1",0,32 +"267","3",0,32 +"268","2",1,32 +"269","2",1,32 +"270","2",1,32 +"271","3",1,32 +"272","1",1,32 +"273","2",0,32 +"274","2",1,32 +"275","2",1,32 +"276","1",1,32 +"277","3",1,32 +"278","3",0,32 +"279","1",1,32 +"280","2",0,32 +"281","1",1,32 +"282","2",0,32 +"283","1",0,32 +"284","3",1,32 +"285","2",1,32 +"286","1",1,32 +"287","1",0,32 +"288","2",0,32 +"289","1",1,32 +"290","2",0,32 +"291","2",1,32 +"292","1",0,32 +"293","1",0,32 +"294","1",0,32 +"295","2",1,32 +"296","1",1,32 +"297","2",1,32 +"298","1",1,32 +"299","2",0,32 +"300","1",1,32 +"301","2",1,32 +"302","2",1,32 +"303","2",1,32 +"304","2",1,32 +"305","3",1,32 +"306","3",1,32 +"307","3",1,32 +"308","1",1,32 +"309","2",1,32 +"310","2",1,32 +"311","2",0,32 +"312","3",0,32 +"313","3",0,32 +"314","1",0,32 +"315","1",0,32 +"316","2",1,32 +"317","1",0,32 +"318","1",1,32 +"319","3",0,32 +"320","1",0,32 +"321","2",0,32 +"322","2",0,32 +"323","1",0,32 +"324","2",1,32 +"325","1",1,32 +"326","2",0,32 +"327","1",0,32 +"328","2",1,32 +"329","2",1,32 +"330","2",0,32 +"331","2",1,32 +"332","2",1,32 +"333","1",0,32 +"334","2",1,32 +"335","2",1,32 +"336","1",0,32 +"337","1",1,32 +"338","1",0,32 +"339","1",0,32 +"340","2",0,32 +"341","3",1,32 +"342","2",1,32 +"343","3",1,32 +"344","2",1,32 +"345","1",0,32 +"346","1",0,32 +"347","3",0,32 +"348","1",1,32 +"349","2",1,32 +"350","2",1,32 +"351","2",1,32 +"352","1",0,32 +"353","3",0,32 +"354","2",1,32 +"355","2",1,32 +"356","2",1,32 +"357","2",1,32 +"358","2",1,32 +"359","2",1,32 +"360","3",1,32 +"361","1",1,32 +"362","3",0,32 +"363","1",1,32 +"364","2",1,32 +"365","2",0,32 +"366","2",0,32 +"367","3",0,32 +"368","2",1,32 +"369","3",1,32 +"370","1",0,32 +"371","1",0,32 +"372","2",1,32 +"373","1",0,32 +"374","3",1,32 +"375","1",0,32 +"376","2",1,32 +"377","1",1,32 +"378","1",1,32 +"379","2",1,32 +"380","2",1,32 +"381","2",0,32 +"382","1",0,32 +"383","2",1,32 +"384","3",1,32 +"385","2",1,32 +"386","2",1,32 +"387","2",0,32 +"388","3",1,32 +"389","2",0,32 +"390","2",1,32 +"391","1",0,32 +"392","2",1,32 +"393","1",0,32 +"394","2",1,32 +"395","2",0,32 +"396","2",1,32 +"397","2",0,32 +"398","2",1,32 +"399","2",1,32 +"400","2",0,32 +"401","3",0,32 +"402","1",1,32 +"403","3",1,32 +"404","2",1,32 +"405","2",1,32 +"406","1",1,32 +"407","2",1,32 +"408","1",0,32 +"409","2",0,32 +"410","1",1,32 +"411","1",1,32 +"412","3",1,32 +"413","2",1,32 
+"414","1",0,32 +"415","2",1,32 +"416","1",1,32 +"417","2",1,32 +"418","2",1,32 +"419","1",1,32 +"420","3",1,32 +"421","2",1,32 +"422","2",1,32 +"423","2",1,32 +"424","2",1,32 +"425","3",1,32 +"426","2",1,32 +"427","3",1,32 +"428","2",0,32 +"429","2",1,32 +"430","3",0,32 +"431","1",0,32 +"432","1",1,32 +"433","1",0,32 +"434","2",0,32 +"435","1",0,32 +"436","1",0,32 +"437","2",0,32 +"438","2",1,32 +"439","2",1,32 +"440","1",1,32 +"441","3",0,32 +"442","1",1,32 +"443","3",1,33 +"444","3",1,33 +"445","1",1,33 +"446","2",0,33 +"447","1",1,33 +"448","3",0,33 +"449","3",1,33 +"450","2",1,33 +"451","1",0,33 +"452","3",1,33 +"453","1",0,33 +"454","2",1,33 +"455","2",1,33 +"456","2",1,33 +"457","2",1,33 +"458","1",0,33 +"459","2",1,33 +"460","3",0,33 +"461","3",1,33 +"462","2",0,33 +"463","2",0,33 +"464","3",0,33 +"465","2",1,33 +"466","2",1,33 +"467","2",1,33 +"468","2",1,33 +"469","2",1,33 +"470","2",1,33 +"471","2",1,33 +"472","3",0,33 +"473","3",1,33 +"474","3",1,33 +"475","1",0,33 +"476","3",0,33 +"477","2",1,33 +"478","2",1,33 +"479","2",1,33 +"480","2",1,33 +"481","2",1,33 +"482","2",1,33 +"483","2",0,33 +"484","2",0,33 +"485","2",1,33 +"486","3",1,33 +"487","2",1,33 +"488","2",1,33 +"489","3",1,33 +"490","2",0,33 +"491","3",0,33 +"492","3",1,33 +"493","2",1,33 +"494","2",0,33 +"495","2",0,33 +"496","2",0,33 +"497","1",1,33 +"498","2",1,34 +"499","3",0,34 +"500","2",1,34 +"501","3",1,34 +"502","2",0,34 +"503","3",1,34 +"504","1",1,34 +"505","2",1,34 +"506","1",0,34 +"507","1",0,34 +"508","2",0,34 +"509","3",1,34 +"510","1",1,34 +"511","2",1,34 +"512","1",1,34 +"513","2",0,34 +"514","2",0,34 +"515","3",0,34 +"516","2",1,34 +"517","1",1,34 +"518","2",1,34 +"519","3",1,34 +"520","3",0,34 +"521","3",1,34 +"522","3",1,34 +"523","2",1,34 +"524","2",0,34 +"525","2",0,34 +"526","3",0,34 +"527","2",1,34 +"528","2",0,34 +"529","3",1,34 +"530","3",0,34 +"531","2",1,34 +"532","2",0,34 +"533","2",0,34 +"534","3",0,34 +"535","2",1,34 +"536","2",0,34 +"537","2",1,34 +"538","3",1,34 +"539","3",1,34 +"540","3",1,34 +"541","3",1,34 +"542","3",1,34 +"543","1",1,34 +"544","3",1,34 +"545","2",1,34 +"546","3",0,34 +"547","3",1,34 +"548","2",1,34 +"549","3",1,34 +"550","3",1,34 +"551","1",0,34 +"552","2",1,34 +"553","3",1,34 +"554","1",1,34 +"555","3",1,34 +"556","2",1,34 +"557","2",0,34 +"558","2",1,34 +"559","2",0,34 +"560","1",0,34 +"561","2",0,34 +"562","3",1,35 +"563","1",1,35 +"564","3",0,35 +"565","3",1,35 +"566","3",1,35 +"567","3",1,35 +"568","3",1,35 +"569","3",1,35 +"570","3",1,35 +"571","3",0,35 +"572","2",0,35 +"573","3",0,35 +"574","2",1,35 +"575","2",1,35 +"576","3",1,35 +"577","3",1,35 +"578","3",0,35 +"579","3",1,35 +"580","3",1,35 +"581","3",1,35 +"582","3",1,35 +"583","2",1,35 +"584","3",1,35 +"585","3",1,35 +"586","2",0,35 +"587","3",0,35 +"588","2",1,35 +"589","2",1,35 +"590","2",1,35 +"591","3",1,35 +"592","3",0,35 +"593","3",0,35 +"594","3",0,35 +"595","3",0,35 +"596","3",1,35 +"597","3",0,36 +"598","2",1,36 +"599","2",0,36 +"600","3",1,36 +"601","3",0,36 +"602","1",1,36 +"603","1",1,36 +"604","1",0,36 +"605","1",1,36 +"606","3",1,36 +"607","3",1,36 +"608","2",1,36 +"609","2",1,36 +"610","3",1,36 +"611","2",1,36 +"612","2",1,36 +"613","3",1,36 +"614","2",1,36 +"615","3",1,36 +"616","3",1,36 +"617","2",0,36 +"618","3",0,36 +"619","1",1,36 +"620","3",0,36 +"621","2",1,36 +"622","3",1,36 +"623","2",0,36 +"624","3",1,36 +"625","2",1,36 +"626","3",1,36 +"627","2",1,36 +"628","2",1,36 +"629","1",1,36 +"630","1",0,36 +"631","3",1,36 +"632","2",0,36 +"633","3",0,36 +"634","3",0,36 +"635","2",1,36 
+"636","3",0,36 +"637","3",1,36 +"638","2",0,36 +"639","3",0,36 +"640","2",1,36 +"641","2",0,36 +"642","2",0,36 +"643","3",0,36 +"644","3",1,36 +"645","3",1,36 +"646","3",1,36 +"647","2",1,36 +"648","3",1,36 +"649","3",0,36 +"650","2",1,36 +"651","2",0,36 +"652","3",1,36 +"653","3",1,36 +"654","3",1,36 +"655","3",0,36 +"656","3",0,36 +"657","2",0,36 +"658","1",1,36 +"659","3",0,36 +"660","3",1,36 +"661","1",0,36 +"662","2",1,36 +"663","3",1,36 +"664","3",1,36 +"665","3",0,36 +"666","3",1,36 +"667","3",1,36 +"668","3",1,36 +"669","3",1,36 +"670","2",1,36 +"671","3",0,36 +"672","3",1,36 +"673","3",0,36 +"674","1",0,36 +"675","3",1,36 +"676","2",1,36 +"677","2",0,36 +"678","2",1,36 +"679","2",1,36 +"680","3",1,36 +"681","3",1,36 +"682","3",1,37 +"683","3",1,37 +"684","2",0,37 +"685","3",1,37 +"686","3",1,37 +"687","3",1,37 +"688","1",0,37 +"689","3",1,37 +"690","3",1,37 +"691","2",1,37 +"692","2",1,37 +"693","3",1,37 +"694","2",1,37 +"695","3",1,37 +"696","3",0,37 +"697","3",1,37 +"698","3",0,37 +"699","3",1,37 +"700","2",1,37 +"701","3",1,37 +"702","3",0,37 +"703","2",1,37 +"704","2",0,38 +"705","3",1,38 +"706","3",0,38 +"707","2",0,38 +"708","2",1,38 +"709","3",0,38 +"710","3",1,38 +"711","2",0,38 +"712","3",1,38 +"713","3",1,38 +"714","3",1,38 +"715","3",0,38 +"716","2",0,38 +"717","3",0,38 +"718","2",1,38 +"719","2",0,38 +"720","3",1,38 +"721","3",0,38 +"722","3",1,38 +"723","3",0,38 +"724","3",0,38 +"725","3",0,38 +"726","2",1,38 +"727","3",0,38 +"728","3",0,38 +"729","3",1,38 +"730","3",0,38 +"731","3",1,38 +"732","3",0,38 +"733","3",0,38 +"734","3",1,38 +"735","1",1,38 diff --git a/logistic-regression/example-logistic-regression.lua b/logistic-regression/example-logistic-regression.lua new file mode 100644 index 0000000..8b3984e --- /dev/null +++ b/logistic-regression/example-logistic-regression.lua @@ -0,0 +1,532 @@ +---------------------------------------------------------------------- +-- example-logistic-regression.lua +-- +-- Logistic regression and multinomial logistic regression +-- + +require 'nn' +require 'optim' + + +---------------------------------------------------------------------- +-- 1. Create the training data + +print('') +print('============================================================') +print('Constructing dataset') +print('') + +-- The data come from a tutorial on using R from UCLA, which can be found at +-- http://www.ats.ucla.edu/stat/r/dae/mlogit.htm + +-- The model is one of brand preference, where there are 3 brands and 2 +-- explanatory variables. The variables are coded this way: +-- brand: 1, 2 or 3 +-- female: 1 if the person is a female, 0 if a male +-- age: a positive integer + +-- The data are stored in a csv file 'example-logistic-regression.csv' +-- and read with the csv package (torch-pkg install csv) + +require 'csv' + +-- The data are in a comma separated values (CSV) file. The first record +-- contains field names and subsequent records contain data. The fields and +-- their formats are: +-- - num: observation number; an integer surrounded by double quote chars +-- - brand: brand number; 1, 2, or 3 surrounded by double quote chars +-- - female: indicator for is-female: 1 if female, 0 otherwise; no quote chars +-- - age: age of the person; no quote characters + +-- Reading CSV files can be tricky. This code uses the csv package for this: +loaded = csv.load('example-logistic-regression.csv') + +-- Convert the CSV table into dense tensors. 
The tensor form has the +-- advantage that it stores its elements continguously (which leads to +-- better performance) and a tensor allows one to select columns and rows +-- easily, using slicing methods. + +-- first convert each variable list to a tensor: +brands = torch.Tensor(loaded.brand) +females = torch.Tensor(loaded.female) +ages = torch.Tensor(loaded.age) + +-- copy all the input variables into a single tensor: +dataset_inputs = torch.Tensor( (#brands)[1],2 ) +dataset_inputs[{ {},1 }] = females +dataset_inputs[{ {},2 }] = ages + +-- the outputs are just the brands +dataset_outputs = brands + +-- To implement the model, we need to know how many categories there are. +numberOfBrands = torch.max(dataset_outputs) - torch.min(dataset_outputs) + 1 + +-- summarize the data +function summarizeData() + function p(name,value) + print(string.format('%20s %f', name, value) ) + end + p('number of brands', numberOfBrands) + p('min brand', torch.min(brands)) + p('max brand', torch.max(brands)) + + p('min female', torch.min(females)) + p('max female', torch.max(females)) + + p('min age', torch.min(ages)) + p('max age', torch.max(ages)) +end +summarizeData() + + +---------------------------------------------------------------------- +-- 2. Define the model (predictor) + +-- The model is a multinomial logistic regression. + +-- It will consist of two layers that operate sequentially: +-- - 1: a linear model +-- - 2: a soft max layer + +-- The linear model supposes that the un-normalized probability of choosing +-- a specific brand is proportional to the product of unknown weights and +-- the observed variables plus a bias: +-- Prob(brand = b) = bias + weight1 * female * weight2 * age +-- There are two inputs (female and age) and three outputs (one for each +-- value that brand can take on) + +linLayer = nn.Linear(2,3) + +-- The soft max layer takes the 3 outputs from the linear layer and +-- transforms them to lie in the range (0,1) and to sum to 1. Thus, unlike +-- some text books in which the probabilities are un-normalized, the output +-- of the soft max layer will be normalized probabilities. + +-- The log soft max layer takes the log of these 3 outputs. This is done +-- because we want to feed the log values into the ClassNLLCriterion +-- described below. + +softMaxLayer = nn.LogSoftMax() -- the input and output are a single tensor + +-- We need to put the layers into a sequential container. + +model = nn.Sequential() +model:add(linLayer) +model:add(softMaxLayer) + + +---------------------------------------------------------------------- +-- 3. Define a loss function, to be minimized. + +-- In that example, we minimize the cross entropy between +-- the predictions of our linear model and the groundtruth available +-- in the dataset. + +-- Torch provides many common criterions to train neural networks. + +-- The ClassNLLCriterion expects to be fed the log probabilities in a +-- tensor. Hence, the use of the LogSoftMax layer in the model instead +-- of SoftMax. + +-- Minimizing the cross-entropy is equivalent to maximizing the +-- maximum a-posteriori (MAP) prediction, which is equivalent to +-- minimizing the negative log-likelihoood (NLL), hence the use of +-- the NLL loss. + +criterion = nn.ClassNLLCriterion() + + +---------------------------------------------------------------------- +-- 4.a. Train the model (Using SGD) + +-- To minimize the loss defined above, using the linear model defined +-- in 'model', we follow a stochastic gradient descent procedure (SGD). 
+ +-- SGD is a good optimization algorithm when the amount of training data +-- is large, and estimating the gradient of the loss function over the +-- entire training set is too costly. + +-- Given an arbitrarily complex model, we can retrieve its trainable +-- parameters, and the gradients of our loss function wrt these +-- parameters by doing so: + +x, dl_dx = model:getParameters() + +-- The above statement does not create a copy of the parameters in the +-- model! Instead it create in x and dl_dx a view of the model's weights +-- and derivative wrt the weights. The view is implemented so that when +-- the weights and their derivatives changes, so do the x and dl_dx. The +-- implementation is efficient in that the underlying storage is shared. + +-- A note on terminology: In the machine learning literature, the parameters +-- that one seeks to learn are often called weights and denoted with a W. +-- However, in the optimization literature, the parameter one seeks to +-- optimize is often called x. Hence the use of x and dl_dx above. + +-- In the following code, we define a closure, feval, which computes +-- the value of the loss function at a given point x, and the gradient of +-- that function with respect to x. x is the vector of trainable weights, +-- which, in this example, are all the weights of the linear matrix of +-- our mode, plus one bias. + +feval = function(x_new) + -- set x to x_new, if differnt + -- (in this simple example, x_new will typically always point to x, + -- so the copy is really useless) + if x ~= x_new then + x:copy(x_new) + end + + -- select a new training sample + _nidx_ = (_nidx_ or 0) + 1 + if _nidx_ > (#dataset_inputs)[1] then _nidx_ = 1 end + + local inputs = dataset_inputs[_nidx_] + local target = dataset_outputs[_nidx_] + + -- reset gradients (gradients are always accumulated, to accomodate + -- batch methods) + dl_dx:zero() + + -- evaluate the loss function and its derivative wrt x, for that sample + local loss_x = criterion:forward(model:forward(inputs), target) + model:backward(inputs, criterion:backward(model.output, target)) + + -- return loss(x) and dloss/dx + return loss_x, dl_dx +end + +-- Given the function above, we can now easily train the model using SGD. +-- For that, we need to define four key parameters: +-- + a learning rate: the size of the step taken at each stochastic +-- estimate of the gradient +-- + a weight decay, to regularize the solution (L2 regularization) +-- + a momentum term, to average steps over time +-- + a learning rate decay, to let the algorithm converge more precisely + +sgd_params = { + learningRate = 1e-3, + learningRateDecay = 1e-4, + weightDecay = 0, + momentum = 0 +} + +-- We're now good to go... all we have left to do is run over the dataset +-- for a certain number of iterations, and perform a stochastic update +-- at each iteration. The number of iterations is found empirically here, +-- but should typically be determinined using cross-validation (i.e. +-- using multiple folds of training/test subsets). + +epochs = 1e2 -- number of times to cycle over our training data + +print('') +print('============================================================') +print('Training with SGD') +print('') + +for i = 1,epochs do + + -- this variable is used to estimate the average loss + current_loss = 0 + + -- an epoch is a full loop over our training data + for i = 1,(#dataset_inputs)[1] do + + -- optim contains several optimization algorithms. 
+ -- All of these algorithms assume the same parameters: + -- + a closure that computes the loss, and its gradient wrt to x, + -- given a point x + -- + a point x + -- + some parameters, which are algorithm-specific + + _,fs = optim.sgd(feval,x,sgd_params) + + -- Functions in optim all return two things: + -- + the new x, found by the optimization method (here SGD) + -- + the value of the loss functions at all points that were used by + -- the algorithm. SGD only estimates the function once, so + -- that list just contains one value. + + current_loss = current_loss + fs[1] + end + + -- report average error on epoch + current_loss = current_loss / (#dataset_inputs)[1] + print('epoch = ' .. i .. + ' of ' .. epochs .. + ' current loss = ' .. current_loss) + +end + + +---------------------------------------------------------------------- +-- 4.b. Train the model (Using L-BFGS) + +-- now that we know how to train the model using simple SGD, we can +-- use more complex optimization heuristics. In the following, we +-- use a second-order method: L-BFGS, which typically yields +-- more accurate results (for linear models), but can be significantly +-- slower. For very large datasets, SGD is typically much faster +-- to converge, and L-FBGS can be used to refine the results. + +-- we start again, and reset the trained parameter vector: + +model:reset() + +-- next we re-define the closure that evaluates f and df/dx, so that +-- it estimates the true f, and true (exact) df/dx, over the entire +-- dataset. This is a full batch approach. + +feval = function(x_new) + -- set x to x_new, if differnt + -- (in this simple example, x_new will typically always point to x, + -- so the copy is really useless) + if x ~= x_new then + x:copy(x_new) + end + + -- reset gradients (gradients are always accumulated, to accomodate + -- batch methods) + dl_dx:zero() + + -- and batch over the whole training dataset: + local loss_x = 0 + for i = 1,(#dataset_inputs)[1] do + -- select a new training sample + _nidx_ = (_nidx_ or 0) + 1 + if _nidx_ > (#dataset_inputs)[1] then _nidx_ = 1 end + + local inputs = dataset_inputs[_nidx_] + local target = dataset_outputs[_nidx_] + + -- evaluate the loss function and its derivative wrt x, for that sample + loss_x = loss_x + criterion:forward(model:forward(inputs), target) + model:backward(inputs, criterion:backward(model.output, target)) + end + + -- normalize with batch size + loss_x = loss_x / (#dataset_inputs)[1] + dl_dx = dl_dx:div( (#dataset_inputs)[1] ) + + -- return loss(x) and dloss/dx + return loss_x, dl_dx +end + +-- L-BFGS parameters are different than SGD: +-- + a line search: we provide a line search, which aims at +-- finding the point that minimizes the loss locally +-- + max nb of iterations: the maximum number of iterations for the batch, +-- which is equivalent to the number of epochs +-- on the given batch. In that example, it's simple +-- because the batch is the full dataset, but in +-- some cases, the batch can be a small subset +-- of the full dataset, in which case maxIter +-- becomes a more subtle parameter. + +lbfgs_params = { + lineSearch = optim.lswolfe, + maxIter = epochs, + verbose = true +} + +print('') +print('============================================================') +print('Training with L-BFGS') +print('') + +_,fs = optim.lbfgs(feval,x,lbfgs_params) + +-- fs contains all the evaluations of f, during optimization + +print('history of L-BFGS evaluations:') +print(fs) + + +---------------------------------------------------------------------- +-- 5. 
Test the trained model. + +print('') +print('============================================================') +print('Testing the model') +print('') + +-- Now that the model is trained, one can test it by evaluating it +-- on new samples. + +-- The model constructed and trained above computes the probabilities +-- of each class given the input values. + +-- We want to compare our model's results with those from the text. +-- The input variables have narrow ranges, so we just compare all possible +-- input variables in the training data. + +-- Determine actual frequency of the each female-age pair in the +-- training data + +-- return index of largest value +function maxIndex(a,b,c) + if a >=b and a >= c then return 1 + elseif b >= a and b >= c then return 2 + else return 3 end +end + +-- return predicted brand and probabilities of each brand +-- for the model in the text + +-- The R code in the text computes the probabilities of choosing +-- brands 2 and 3 relative to the probability of choosing brand 1: +-- Prob(brand=2)/prob(brand=1) = exp(-11.77 + 0.52*female + 0.37*age) +-- Prob(brand=3)/prob(brand=1) = exp(-22.72 + 0.47*female + 0.69*age) +function predictText(age, female) + -- 1: calculate the "logit's" + -- The coefficients come from the text. + -- If you download the R script and run it, you may see slightly + -- different results. + local logit1 = 0 + local logit2 = -11.774655 + 0.523814 * female + 0.368206 * age + local logit3 = -22.721396 + 0.465941 * female + 0.685908 * age + + -- 2: calculate the unnormalized probabilities + local uprob1 = math.exp(logit1) + local uprob2 = math.exp(logit2) + local uprob3 = math.exp(logit3) + + -- 3: normalize the probabilities + local z = uprob1 + uprob2 + uprob3 + local prob1 = (1/z) * uprob1 + local prob2 = (1/z) * uprob2 + local prob3 = (1/z) * uprob3 + + return maxIndex(prob1, prob2, prob3), prob1, prob2, prob3 +end + +-- return predicted brand and the probabilities of each brand +-- for our model +function predictOur(age, female) + local input = torch.Tensor(2) + input[1] = female -- must be in same order as when the model was trained! + input[2] = age + local logProbs = model:forward(input) + --print('predictOur', age, female, input) + local probs = torch.exp(logProbs) + --print('logProbs', logProbs) + --print('probs', probs[1], probs[2], probs[3] ) + local prob1, prob2, prob3 = probs[1], probs[2], probs[3] + return maxIndex(prob1, prob2, prob3), prob1, prob2, prob3 +end + +counts = {} + +function makeKey(age, brand, female) + -- return a string containing the values + + -- Note that returning a table will not work, because each + -- table is unique. + + -- Because Lua interns the strings, a string with a given sequence + -- of characters is stored only once. 
+ return string.format('%2d%1d%1f', age, brand, female) +end + +for i = 1,(#brands)[1] do + local brand = brands[i] + local female = females[i] + local age = ages[i] + local key = makeKey (age, brand, female) + counts[key] = (counts[key] or 0) + 1 +end + +-- return probability of each brand conditioned on age and female +function actualProbabilities(age, female) + function countOf(age, brand, female) + return counts[makeKey(age, brand, female)] or 0 + end + local count1 = countOf(age, 1, female) + local count2 = countOf(age, 2, female) + local count3 = countOf(age, 3, female) + local sumCounts = count1 + count2 + count3 + if sumCounts == 0 then + return 0, 0, 0 + else + return count1/sumCounts, count2/sumCounts, count3/sumCounts + end +end + + +print(' ') +print('summary of data') +summarizeData() + +print(' ') +print('training variables') +for k,v in pairs(sgd_params) do + print(string.format('%20s %f', k, v)) +end +print(string.format('%20s %f', 'epochs', epochs)) + +print(' ') +print('current loss', current_loss) + +-- print the headers +print(' ') +lineFormat = '%-6s %-3s| %-17s | %-17s | %-17s | %-1s %-1s %-1s' +print( + string.format(lineFormat, + '', '', + 'actual probs', 'text probs', 'our probs', + 'best', '', '')) +choices = 'brnd1 brnd2 brnd3' +print(string.format(lineFormat, + 'female', 'age', + choices, choices, choices, + 'a', 't', 'o')) + +-- print each row in the table + +function formatFemale(female) + return string.format('%1d', female) +end + +function formatAge(age) + return string.format('%2d', age) +end + +function formatProbs(p1, p2, p3) + return string.format('%5.3f %5.3f %5.3f', p1, p2, p3) +end + +function indexString(p1, p2, p3) + -- return index of highest probability or '-' if nearly all zeroes + if p1 < 0.001 and p2 < 0.001 and p3 < 0.001 then + return '-' + else + return string.format('%1d', maxIndex(p1, p2, p3)) + end +end + +-- print table rows and accumulate accuracy +for female = 0,1 do + for age = torch.min(ages),torch.max(ages) do + -- calculate the actual probabilities in the training data + local actual1, actual2, actual3 = actualProbabilities(age, female) + -- calculate the prediction and probabilities using the model in the text + local textBrand, textProb1, textProb2, textProb3 = + predictText(age, female) + -- calculate the probabilities using the model we just trained + --print("main", age, female) + local ourBrand, ourProb1, ourProb2, ourProb3 = + predictOur(age, female) + print( + string.format(lineFormat, + formatFemale(female), + formatAge(age), + formatProbs(actual1, actual2, actual3), + formatProbs(textProb1, textProb2, textProb3), + formatProbs(ourProb1, ourProb2, ourProb3), + indexString(actual1,actual2,actual3), + indexString(textProb1,textProb2,textProb3), + indexString(ourProb1,ourProb2,ourProb3)) + ) + end +end diff --git a/neuflow-demos/.DS_Store b/neuflow-demos/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..aa9e186805d2a8e6c28ec8a2b2829bc4a08f5cdf GIT binary patch literal 6148 zcmeHK%}T>S5T0$TCa92$ptr?yk<<$D;30$x9)%Wqu%ZbmG*FV#r1VgtDZY*epT_5L zW_K+Xa~9DVnEhtwXS4YryBPow!6NPeH~?Uw5(+YEgxs~EhDjEPsJKQ71tgF`8ZB!U z`i(ByyA2`kyMXc6{pYi2nnf6Wi)Pa#FI@LsG@7lQ-95{;t%K1pN#kr8WQ#P;@A{K$ z*-zqe6wdFJU4+FV55l>M+=j&jBg!6R*%a-O$sQ(gF?n!KR;!NZb=&gXbIZ0|d7j&r z7v0OUv@OeaUah0kv#XmySUx>hYfYn|os%4D*o-e5iw$Og8DIvO0cN0%0ds>``*lI~ zF$2uNzh;2W2Z>7P8>}>{qXP%k0wA`~tpsiAC8$Ok^bJ-TF@hp=DWWbFro|At9R1Sd z`35VEx*UXQK7@JlXY(S|({X*N(n0tdd1MBdfo%qwdg{>qe~q6^ZIi!U!ZT)o8Texi 
zh*q!P3-G1P-TLkI=&qHhkEkS+SCN5&_SR1TPUsvtRzV$?WMiIhu+m7YkYAM}@{fQb Lga>Be2N?JOQZ7UX literal 0 HcmV?d00001 diff --git a/profiling/.DS_Store b/profiling/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 +-- January 10th 2013 +---------------------------------------------------------------------- + +require 'torch' -- torch +require 'image' -- to visualize the dataset +require 'nnx' -- provides all sorts of trainable modules/layers + +---------------------------------------------------------------------- + +---------------------------------------------------------------------- +print '==> define parameters' + +-- # of classes at output +noutputs = 2 + +-- input dimensions +nfeats = 3 -- 3 colors/planes input image +width = 50 -- input window size +height = 50 +ninputs = nfeats*width*height + +-- hidden units, filter sizes (for ConvNet only): +nstates = {16,64, 64*16, 16*16, noutputs} -- filters number / neurons in each layer +filtsize = 9 +poolsize = 4 +--node = {64,16} +--depth = {16, 16} +normkernel = image.gaussian1D(3) + + +-- desired image size: +im_pl = 3 +im_sz = 2200 +testima = torch.Tensor(im_pl, im_sz, im_sz) -- test image of 5 Mpix 3 colors/planes + +---------------------------------------------------------------------- +print '==> construct model' +model = nn.Sequential() + +-- stage 1 : filter bank -> squashing -> L2 pooling -> normalization +model:add(nn.SpatialConvolution(nfeats, nstates[1], filtsize, filtsize)) +model:add(nn.Tanh()) +model:add(nn.SpatialLPPooling(nstates[1],2,poolsize,poolsize,poolsize,poolsize)) +model:add(nn.SpatialSubtractiveNormalization(nstates[1], normkernel)) + +-- stage 2 : filter bank -> squashing -> L2 pooling -> normalization +model:add(nn.SpatialConvolution(nstates[1], nstates[2], filtsize, filtsize)) +model:add(nn.Tanh()) +model:add(nn.SpatialLPPooling(nstates[2],2,poolsize,poolsize,poolsize,poolsize)) +model:add(nn.SpatialSubtractiveNormalization(nstates[2], normkernel)) + +---- stage 3 : filter bank -> squashing -> L2 pooling -> normalization +--model:add(nn.SpatialConvolution(nn.tables.random(nstates[2], nstates[3], fanin[3]), filtsize, filtsize)) +--model:add(nn.Tanh()) +--model:add(nn.SpatialLPPooling(nstates[3],2,poolsize,poolsize,poolsize,poolsize)) +--model:add(nn.SpatialSubtractiveNormalization(nstates[2], normkernel)) + +-- stage 4 : fully connected layer +in_size = ((im_sz-9+1)/4 - 9 +1)/4 --nstates[2]*filtsize*filtsize +--out_size = node[1] * depth[1] + +model:add(nn.Reshape(nstates[2]*in_size^2)) +model:add(nn.Linear(nstates[2]*in_size^2, nstates[3])) +model:add(nn.Tanh()) + +-- stage 5 : fully connected layer +model:add(nn.Linear(nstates[3], nstates[4])) +model:add(nn.Tanh()) + +-- linear output. +model:add(nn.Linear(nstates[4], noutputs)) +-- logreg? +model:add(nn.Sigmoid()) + +print '==> testing model on image' +time = sys.clock() +outima = model:forward(testima) +time = sys.clock() - time +print('==> Compute Time = ' .. (time*1000) .. 'ms') diff --git a/profiling/linear-cpu.lua b/profiling/linear-cpu.lua new file mode 100644 index 0000000..090cdd7 --- /dev/null +++ b/profiling/linear-cpu.lua @@ -0,0 +1,41 @@ + +require 'sys' +require 'cunn' + +cutorch.setDevice(arg[1] or 1) +print('DEVID = ' .. 
cutorch.getDevice()) + +bs = 512 +ninputs = 4096 +nhiddens = 4096 +noutputs = 1000 + +n = nn.Sequential() +n:add( nn.Linear(ninputs, nhiddens) ) +n:add( nn.Threshold() ) +n:add( nn.Linear(nhiddens, noutputs) ) +n:add( nn.Threshold() ) + +i = torch.randn(bs, ninputs) + +ops = (ninputs*nhiddens + nhiddens*noutputs) * 2 * bs * 3 + +n:float() +i = i:float() + +N=5 + +-- pre-alloc states: +n:forward(i) +n:backward(i, n.output) +cutorch.synchronize() + +sys.tic() +for t = 1,N do + n:forward(i) + n:backward(i, n.output) +end +cutorch.synchronize() +t = sys.toc()/N +print('Fprop+Bprop+Acc - GFLOP/s:', ops/t/1e9) + diff --git a/profiling/linear-gpu.lua b/profiling/linear-gpu.lua new file mode 100644 index 0000000..efabce1 --- /dev/null +++ b/profiling/linear-gpu.lua @@ -0,0 +1,52 @@ + +require 'sys' +require 'cunn' + +cutorch.setDevice(arg[1] or 1) +print('DEVID = ' .. cutorch.getDevice()) + +bs = 512 +ninputs = 4096 +nhiddens = 4096 +noutputs = 1000 + +n = nn.Sequential() +n:add( nn.Linear(ninputs, nhiddens) ) +n:add( nn.Threshold() ) +n:add( nn.Linear(nhiddens, noutputs) ) +n:add( nn.Threshold() ) + +i = torch.randn(bs, ninputs) + +ops = (ninputs*nhiddens + nhiddens*noutputs) * 2 * bs * 3 + +n:float() +i = i:float() + +N=5 + +-- sys.tic() +-- for t = 1,N do +-- n:forward(i) +-- n:backward(i, n.output) +-- end +-- t = sys.toc()/N +-- print('MKL - GFLOP/s:', ops/t/1e9) + +n:cuda() +i = i:cuda() + +-- pre-alloc states: +n:forward(i) +n:backward(i, n.output) +cutorch.synchronize() + +sys.tic() +for t = 1,N do + n:forward(i) + n:backward(i, n.output) +end +cutorch.synchronize() +t = sys.toc()/N +print('Fprop+Bprop+Acc - GFLOP/s:', ops/t/1e9) + diff --git a/profiling/results.rtf b/profiling/results.rtf new file mode 100644 index 0000000..16d6db3 --- /dev/null +++ b/profiling/results.rtf @@ -0,0 +1,51 @@ +{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf370 +{\fonttbl\f0\fswiss\fcharset0 ArialMT;\f1\fnil\fcharset0 LucidaGrande;\f2\fswiss\fcharset0 Helvetica; +} +{\colortbl;\red255\green255\blue255;} +{\info +{\author Eugenio Culurciello}}\margl1440\margr1440\vieww17500\viewh19200\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural + +\f0\fs28 \cf0 \ +EC test May 2013:\ +\ +\pard\tx2835\li2835\fi-2836 + +\f1\b\fs22 \cf0 Hardware Overview:\ +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f2\b0\fs24 \cf0 \ +\pard\tx2835\li2835\fi-2836 + +\f1\fs22 \cf0 Model Name: MacBook Pro\ + Model Identifier: MacBookPro10,1\ + Processor Name: Intel Core i7\ + Processor Speed: 2.6 GHz\ + Number of Processors: 1\ + Total Number of Cores: 4\ + L2 Cache (per Core): 256 KB\ + L3 Cache: 6 MB\ + Memory: 16 GB\ + Boot ROM Version: MBP101.00EE.B02\ + SMC Version (system): 2.3f35\ + Serial Number (system): C02J120CDKQ5\ + Hardware UUID: E4515A95-B99F-5FBD-A105-25BE76310224 +\f0\fs28 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural +\cf0 \ +eugenioculurciello@Pora ~/Code/torch/profiling $ torch linear-cpu.lua\ +Torch 7.0 Copyright (C) 2001-2011 Idiap, NEC Labs, NYU\ +DEVID = 1 \ +Fprop+Bprop+Acc - GFLOP/s: 140.25925408311 \ +eugenioculurciello@Pora ~/Code/torch/profiling $ torch linear-gpu.lua\ +Torch 7.0 Copyright (C) 2001-2011 Idiap, NEC Labs, NYU\ +DEVID = 1 \ +Fprop+Bprop+Acc - GFLOP/s: 274.81519512347 \ +eugenioculurciello@Pora ~/Code/torch/profiling $ torch conv-cpu.lua\ +Torch 7.0 Copyright (C) 2001-2011 Idiap, NEC Labs, NYU\ +DEVID = 1 \ 
+Fprop+Bprop+Acc - GFLOP/s: 55.582031507672 \ +eugenioculurciello@Pora ~/Code/torch/profiling $ torch conv-gpu.lua\ +Torch 7.0 Copyright (C) 2001-2011 Idiap, NEC Labs, NYU\ +DEVID = 1 \ +Fprop+Bprop+Acc - GFLOP/s: 96.082082210762 } \ No newline at end of file diff --git a/simple-frame-grabber/g.ui b/simple-frame-grabber/g.ui old mode 100755 new mode 100644 diff --git a/tensors/slicing.lua b/tensors/slicing.lua new file mode 100644 index 0000000..31b1b80 --- /dev/null +++ b/tensors/slicing.lua @@ -0,0 +1,157 @@ +---------------------------------------------------------------------- +-- slicing.lua +-- +-- This script demonstrates tensor slicing / manipulation. + +-- To run this script, simply do: +-- torch slicing.lua +-- and then press 'y' or 'return' at each step, to keep going. + +-- little function to pause execution, and request user input +function next() + local answer = nil + while answer ~= '' and answer ~= 'y' and answer ~= 'Y' and neverstall ~= true do + io.write("continue ([y]/n/!)? ") + io.flush() + answer=io.read() + if answer == '!' then + neverstall = true + end + if answer == 'n' then + print('exiting...') + os.exit() + end + end + print '' +end + +print '----------------------------------------------------------------------' +print 'creating a few tensors' + +t1 = torch.range(1,75):resize(3,5,5) +print 't1 = torch.range(1,75):resize(3,5,5)' +print 't1 = ' +print(t1) + +t2 = torch.range(1,25):resize(5,5) +print 't2 = torch.range(1,25):resize(5,5)' +print 't2 = ' +print(t2) + +print 'done.' +print '' + +next() +print '----------------------------------------------------------------------' +print 'the most basic slicing is done using the [] operator' +print '' + +print 't1 =' +print( t1 ) + +print 't1[2] =' +print( t1[2] ) + +next() +print '----------------------------------------------------------------------' +print 't1_1 is a view in the existing t1 tensor: changing the values' +print 'in t1_1 directly affects t1:' +print '' + +t1[2]:fill(7) +print 't1[2]:fill(7)' + +print 't1[2] =' +print( t1[2] ) + +print 't1 =' +print( t1 ) + +next() +print '----------------------------------------------------------------------' +print 'more complex slicing can be done using the [{}] operator' +print 'this operator lets you specify one list/number per dimension' +print 'for example, t2 is a 2-dimensional tensor, therefore' +print 'we should pass 2 lists/numbers to the [{}] operator:' +print '' + +t2_slice1 = t2[{ {},2 }] +t2_slice2 = t2[{ 2,{} }] -- equivalent to t2[2] +t2_slice3 = t2[{ {2},{} }] +t2_slice4 = t2[{ {1,3},{3,4} }] +t2_slice5 = t2[{ {3},{4} }] +t2_slice6 = t2[{ 3,4 }] + +print 't2 = ' +print(t2) + +print 't2[{ {},2 }] =' +print(t2_slice1) + +print 't2[{ 2,{} }] =' +print(t2_slice2) + +print 't2[{ {2},{} }] =' +print(t2_slice3) + +print 't2[{ {1,3},{3,4} }] =' +print(t2_slice4) + +print 't2[{ {3},{4} }] =' +print(t2_slice5) + +print 't2[{ 3,4 }] =' +print(t2_slice6) + +next() +print '----------------------------------------------------------------------' +print 'negative indexes can also be used:' +print '' + +t2_slice7 = t2[{ {},{2,-2} }] +t2_slice8 = t2[{ -1,-1 }] + +print 't2[{ {},{2,-2} }] =' +print(t2_slice7) + +print 't2[{ -1,-1 }] =' +print(t2_slice8) + +next() +print '----------------------------------------------------------------------' +print 'in basic Lua, the = operator cannot be overloaded (that speeds up the language parser' +print 'a lot...), but you can use the [{}] operator to copy tensors, and subtensors:' +print '' + +print 't3 = torch.Tensor(5)' +print 
't3[{}] = t2[{ {},1 }]' + +t3 = torch.Tensor(5) +t3[{}] = t2[{ {},1 }] + +print 't3 =' +print(t3) + +next() +print '----------------------------------------------------------------------' +print 'if you need to slice arbitrary subtensors, you will need to do it in steps:' +print '' + +t4 = torch.Tensor(5,2) +t4[{ {},1 }] = t2[{ {},2 }] +t4[{ {},2 }] = t2[{ {},5 }] + +print [[ +t4 = torch.Tensor(5,2) +t4[{ {},1 }] = t2[{ {},2 }] +t4[{ {},2 }] = t2[{ {},5 }] +]] + +print 't4 =' +print(t4) + + + + + + diff --git a/train-a-digit-classifier/dataset.lua b/train-a-digit-classifier/dataset-mnist.lua old mode 100755 new mode 100644 similarity index 100% rename from train-a-digit-classifier/dataset.lua rename to train-a-digit-classifier/dataset-mnist.lua diff --git a/train-a-digit-classifier/train-on-mnist.lua b/train-a-digit-classifier/train-on-mnist.lua old mode 100755 new mode 100644 index 714ba1f..3c4ecde --- a/train-a-digit-classifier/train-on-mnist.lua +++ b/train-a-digit-classifier/train-on-mnist.lua @@ -1,6 +1,6 @@ ---------------------------------------------------------------------- -- This script shows how to train different models on the MNIST --- dataset, using multiple optimization techniques (SGD, ASGD, CG) +-- dataset, using multiple optimization techniques (SGD, LBFGS) -- -- This script demonstrates a classical example of training -- well-known models (convnet, MLP, logistic regression) @@ -20,43 +20,43 @@ require 'nn' require 'nnx' require 'optim' require 'image' -require 'dataset' +require 'dataset-mnist' +require 'pl' ---------------------------------------------------------------------- -- parse command-line options -- -dname,fname = sys.fpath() -cmd = torch.CmdLine() -cmd:text() -cmd:text('MNIST Training') -cmd:text() -cmd:text('Options:') -cmd:option('-save', fname:gsub('.lua',''), 'subdirectory to save/log experiments in') -cmd:option('-network', '', 'reload pretrained network') -cmd:option('-model', 'convnet', 'type of model to train: convnet | mlp | linear') -cmd:option('-full', false, 'use full dataset (60,000 samples)') -cmd:option('-visualize', false, 'visualize input data and weights during training') -cmd:option('-seed', 1, 'fixed input seed for repeatable experiments') -cmd:option('-optimization', 'SGD', 'optimization method: SGD | ASGD | CG | LBFGS') -cmd:option('-learningRate', 1e-2, 'learning rate at t=0') -cmd:option('-batchSize', 1, 'mini-batch size (1 = pure stochastic)') -cmd:option('-weightDecay', 0, 'weight decay (SGD only)') -cmd:option('-momentum', 0, 'momentum (SGD only)') -cmd:option('-t0', 1, 'start averaging at t0 (ASGD only), in nb of epochs') -cmd:option('-maxIter', 5, 'maximum nb of iterations for CG and LBFGS') -cmd:option('-openmp', false, 'use OpenMP to //') -cmd:option('-threads', 2, 'nb of threads to use with OpenMP') -cmd:text() -opt = cmd:parse(arg) +local opt = lapp[[ + -s,--save (default "logs") subdirectory to save logs + -n,--network (default "") reload pretrained network + -m,--model (default "convnet") type of model tor train: convnet | mlp | linear + -f,--full use the full dataset + -p,--plot plot while training + -o,--optimization (default "SGD") optimization: SGD | LBFGS + -r,--learningRate (default 0.05) learning rate, for SGD only + -b,--batchSize (default 10) batch size + -m,--momentum (default 0) momentum, for SGD only + -i,--maxIter (default 3) maximum nb of iterations per batch, for LBFGS + --coefL1 (default 0) L1 penalty on the weights + --coefL2 (default 0) L2 penalty on the weights + -t,--threads (default 4) number of threads 
+]] -- fix seed -torch.manualSeed(opt.seed) +torch.manualSeed(1) --- openmp -if opt.openmp then - require 'openmp' - openmp.setDefaultNumThreads(opt.threads) - print(' enabled with ' .. opt.threads .. ' threads') +-- threads +torch.setnumthreads(opt.threads) +print(' set nb of threads to ' .. torch.getnumthreads()) + +-- use floats, for SGD +if opt.optimization == 'SGD' then + torch.setdefaulttensortype('torch.FloatTensor') +end + +-- batch size? +if opt.optimization == 'LBFGS' and opt.batchSize < 100 then + error('LBFGS should not be used with small mini-batches; 1000 is a recommended') end ---------------------------------------------------------------------- @@ -77,20 +77,18 @@ if opt.network == '' then -- convolutional network ------------------------------------------------------------ -- stage 1 : mean suppresion -> filter bank -> squashing -> max pooling - model:add(nn.SpatialSubtractiveNormalization(1, image.gaussian1D(15))) - model:add(nn.SpatialConvolution(1, 16, 5, 5)) + model:add(nn.SpatialConvolutionMM(1, 32, 5, 5)) model:add(nn.Tanh()) - model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) + model:add(nn.SpatialMaxPooling(3, 3, 3, 3)) -- stage 2 : mean suppresion -> filter bank -> squashing -> max pooling - model:add(nn.SpatialSubtractiveNormalization(16, image.gaussian1D(15))) - model:add(nn.SpatialConvolutionMap(nn.tables.random(16, 128, 4), 5, 5)) + model:add(nn.SpatialConvolutionMM(32, 64, 5, 5)) model:add(nn.Tanh()) model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) - -- stage 3 : standard 2-layer neural network - model:add(nn.Reshape(128*5*5)) - model:add(nn.Linear(128*5*5, 200)) + -- stage 3 : standard 2-layer MLP: + model:add(nn.Reshape(64*2*2)) + model:add(nn.Linear(64*2*2, 200)) model:add(nn.Tanh()) - model:add(nn.Linear(200,#classes)) + model:add(nn.Linear(200, #classes)) ------------------------------------------------------------ elseif opt.model == 'mlp' then @@ -118,8 +116,7 @@ if opt.network == '' then end else print(' reloading previously trained network') - model = nn.Sequential() - model:read(torch.DiskFile(opt.network)) + model = torch.load(opt.network) end -- retrieve parameters and gradients @@ -134,7 +131,6 @@ print(model) -- model:add(nn.LogSoftMax()) criterion = nn.ClassNLLCriterion() ---criterion = nn.DistKLDivCriterion() ---------------------------------------------------------------------- -- get/create dataset @@ -150,7 +146,7 @@ end -- create training set and normalize trainData = mnist.loadTrainSet(nbTrainingPatches, geometry) -mean, std = trainData:normalizeGlobal() +trainData:normalizeGlobal(mean, std) -- create test set and normalize testData = mnist.loadTestSet(nbTestingPatches, geometry) @@ -167,33 +163,6 @@ confusion = optim.ConfusionMatrix(classes) trainLogger = optim.Logger(paths.concat(opt.save, 'train.log')) testLogger = optim.Logger(paths.concat(opt.save, 'test.log')) --- display function -function display(input) - iter = iter or 0 - require 'image' - win_input = image.display{image=input, win=win_input, zoom=2, legend='input'} - if iter%10 == 0 then - if opt.model == 'convnet' then - win_w1 = image.display{image=model:get(2).weight, zoom=4, nrow=10, - min=-1, max=1, - win=win_w1, legend='stage 1: weights', padding=1} - win_w2 = image.display{image=model:get(6).weight, zoom=4, nrow=30, - min=-1, max=1, - win=win_w2, legend='stage 2: weights', padding=1} - elseif opt.model == 'mlp' then - local W1 = torch.Tensor(model:get(2).weight):resize(2048,1024) - win_w1 = image.display{image=W1, zoom=0.5, - min=-1, max=1, - win=win_w1, legend='W1 weights'} - 
local W2 = torch.Tensor(model:get(2).weight):resize(10,2048) - win_w2 = image.display{image=W2, zoom=0.5, - min=-1, max=1, - win=win_w2, legend='W2 weights'} - end - end - iter = iter + 1 -end - -- training function function train(dataset) -- epoch tracker @@ -206,91 +175,98 @@ function train(dataset) print(' on training set:') print(" online epoch # " .. epoch .. ' [batchSize = ' .. opt.batchSize .. ']') for t = 1,dataset:size(),opt.batchSize do - -- disp progress - xlua.progress(t, dataset:size()) - -- create mini batch - local inputs = {} - local targets = {} + local inputs = torch.Tensor(opt.batchSize,1,geometry[1],geometry[2]) + local targets = torch.Tensor(opt.batchSize) + local k = 1 for i = t,math.min(t+opt.batchSize-1,dataset:size()) do -- load new sample local sample = dataset[i] local input = sample[1]:clone() local _,target = sample[2]:clone():max(1) target = target:squeeze() - table.insert(inputs, input) - table.insert(targets, target) + inputs[k] = input + targets[k] = target + k = k + 1 end -- create closure to evaluate f(X) and df/dX local feval = function(x) - -- get new parameters - if x ~= parameters then - parameters:copy(x) - end + -- just in case: + collectgarbage() - -- reset gradients - gradParameters:zero() + -- get new parameters + if x ~= parameters then + parameters:copy(x) + end - -- f is the average of all criterions - local f = 0 + -- reset gradients + gradParameters:zero() - -- evaluate function for complete mini batch - for i = 1,#inputs do - -- estimate f - local output = model:forward(inputs[i]) - local err = criterion:forward(output, targets[i]) - f = f + err + -- evaluate function for complete mini batch + local outputs = model:forward(inputs) + local f = criterion:forward(outputs, targets) - -- estimate df/dW - local df_do = criterion:backward(output, targets[i]) - model:backward(inputs[i], df_do) + -- estimate df/dW + local df_do = criterion:backward(outputs, targets) + model:backward(inputs, df_do) - -- update confusion - confusion:add(output, targets[i]) + -- penalties (L1 and L2): + if opt.coefL1 ~= 0 or opt.coefL2 ~= 0 then + -- locals: + local norm,sign= torch.norm,torch.sign - -- visualize? - if opt.visualize then - display(inputs[i]) - end - end + -- Loss: + f = f + opt.coefL1 * norm(parameters,1) + f = f + opt.coefL2 * norm(parameters,2)^2/2 - -- normalize gradients and f(X) - gradParameters:div(#inputs) - f = f/#inputs + -- Gradients: + gradParameters:add( sign(parameters):mul(opt.coefL1) + parameters:clone():mul(opt.coefL2) ) + end - -- return f and df/dX - return f,gradParameters - end + -- update confusion + for i = 1,opt.batchSize do + confusion:add(outputs[i], targets[i]) + end - -- optimize on current mini-batch - if opt.optimization == 'CG' then - config = config or {maxIter = opt.maxIter} - optim.cg(feval, parameters, config) + -- return f and df/dX + return f,gradParameters + end - elseif opt.optimization == 'LBFGS' then - config = config or {learningRate = opt.learningRate, - maxIter = opt.maxIter, - nCorrection = 10} - optim.lbfgs(feval, parameters, config) + -- optimize on current mini-batch + if opt.optimization == 'LBFGS' then + + -- Perform LBFGS step: + lbfgsState = lbfgsState or { + maxIter = opt.maxIter, + lineSearch = optim.lswolfe + } + optim.lbfgs(feval, parameters, lbfgsState) + + -- disp report: + print('LBFGS step') + print(' - progress in batch: ' .. t .. '/' .. dataset:size()) + print(' - nb of iterations: ' .. lbfgsState.nIter) + print(' - nb of function evalutions: ' .. 
lbfgsState.funcEval) elseif opt.optimization == 'SGD' then - config = config or {learningRate = opt.learningRate, - weightDecay = opt.weightDecay, - momentum = opt.momentum, - learningRateDecay = 5e-7} - optim.sgd(feval, parameters, config) - elseif opt.optimization == 'ASGD' then - config = config or {eta0 = opt.learningRate, - t0 = nbTrainingPatches * opt.t0} - _,_,average = optim.asgd(feval, parameters, config) + -- Perform SGD step: + sgdState = sgdState or { + learningRate = opt.learningRate, + momentum = opt.momentum, + learningRateDecay = 5e-7 + } + optim.sgd(feval, parameters, sgdState) + + -- disp progress + xlua.progress(t, dataset:size()) else error('unknown optimization method') end end - + -- time taken time = sys.clock() - time time = time / dataset:size() @@ -308,7 +284,7 @@ function train(dataset) os.execute('mv ' .. filename .. ' ' .. filename .. '.old') end print(' saving network to '..filename) - torch.save(filename, model) + -- torch.save(filename, model) -- next epoch epoch = epoch + 1 @@ -319,26 +295,34 @@ function test(dataset) -- local vars local time = sys.clock() - -- averaged param use? - if average then - cachedparams = parameters:clone() - parameters:copy(average) - end - -- test over given dataset print(' on testing Set:') - for t = 1,dataset:size() do + for t = 1,dataset:size(),opt.batchSize do -- disp progress xlua.progress(t, dataset:size()) - -- get new sample - local sample = dataset[t] - local input = sample[1] - local _,target = sample[2]:max(1) - target = target:squeeze() + -- create mini batch + local inputs = torch.Tensor(opt.batchSize,1,geometry[1],geometry[2]) + local targets = torch.Tensor(opt.batchSize) + local k = 1 + for i = t,math.min(t+opt.batchSize-1,dataset:size()) do + -- load new sample + local sample = dataset[i] + local input = sample[1]:clone() + local _,target = sample[2]:clone():max(1) + target = target:squeeze() + inputs[k] = input + targets[k] = target + k = k + 1 + end + + -- test samples + local preds = model:forward(inputs) - -- test sample - confusion:add(model:forward(input), target) + -- confusion: + for i = 1,opt.batchSize do + confusion:add(preds[i], targets[i]) + end end -- timing @@ -350,12 +334,6 @@ function test(dataset) print(confusion) testLogger:add{['% mean class accuracy (test set)'] = confusion.totalValid * 100} confusion:zero() - - -- averaged param use? 
- if average then - -- restore parameters - parameters:copy(cachedparams) - end end ---------------------------------------------------------------------- @@ -367,8 +345,10 @@ while true do test(testData) -- plot errors - trainLogger:style{['% mean class accuracy (train set)'] = '-'} - testLogger:style{['% mean class accuracy (test set)'] = '-'} - trainLogger:plot() - testLogger:plot() + if opt.plot then + trainLogger:style{['% mean class accuracy (train set)'] = '-'} + testLogger:style{['% mean class accuracy (test set)'] = '-'} + trainLogger:plot() + testLogger:plot() + end end diff --git a/train-autoencoder/autoencoder-data.lua b/train-autoencoder/autoencoder-data.lua new file mode 100644 index 0000000..806ccd2 --- /dev/null +++ b/train-autoencoder/autoencoder-data.lua @@ -0,0 +1,201 @@ + +local data_verbose = false + +function getdata(datafile, inputsize, std) + local data = torch.DiskFile(datafile,'r'):binary():readObject() + local dataset ={} + + local std = std or 0.2 + local nsamples = data:size(1) + local nrows = data:size(2) + local ncols = data:size(3) + + function dataset:size() + return nsamples + end + + function dataset:selectPatch(nr,nc) + local imageok = false + if simdata_verbose then + print('selectPatch') + end + while not imageok do + --image index + local i = math.ceil(torch.uniform(1e-12,nsamples)) + local im = data:select(1,i) + -- select some patch for original that contains original + pos + local ri = math.ceil(torch.uniform(1e-12,nrows-nr)) + local ci = math.ceil(torch.uniform(1e-12,ncols-nc)) + local patch = im:narrow(1,ri,nr) + patch = patch:narrow(2,ci,nc) + local patchstd = patch:std() + if data_verbose then + print('Image ' .. i .. ' ri= ' .. ri .. ' ci= ' .. ci .. ' std= ' .. patchstd) + end + if patchstd > std then + if data_verbose then + print(patch:min(),patch:max()) + end + return patch,i,im + end + end + end + + local dsample = torch.Tensor(inputsize*inputsize) + + function dataset:conv() + dsample = torch.Tensor(1,inputsize,inputsize) + end + + setmetatable(dataset, {__index = function(self, index) + local sample,i,im = self:selectPatch(inputsize, inputsize) + dsample:copy(sample) + return {dsample,dsample,im} + end}) + return dataset +end + +function getdatacam(inputsize, std) + require 'camera' + local frow = 60 + local fcol = 80 + local gs = 5 + local cam = image.Camera{width=fcol,height=frow} + local dataset ={} + local counter = 1 + + local std = std or 0.2 + local nsamples = 10000 + local gfh = image.gaussian{width=gs,height=1,normalize=true} + local gfv = image.gaussian{width=1,height=gs,normalize=true} + local gf = image.gaussian{width=gs,height=gs,normalize=true} + + function dataset:size() + return nsamples + end + + + local imsq = torch.Tensor() + local lmnh = torch.Tensor() + local lmn = torch.Tensor() + local lmnsqh = torch.Tensor() + local lmnsq = torch.Tensor() + local lvar = torch.Tensor() + local function lcn(im) + local mn = im:mean() + local std = im:std() + if data_verbose then + print('im',mn,std,im:min(),im:max()) + end + im:add(-mn) + im:div(std) + if data_verbose then + print('im',im:min(),im:max(),im:mean(), im:std()) + end + + imsq:resizeAs(im):copy(im):cmul(im) + if data_verbose then + print('imsq',imsq:min(),imsq:max()) + end + + torch.conv2(lmnh,im,gfh) + torch.conv2(lmn,lmnh,gfv) + if data_verbose then + print('lmn',lmn:min(),lmn:max()) + end + + --local lmn = torch.conv2(im,gf) + torch.conv2(lmnsqh,imsq,gfh) + torch.conv2(lmnsq,lmnsqh,gfv) + if data_verbose then + print('lmnsq',lmnsq:min(),lmnsq:max()) + end + + 
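+      -- The next few lines use the identity Var(x) = E[x^2] - (E[x])^2:
+      -- lmnsq holds the Gaussian-smoothed local mean of squares, lmn the
+      -- smoothed local mean, so lvar = lmnsq - lmn^2 (clamped at 0 below).
+      -- Its square root, floored at 1, is the local std used to divisively
+      -- normalize the patch.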
lvar:resizeAs(lmn):copy(lmn):cmul(lmn) + lvar:mul(-1) + lvar:add(lmnsq) + if data_verbose then + print('2',lvar:min(),lvar:max()) + end + + lvar:apply(function (x) if x<0 then return 0 else return x end end) + if data_verbose then + print('2',lvar:min(),lvar:max()) + end + + local lstd = lvar + lstd:sqrt() + lstd:apply(function (x) if x<1 then return 1 else return x end end) + if data_verbose then + print('lstd',lstd:min(),lstd:max()) + end + + local shift = (gs+1)/2 + local nim = im:narrow(1,shift,im:size(1)-(gs-1)):narrow(2,shift,im:size(2)-(gs-1)) + nim:add(-1,lmn) + nim:cdiv(lstd) + if data_verbose then + print('nim',nim:min(),nim:max()) + end + + return nim + end + + function dataset:selectPatch(nr,nc) + local imageok = false + if simdata_verbose then + print('selectPatch') + end + counter = counter + 1 + local imgray = image.rgb2y(cam:forward()) + + local nim = lcn(imgray[1]:clone()) + while not imageok do + + -- select some patch for original that contains original + pos + local ri = math.ceil(torch.uniform(1e-12,nim:size(1)-nr)) + local ci = math.ceil(torch.uniform(1e-12,nim:size(2)-nc)) + local patch = nim:narrow(1,ri,nr) + patch = patch:narrow(2,ci,nc) + local patchstd = patch:std() + if data_verbose then + print('Image ' .. 0 .. ' ri= ' .. ri .. ' ci= ' .. ci .. ' std= ' .. patchstd) + end + if patchstd > std then + if data_verbose then + print(patch:min(),patch:max()) + end + return patch,i,nim + end + end + end + + local dsample = torch.Tensor(inputsize*inputsize) + setmetatable(dataset, {__index = function(self, index) + local sample,i,im = self:selectPatch(inputsize, inputsize) + dsample:copy(sample) + return {dsample,dsample,im} + end}) + return dataset +end + +-- dataset, dataset=createDataset(....) +-- nsamples, how many samples to display from dataset +-- nrow, number of samples per row for displaying samples +-- zoom, zoom at which to draw dataset +function displayData(dataset, nsamples, nrow, zoom) + require 'image' + local nsamples = nsamples or 100 + local zoom = zoom or 1 + local nrow = nrow or 10 + + cntr = 1 + local ex = {} + for i=1,nsamples do + local exx = dataset[1] + ex[cntr] = exx[1]:clone():unfold(1,math.sqrt(exx[1]:size(1)),math.sqrt(exx[1]:size(1))) + cntr = cntr + 1 + end + + return image.display{image=ex, padding=2, symmetric=true, zoom=zoom, nrow=nrow, legend='Training Data'} +end diff --git a/train-autoencoder/train-autoencoder.lua b/train-autoencoder/train-autoencoder.lua new file mode 100644 index 0000000..4edc6f5 --- /dev/null +++ b/train-autoencoder/train-autoencoder.lua @@ -0,0 +1,394 @@ +---------------------------------------------------------------------- +-- This script shows how to train autoencoders on natural images, +-- using the unsup package. +-- +-- Borrowed from Koray Kavukcuoglu's unsup demos +-- +-- In this script, we demonstrate the use of different types of +-- autoencoders. Learned filters can be visualized by providing the +-- flag -display. +-- +-- Note: simple auto-encoders (with no sparsity constraint on the code) typically +-- don't yield filters that are visually appealing, although they might be +-- minimizing the reconstruction error correctly. +-- +-- We demonstrate 2 types of auto-encoders: +-- * plain: regular auto-encoder +-- * predictive sparse decomposition (PSD): the encoder is trained +-- to predict an optimal sparse decomposition of the input +-- +-- Both types of auto-encoders can use linear or convolutional +-- encoders/decoders. 
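+-- (In the PSD variants below, the decoder is an L1 sparse-coding module
+-- solved with FISTA, and the encoder is trained to predict its optimal
+-- sparse code; the -lambda option weighs the sparsity penalty and -beta
+-- the prediction error.)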
The convolutional version typically yields more +-- interesting, less redundant filters for images. +-- +-- Koray Kavukcuoglu, Clement Farabet +---------------------------------------------------------------------- + +require 'unsup' +require 'image' +require 'optim' +require 'autoencoder-data' + +---------------------------------------------------------------------- +-- parse command-line options +-- +cmd = torch.CmdLine() +cmd:text() +cmd:text('Training a simple sparse coding dictionary on Berkeley images') +cmd:text() +cmd:text('Options') +-- general options: +cmd:option('-dir', 'outputs', 'subdirectory to save experiments in') +cmd:option('-seed', 1, 'initial random seed') +cmd:option('-threads', 2, 'threads') + +-- for all models: +cmd:option('-model', 'conv-psd', 'auto-encoder class: linear | linear-psd | conv | conv-psd') +cmd:option('-inputsize', 25, 'size of each input patch') +cmd:option('-nfiltersin', 1, 'number of input convolutional filters') +cmd:option('-nfiltersout', 16, 'number of output convolutional filters') +cmd:option('-lambda', 1, 'sparsity coefficient') +cmd:option('-beta', 1, 'prediction error coefficient') +cmd:option('-eta', 2e-3, 'learning rate') +cmd:option('-batchsize', 1, 'batch size') +cmd:option('-etadecay', 1e-5, 'learning rate decay') +cmd:option('-momentum', 0, 'gradient momentum') +cmd:option('-maxiter', 1000000, 'max number of updates') + +-- use hessian information for training: +cmd:option('-hessian', true, 'compute diagonal hessian coefficients to condition learning rates') +cmd:option('-hessiansamples', 500, 'number of samples to use to estimate hessian') +cmd:option('-hessianinterval', 10000, 'compute diagonal hessian coefs at every this many samples') +cmd:option('-minhessian', 0.02, 'min hessian to avoid extreme speed up') +cmd:option('-maxhessian', 500, 'max hessian to avoid extreme slow down') + +-- for conv models: +cmd:option('-kernelsize', 9, 'size of convolutional kernels') + +-- logging: +cmd:option('-datafile', 'http://data.neuflow.org/data/tr-berkeley-N5K-M56x56-lcn.bin', 'Dataset URL') +cmd:option('-statinterval', 5000, 'interval for saving stats and models') +cmd:option('-v', false, 'be verbose') +cmd:option('-display', false, 'display stuff') +cmd:option('-wcar', '', 'additional flag to differentiate this run') +cmd:text() + +params = cmd:parse(arg) + +rundir = cmd:string('psd', params, {dir=true}) +params.rundir = params.dir .. '/' .. rundir + +if paths.dirp(params.rundir) then + os.execute('rm -r ' .. params.rundir) +end +os.execute('mkdir -p ' .. params.rundir) +cmd:addTime('psd') +cmd:log(params.rundir .. '/log.txt', params) + +torch.manualSeed(params.seed) + +torch.setnumthreads(params.threads) + +---------------------------------------------------------------------- +-- load data +-- +filename = paths.basename(params.datafile) +if not paths.filep(filename) then + os.execute('wget ' .. params.datafile .. '; '.. 'tar xvf ' .. 
filename) +end +dataset = getdata(filename, params.inputsize) + +if params.display then + displayData(dataset, 100, 10, 2) +end + +---------------------------------------------------------------------- +-- create model +-- +if params.model == 'linear' then + + -- params + inputSize = params.inputsize*params.inputsize + outputSize = params.nfiltersout + + -- encoder + encoder = nn.Sequential() + encoder:add(nn.Linear(inputSize,outputSize)) + encoder:add(nn.Tanh()) + encoder:add(nn.Diag(outputSize)) + + -- decoder + decoder = nn.Sequential() + decoder:add(nn.Linear(outputSize,inputSize)) + + -- complete model + module = unsup.AutoEncoder(encoder, decoder, params.beta) + + -- verbose + print('==> constructed linear auto-encoder') + +elseif params.model == 'conv' then + + -- params: + conntable = nn.tables.full(params.nfiltersin, params.nfiltersout) + kw, kh = params.kernelsize, params.kernelsize + iw, ih = params.inputsize, params.inputsize + + -- connection table: + local decodertable = conntable:clone() + decodertable[{ {},1 }] = conntable[{ {},2 }] + decodertable[{ {},2 }] = conntable[{ {},1 }] + local outputFeatures = conntable[{ {},2 }]:max() + + -- encoder: + encoder = nn.Sequential() + encoder:add(nn.SpatialConvolutionMap(conntable, kw, kh, 1, 1)) + encoder:add(nn.Tanh()) + encoder:add(nn.Diag(outputFeatures)) + + -- decoder: + decoder = nn.Sequential() + decoder:add(nn.SpatialFullConvolutionMap(decodertable, kw, kh, 1, 1)) + + -- complete model + module = unsup.AutoEncoder(encoder, decoder, params.beta) + + -- convert dataset to convolutional (returns 1xKxK tensors (3D), instead of K*K (1D)) + dataset:conv() + + -- verbose + print('==> constructed convolutional auto-encoder') + +elseif params.model == 'linear-psd' then + + -- params + inputSize = params.inputsize*params.inputsize + outputSize = params.nfiltersout + + -- encoder + encoder = nn.Sequential() + encoder:add(nn.Linear(inputSize,outputSize)) + encoder:add(nn.Tanh()) + encoder:add(nn.Diag(outputSize)) + + -- decoder is L1 solution + decoder = unsup.LinearFistaL1(inputSize, outputSize, params.lambda) + + -- PSD autoencoder + module = unsup.PSD(encoder, decoder, params.beta) + + -- verbose + print('==> constructed linear predictive sparse decomposition (PSD) auto-encoder') + +elseif params.model == 'conv-psd' then + + -- params: + conntable = nn.tables.full(params.nfiltersin, params.nfiltersout) + kw, kh = params.kernelsize, params.kernelsize + iw, ih = params.inputsize, params.inputsize + + -- connection table: + local decodertable = conntable:clone() + decodertable[{ {},1 }] = conntable[{ {},2 }] + decodertable[{ {},2 }] = conntable[{ {},1 }] + local outputFeatures = conntable[{ {},2 }]:max() + + -- encoder: + encoder = nn.Sequential() + encoder:add(nn.SpatialConvolutionMap(conntable, kw, kh, 1, 1)) + encoder:add(nn.Tanh()) + encoder:add(nn.Diag(outputFeatures)) + + -- decoder is L1 solution: + decoder = unsup.SpatialConvFistaL1(decodertable, kw, kh, iw, ih, params.lambda) + + -- PSD autoencoder + module = unsup.PSD(encoder, decoder, params.beta) + + -- convert dataset to convolutional (returns 1xKxK tensors (3D), instead of K*K (1D)) + dataset:conv() + + -- verbose + print('==> constructed convolutional predictive sparse decomposition (PSD) auto-encoder') + +else + print('==> unknown model: ' .. params.model) + os.exit() +end + +---------------------------------------------------------------------- +-- trainable parameters +-- + +-- are we using the hessian? 
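+-- (When -hessian is set, the diagonal Hessian terms are estimated on a few
+-- samples at regular intervals in the training loop below, clamped to
+-- [minhessian, maxhessian], and inverted to produce per-parameter learning
+-- rates: the 'etas' passed to optim.sgd.)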
+if params.hessian then + module:initDiagHessianParameters() +end + +-- get all parameters +x,dl_dx,ddl_ddx = module:getParameters() + +---------------------------------------------------------------------- +-- train model +-- + +print('==> training model') + +local avTrainingError = torch.FloatTensor(math.ceil(params.maxiter/params.statinterval)):zero() +local err = 0 +local iter = 0 + +for t = 1,params.maxiter,params.batchsize do + + -------------------------------------------------------------------- + -- update diagonal hessian parameters + -- + if params.hessian and math.fmod(t , params.hessianinterval) == 1 then + -- some extra vars: + local hessiansamples = params.hessiansamples + local minhessian = params.minhessian + local maxhessian = params.maxhessian + local ddl_ddx_avg = ddl_ddx:clone(ddl_ddx):zero() + etas = etas or ddl_ddx:clone() + + print('==> estimating diagonal hessian elements') + for i = 1,hessiansamples do + -- next + local ex = dataset[i] + local input = ex[1] + local target = ex[2] + module:updateOutput(input, target) + + -- gradient + dl_dx:zero() + module:updateGradInput(input, target) + module:accGradParameters(input, target) + + -- hessian + ddl_ddx:zero() + module:updateDiagHessianInput(input, target) + module:accDiagHessianParameters(input, target) + + -- accumulate + ddl_ddx_avg:add(1/hessiansamples, ddl_ddx) + end + + -- cap hessian params + print('==> ddl/ddx : min/max = ' .. ddl_ddx_avg:min() .. '/' .. ddl_ddx_avg:max()) + ddl_ddx_avg[torch.lt(ddl_ddx_avg,minhessian)] = minhessian + ddl_ddx_avg[torch.gt(ddl_ddx_avg,maxhessian)] = maxhessian + print('==> corrected ddl/ddx : min/max = ' .. ddl_ddx_avg:min() .. '/' .. ddl_ddx_avg:max()) + + -- generate learning rates + etas:fill(1):cdiv(ddl_ddx_avg) + end + + -------------------------------------------------------------------- + -- progress + -- + iter = iter+1 + xlua.progress(iter, params.statinterval) + + -------------------------------------------------------------------- + -- create mini-batch + -- + local example = dataset[t] + local inputs = {} + local targets = {} + for i = t,t+params.batchsize-1 do + -- load new sample + local sample = dataset[i] + local input = sample[1]:clone() + local target = sample[2]:clone() + table.insert(inputs, input) + table.insert(targets, target) + end + + -------------------------------------------------------------------- + -- define eval closure + -- + local feval = function() + -- reset gradient/f + local f = 0 + dl_dx:zero() + + -- estimate f and gradients, for minibatch + for i = 1,#inputs do + -- f + f = f + module:updateOutput(inputs[i], targets[i]) + + -- gradients + module:updateGradInput(inputs[i], targets[i]) + module:accGradParameters(inputs[i], targets[i]) + end + + -- normalize + dl_dx:div(#inputs) + f = f/#inputs + + -- return f and df/dx + return f,dl_dx + end + + -------------------------------------------------------------------- + -- one SGD step + -- + sgdconf = sgdconf or {learningRate = params.eta, + learningRateDecay = params.etadecay, + learningRates = etas, + momentum = params.momentum} + _,fs = optim.sgd(feval, x, sgdconf) + err = err + fs[1] + + -- normalize + if params.model:find('psd') then + module:normalize() + end + + -------------------------------------------------------------------- + -- compute statistics / report error + -- + if math.fmod(t , params.statinterval) == 0 then + + -- report + print('==> iteration = ' .. t .. ', average loss = ' .. 
err/params.statinterval) + + -- get weights + eweight = module.encoder.modules[1].weight + if module.decoder.D then + dweight = module.decoder.D.weight + else + dweight = module.decoder.modules[1].weight + end + + -- reshape weights if linear matrix is used + if params.model:find('linear') then + dweight = dweight:transpose(1,2):unfold(2,params.inputsize,params.inputsize) + eweight = eweight:unfold(2,params.inputsize,params.inputsize) + end + + -- render filters + dd = image.toDisplayTensor{input=dweight, + padding=2, + nrow=math.floor(math.sqrt(params.nfiltersout)), + symmetric=true} + de = image.toDisplayTensor{input=eweight, + padding=2, + nrow=math.floor(math.sqrt(params.nfiltersout)), + symmetric=true} + + -- live display + if params.display then + _win1_ = image.display{image=dd, win=_win1_, legend='Decoder filters', zoom=2} + _win2_ = image.display{image=de, win=_win2_, legend='Encoder filters', zoom=2} + end + + -- save stuff + image.save(params.rundir .. '/filters_dec_' .. t .. '.jpg', dd) + image.save(params.rundir .. '/filters_enc_' .. t .. '.jpg', de) + torch.save(params.rundir .. '/model_' .. t .. '.bin', module) + + -- reset counters + err = 0; iter = 0 + end +end diff --git a/train-on-cifar/train-on-cifar.lua b/train-on-cifar/train-on-cifar.lua old mode 100755 new mode 100644 index b7f773e..1c85d46 --- a/train-on-cifar/train-on-cifar.lua +++ b/train-on-cifar/train-on-cifar.lua @@ -20,7 +20,6 @@ require 'nn' require 'nnx' require 'optim' require 'image' -require 'mattorch' ---------------------------------------------------------------------- -- parse command-line options @@ -44,20 +43,16 @@ cmd:option('-weightDecay', 0, 'weight decay (SGD only)') cmd:option('-momentum', 0, 'momentum (SGD only)') cmd:option('-t0', 1, 'start averaging at t0 (ASGD only), in nb of epochs') cmd:option('-maxIter', 5, 'maximum nb of iterations for CG and LBFGS') -cmd:option('-openmp', false, 'use OpenMP to //') -cmd:option('-threads', 2, 'nb of threads to use with OpenMP') +cmd:option('-threads', 2, 'nb of threads to use') cmd:text() opt = cmd:parse(arg) -- fix seed torch.manualSeed(opt.seed) --- openmp -if opt.openmp then - require 'openmp' - openmp.setDefaultNumThreads(opt.threads) - print(' enabled with ' .. opt.threads .. ' threads') -end +-- threads +torch.setnumthreads(opt.threads) +print(' set nb of threads to ' .. 
opt.threads) ---------------------------------------------------------------------- -- define model to train @@ -72,43 +67,19 @@ if opt.network == '' then if opt.model == 'convnet' then ------------------------------------------------------------ - -- convolutional network - -- this is a typical convolutional network for vision: - -- 1/ the image is transformed into Y-UV space - -- 2/ the Y (luminance) channel is locally normalized - -- 3/ the first layer allocates for filters to the Y - -- channels, and just a few to the U and V channels - -- 4/ the first two stages features are locally pooled - -- using a max-operator - -- 5/ a two-layer neural network is applied on the - -- representation + -- convolutional network ------------------------------------------------------------ - -- reshape vector into a 3-channel image (RGB) - model:add(nn.Reshape(3,32,32)) - -- stage 0 : RGB -> YUV -> normalize(Y) - model:add(nn.SpatialColorTransform('rgb2yuv')) - do - ynormer = nn.Sequential() - ynormer:add(nn.Narrow(1,1,1)) - ynormer:add(nn.SpatialContrastiveNormalization(1, image.gaussian1D(7))) - normer = nn.ConcatTable() - normer:add(ynormer) - normer:add(nn.Narrow(1,2,2)) - end - model:add(normer) - model:add(nn.JoinTable(1)) -- stage 1 : mean+std normalization -> filter bank -> squashing -> max pooling - local table = torch.Tensor{ {1,1},{1,2},{1,3},{1,4},{1,5},{1,6},{1,7},{1,8},{2,9},{2,10},{3,11},{3,12} } - model:add(nn.SpatialConvolutionMap(table, 5, 5)) + model:add(nn.SpatialConvolutionMap(nn.tables.random(3,16,1), 5, 5)) model:add(nn.Tanh()) model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) -- stage 2 : filter bank -> squashing -> max pooling - model:add(nn.SpatialConvolutionMap(nn.tables.random(12, 32, 4), 5, 5)) + model:add(nn.SpatialConvolutionMap(nn.tables.random(16, 256, 4), 5, 5)) model:add(nn.Tanh()) model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) -- stage 3 : standard 2-layer neural network - model:add(nn.Reshape(32*5*5)) - model:add(nn.Linear(32*5*5, 128)) + model:add(nn.Reshape(256*5*5)) + model:add(nn.Linear(256*5*5, 128)) model:add(nn.Tanh()) model:add(nn.Linear(128,#classes)) ------------------------------------------------------------ @@ -117,6 +88,7 @@ if opt.network == '' then ------------------------------------------------------------ -- regular 2-layer MLP ------------------------------------------------------------ + model:add(nn.Reshape(3*32*32)) model:add(nn.Linear(3*32*32, 1*32*32)) model:add(nn.Tanh()) model:add(nn.Linear(1*32*32, #classes)) @@ -126,6 +98,7 @@ if opt.network == '' then ------------------------------------------------------------ -- simple linear model: logistic regression ------------------------------------------------------------ + model:add(nn.Reshape(3*32*32)) model:add(nn.Linear(3*32*32,#classes)) ------------------------------------------------------------ @@ -164,25 +137,27 @@ else tesize = 1000 end -if not paths.dirp('cifar-10-batches-mat') then - local www = 'http://www.cs.toronto.edu/~kriz/cifar-10-matlab.tar.gz' +-- download dataset +if not paths.dirp('cifar-10-batches-t7') then + local www = 'http://data.neuflow.org/data/cifar-10-torch.tar.gz' local tar = sys.basename(www) os.execute('wget ' .. www .. '; '.. 'tar xvf ' .. tar) end +-- load dataset trainData = { data = torch.Tensor(50000, 3072), labels = torch.Tensor(50000), size = function() return trsize end } for i = 0,4 do - subset = mattorch.load('cifar-10-batches-mat/data_batch_' .. (i+1) .. '.mat') + subset = torch.load('cifar-10-batches-t7/data_batch_' .. (i+1) .. 
'.t7', 'ascii') trainData.data[{ {i*10000+1, (i+1)*10000} }] = subset.data:t() trainData.labels[{ {i*10000+1, (i+1)*10000} }] = subset.labels end trainData.labels = trainData.labels + 1 -subset = mattorch.load('cifar-10-batches-mat/test_batch.mat') +subset = torch.load('cifar-10-batches-t7/test_batch.t7', 'ascii') testData = { data = subset.data:t():double(), labels = subset.labels[1]:double(), @@ -190,12 +165,60 @@ testData = { } testData.labels = testData.labels + 1 +-- resize dataset (if using small version) trainData.data = trainData.data[{ {1,trsize} }] trainData.labels = trainData.labels[{ {1,trsize} }] testData.data = testData.data[{ {1,tesize} }] testData.labels = testData.labels[{ {1,tesize} }] +-- reshape data +trainData.data = trainData.data:reshape(trsize,3,32,32) +testData.data = testData.data:reshape(tesize,3,32,32) + +---------------------------------------------------------------------- +-- preprocess/normalize train/test sets +-- + +print ' preprocessing data (color space + normalization)' + +-- preprocess trainSet +normalization = nn.SpatialContrastiveNormalization(1, image.gaussian1D(7)) +for i = 1,trainData:size() do + -- rgb -> yuv + local rgb = trainData.data[i] + local yuv = image.rgb2yuv(rgb) + -- normalize y locally: + yuv[1] = normalization(yuv[{{1}}]) + trainData.data[i] = yuv +end +-- normalize u globally: +mean_u = trainData.data[{ {},2,{},{} }]:mean() +std_u = trainData.data[{ {},2,{},{} }]:std() +trainData.data[{ {},2,{},{} }]:add(-mean_u) +trainData.data[{ {},2,{},{} }]:div(-std_u) +-- normalize v globally: +mean_v = trainData.data[{ {},3,{},{} }]:mean() +std_v = trainData.data[{ {},3,{},{} }]:std() +trainData.data[{ {},3,{},{} }]:add(-mean_v) +trainData.data[{ {},3,{},{} }]:div(-std_v) + +-- preprocess testSet +for i = 1,testData:size() do + -- rgb -> yuv + local rgb = testData.data[i] + local yuv = image.rgb2yuv(rgb) + -- normalize y locally: + yuv[{1}] = normalization(yuv[{{1}}]) + testData.data[i] = yuv +end +-- normalize u globally: +testData.data[{ {},2,{},{} }]:add(-mean_u) +testData.data[{ {},2,{},{} }]:div(-std_u) +-- normalize v globally: +testData.data[{ {},3,{},{} }]:add(-mean_v) +testData.data[{ {},3,{},{} }]:div(-std_v) + ---------------------------------------------------------------------- -- define training and testing functions -- diff --git a/train-on-housenumbers/train-on-housenumbers.lua b/train-on-housenumbers/train-on-housenumbers.lua new file mode 100644 index 0000000..6106d06 --- /dev/null +++ b/train-on-housenumbers/train-on-housenumbers.lua @@ -0,0 +1,413 @@ +---------------------------------------------------------------------- +-- This script shows how to train different models on the street +-- view house number dataset, +-- using multiple optimization techniques (SGD, ASGD, CG) +-- +-- This script demonstrates a classical example of training +-- well-known models (convnet, MLP, logistic regression) +-- on a 10-class classification problem. +-- +-- It illustrates several points: +-- 1/ description of the model +-- 2/ choice of a loss function (criterion) to minimize +-- 3/ creation of a dataset as a simple Lua table +-- 4/ description of training and test procedures +-- +-- Note: the architecture of the convnet is based on Pierre Sermanet's +-- work on this dataset (http://arxiv.org/abs/1204.3968). In particular +-- the use of LP-pooling (with P=2) has a very positive impact on +-- generalization. 
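+-- (LP-pooling with P=2 aggregates each pooling region through an L2-style
+-- norm, i.e. a root of the summed squares of its inputs, instead of a max;
+-- see the nn.SpatialLPPooling layers used below.)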
Normalization is not done exactly as proposed in +-- the paper, and low-level (first layer) features are not fed to +-- the classifier. +-- +-- Clement Farabet +---------------------------------------------------------------------- + +require 'torch' +require 'nn' +require 'nnx' +require 'optim' +require 'image' + +---------------------------------------------------------------------- +-- parse command-line options +-- +dname,fname = sys.fpath() +cmd = torch.CmdLine() +cmd:text() +cmd:text('HouseNumber Training') +cmd:text() +cmd:text('Options:') +cmd:option('-save', fname:gsub('.lua',''), 'subdirectory to save/log experiments in') +cmd:option('-network', '', 'reload pretrained network') +cmd:option('-extra', false, 'use extra training samples dataset (~500,000 extra training samples)') +cmd:option('-visualize', false, 'visualize input data and weights during training') +cmd:option('-seed', 1, 'fixed input seed for repeatable experiments') +cmd:option('-plot', false, 'live plot') +cmd:option('-optimization', 'SGD', 'optimization method: SGD | ASGD | CG | LBFGS') +cmd:option('-learningRate', 1e-3, 'learning rate at t=0') +cmd:option('-batchSize', 1, 'mini-batch size (1 = pure stochastic)') +cmd:option('-weightDecay', 0, 'weight decay (SGD only)') +cmd:option('-momentum', 0, 'momentum (SGD only)') +cmd:option('-t0', 1, 'start averaging at t0 (ASGD only), in nb of epochs') +cmd:option('-maxIter', 2, 'maximum nb of iterations for CG and LBFGS') +cmd:option('-threads', 2, 'nb of threads to use') +cmd:text() +opt = cmd:parse(arg) + +-- fix seed +torch.manualSeed(opt.seed) + +-- threads +torch.setnumthreads(opt.threads) +print(' set nb of threads to ' .. opt.threads) + +---------------------------------------------------------------------- +-- define model to train +-- on the 10-class classification problem +-- +classes = {'1','2','3','4','5','6','7','8','9','0'} + +if opt.network == '' then + ------------------------------------------------------------ + -- convolutional network + -- this is a typical convolutional network for vision: + -- 1/ the image is transformed into Y-UV space + -- 2/ the Y (luminance) channel is locally normalized, + -- while the U/V channels are more loosely normalized + -- 3/ the first two stages features are locally pooled + -- using LP-pooling (P=2) + -- 4/ a two-layer neural network is applied on the + -- representation + ------------------------------------------------------------ + -- top container + model = nn.Sequential() + -- stage 1 : filter bank -> squashing -> max pooling + model:add(nn.SpatialConvolutionMap(nn.tables.random(3,16,1), 5, 5)) + model:add(nn.Tanh()) + model:add(nn.SpatialLPPooling(16,2,2,2,2,2)) + -- stage 2 : filter bank -> squashing -> max pooling + model:add(nn.SpatialSubtractiveNormalization(16, image.gaussian1D(7))) + model:add(nn.SpatialConvolutionMap(nn.tables.random(16, 256, 4), 5, 5)) + model:add(nn.Tanh()) + model:add(nn.SpatialLPPooling(256,2,2,2,2,2)) + -- stage 3 : standard 2-layer neural network + model:add(nn.SpatialSubtractiveNormalization(256, image.gaussian1D(7))) + model:add(nn.Reshape(256*5*5)) + model:add(nn.Linear(256*5*5, 128)) + model:add(nn.Tanh()) + model:add(nn.Linear(128,#classes)) + model:add(nn.LogSoftMax()) + ------------------------------------------------------------ +else + print(' reloading previously trained network') + model = torch.load(opt.network) +end + +-- retrieve parameters and gradients +parameters,gradParameters = model:getParameters() + +-- verbose +print(' using model:') +print(model) + 
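+-- Shape sketch for the convnet above (assuming the 3x32x32 YUV inputs
+-- prepared further below; the numbers simply restate the layer sizes used):
+--   3x32x32 -> conv 5x5 -> 16x28x28 -> L2-pool 2x2 -> 16x14x14
+--           -> conv 5x5 -> 256x10x10 -> L2-pool 2x2 -> 256x5x5
+-- which is why the classifier starts with nn.Reshape(256*5*5).
+-- Optional sanity check:
+--   print(#model:forward(torch.randn(3,32,32)))   -- 10-class log-probabilities
+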
+---------------------------------------------------------------------- +-- loss function: negative log-likelihood +-- +criterion = nn.ClassNLLCriterion() + +---------------------------------------------------------------------- +-- get/create dataset +-- +if opt.extra then + trsize = 73257 + 531131 + tesize = 26032 +else + print ' WARNING: using reduced train set' + print '(use -extra to use complete training set, with extra samples)' + trsize = 73257 + tesize = 26032 +end + +www = 'http://data.neuflow.org/data/housenumbers/' +train_file = 'train_32x32.t7' +test_file = 'test_32x32.t7' +extra_file = 'extra_32x32.t7' +if not paths.filep(train_file) then + os.execute('wget ' .. www .. train_file) +end +if not paths.filep(test_file) then + os.execute('wget ' .. www .. test_file) +end +if opt.extra and not paths.filep(extra_file) then + os.execute('wget ' .. www .. extra_file) +end + +loaded = torch.load(train_file,'ascii') +trainData = { + data = loaded.X:transpose(3,4), + labels = loaded.y[1], + size = function() return trsize end +} + +if opt.extra then + loaded = torch.load(extra_file,'ascii') + trdata = torch.Tensor(trsize,3,32,32) + trdata[{ {1,(#trainData.data)[1]} }] = trainData.data + trdata[{ {(#trainData.data)[1]+1,-1} }] = loaded.X:transpose(3,4) + trlabels = torch.Tensor(trsize) + trlabels[{ {1,(#trainData.labels)[1]} }] = trainData.labels + trlabels[{ {(#trainData.labels)[1]+1,-1} }] = loaded.y[1] + trainData = { + data = trdata, + labels = trlabels, + size = function() return trsize end + } +end + +loaded = torch.load(test_file,'ascii') +testData = { + data = loaded.X:transpose(3,4), + labels = loaded.y[1], + size = function() return tesize end +} + +---------------------------------------------------------------------- +-- preprocess/normalize train/test sets +-- + +print ' preprocessing data (color space + normalization)' + +-- preprocess requires floating point +trainData.data = trainData.data:float() +testData.data = testData.data:float() + +-- preprocess trainSet +normalization = nn.SpatialContrastiveNormalization(1, image.gaussian1D(7)):float() +for i = 1,trainData:size() do + -- rgb -> yuv + local rgb = trainData.data[i] + local yuv = image.rgb2yuv(rgb) + -- normalize y locally: + yuv[1] = normalization(yuv[{{1}}]) + trainData.data[i] = yuv +end +-- normalize u globally: +mean_u = trainData.data[{ {},2,{},{} }]:mean() +std_u = trainData.data[{ {},2,{},{} }]:std() +trainData.data[{ {},2,{},{} }]:add(-mean_u) +trainData.data[{ {},2,{},{} }]:div(-std_u) +-- normalize v globally: +mean_v = trainData.data[{ {},3,{},{} }]:mean() +std_v = trainData.data[{ {},3,{},{} }]:std() +trainData.data[{ {},3,{},{} }]:add(-mean_v) +trainData.data[{ {},3,{},{} }]:div(-std_v) + +-- preprocess testSet +for i = 1,testData:size() do + -- rgb -> yuv + local rgb = testData.data[i] + local yuv = image.rgb2yuv(rgb) + -- normalize y locally: + yuv[{1}] = normalization(yuv[{{1}}]) + testData.data[i] = yuv +end +-- normalize u globally: +testData.data[{ {},2,{},{} }]:add(-mean_u) +testData.data[{ {},2,{},{} }]:div(-std_u) +-- normalize v globally: +testData.data[{ {},3,{},{} }]:add(-mean_v) +testData.data[{ {},3,{},{} }]:div(-std_v) + +---------------------------------------------------------------------- +-- define training and testing functions +-- + +-- this matrix records the current confusion across classes +confusion = optim.ConfusionMatrix(classes) + +-- log results to files +trainLogger = optim.Logger(paths.concat(opt.save, 'train.log')) +testLogger = optim.Logger(paths.concat(opt.save, 
'test.log')) + +-- display +if opt.visualize then + require 'image' + local trset = trainData.data[{ {1,100} }] + local teset = testData.data[{ {1,100} }] + image.display{image=trset, legend='training set', nrow=10, padding=1} + image.display{image=teset, legend='test set', nrow=10, padding=1} +end + +-- training function +function train(dataset) + -- epoch tracker + epoch = epoch or 1 + + -- local vars + local time = sys.clock() + + -- shuffle at each epoch + shuffle = torch.randperm(trsize) + + -- do one epoch + print(' on training set:') + print(" online epoch # " .. epoch .. ' [batchSize = ' .. opt.batchSize .. ']') + for t = 1,dataset:size(),opt.batchSize do + -- disp progress + xlua.progress(t, dataset:size()) + + -- create mini batch + local inputs = {} + local targets = {} + for i = t,math.min(t+opt.batchSize-1,dataset:size()) do + -- load new sample + local input = dataset.data[shuffle[i]]:double() + local target = dataset.labels[shuffle[i]] + table.insert(inputs, input) + table.insert(targets, target) + end + + -- create closure to evaluate f(X) and df/dX + local feval = function(x) + -- get new parameters + if x ~= parameters then + parameters:copy(x) + end + + -- reset gradients + gradParameters:zero() + + -- f is the average of all criterions + local f = 0 + + -- evaluate function for complete mini batch + for i = 1,#inputs do + -- estimate f + local output = model:forward(inputs[i]) + local err = criterion:forward(output, targets[i]) + f = f + err + + -- estimate df/dW + local df_do = criterion:backward(output, targets[i]) + model:backward(inputs[i], df_do) + + -- update confusion + confusion:add(output, targets[i]) + end + + -- normalize gradients and f(X) + gradParameters:div(#inputs) + f = f/#inputs + + -- return f and df/dX + return f,gradParameters + end + + -- optimize on current mini-batch + if opt.optimization == 'CG' then + config = config or {maxIter = opt.maxIter} + optim.cg(feval, parameters, config) + + elseif opt.optimization == 'LBFGS' then + config = config or {learningRate = opt.learningRate, + maxIter = opt.maxIter, + nCorrection = 10} + optim.lbfgs(feval, parameters, config) + + elseif opt.optimization == 'SGD' then + config = config or {learningRate = opt.learningRate, + weightDecay = opt.weightDecay, + momentum = opt.momentum, + learningRateDecay = 5e-7} + optim.sgd(feval, parameters, config) + + elseif opt.optimization == 'ASGD' then + config = config or {eta0 = opt.learningRate, + t0 = trsize * opt.t0} + _,_,average = optim.asgd(feval, parameters, config) + + else + error('unknown optimization method') + end + end + + -- time taken + time = sys.clock() - time + time = time / dataset:size() + print(" time to learn 1 sample = " .. (time*1000) .. 'ms') + + -- print confusion matrix + print(confusion) + trainLogger:add{['% mean class accuracy (train set)'] = confusion.totalValid * 100} + confusion:zero() + + -- save/log current net + local filename = paths.concat(opt.save, 'house.net') + os.execute('mkdir -p ' .. sys.dirname(filename)) + print(' saving network to '..filename) + torch.save(filename, model) + + -- next epoch + epoch = epoch + 1 +end + +-- test function +function test(dataset) + -- local vars + local time = sys.clock() + + -- averaged param use? 
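+   -- when running with -optimization ASGD, optim.asgd returns a running average of the
+   -- parameters (stored in 'average' by train() above); the test set is evaluated with
+   -- that averaged vector, and the raw parameters are restored at the end of this function.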
+   if average then
+      cachedparams = parameters:clone()
+      parameters:copy(average)
+   end
+
+   -- test over given dataset
+   print(' on testing set:')
+   for t = 1,dataset:size() do
+      -- disp progress
+      xlua.progress(t, dataset:size())
+
+      -- get new sample
+      local input = dataset.data[t]:double()
+      local target = dataset.labels[t]
+
+      -- test sample
+      local pred = model:forward(input)
+      confusion:add(pred, target)
+   end
+
+   -- timing
+   time = sys.clock() - time
+   time = time / dataset:size()
+   print(" time to test 1 sample = " .. (time*1000) .. 'ms')
+
+   -- print confusion matrix
+   print(confusion)
+   testLogger:add{['% mean class accuracy (test set)'] = confusion.totalValid * 100}
+   confusion:zero()
+
+   -- averaged param use?
+   if average then
+      -- restore parameters
+      parameters:copy(cachedparams)
+   end
+end
+
+----------------------------------------------------------------------
+-- and train!
+--
+while true do
+   -- train/test
+   train(trainData)
+   test(testData)
+
+   -- plot errors
+   trainLogger:style{['% mean class accuracy (train set)'] = '-'}
+   testLogger:style{['% mean class accuracy (test set)'] = '-'}
+   if opt.plot then
+      trainLogger:plot()
+      testLogger:plot()
+   end
+end
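+
+----------------------------------------------------------------------
+-- usage notes (illustrative only; the interpreter name is an assumption, any Torch7
+-- front-end such as `torch` or `qlua` that passes script arguments in `arg` will work):
+--
+--   torch train-on-housenumbers.lua                                    -- plain SGD, batch size 1
+--   torch train-on-housenumbers.lua -optimization LBFGS -batchSize 128 -maxIter 5
+--   torch train-on-housenumbers.lua -extra -optimization ASGD -t0 2
+--
+-- the train/test loop above runs until interrupted; mean class accuracies are appended
+-- to train.log and test.log in the -save directory after every epoch.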