From cde5bf0d946ba7866d2061add952fc282a1cf579 Mon Sep 17 00:00:00 2001 From: yechengxi Date: Tue, 22 Aug 2017 14:19:29 +0800 Subject: [PATCH] second_order_training --- CoreModules/activations/modu.m | 3 + CoreModules/optim/adagrad.m | 31 +++++--- CoreModules/optim/adam.m | 63 ++++++++------- CoreModules/optim/gradient_decorrelation.m | 77 +++++++++++++++++++ CoreModules/optim/rmsprop.m | 49 ++++++------ CoreModules/optim/sgd.m | 31 +++++--- CoreModules/optim/sgd2.m | 38 +++++---- Log.txt | 3 + README.md | 61 +++++++-------- .../Main_Cart_Pole_Policy_Network.m | 9 ++- 10 files changed, 244 insertions(+), 121 deletions(-) create mode 100644 CoreModules/optim/gradient_decorrelation.m diff --git a/CoreModules/activations/modu.m b/CoreModules/activations/modu.m index 283491e..c74b60c 100644 --- a/CoreModules/activations/modu.m +++ b/CoreModules/activations/modu.m @@ -1,4 +1,7 @@ function y = modu(x,dzdy) +%ModU activation function +%Ye, C., Yang, Y., Fermuller, C., & Aloimonos, Y. (2017). +%On the Importance of Consistency in Training Deep Neural Networks. arXiv preprint arXiv:1708.00631. if nargin <= 1 || isempty(dzdy) y = abs(x) ; diff --git a/CoreModules/optim/adagrad.m b/CoreModules/optim/adagrad.m index da1dc07..884c317 100644 --- a/CoreModules/optim/adagrad.m +++ b/CoreModules/optim/adagrad.m @@ -1,7 +1,18 @@ function [ net,res,opts ] = adagrad( net,res,opts ) -%NET_APPLY_GRAD_SGD Summary of this function goes here -% Detailed explanation goes here +% Modified Adagrad using second-order information: +% 1. Duchi, J., Hazan, E., & Singer, Y. (2011). +% Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12(Jul), 2121-2159. +% 2. Ye, C., Yang, Y., Fermuller, C., & Aloimonos, Y. (2017). +% On the Importance of Consistency in Training Deep Neural Networks. arXiv preprint arXiv:1708.00631. 
+% + if ~isfield(opts.parameters,'second_order') + opts.parameters.second_order=0; + end + if opts.parameters.second_order + [ net,res,opts ] = gradient_decorrelation( net,res,opts ); + end + if ~isfield(opts.parameters,'weightDecay') opts.parameters.weightDecay=1e-4; end @@ -11,18 +22,18 @@ end for layer=1:numel(net.layers) - if isfield(net.layers{1,layer},'weights') + if isfield(net.layers{layer},'weights') - if ~isfield(net.layers{1,layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) - net.layers{1,layer}.momentum{1}=zeros(size(net.layers{1,layer}.weights{1}),'like',net.layers{1,layer}.weights{1}); - net.layers{1,layer}.momentum{2}=zeros(size(net.layers{1,layer}.weights{2}),'like',net.layers{1,layer}.weights{2}); + if ~isfield(net.layers{layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) + net.layers{layer}.momentum{1}=zeros(size(net.layers{layer}.weights{1}),'like',net.layers{layer}.weights{1}); + net.layers{layer}.momentum{2}=zeros(size(net.layers{layer}.weights{2}),'like',net.layers{layer}.weights{2}); end - net.layers{1,layer}.momentum{1}=net.layers{1,layer}.momentum{1}+res(layer).dzdw.^2; - net.layers{1,layer}.weights{1}=net.layers{1,layer}.weights{1}-opts.parameters.lr*res(layer).dzdw./(net.layers{1,layer}.momentum{1}.^0.5+opts.parameters.eps)- opts.parameters.weightDecay * net.layers{1,layer}.weights{1}; + net.layers{layer}.momentum{1}=net.layers{layer}.momentum{1}+res(layer).dzdw.^2; + net.layers{layer}.weights{1}=net.layers{layer}.weights{1}-opts.parameters.lr*res(layer).dzdw./(net.layers{layer}.momentum{1}.^0.5+opts.parameters.eps)- opts.parameters.weightDecay * net.layers{layer}.weights{1}; - net.layers{1,layer}.momentum{2}=net.layers{1,layer}.momentum{2}+res(layer).dzdb.^2; - net.layers{1,layer}.weights{2}=net.layers{1,layer}.weights{2}-opts.parameters.lr*res(layer).dzdb./(net.layers{1,layer}.momentum{2}.^0.5+opts.parameters.eps); + net.layers{layer}.momentum{2}=net.layers{layer}.momentum{2}+res(layer).dzdb.^2; + net.layers{layer}.weights{2}=net.layers{layer}.weights{2}-opts.parameters.lr*res(layer).dzdb./(net.layers{layer}.momentum{2}.^0.5+opts.parameters.eps); end end diff --git a/CoreModules/optim/adam.m b/CoreModules/optim/adam.m index f953c42..1c11fcf 100644 --- a/CoreModules/optim/adam.m +++ b/CoreModules/optim/adam.m @@ -1,57 +1,64 @@ function [ net,res,opts ] = adam( net,res,opts ) -%NET_APPLY_GRAD_SGD Summary of this function goes here -% Detailed explanation goes here +% Modified Adam using second-order information. +% 1. Kingma, D., & Ba, J. (2014). +% Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980. +% 2. Ye, C., Yang, Y., Fermuller, C., & Aloimonos, Y. (2017). +% On the Importance of Consistency in Training Deep Neural Networks. arXiv preprint arXiv:1708.00631. 
+ if ~isfield(opts.parameters,'second_order') + opts.parameters.second_order=0; + end + if opts.parameters.second_order + [ net,res,opts ] = gradient_decorrelation( net,res,opts ); + end + if ~isfield(opts.parameters,'weightDecay') opts.parameters.weightDecay=0; end - if (~isfield(opts.parameters,'mom2')) opts.parameters.mom2=0.999; end - if ~isfield(net,'iterations')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) - net.iterations=0; - end - - if ~isfield(opts.parameters,'eps') opts.parameters.eps=1e-8; end + if ~isfield(net,'iterations')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) + net.iterations=0; + end + net.iterations=net.iterations+1; + mom_factor=(1-opts.parameters.mom.^net.iterations); + mom_factor2=(1-opts.parameters.mom2.^net.iterations); + for layer=1:numel(net.layers) - if isfield(net.layers{1,layer},'weights')%strcmp(net.layers{layer}.type,'conv')||strcmp(net.layers{layer}.type,'mlp') - if ~isfield(net.layers{1,layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1)||length(net.layers{1,layer}.momentum)<4 - net.layers{1,layer}.momentum{1}=zeros(size(net.layers{1,layer}.weights{1}),'like',net.layers{1,layer}.weights{1}); - net.layers{1,layer}.momentum{2}=zeros(size(net.layers{1,layer}.weights{2}),'like',net.layers{1,layer}.weights{2}); - net.layers{1,layer}.momentum{3}=net.layers{1,layer}.momentum{1};%initialize - net.layers{1,layer}.momentum{4}=net.layers{1,layer}.momentum{2};%initialize + if isfield(net.layers{layer},'weights') + if ~isfield(net.layers{layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1)||length(net.layers{layer}.momentum)<4 + net.layers{layer}.momentum{1}=zeros(size(net.layers{layer}.weights{1}),'like',net.layers{layer}.weights{1}); + net.layers{layer}.momentum{2}=zeros(size(net.layers{layer}.weights{2}),'like',net.layers{layer}.weights{2}); + net.layers{layer}.momentum{3}=net.layers{layer}.momentum{1};%initialize + net.layers{layer}.momentum{4}=net.layers{layer}.momentum{2};%initialize end end end - mom_factor=(1-opts.parameters.mom.^net.iterations); - mom_factor2=(1-opts.parameters.mom2.^net.iterations); - - for layer=1:numel(net.layers) - if isfield(net.layers{1,layer},'weights') + if isfield(net.layers{layer},'weights') - net.layers{1,layer}.momentum{1}=opts.parameters.mom.*net.layers{1,layer}.momentum{1}+(1-opts.parameters.mom).*res(layer).dzdw; - net.layers{1,layer}.momentum{3}=opts.parameters.mom.*net.layers{1,layer}.momentum{3}+(1-opts.parameters.mom).*res(layer).dzdw.^2; - net.layers{1,layer}.weights{1}=net.layers{1,layer}.weights{1}-opts.parameters.lr*net.layers{1,layer}.momentum{1} ... - ./(net.layers{1,layer}.momentum{3}.^0.5+opts.parameters.eps) .*mom_factor2^0.5./mom_factor ... - - opts.parameters.weightDecay * net.layers{1,layer}.weights{1}; + net.layers{layer}.momentum{1}=opts.parameters.mom.*net.layers{layer}.momentum{1}+(1-opts.parameters.mom).*res(layer).dzdw; + net.layers{layer}.momentum{3}=opts.parameters.mom.*net.layers{layer}.momentum{3}+(1-opts.parameters.mom).*res(layer).dzdw.^2; + net.layers{layer}.weights{1}=net.layers{layer}.weights{1}-opts.parameters.lr*net.layers{layer}.momentum{1} ... + ./(net.layers{layer}.momentum{3}.^0.5+opts.parameters.eps) .*mom_factor2^0.5./mom_factor ... 
+ - opts.parameters.weightDecay * net.layers{layer}.weights{1}; - net.layers{1,layer}.momentum{2}=opts.parameters.mom.*net.layers{1,layer}.momentum{2}+(1-opts.parameters.mom).*res(layer).dzdb; - net.layers{1,layer}.momentum{4}=opts.parameters.mom.*net.layers{1,layer}.momentum{4}+(1-opts.parameters.mom).*res(layer).dzdb.^2; - net.layers{1,layer}.weights{2}=net.layers{1,layer}.weights{2}-opts.parameters.lr*net.layers{1,layer}.momentum{2} ... - ./(net.layers{1,layer}.momentum{4}.^0.5+opts.parameters.eps) .*mom_factor2^0.5./mom_factor; + net.layers{layer}.momentum{2}=opts.parameters.mom.*net.layers{layer}.momentum{2}+(1-opts.parameters.mom).*res(layer).dzdb; + net.layers{layer}.momentum{4}=opts.parameters.mom.*net.layers{layer}.momentum{4}+(1-opts.parameters.mom).*res(layer).dzdb.^2; + net.layers{layer}.weights{2}=net.layers{layer}.weights{2}-opts.parameters.lr*net.layers{layer}.momentum{2} ... + ./(net.layers{layer}.momentum{4}.^0.5+opts.parameters.eps) .*mom_factor2^0.5./mom_factor; end end diff --git a/CoreModules/optim/gradient_decorrelation.m b/CoreModules/optim/gradient_decorrelation.m new file mode 100644 index 0000000..a0df11a --- /dev/null +++ b/CoreModules/optim/gradient_decorrelation.m @@ -0,0 +1,77 @@ +function [ net,res,opts ] = gradient_decorrelation( net,res,opts ) +% Decorrelating gradient descents using second-order information. +% Ye, C., Yang, Y., Fermuller, C., & Aloimonos, Y. (2017). +% On the Importance of Consistency in Training Deep Neural Networks. arXiv preprint arXiv:1708.00631. + + if ~isfield(opts.parameters,'lambda_sgd2') + opts.parameters.lambda_sgd2=1e0; + end + if ~isfield(opts.parameters,'large_matrix_inversion') + opts.parameters.large_matrix_inversion=0; + end + if ~isfield(opts.parameters,'max_inv_size') + opts.parameters.max_inv_size=500; + end + if ~isfield(opts.parameters,'decorr_bias') + opts.parameters.decorr_bias=1; + end + + max_inv_size=opts.parameters.max_inv_size; + lambda=opts.parameters.lambda_sgd2; + + + for layer=1:numel(net.layers) + if isfield(net.layers{layer},'weights')&&~isempty(net.layers{layer}.weights) + + dzdw=res(layer).dzdw; + dzdb=res(layer).dzdb; + + if length(net.layers{layer}.weights)==2 + x=res(layer).x; + batch_dim=length(size(x));%This assumes the batch size must be >1 + if batch_dim==4%2d cnn + x=permute(x,[3,1,2,4]);x=reshape(x,size(x,1),[]); + dzdw=permute(dzdw,[1,2,4,3]);new_size=size(dzdw);dzdw=reshape(dzdw,prod(new_size(1:3)),new_size(4)); + K=size(dzdw,1)/numel(dzdb);dzdb=repelem(dzdb(:),K,1); + end + if batch_dim==3%1d cnn + x=permute(x,[2,1,3]);x=reshape(x,size(x,1),[]); + dzdw=permute(dzdw,[1,3,2]);new_size=size(dzdw);dzdw=reshape(dzdw,prod(new_size(1:2)),new_size(3)); + K=size(dzdw,1)/numel(dzdb);dzdb=repelem(dzdb(:),K,1); + end + subsample=1;batch_size=size(x,2); + if batch_size>1e4,subsample=ceil(min(50,batch_size/1e4));end + if subsample>1,x=x(:,1:subsample:end);end + if opts.parameters.decorr_bias==1 + %insert bias + x=[ones(1,size(x,2),'like',x);x]; + dzdw=[dzdb,dzdw]; + end + if size(dzdw,2)<=max_inv_size %small scale inversion + dzdw=dzdw/(x*x'./size(x,2)+lambda*eye(size(x,1),'like',x)); + elseif opts.parameters.large_matrix_inversion %divide large scale into smaller scale + order=randperm(size(dzdw,2)); + for i=1:max_inv_size:length(order) %could have been parallelized + block_size=min(max_inv_size,length(order)-i+1); + idx=order(i:i+block_size-1);x_tmp=x(idx,:); + dzdw(:,idx)=dzdw(:,idx)/(x_tmp*x_tmp'./size(x_tmp,2)+lambda*eye(size(x_tmp,1),'like',x)); + end + end + if opts.parameters.decorr_bias==1 + 
dzdb=dzdw(:,1);dzdw(:,1)=[]; + end + if batch_dim==4,dzdw=reshape(dzdw,new_size);dzdw=permute(dzdw,[1,2,4,3]);end + if batch_dim==3,dzdw=reshape(dzdw,new_size);dzdw=permute(dzdw,[1,3,2]);end + if batch_dim>2%for cnn: + %dzdb is decorrelated with dzdw, take average to smooth the results. + dzdb=reshape(mean(reshape(dzdb(:),K,[]),1),size(res(layer).dzdb)); + end + res(layer).dzdw=dzdw; + res(layer).dzdb=dzdb; + end + + end + end + +end + diff --git a/CoreModules/optim/rmsprop.m b/CoreModules/optim/rmsprop.m index e4303ab..eb36bd2 100644 --- a/CoreModules/optim/rmsprop.m +++ b/CoreModules/optim/rmsprop.m @@ -1,24 +1,30 @@ function [ net,res,opts ] = rmsprop( net,res,opts ) -%NET_APPLY_GRAD_SGD Summary of this function goes here -% Detailed explanation goes here +% Modified RMSProp using second-order information. +% 1.Tieleman, T. and Hinton, G. Lecture 6.5 - RMSProp, COURSERA: Neural Networks for Machine Learning. +% Technical report, 2012. +% 2.Ye, C., Yang, Y., Fermuller, C., & Aloimonos, Y. (2017). +% On the Importance of Consistency in Training Deep Neural Networks. arXiv preprint arXiv:1708.00631. + + if ~isfield(opts.parameters,'second_order') + opts.parameters.second_order=0; + end + if opts.parameters.second_order + [ net,res,opts ] = gradient_decorrelation( net,res,opts ); + end if ~isfield(opts.parameters,'weightDecay') opts.parameters.weightDecay=1e-4; end + - if ~isfield(opts,'results')||~isfield(opts.results,'lrs') - opts.results.lrs=[];%%not really necessary + if ~isfield(opts.parameters,'clip') + opts.parameters.clip=1e0; end - opts.results.lrs=[opts.results.lrs;gather(opts.parameters.lr)]; if ~isfield(opts.parameters,'eps') opts.parameters.eps=1e-6; end - if ~isfield(opts.parameters,'clip') - opts.parameters.clip=1e0; - end - if ~isfield(net,'iterations')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) net.iterations=0; end @@ -28,29 +34,28 @@ mom_factor=(1-opts.parameters.mom.^net.iterations); for layer=1:numel(net.layers) - if isfield(net.layers{1,layer},'weights') - if ~isfield(net.layers{1,layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) - net.layers{1,layer}.momentum{1}=zeros(size(net.layers{1,layer}.weights{1}),'like',net.layers{1,layer}.weights{1}); - net.layers{1,layer}.momentum{2}=zeros(size(net.layers{1,layer}.weights{2}),'like',net.layers{1,layer}.weights{2}); + if isfield(net.layers{layer},'weights') + if ~isfield(net.layers{layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) + net.layers{layer}.momentum{1}=zeros(size(net.layers{layer}.weights{1}),'like',net.layers{layer}.weights{1}); + net.layers{layer}.momentum{2}=zeros(size(net.layers{layer}.weights{2}),'like',net.layers{layer}.weights{2}); end - net.layers{1,layer}.momentum{1}=opts.parameters.mom.*net.layers{1,layer}.momentum{1}+(1-opts.parameters.mom).*res(layer).dzdw.^2; - - normalized_grad=res(layer).dzdw./(net.layers{1,layer}.momentum{1}.^0.5+opts.parameters.eps)./mom_factor; - if isfield(opts.parameters,'clip') + net.layers{layer}.momentum{1}=opts.parameters.mom.*net.layers{layer}.momentum{1}+(1-opts.parameters.mom).*(res(layer).dzdw.^2); + normalized_grad=res(layer).dzdw./(net.layers{layer}.momentum{1}.^0.5+opts.parameters.eps)./mom_factor; + if isfield(opts.parameters,'clip')&&opts.parameters.clip>0 mask=abs(normalized_grad)>opts.parameters.clip; normalized_grad(mask)=sign(normalized_grad(mask)).*opts.parameters.clip; end - net.layers{1,layer}.weights{1}=net.layers{1,layer}.weights{1}-opts.parameters.lr*normalized_grad- opts.parameters.weightDecay * 
net.layers{1,layer}.weights{1}; + net.layers{layer}.weights{1}=net.layers{layer}.weights{1}-opts.parameters.lr*normalized_grad- opts.parameters.weightDecay * net.layers{layer}.weights{1}; - net.layers{1,layer}.momentum{2}=opts.parameters.mom.*net.layers{1,layer}.momentum{2}+(1-opts.parameters.mom).*res(layer).dzdb.^2; - normalized_grad=res(layer).dzdb./(net.layers{1,layer}.momentum{2}.^0.5+opts.parameters.eps)./mom_factor; - if isfield(opts.parameters,'clip') + net.layers{layer}.momentum{2}=opts.parameters.mom.*net.layers{layer}.momentum{2}+(1-opts.parameters.mom).*(res(layer).dzdb.^2); + normalized_grad=res(layer).dzdb./(net.layers{layer}.momentum{2}.^0.5+opts.parameters.eps)./mom_factor; + if isfield(opts.parameters,'clip')&&opts.parameters.clip>0 mask=abs(normalized_grad)>opts.parameters.clip; normalized_grad(mask)=sign(normalized_grad(mask)).*opts.parameters.clip; end - net.layers{1,layer}.weights{2}=net.layers{1,layer}.weights{2}-opts.parameters.lr*normalized_grad; + net.layers{layer}.weights{2}=net.layers{layer}.weights{2}-opts.parameters.lr*normalized_grad; end end diff --git a/CoreModules/optim/sgd.m b/CoreModules/optim/sgd.m index ff5a6ac..9819cac 100644 --- a/CoreModules/optim/sgd.m +++ b/CoreModules/optim/sgd.m @@ -1,7 +1,14 @@ function [ net,res,opts ] = sgd( net,res,opts ) -%NET_APPLY_GRAD_SGD Summary of this function goes here -% Detailed explanation goes here +%Stochastic gradient descent algorithm + + if ~isfield(opts.parameters,'second_order') + opts.parameters.second_order=0; + end + if opts.parameters.second_order + [ net,res,opts ] = gradient_decorrelation( net,res,opts ); + end + if ~isfield(opts.parameters,'weightDecay') opts.parameters.weightDecay=1e-4; end @@ -14,12 +21,14 @@ net.iterations=0; end - net.iterations=net.iterations+1; + net.iterations=net.iterations+1; mom_factor=(1-opts.parameters.mom.^net.iterations); + + for layer=1:numel(net.layers) - if isfield(net.layers{1,layer},'weights')%strcmp(net.layers{layer}.type,'conv')||strcmp(net.layers{layer}.type,'mlp') + if isfield(net.layers{layer},'weights')&&~isempty(net.layers{layer}.weights) if opts.parameters.clip>0 mask=abs(res(layer).dzdw)>opts.parameters.clip; @@ -27,16 +36,16 @@ mask=abs(res(layer).dzdb)>opts.parameters.clip; res(layer).dzdb(mask)=sign(res(layer).dzdb(mask)).*opts.parameters.clip; end - if ~isfield(net.layers{1,layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) - net.layers{1,layer}.momentum{1}=zeros(size(net.layers{1,layer}.weights{1}),'like',net.layers{1,layer}.weights{1}); - net.layers{1,layer}.momentum{2}=zeros(size(net.layers{1,layer}.weights{2}),'like',net.layers{1,layer}.weights{2}); + if ~isfield(net.layers{layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) + net.layers{layer}.momentum{1}=zeros(size(net.layers{layer}.weights{1}),'like',net.layers{layer}.weights{1}); + net.layers{layer}.momentum{2}=zeros(size(net.layers{layer}.weights{2}),'like',net.layers{layer}.weights{2}); end - net.layers{1,layer}.momentum{1}=opts.parameters.mom.*net.layers{1,layer}.momentum{1}-(1-opts.parameters.mom).*res(layer).dzdw- opts.parameters.weightDecay * net.layers{1,layer}.weights{1}; - net.layers{1,layer}.weights{1}=net.layers{1,layer}.weights{1}+opts.parameters.lr*net.layers{1,layer}.momentum{1}./mom_factor; + net.layers{layer}.momentum{1}=opts.parameters.mom.*net.layers{layer}.momentum{1}-(1-opts.parameters.mom).*res(layer).dzdw - opts.parameters.weightDecay * net.layers{layer}.weights{1}; + 
net.layers{layer}.weights{1}=net.layers{layer}.weights{1}+opts.parameters.lr*net.layers{layer}.momentum{1}./mom_factor; - net.layers{1,layer}.momentum{2}=opts.parameters.mom.*net.layers{1,layer}.momentum{2}-(1-opts.parameters.mom).*res(layer).dzdb; - net.layers{1,layer}.weights{2}=net.layers{1,layer}.weights{2}+opts.parameters.lr*net.layers{1,layer}.momentum{2}./mom_factor; + net.layers{layer}.momentum{2}=opts.parameters.mom.*net.layers{layer}.momentum{2}-(1-opts.parameters.mom).*res(layer).dzdb; + net.layers{layer}.weights{2}=net.layers{layer}.weights{2}+opts.parameters.lr*net.layers{layer}.momentum{2}./mom_factor; end end diff --git a/CoreModules/optim/sgd2.m b/CoreModules/optim/sgd2.m index 6730db0..d6b73b3 100644 --- a/CoreModules/optim/sgd2.m +++ b/CoreModules/optim/sgd2.m @@ -1,6 +1,7 @@ function [ net,res,opts ] = sgd2( net,res,opts ) -% Summary of this function goes here -% Detailed explanation goes here +% Stochastic gradient descent using second-order information. +% Ye, C., Yang, Y., Fermuller, C., & Aloimonos, Y. (2017). +% On the Importance of Consistency in Training Deep Neural Networks. arXiv preprint arXiv:1708.00631. if ~isfield(opts.parameters,'weightDecay') opts.parameters.weightDecay=1e-4; @@ -32,22 +33,16 @@ for layer=1:numel(net.layers) - if isfield(net.layers{1,layer},'weights')&&~isempty(net.layers{1,layer}.weights) - if opts.parameters.clip>0 - mask=abs(res(layer).dzdw)>opts.parameters.clip; - res(layer).dzdw(mask)=sign(res(layer).dzdw(mask)).*opts.parameters.clip;%%this type of processing seems to be very helpful - mask=abs(res(layer).dzdb)>opts.parameters.clip; - res(layer).dzdb(mask)=sign(res(layer).dzdb(mask)).*opts.parameters.clip; - end - if ~isfield(net.layers{1,layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) - net.layers{1,layer}.momentum{1}=zeros(size(net.layers{1,layer}.weights{1}),'like',net.layers{1,layer}.weights{1}); - net.layers{1,layer}.momentum{2}=zeros(size(net.layers{1,layer}.weights{2}),'like',net.layers{1,layer}.weights{2}); + if isfield(net.layers{layer},'weights')&&~isempty(net.layers{layer}.weights) + if ~isfield(net.layers{layer},'momentum')||(isfield(opts,'reset_mom')&&opts.reset_mom==1) + net.layers{layer}.momentum{1}=zeros(size(net.layers{layer}.weights{1}),'like',net.layers{layer}.weights{1}); + net.layers{layer}.momentum{2}=zeros(size(net.layers{layer}.weights{2}),'like',net.layers{layer}.weights{2}); end dzdw=res(layer).dzdw; dzdb=res(layer).dzdb; - if length(net.layers{1,layer}.weights)==2 + if length(net.layers{layer}.weights)==2 x=res(layer).x; batch_dim=length(size(x));%This assumes the batch size must be >1 if batch_dim==4%2d cnn @@ -89,11 +84,20 @@ end end - net.layers{1,layer}.momentum{1}=opts.parameters.mom.*net.layers{1,layer}.momentum{1}-(1-opts.parameters.mom).*dzdw - opts.parameters.weightDecay * net.layers{1,layer}.weights{1}; - net.layers{1,layer}.weights{1}=net.layers{1,layer}.weights{1}+opts.parameters.lr*net.layers{1,layer}.momentum{1}./mom_factor; + if opts.parameters.clip>0 + mask=abs(res(layer).dzdw)>opts.parameters.clip; + res(layer).dzdw(mask)=sign(res(layer).dzdw(mask)).*opts.parameters.clip;%%this type of processing seems to be very helpful + mask=abs(res(layer).dzdb)>opts.parameters.clip; + res(layer).dzdb(mask)=sign(res(layer).dzdb(mask)).*opts.parameters.clip; + end + + + %sgd updates + net.layers{layer}.momentum{1}=opts.parameters.mom.*net.layers{layer}.momentum{1}-(1-opts.parameters.mom).*dzdw - opts.parameters.weightDecay * net.layers{layer}.weights{1}; + 
net.layers{layer}.weights{1}=net.layers{layer}.weights{1}+opts.parameters.lr*net.layers{layer}.momentum{1}./mom_factor; - net.layers{1,layer}.momentum{2}=opts.parameters.mom.*net.layers{1,layer}.momentum{2}-(1-opts.parameters.mom).*dzdb; - net.layers{1,layer}.weights{2}=net.layers{1,layer}.weights{2}+opts.parameters.lr*net.layers{1,layer}.momentum{2}./mom_factor; + net.layers{layer}.momentum{2}=opts.parameters.mom.*net.layers{layer}.momentum{2}-(1-opts.parameters.mom).*dzdb; + net.layers{layer}.weights{2}=net.layers{layer}.weights{2}+opts.parameters.lr*net.layers{layer}.momentum{2}./mom_factor; end end diff --git a/Log.txt b/Log.txt index 7c67086..0a05f02 100644 --- a/Log.txt +++ b/Log.txt @@ -55,3 +55,6 @@ 5. A new experiment is added to show SGD2's tolerance to bad initialization. 20170811 1. An example on policy network/policy gradient is added. + 20170822 +1. Adam, RMSProp, and Adagrad have been modified to support second-order training; examples will be added later. + diff --git a/README.md b/README.md index b46d5d4..77bc079 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,5 @@ # LightNet: A Versatile, Standalone Matlab-based Environment for Deep Learning -Chengxi Ye, Chen Zhao, Yezhou Yang, Cornelia Fermüller, and Yiannis Aloimonos. 2016. LightNet: A Versatile, Standalone Matlab-based Environment for Deep Learning. In Proceedings of the 2016 ACM on Multimedia Conference (MM '16). Amsterdam, The Netherlands, 1156-1159. (http://dl.acm.org/citation.cfm?id=2973791) - ![LightNet Icon](LightNet.png) LightNet is a lightweight, versatile and purely Matlab-based deep learning framework. The aim of the design is to provide an easy-to-understand, easy-to-use and efficient computational platform for deep learning research. The implemented framework supports major deep learning architectures such as the Multilayer Perceptron Networks (MLP), Convolutional Neural Networks (CNN) and Recurrent Neural Networks (RNN). LightNet supports both CPU and GPU for computation and the switch between them is straightforward. Different applications in computer vision, natural language processing and robotics are demonstrated as experiments. @@ -14,19 +12,7 @@ Have fun! ## Recent updates -20170801: Second-order SGD is introduced (SGD2), together with the corresponding normalization technique RMSnorm and a new ModU activation function. -SGD2 is a fast second-order training method (or known as the Newton's method) that trains faster and better, and shows better tolerance to bad initializations. -Check it out in \SGD2. - -20-epoch training of a 10-layer-deep network. The network is initialized using Gaussian distribution with std from 10^(-4) to 10^4. - -![Init](Init.png) - -Implicit expansion is adopted to replace the bsxfun in LightNet. As a result, *Matlab R2016b* or later is required. - -CUDNN is supported by installing Neural Network Toolbox from Mathworks. The convolutional network training is over 10x faster than the previous release! The current version can process 10,000 CIFAR-10 images per second in the training. - -LightNet supports using pretrained ImageNet network models. +20160528: LightNet supports using pretrained ImageNet network models. ![coco](coco.png) Check CNN/Main_CNN_ImageNet_Minimal() @@ -35,34 +21,44 @@ An example recognition using imagenet-vgg-f pretrained net: ![ImageNet Icon](ImageNetPreTrain.png) +20170217: CUDNN is supported by installing Neural Network Toolbox from Mathworks. The convolutional network training is over 10x faster than the previous release! 
The current version can process 10,000 CIFAR-10 images per second in the training. + +20170801: Second-order SGD is introduced (SGD2), together with the corresponding normalization technique RMSnorm and a new ModU activation function. +SGD2 is a fast second-order training method (or known as the Newton's method) that trains faster and better, and shows better tolerance to bad initializations. +Check it out in \SGD2. + +20-epoch training of a 10-layer-deep network. The network is initialized using Gaussian distribution with std from 10^(-4) to 10^4. + +![Init](Init.png) + ## Major functions in LightNet ####network related: Main_Template: a template script used to train CNN and MLP networks. -net_bp: implementation of the back propagation process which is used in CNN and MLP networks. -net_ff: implementation of the feed forward process which is used in CNN and MLP networks. -test_net: running the network in the testing mode to evaluate the current parameters. -train_net: running the network in the training mode to evaluate and calculate the loss and gradients. TrainingScript: a training template for CNN and MLP networks. +train_net: running the network in the training mode to evaluate and calculate the loss and gradients. +test_net: running the network in the testing mode to evaluate the current parameters. +net_ff: implementation of the feed forward process which is used in CNN and MLP networks. +net_bp: implementation of the back propagation process which is used in CNN and MLP networks. net_init*: how to initialize a neural network. ####layers: -bnorm: implementation of the batch normalization layer. +linear_layer: implementation of (fully-connected) linear layer. (CUDNN enabled) conv_layer_1d: implementation of the 1d convolution layer. (CUDNN enabled) conv_layer_2d: implementation of the 2d convolution layer. (CUDNN enabled) -dropout: implementation of the dropout layer. -linear_layer: implementation of (fully-connected) linear layer. (CUDNN enabled) -lrn: implementation of the local response normalization layer. (CUDNN enabled) maxpool: implementation of the 2d max-pooling layer. (CUDNN enabled) maxpool_1d: implementation of the 1d max-pooling layer. (CUDNN enabled) +bnorm: implementation of the batch normalization layer. rmsnorm: implementation of the RMS normalization function. +dropout: implementation of the dropout layer. +lrn: implementation of the local response normalization layer. (CUDNN enabled) softmax: implementation of the softmax layer. ####activation functions: -leaky_relu: implementation of the leaky ReLU layer. -modu: implementation of the modulus unit layer. relu: implementation of the rectified linear unit layer. +leaky_relu: implementation of the leaky ReLU layer. +modu: implementation of the modulus unit layer. sigmoid_ln: implementation of the sigmoid layer. tanh_ln: implementation of the tanh layer. @@ -71,12 +67,12 @@ tanh_ln: implementation of the tanh layer. softmaxlogloss: implementation of the softmax log loss layer . ####optimization related: -adagrad: implementation of the Adagrad algorithm. -adam: implementation of the Adam algorithm. -rmsprop: implementation of the RMSProp algorithm. -select_learning_rate: implementation of the Selective-SGD algorithm that automatically selects the optimal learning rate at the beginning or in the middle of the training. sgd: implementation of the stochastic gradient descent algorithm with momentum. sgd2: implementation of the second-order stochastic gradient descent algorithm with momentum. 
+adam: implementation of the Adam algorithm with modifications that allow second-order training. +rmsprop: implementation of the RMSProp algorithm with modifications that allow second-order training. +adagrad: implementation of the Adagrad algorithm with modifications that allow second-order training. +select_learning_rate: implementation of the Selective-SGD algorithm that automatically selects the optimal learning rate at the beginning or in the middle of the training. ####utility functions: generate_output_filename: generate output filename based on the current parameter settings. @@ -88,7 +84,12 @@ SwitchProcessor: a switch function between CPU and GPU. ## How to accelerate LightNet -Nvidia CUDNN can be used to calculate convolutions. +Nvidia CUDNN can be used to calculate convolutions and linear transforms. 1. You will need to install the Neural Network Toolbox from Mathworks. Make sure you can run it properly. (Ref to our tutorial slides.) 2. Set opts.use_nntoolbox=1 in the main testing script. + +## References +1. Ye, C., Zhao, C., Yang, Y., Fermüller, C., & Aloimonos, Y. (2016, October). LightNet: A Versatile, Standalone Matlab-based Environment for Deep Learning. In Proceedings of the 2016 ACM on Multimedia Conference (pp. 1156-1159). ACM. +2. Ye, C., Yang, Y., Fermüller, C., & Aloimonos, Y. (2017). On the Importance of Consistency in Training Deep Neural Networks. arXiv preprint arXiv:1708.00631. + diff --git a/ReinforcementLearning/Main_Cart_Pole_Policy_Network.m b/ReinforcementLearning/Main_Cart_Pole_Policy_Network.m index 0dca656..135d84a 100644 --- a/ReinforcementLearning/Main_Cart_Pole_Policy_Network.m +++ b/ReinforcementLearning/Main_Cart_Pole_Policy_Network.m @@ -21,6 +21,7 @@ MAX_STEPS = 100000; MaxSteps=[]; +Vs=[]; failures=0; success=0; @@ -93,9 +94,10 @@ MaxSteps=[MaxSteps;samples]; disp(['Trial was ' int2str(failures) ' steps ' num2str(samples)]); %Reinforcement upon failure is -1. - discounted_r=-exp(log(GAMMA).*[samples:-1:1]); - + discounted_r=-exp(2*log(GAMMA).*[samples:-1:1]); + V=mean(-exp(log(GAMMA).*[samples:-1:1]));%E[V(s)] + Vs=[Vs;V]; else %Not a failure.r=0. failed = 0; @@ -154,4 +156,5 @@ disp(['Pole balanced successfully for at least ' int2str(MAX_STEPS) ' steps ' ]); end close all; -figure;plot(MaxSteps);title('Steps'); \ No newline at end of file +figure;subplot(1,2,1);plot(Vs);title('Values'); +subplot(1,2,2);plot(MaxSteps);title('Steps'); \ No newline at end of file
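
Usage note (illustrative only, not part of the patch above): the Log entry says worked examples for the second-order variants of Adam, RMSProp, and Adagrad will follow later. As an interim sketch, the MATLAB snippet below shows how the new option could be switched on from a LightNet training script, using only the fields the patched optimizers actually read (opts.parameters.second_order, plus the gradient_decorrelation defaults lambda_sgd2, max_inv_size, large_matrix_inversion, and decorr_bias). The learning rate and momentum values are assumptions for illustration, not recommended settings.

% Minimal sketch: enabling second-order (decorrelated-gradient) training.
% Assumes net/res/opts have been prepared as in the standard LightNet
% training scripts (net_ff/net_bp fill res(layer).dzdw, dzdb and x).
opts.parameters.second_order = 1;            % route gradients through gradient_decorrelation()
opts.parameters.lambda_sgd2 = 1e0;           % damping added to x*x'/N before inversion (patch default)
opts.parameters.max_inv_size = 500;          % largest block inverted at once (patch default)
opts.parameters.large_matrix_inversion = 0;  % set to 1 to handle wide layers block-by-block
opts.parameters.decorr_bias = 1;             % decorrelate the bias gradient together with the weights
opts.parameters.lr = 0.01;                   % assumed learning rate
opts.parameters.mom = 0.9;                   % assumed momentum
% inside the training loop, any of the patched optimizers can then be used:
[net, res, opts] = adam(net, res, opts);     % or rmsprop(...) / adagrad(...) / sgd(...)

As implemented in gradient_decorrelation.m, the decorrelation adds one linear solve per layer per update (blocked when the layer is wider than max_inv_size), so max_inv_size trades the accuracy of the inverse against the cost on wide layers.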