No variable of name 'label' could be found in the DAG. #60

Open
samadak opened this issue Aug 3, 2017 · 0 comments
samadak commented Aug 3, 2017

Hello everyone,
I want to fine-tune VGG16 on my own dataset. It has 32 classes, but I ran into the following error:

train: epoch 01: 1/ 62:
148 im2= imdb.images.data(:,:,:,batch);
Error using dagnn.DagNN/eval (line 83)
No variable of name 'label' could be found in the DAG.

Error in cnn_train_dag>processEpoch (line 253)
net.eval(inputs, params.derOutputs, 'holdOn', s < params.numSubBatches) ;

Error in cnn_train_dag (line 105)
[net, state] = processEpoch(net, state, params, 'train') ;

Error in fine_test0 (line 113)
info = cnn_train_dag(net, imdb, @(i,b) getBatch(bopts,i,b), opts.train, 'val', find(imdb.images.set == 3)) ;
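
For context: the failure is raised inside dagnn.DagNN/eval when it binds the batch inputs. getBatch (at the bottom of the script) returns {'input', images, 'label', labels}, but with the loss layers commented out no layer in the DAG consumes a variable named 'label', so the lookup fails. A sketch of the likely fix appears next to the commented-out loss layers below.
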
% And this is my code:
%finetune vgg16 for cifar10 dataset
function [net, info] = vgg_train(imdb, expDir)
% Demonstrates MatConvNet on CIFAR-10 using a DAG
% run(fullfile(fileparts(mfilename('fullpath')), '../../', 'matlab', 'vl_setupnn.m')) ;
run matlab/vl_setupnn
% imdb=load('imdbs.mat', 'imdb')
% imdb=load('imdb_cifar10.mat')
load('imdbs.mat', 'imdb')
% some common options
opts.train.batchSize = 100;
opts.train.numEpochs = 20 ;
opts.train.continue = true ;
opts.train.gpus = [];%[1] ;
opts.train.learningRate = [1e-1*ones(1, 10), 1e-2*ones(1, 5)];
opts.train.weightDecay = 3e-4;
opts.train.momentum = 0.;
% opts.train.expDir = expDir;
opts.train.numSubBatches = 1;
% getBatch options
bopts.useGpu = numel(opts.train.gpus) > 0 ; % usually keep at 0; seems to only work with 3D data

% network definition!
% MATLAB handle, passed by reference
net = dagnn.DagNN() ;
% VGG16 architecture
net.addLayer('conv1_1', dagnn.Conv('size', [3 3 3 64], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'input'},{'conv1'}, {'conv1_1f' 'conv1_1b'}); 

net.addLayer('relu1_1', dagnn.ReLU(), {'conv1'}, {'relu1'}, {});
net.addLayer('conv1_2', dagnn.Conv('size', [3 3 64 64], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'relu1'}, {'conv2'}, {'conv1_2f' 'conv1_2b'});

net.addLayer('relu1_2', dagnn.ReLU(), {'conv2'}, {'relu2'}, {});
%net.addLayer('lrn1', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu1'}, {'lrn1'}, {});
net.addLayer('pool1', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu2'}, {'pool1'}, {});
net.addLayer('conv2_1', dagnn.Conv('size', [3 3 64 128], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool1'}, {'conv3'}, {'conv2_1f' 'conv2_1b'});

net.addLayer('relu2_1', dagnn.ReLU(), {'conv3'}, {'relu3'}, {});
%layer8
net.addLayer('conv2_2', dagnn.Conv('size', [3 3 128 128], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu3'}, {'conv4'}, {'conv2_2f' 'conv2_2b'});
net.addLayer('relu2_2', dagnn.ReLU(), {'conv4'}, {'relu4'}, {});
%net.addLayer('lrn2', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu2'}, {'lrn2'}, {});
%change padding in layer10
net.addLayer('pool2', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu4'}, {'pool2'}, {});

net.addLayer('conv3_1', dagnn.Conv('size', [3 3 128 256], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool2'}, {'conv5'}, {'conv3_1f' 'conv3_1b'});

net.addLayer('relu3_1', dagnn.ReLU(), {'conv5'}, {'relu5'}, {});
net.addLayer('conv3_2', dagnn.Conv('size', [3 3 256 256], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'relu5'}, {'conv6'}, {'conv3_2f' 'conv3_2b'});

net.addLayer('relu3_2', dagnn.ReLU(), {'conv6'}, {'relu6'}, {});
%layer 15 ok
net.addLayer('conv3_3', dagnn.Conv('size', [3 3 256 256], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu6'}, {'conv7'}, {'conv3_3f' 'conv3_3b'});

net.addLayer('relu3_3', dagnn.ReLU(), {'conv7'}, {'relu7'}, {});
%net.addLayer('lrn3', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu2'}, {'lrn2'}, {});
%change in padding in 17 layer
net.addLayer('pool3', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu7'}, {'pool3'}, {});

net.addLayer('conv4_1', dagnn.Conv('size', [3 3 256 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool3'}, {'conv8'}, {'conv4_1f' 'conv4_1b'});

net.addLayer('relu4_1', dagnn.ReLU(), {'conv8'}, {'relu8'}, {});
net.addLayer('conv4_2', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu8'}, {'conv9'}, {'conv4_2f' 'conv4_2b'});

net.addLayer('relu4_2', dagnn.ReLU(), {'conv9'}, {'relu9'}, {});
net.addLayer('conv4_3', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu9'}, {'conv10'}, {'conv4_3f' 'conv4_3b'});

net.addLayer('relu4_3', dagnn.ReLU(), {'conv10'}, {'relu10'}, {});
%net.addLayer('lrn4', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu2'}, {'lrn2'}, {});
%layer24 change in padding
net.addLayer('pool4', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu10'}, {'pool4'}, {});

net.addLayer('conv5_1', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool4'}, {'conv11'}, {'conv5_1f' 'conv5_1b'});

net.addLayer('relu5_1', dagnn.ReLU(), {'conv11'}, {'relu11'}, {});
net.addLayer('conv5_2', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu11'}, {'conv12'}, {'conv5_2f' 'conv5_2b'});

net.addLayer('relu5_2', dagnn.ReLU(), {'conv12'}, {'relu12'}, {});
%29 layer
net.addLayer('conv5_3', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu12'}, {'conv13'}, {'conv5_3f' 'conv5_3b'});

net.addLayer('relu5_3', dagnn.ReLU(), {'conv13'}, {'relu13'}, {});
net.addLayer('pool5', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]), {'relu13'}, {'pool5'}, {});

net.addLayer('conv6', dagnn.Conv('size', [7 7 512 4096], 'hasBias', true, 'stride', [1, 1], 'pad', [0 0 0 0]), {'pool5'}, {'conv14'}, {'conv6f' 'conv6b'});

net.addLayer('relu6', dagnn.ReLU(), {'conv14'}, {'relu14'}, {});
net.addLayer('drop6', dagnn.DropOut('rate', 0.5), {'relu14'}, {'drop6'}, {});
net.addLayer('conv7', dagnn.Conv('size', [1 1 4096 4096], 'hasBias', true, 'stride', [1, 1], 'pad', [0 0 0 0]), {'drop6'}, {'conv15'}, {'conv7f' 'conv7b'});

net.addLayer('relu7', dagnn.ReLU(), {'conv15'}, {'relu15'}, {});
net.addLayer('drop7', dagnn.DropOut('rate', 0.5), {'relu15'}, {'drop7'}, {});
% change the number of classes (32 for this dataset)
net.addLayer('classifier', dagnn.Conv('size', [1 1 4096 32], 'hasBias', true, 'stride', [1, 1], 'pad', [0 0 0 0]), {'drop7'}, {'classifier'}, {'conv8f' 'conv8b'});

net.addLayer('prob', dagnn.SoftMax(), {'classifier'}, {'prob'}, {});
% net.addLayer('objective', dagnn.Loss('loss', 'log'), {'prob', 'label'}, {'objective'}, {});
% net.addLayer('error', dagnn.Loss('loss', 'classerror'), {'prob','label'}, 'error') ;
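% NOTE: the "No variable of name 'label'" error above is most likely caused by
% these two layers being commented out: getBatch passes a 'label' input, but no
% layer in the DAG takes it, and cnn_train_dag's default derOutputs
% ({'objective', 1}) does not exist in the DAG either. A minimal sketch of the
% fix, simply restoring the two layers as written above:
net.addLayer('objective', dagnn.Loss('loss', 'log'), {'prob', 'label'}, {'objective'}, {});
net.addLayer('error', dagnn.Loss('loss', 'classerror'), {'prob', 'label'}, {'error'}, {});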
%
%
% initialization of the weights (CRITICAL!!!!)
% if(numel(varargin) > 0)
% initNet_FineTuning(net, netPre);
% else
% initNet_He(net);
% end
% %train
% initNet(net, 1/100);
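% NOTE: as posted, initNet (defined below) is never actually called, so the
% network parameters are never initialized before training. A minimal sketch
% of the missing call, reusing the 1/100 factor from the commented-out line
% above:
initNet(net, 1/100);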

% do the training!
info = cnn_train_dag(net, imdb, @(i,b) getBatch(bopts,i,b), opts.train, 'val', find(imdb.images.set == 3)) ;    

end

function initNet(net, f)
net.initParams();
%

f_ind = net.layers(1).paramIndexes(1);
b_ind = net.layers(1).paramIndexes(2);
net.params(f_ind).value = 10*f*randn(size(net.params(f_ind).value), 'single');
net.params(f_ind).learningRate = 1;
net.params(f_ind).weightDecay = 1;

for l=2:length(net.layers)
	% is a convolution layer?
	if(strcmp(class(net.layers(l).block), 'dagnn.Conv'))
		f_ind = net.layers(l).paramIndexes(1);
		b_ind = net.layers(l).paramIndexes(2);

		[h,w,in,out] = size(net.params(f_ind).value);
		net.params(f_ind).value = f*randn(size(net.params(f_ind).value), 'single');
		net.params(f_ind).learningRate = 1;
		net.params(f_ind).weightDecay = 1;

		net.params(b_ind).value = f*randn(size(net.params(b_ind).value), 'single');
		net.params(b_ind).learningRate = 0.5;
		net.params(b_ind).weightDecay = 1;
	end
end

end
% getBatch for IMDBs that are too big to be in RAM
function inputs = getBatch(opts, imdb, batch)
%images = imdb.images.data(:,:,:,batch) ;
labels = imdb.images.labels(1,batch) ;
% labels(labels>10)=10;
im2 = imdb.images.data(:,:,:,batch);
im2 = im2(1:224,1:224,:,:);   % crop to the 224x224 input VGG16 expects
s = size(im2);

% replicate the single channel into the three RGB channels VGG16 expects
images = zeros(s(1), s(2), 3, s(4), 'single');
images(:,:,1,:) = im2;
images(:,:,2,:) = im2;
images(:,:,3,:) = im2;
if opts.useGpu > 0
	images = gpuArray(images) ;
end

inputs = {'input', images, 'label', labels} ;

end
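
One caveat with the crop above: im2(1:224,1:224,:,:) assumes every image is at least 224x224 pixels, so it will error on smaller inputs (e.g. 32x32 CIFAR-10 images). A minimal sketch of a resize-based alternative, assuming the Image Processing Toolbox is available:

im2 = imdb.images.data(:,:,:,batch);
im2 = imresize(im2, [224 224]);            % imresize only resizes the first two dimensions of an N-D array
images = repmat(single(im2), [1 1 3 1]);   % replicate the gray channel into RGB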
