Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

adding dropout-by row #8

Open
wants to merge 12 commits into
base: dropout_schedule
Choose a base branch
from
8 changes: 5 additions & 3 deletions egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ ihm_gmm=tri3 # the gmm for the IHM system (if --use-ihm-ali true).
num_threads_ubm=32
nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned
dropout_schedule='0,0@0.20,0.5@0.50,0@0.50,0'
dropout_per_frame=false
chunk_width=150
chunk_left_context=40
chunk_right_context=0
Expand Down Expand Up @@ -193,15 +194,15 @@ if [ $stage -le 15 ]; then
relu-renorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024

# check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults
lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0
lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 dropout-per-frame=false
relu-renorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024
relu-renorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024
relu-renorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024
lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0
lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 dropout-per-frame=false
relu-renorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024
relu-renorm-layer name=tdnn8 input=Append(-3,0,3) dim=1024
relu-renorm-layer name=tdnn9 input=Append(-3,0,3) dim=1024
lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0
lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 dropout-per-frame=false

## adding the layers for chain branch
output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
Expand Down Expand Up @@ -243,6 +244,7 @@ if [ $stage -le 16 ]; then
--egs.chunk-left-context $chunk_left_context \
--egs.chunk-right-context $chunk_right_context \
--trainer.dropout-schedule $dropout_schedule \
--trainer.dropout-per-frame $dropout_per_frame \

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

as Vimal says, please remove this from the training code... does not need to be there.

--trainer.num-chunk-per-minibatch 64 \
--trainer.frames-per-iter 1500000 \
--trainer.num-epochs 4 \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,7 @@ def train_one_iteration(dir, iter, srand, egs_dir,
frame_subsampling_factor, truncate_deriv_weights,
run_opts,
dropout_proportions=None,
dropout_per_frame=None,
background_process_handler=None):
""" Called from steps/nnet3/chain/train.py for one iteration for
neural network training with LF-MMI objective
Expand Down Expand Up @@ -307,7 +308,7 @@ def train_one_iteration(dir, iter, srand, egs_dir,
dropout_info_str = ''
if dropout_proportions is not None:
raw_model_string, dropout_info = common_train_lib.apply_dropout(
dropout_proportions, raw_model_string)
dropout_proportions, dropout_per_frame, raw_model_string)
dropout_info_str = ', {0}'.format(", ".join(dropout_info))

shrink_info_str = ''
Expand Down
15 changes: 10 additions & 5 deletions egs/wsj/s5/steps/libs/nnet3/train/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -511,7 +511,7 @@ def _get_component_dropout(dropout_schedule, num_archives_processed):
+ initial_dropout)


def apply_dropout(dropout_proportions, dropout_per_frame, raw_model_string):
    """Adds an nnet3-copy --edits line to modify raw_model_string to
    set dropout proportions according to dropout_proportions.

    Args:
        dropout_proportions: list of (component-name-pattern, proportion)
            pairs, as produced by get_dropout_proportions().
        dropout_per_frame: string 'true'/'false' (or None) controlling
            whether dropout zeroes whole frames rather than individual
            elements; passed straight through to set-dropout-proportion.
        raw_model_string: the command that produces the raw model on stdout.

    Returns:
        A (model_string, dropout_info) tuple, where model_string is
        raw_model_string extended with an nnet3-copy --edits pipe, and
        dropout_info is a list of human-readable strings for logging.
    """
    edit_config_lines = []
    dropout_info = []

    for component_name, dropout_proportion in dropout_proportions:
        # Keep lines under 80 characters (the original format call was
        # over-long, which the review flagged).
        edit_config_lines.append(
            "set-dropout-proportion name={0} proportion={1} "
            "dropout-per-frame={2}".format(component_name,
                                           dropout_proportion,
                                           dropout_per_frame))
        dropout_info.append(
            "pattern/dropout-proportion={0}/{1} "
            "dropout-per-frame={2}".format(component_name,
                                           dropout_proportion,
                                           dropout_per_frame))

    return ("""{raw_model_string} nnet3-copy --edits='{edits}' \
- - |""".format(raw_model_string=raw_model_string,
                edits=";".join(edit_config_lines)),
            dropout_info)
Expand Down Expand Up @@ -771,6 +771,11 @@ def __init__(self):
lstm*=0,0.2,0'. More general should precede
less general patterns, as they are applied
sequentially.""")
self.parser.add_argument("--trainer.dropout-per-frame", type=str,
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this option required? Do you expect to change whether dropout is per frame or not during the training iterations?
I think dropout-per-frame should only be at the config level.
Also I think you can remove dropout_per_frame from the function SetDropoutProportion, because that is something you would have already defined from the config. If you really need to change dropout-per-frame during training, I suggest add a separate function like SetDropoutPerFrame to the DropoutComponent.

action=common_lib.NullstrToNoneAction,
dest='dropout_per_frame', default=None,
help="""this option is used to control whether
using dropout by frame level or by vector level""")

# General options
self.parser.add_argument("--stage", type=int, default=-4,
Expand Down
8 changes: 7 additions & 1 deletion egs/wsj/s5/steps/libs/nnet3/xconfig/lstm.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,7 @@ def set_default_configs(self):
'zeroing-interval' : 20,
'zeroing-threshold' : 15.0,
'dropout-proportion' : -1.0 # -1.0 stands for no dropout will be added
'dropout-per-frame' : 'false'
}

def set_derived_configs(self):
Expand Down Expand Up @@ -285,6 +286,10 @@ def check_configs(self):
self.config['dropout-proportion'] < 0.0) and
self.config['dropout-proportion'] != -1.0 ):
raise xparser_error("dropout-proportion has invalid value {0}.".format(self.config['dropout-proportion']))

# 'dropout-per-frame' must be exactly 'true' or 'false'.  The posted
# diff used "!= 'false' or != 'true'", which is always true, so every
# config (including valid ones) would raise; the intended test is a
# conjunction, expressed here with 'not in'.
if self.config['dropout-per-frame'] not in ('true', 'false'):
    raise xparser_error("dropout-per-frame has invalid value {0}.".format(
        self.config['dropout-per-frame']))

def auxiliary_outputs(self):
return ['c_t']
Expand Down Expand Up @@ -347,7 +352,8 @@ def generate_lstm_config(self):
pes_str = self.config['ng-per-element-scale-options']
lstm_dropout_value = self.config['dropout-proportion']
lstm_dropout_str = 'dropout-proportion='+str(self.config['dropout-proportion'])

lstm_dropout_per_frame_value = self.config['dropout-per-frame']
lstm_dropout_per_frame_str = 'dropout-per-frame='+str(self.config['dropout-per-frame'])
# Natural gradient per element scale parameters
# TODO: decide if we want to keep exposing these options
if re.search('param-mean', pes_str) is None and \
Expand Down
8 changes: 7 additions & 1 deletion egs/wsj/s5/steps/nnet3/chain/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,10 @@ def process_args(args):
"value={0}. We recommend using the option "
"--trainer.deriv-truncate-margin.".format(
args.deriv_truncate_margin))

# dropout_per_frame only makes sense together with a dropout schedule.
# (The posted diff put 'and (...)' on its own line without enclosing
# parentheses/backslash, which is a Python syntax error; the error
# message was also missing a space between the two string fragments.)
if (args.dropout_schedule is None
        and args.dropout_per_frame is not None):
    raise Exception("The dropout schedule is null, but dropout_per_frame "
                    "option is not null")
if (not os.path.exists(args.dir)
or not os.path.exists(args.dir+"/configs")):
raise Exception("This scripts expects {0} to exist and have a configs "
Expand Down Expand Up @@ -441,6 +444,9 @@ def learning_rate(iter, current_num_jobs, num_archives_processed):
None if args.dropout_schedule is None
else common_train_lib.get_dropout_proportions(
dropout_schedule, num_archives_processed)),
dropout_per_frame=(
None if args.dropout_schedule is None
else args.dropout_per_frame),
shrinkage_value=shrinkage_value,
num_chunk_per_minibatch=args.num_chunk_per_minibatch,
num_hidden_layers=num_hidden_layers,
Expand Down
2 changes: 1 addition & 1 deletion src/nnet3/nnet-chain-combine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ NnetChainCombiner::NnetChainCombiner(const NnetCombineConfig &combine_config,
nnet_params_(std::min(num_nnets, combine_config_.max_effective_inputs),
NumParameters(first_nnet)),
tot_input_weighting_(nnet_params_.NumRows()) {
SetDropoutProportion(0, &nnet_);
SetDropoutProportion(0, false, &nnet_);
SubVector<BaseFloat> first_params(nnet_params_, 0);
VectorizeNnet(nnet_, &first_params);
tot_input_weighting_(0) += 1.0;
Expand Down
2 changes: 1 addition & 1 deletion src/nnet3/nnet-combine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ NnetCombiner::NnetCombiner(const NnetCombineConfig &config,
nnet_params_(std::min(num_nnets, config_.max_effective_inputs),
NumParameters(first_nnet)),
tot_input_weighting_(nnet_params_.NumRows()) {
SetDropoutProportion(0, &nnet_);
SetDropoutProportion(0, false, &nnet_);

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

you can remove this 'false' argument from the function... just make it a fixed property of the component that can't be changed after you initialize.

SubVector<BaseFloat> first_params(nnet_params_, 0);
VectorizeNnet(nnet_, &first_params);
tot_input_weighting_(0) += 1.0;
Expand Down
52 changes: 42 additions & 10 deletions src/nnet3/nnet-simple-component.cc
Original file line number Diff line number Diff line change
Expand Up @@ -87,27 +87,37 @@ void PnormComponent::Write(std::ostream &os, bool binary) const {
}


void DropoutComponent::Init(int32 dim, BaseFloat dropout_proportion) {
void DropoutComponent::Init(int32 dim, BaseFloat dropout_proportion, bool dropout_per_frame) {

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

please watch line length (80-char limit)

dropout_proportion_ = dropout_proportion;
dropout_per_frame_ = dropout_per_frame;
dim_ = dim;
}

// Parses "dim", "dropout-proportion" (both required) and
// "dropout-per-frame" (optional, defaults to false) from the config line.
void DropoutComponent::InitFromConfig(ConfigLine *cfl) {
  int32 dim = 0;
  BaseFloat dropout_proportion = 0.0;
  bool dropout_per_frame = false;
  bool ok = cfl->GetValue("dim", &dim) &&
      cfl->GetValue("dropout-proportion", &dropout_proportion);
  // "dropout-per-frame" is optional, so we deliberately ignore the return
  // status: if absent, dropout_per_frame keeps its default of false.
  // (This removes the redundant 'ok2' branch from the posted diff, whose
  // two arms were identical.)
  cfl->GetValue("dropout-per-frame", &dropout_per_frame);
  if (!ok || cfl->HasUnusedValues() || dim <= 0 ||
      dropout_proportion < 0.0 || dropout_proportion > 1.0)
    KALDI_ERR << "Invalid initializer for layer of type "
              << Type() << ": \"" << cfl->WholeLine() << "\"";
  Init(dim, dropout_proportion, dropout_per_frame);
}

// Returns a human-readable description of the component.  (The posted
// diff left the removed line's trailing ';' in place, producing a broken
// stream expression; this is the merged, compilable form.)
std::string DropoutComponent::Info() const {
  std::ostringstream stream;
  stream << Type() << ", dim=" << dim_
         << ", dropout-proportion=" << dropout_proportion_
         << ", dropout-per-frame=" << dropout_per_frame_;
  return stream.str();
}

Expand All @@ -119,16 +129,34 @@ void DropoutComponent::Propagate(const ComponentPrecomputedIndexes *indexes,

BaseFloat dropout = dropout_proportion_;
KALDI_ASSERT(dropout >= 0.0 && dropout <= 1.0);
if(dropout_per_frame_)

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please use the correct code style. Should be

   if (x) { 
 ...

and note the space after if. You can run misc/maintenance/cpplint.py on your code to check for style problems.

{
// This const_cast is only safe assuming you don't attempt
// to use multi-threaded code with the GPU.
const_cast<CuRand<BaseFloat>&>(random_generator_).RandUniform(out);

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here, you'll want to create a temporary vector with dimension equal to the num-rows in your 'in'/'out' matrices, and do the rand stuff on that, then you'll need CopyColsFromVec().


// This const_cast is only safe assuming you don't attempt
// to use multi-threaded code with the GPU.
const_cast<CuRand<BaseFloat>&>(random_generator_).RandUniform(out);
out->Add(-dropout); // now, a proportion "dropout" will be <0.0
out->ApplyHeaviside(); // apply the function (x>0?1:0). Now, a proportion "dropout" will
// be zero and (1 - dropout) will be 1.0.

out->Add(-dropout); // now, a proportion "dropout" will be <0.0
out->ApplyHeaviside(); // apply the function (x>0?1:0). Now, a proportion "dropout" will
// be zero and (1 - dropout) will be 1.0.
out->MulElements(in);
} else {

out->MulElements(in);
// This const_cast is only safe assuming you don't attempt
// to use multi-threaded code with the GPU.
const_cast<CuRand<BaseFloat>&>(random_generator_).RandUniform(out);
out->Add(-dropout); // now, a proportion "dropout" will be <0.0
out->ApplyHeaviside(); // apply the function (x>0?1:0). Now, a proportion "dropout" will
// be zero and (1 - dropout) will be 1.0.
CuVector<BaseFloat> *random_drop_vector = new CuVector<BaseFloat>(in.NumRows(), kSetZero);
MatrixIndexT i = 0;
random_drop_vector->CopyColFromMat(*out, i);
for (MatrixIndexT i = 0; i < in.NumCols(); i++)
{
out->CopyColFromVec(*random_drop_vector, i);
}
out->MulElements(in);
}
}


Expand All @@ -154,6 +182,8 @@ void DropoutComponent::Read(std::istream &is, bool binary) {
ReadBasicType(is, binary, &dim_);
ExpectToken(is, binary, "<DropoutProportion>");
ReadBasicType(is, binary, &dropout_proportion_);
ExpectToken(is, binary, "<DropoutPerFrame>");
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Make this backcompatible. Change this to ReadToken and then add an if condition to check which token is present.
See other components where ReadToken is used.

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@GaofengCheng, you need to understand what Vimal was saying here- there needs to be back compatibility code for the old format. Search for ReadToken() in the file for examples.

However, the reason for your error is that you need to recompile in 'chainbin/' (and possibly chain/').

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

.. and, of course, make this back compatible.

ReadBasicType(is, binary, &dropout_per_frame_);
ExpectToken(is, binary, "</DropoutComponent>");
}

Expand All @@ -163,6 +193,8 @@ void DropoutComponent::Write(std::ostream &os, bool binary) const {
WriteBasicType(os, binary, dim_);
WriteToken(os, binary, "<DropoutProportion>");
WriteBasicType(os, binary, dropout_proportion_);
// <DropoutPerFrame> is new in this on-disk format.
// NOTE(review): Read() needs back-compatibility handling for models
// written without this token (see review comments on Read()).
WriteToken(os, binary, "<DropoutPerFrame>");
WriteBasicType(os, binary, dropout_per_frame_);
WriteToken(os, binary, "</DropoutComponent>");
}

Expand Down
16 changes: 10 additions & 6 deletions src/nnet3/nnet-simple-component.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,11 +87,11 @@ class PnormComponent: public Component {
// "Dropout: A Simple Way to Prevent Neural Networks from Overfitting".
class DropoutComponent : public RandomComponent {
public:
void Init(int32 dim, BaseFloat dropout_proportion = 0.0);
void Init(int32 dim, BaseFloat dropout_proportion = 0.0, bool dropout_per_frame = false);

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

please watch line length.

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

line too long


DropoutComponent(int32 dim, BaseFloat dropout = 0.0) { Init(dim, dropout); }
DropoutComponent(int32 dim, BaseFloat dropout = 0.0, bool dropout_per_frame = false) { Init(dim, dropout, dropout_per_frame); }

DropoutComponent(): dim_(0), dropout_proportion_(0.0) { }
DropoutComponent(): dim_(0), dropout_proportion_(0.0), dropout_per_frame_(false) { }

virtual int32 Properties() const {
return kLinearInInput|kBackpropInPlace|kSimpleComponent|kBackpropNeedsInput|kBackpropNeedsOutput;
Expand Down Expand Up @@ -120,17 +120,21 @@ class DropoutComponent : public RandomComponent {
Component *to_update,
CuMatrixBase<BaseFloat> *in_deriv) const;
virtual Component* Copy() const { return new DropoutComponent(dim_,
dropout_proportion_); }
dropout_proportion_,
dropout_per_frame_); }
virtual std::string Info() const;

void SetDropoutProportion(BaseFloat dropout_proportion) { dropout_proportion_ = dropout_proportion; }
void SetDropoutProportion(BaseFloat dropout_proportion, bool dropout_per_frame) {

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

.. remove dropout_per_frame..

dropout_proportion_ = dropout_proportion;
dropout_per_frame_ = dropout_per_frame;
}

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Odd indentation. Make sure you are not introducing tabs into the file.
If you use emacs, use

(setq-default tab-width 4)
(setq-default fill-column 80)
(setq-default indent-tabs-mode `nil)
(add-hook 'write-file-hooks 'delete-trailing-whitespace)


(load-file "~/.google-c-style.el")
(add-hook 'c-mode-common-hook 'google-set-c-style)
(add-hook 'c-mode-common-hook 'google-make-newline-indent)

You can find google-c-style.el from a web search.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@danpovey updating the new version, sorry for incorrect format, I'm using visual studio code/sublime, and so far I haven't found a format tool that could keep exactly the same as the way we are using in Kaldi (I have tried to format nnet-simple-component.cc, it changes a lot to the existing code, though it's also google style, ). I PR the nnet-simple-component.cc formated by
format tool under sublime.....


private:
int32 dim_;
/// dropout-proportion is the proportion that is dropped out,
/// e.g. if 0.1, we set 10% to zero value.
BaseFloat dropout_proportion_;

bool dropout_per_frame_;
};

class ElementwiseProductComponent: public Component {
Expand Down
10 changes: 8 additions & 2 deletions src/nnet3/nnet-utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -524,12 +524,14 @@ std::string NnetInfo(const Nnet &nnet) {
}

// Sets the dropout proportion (and per-frame flag) on every
// DropoutComponent in 'nnet'; other components are left untouched.
void SetDropoutProportion(BaseFloat dropout_proportion,
                          bool dropout_per_frame,
                          Nnet *nnet) {
  // The posted diff assigned 'dropout_per_frame = false;' here, silently
  // discarding the caller's value (flagged in review); the parameter is
  // now honored.
  for (int32 c = 0; c < nnet->NumComponents(); c++) {
    Component *comp = nnet->GetComponent(c);
    DropoutComponent *dc = dynamic_cast<DropoutComponent*>(comp);
    if (dc != NULL)
      dc->SetDropoutProportion(dropout_proportion, dropout_per_frame);
  }
}

Expand Down Expand Up @@ -694,18 +696,22 @@ void ReadEditConfig(std::istream &edit_config_is, Nnet *nnet) {
// matches names of components, not nodes.
config_line.GetValue("name", &name_pattern);
BaseFloat proportion = -1;
bool dropout_per_frame = false;
if (!config_line.GetValue("proportion", &proportion)) {
KALDI_ERR << "In edits-config, expected proportion to be set in line: "
<< config_line.WholeLine();
}
if (!config_line.GetValue("dropout-per-frame", &dropout_per_frame)) {
dropout_per_frame = false;
}
DropoutComponent *component = NULL;
int32 num_dropout_proportions_set = 0;
for (int32 c = 0; c < nnet->NumComponents(); c++) {
if (NameMatchesPattern(nnet->GetComponentName(c).c_str(),
name_pattern.c_str()) &&
(component =
dynamic_cast<DropoutComponent*>(nnet->GetComponent(c)))) {
component->SetDropoutProportion(proportion);
component->SetDropoutProportion(proportion, dropout_per_frame);
num_dropout_proportions_set++;
}
}
Expand Down
4 changes: 2 additions & 2 deletions src/nnet3/nnet-utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ std::string NnetInfo(const Nnet &nnet);

/// Sets the dropout proportion of every component of type DropoutComponent
/// in 'nnet' to 'dropout_proportion', and sets whether dropout is applied
/// per frame (whole rows) via 'dropout_per_frame'.
void SetDropoutProportion(BaseFloat dropout_proportion,
                          bool dropout_per_frame,
                          Nnet *nnet);

/// This function finds a list of components that are never used, and outputs
/// the integer comopnent indexes (you can use these to index
Expand Down Expand Up @@ -233,7 +233,7 @@ void FindOrphanNodes(const Nnet &nnet, std::vector<int32> *nodes);
remove internal nodes directly; instead you should use the command
'remove-orphans'.

set-dropout-proportion [name=<name-pattern>] proportion=<dropout-proportion>
set-dropout-proportion [name=<name-pattern>] proportion=<dropout-proportion> dropout-per-frame=<dropout-per-frame>
Sets the dropout rates for any components of type DropoutComponent whose
names match the given <name-pattern> (e.g. lstm*). <name-pattern> defaults to "*".
\endverbatim
Expand Down
2 changes: 1 addition & 1 deletion src/nnet3bin/nnet3-combine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ int main(int argc, char *argv[]) {
} else {
KALDI_LOG << "Copying the single input model directly to the output, "
<< "without any combination.";
SetDropoutProportion(0, &nnet);
SetDropoutProportion(0, false, &nnet);
WriteKaldiObject(nnet, nnet_wxfilename, binary_write);
}
KALDI_LOG << "Finished combining neural nets, wrote model to "
Expand Down