[egs] Updating WSJ TDNN example to use batchnorm instead of renorm.
danpovey committed Oct 4, 2017
1 parent e082c17 commit 0f2df80
Showing 2 changed files with 338 additions and 1 deletion.
2 changes: 1 addition & 1 deletion egs/wsj/s5/local/chain/run_tdnn.sh
337 changes: 337 additions & 0 deletions egs/wsj/s5/local/chain/tuning/run_tdnn_1c.sh
@@ -0,0 +1,337 @@
#!/bin/bash

# 1c is as 1b but using batchnorm instead of renorm
# 1b is as 1a but using --proportional-shrink=60.0
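#
# (For reference: 'renorm' is Kaldi's NormalizeComponent, which just scales
# each frame's activation vector to a fixed RMS, i.e. roughly
# y = x * target-rms * sqrt(dim) / ||x||; 'batchnorm' is standard batch
# normalization (Ioffe & Szegedy, 2015), which normalizes each dimension
# using minibatch statistics, roughly y = (x - mean) / sqrt(var + epsilon).)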

# local/chain/compare_wer.sh exp/chain/tdnn1a_sp exp/chain/tdnn1b_sp
# System                      tdnn1a_sp  tdnn1b_sp
# WER dev93 (tgpr)                 7.87       7.24
# WER dev93 (tg)                   7.61       6.95
# WER dev93 (big-dict,tgpr)        5.71       5.19
# WER dev93 (big-dict,fg)          5.10       4.52
# WER eval92 (tgpr)                5.23       5.09
# WER eval92 (tg)                  4.87       4.64
# WER eval92 (big-dict,tgpr)       3.24       2.91
# WER eval92 (big-dict,fg)         2.71       2.39
# Final train prob             -0.0414    -0.0570
# Final valid prob             -0.0634    -0.0680
# Final train prob (xent)      -0.8216    -0.9587
# Final valid prob (xent)      -0.9208    -1.0039


# steps/info/chain_dir_info.pl exp/chain/tdnn1b_sp
# exp/chain/tdnn1b_sp: num-iters=102 nj=2..5 num-params=7.6M dim=40+100->2889 combine=-0.066->-0.063 xent:train/valid[67,101,final]=(-1.12,-0.979,-0.959/-1.13,-1.03,-1.00) logprob:train/valid[67,101,final]=(-0.071,-0.058,-0.057/-0.077,-0.069,-0.068)
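# (Reading the line above: num-iters is the number of training iterations,
# nj=2..5 is the ramp of parallel training jobs, dim=40+100->2889 means
# 40-dim MFCCs plus a 100-dim i-vector on input and 2889 output pdfs, and
# the xent/logprob figures are train/valid objectives at iterations 67, 101
# and for the final combined model.)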

set -e -o pipefail

# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
nj=30
train_set=train_si284
test_sets="test_dev93 test_eval92"
gmm=tri4b   # this is the source gmm-dir that we'll use for alignments; it
            # should have alignments for the specified training data.
num_threads_ubm=32
nnet3_affix= # affix for exp dirs, e.g. it was _cleaned in tedlium.

# Options which are not passed through to run_ivector_common.sh
affix=1c   # affix for the TDNN directory, e.g. "1a" or "1b", in case we change the configuration.
common_egs_dir=
reporting_email=

# chain options
train_stage=-10
xent_regularize=0.1

# training chunk-options
chunk_width=140,100,160
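# (a comma-separated list: the egs are cut into chunks of these widths, and
# the decoding stages below take the first value as frames-per-chunk.)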
# we don't need extra left/right context for TDNN systems.
chunk_left_context=0
chunk_right_context=0

# training options
srand=0
remove_egs=true

# decode options
test_online_decoding=false  # if true, it will run the last decoding stage.

# End configuration section.
echo "$0 $@" # Print the command line for logging


. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh


if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

local/nnet3/run_ivector_common.sh \
  --stage $stage --nj $nj \
  --train-set $train_set --gmm $gmm \
  --num-threads-ubm $num_threads_ubm \
  --nnet3-affix "$nnet3_affix"
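
# (run_ivector_common.sh handles the speed-perturbation of the data,
# alignment of the perturbed data, hires MFCC extraction, and i-vector
# extractor training and extraction; the variables below point at its
# outputs.)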



gmm_dir=exp/${gmm}
ali_dir=exp/${gmm}_ali_${train_set}_sp
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
dir=exp/chain${nnet3_affix}/tdnn${affix}_sp
train_data_dir=data/${train_set}_sp_hires
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires
lores_train_data_dir=data/${train_set}_sp

# note: you don't necessarily have to change the treedir name
# each time you do a new experiment-- only if you change the
# configuration in a way that affects the tree.
tree_dir=exp/chain${nnet3_affix}/tree_a_sp
# the 'lang' directory is created by this script.
# If you create such a directory with a non-standard topology
# you should probably name it differently.
lang=data/lang_chain

for f in $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
    $lores_train_data_dir/feats.scp $gmm_dir/final.mdl \
    $ali_dir/ali.1.gz; do
  [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done


if [ $stage -le 12 ]; then
  echo "$0: creating lang directory $lang with chain-type topology"
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file.  [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
  if [ -d $lang ]; then
    if [ $lang/L.fst -nt data/lang/L.fst ]; then
      echo "$0: $lang already exists, not overwriting it; continuing"
    else
      echo "$0: $lang already exists and seems to be older than data/lang..."
      echo " ... not sure what to do.  Exiting."
      exit 1;
    fi
  else
    cp -r data/lang $lang
    silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
    nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
    # Use our special topology... note that later on we may have to tune this
    # topology.
    steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
  fi
fi

if [ $stage -le 13 ]; then
  # Get the alignments as lattices (gives the chain training more freedom).
  # use the same num-jobs as the alignments
  steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
    data/lang $gmm_dir $lat_dir
  rm $lat_dir/fsts.*.gz # save space
fi

if [ $stage -le 14 ]; then
  # Build a tree using our new topology.  We know we have alignments for the
  # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
  # those.  The num-leaves is always somewhat less than the num-leaves from
  # the GMM baseline.
  if [ -f $tree_dir/final.mdl ]; then
    echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
    exit 1;
  fi
  steps/nnet3/chain/build_tree.sh \
    --frame-subsampling-factor 3 \
    --context-opts "--context-width=2 --central-position=1" \
    --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
    $lang $ali_dir $tree_dir
fi


if [ $stage -le 15 ]; then
  mkdir -p $dir
  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $tree_dir/tree | grep num-pdfs | awk '{print $2}')
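  # with the default xent_regularize=0.1, the factor below comes to 5.0.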
  learning_rate_factor=$(python -c "print(0.5/$xent_regularize)")

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=100 name=ivector
  input dim=40 name=input

  # please note that it is important to have the input layer with name=input
  # as the layer immediately preceding the fixed-affine-layer, to enable
  # the use of short notation for the descriptor.
  fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

  # the first splicing is moved before the lda layer, so no splicing here
  relu-batchnorm-layer name=tdnn1 dim=512
  relu-batchnorm-layer name=tdnn2 dim=512 input=Append(-1,0,1)
  relu-batchnorm-layer name=tdnn3 dim=512 input=Append(-1,0,1)
  relu-batchnorm-layer name=tdnn4 dim=512 input=Append(-3,0,3)
  relu-batchnorm-layer name=tdnn5 dim=512 input=Append(-3,0,3)
  relu-batchnorm-layer name=tdnn6 dim=512 input=Append(-6,-3,0)

  # adding the layers for the chain branch
  relu-batchnorm-layer name=prefinal-chain dim=512 target-rms=0.5
  output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5

  # adding the layers for the xent branch.  This block prints the configs for
  # a separate output that will be trained with a cross-entropy objective in
  # the 'chain' models... this has the effect of regularizing the hidden parts
  # of the model.  We use 0.5 / xent_regularize as the learning-rate factor:
  # dividing by xent_regularize means the xent final layer learns at a rate
  # independent of the regularization constant, and the 0.5 was tuned so as to
  # make the relative progress similar in the xent and regular final layers.
  relu-batchnorm-layer name=prefinal-xent input=tdnn6 dim=512 target-rms=0.5
  output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
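
  # (by inspection, the splicing offsets above give the network a total of
  # 2+1+1+3+3+6 = 16 frames of left context and 2+1+1+3+3+0 = 10 frames of
  # right context.)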
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi


if [ $stage -le 16 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
  fi
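
  # note on --trainer.optimization.proportional-shrink below (the change
  # introduced in 1b): each iteration the parameters are scaled by roughly
  # (1 - proportional-shrink * effective-learning-rate), which acts like an
  # l2 penalty whose strength tracks the learning rate.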

  steps/nnet3/chain/train.py --stage=$train_stage \
    --cmd="$decode_cmd" \
    --feat.online-ivector-dir=$train_ivector_dir \
    --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient=0.1 \
    --chain.l2-regularize=0.00005 \
    --chain.apply-deriv-weights=false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --trainer.srand=$srand \
    --trainer.max-param-change=2.0 \
    --trainer.num-epochs=4 \
    --trainer.frames-per-iter=3000000 \
    --trainer.optimization.num-jobs-initial=2 \
    --trainer.optimization.num-jobs-final=5 \
    --trainer.optimization.initial-effective-lrate=0.001 \
    --trainer.optimization.final-effective-lrate=0.0001 \
    --trainer.optimization.shrink-value=1.0 \
    --trainer.optimization.proportional-shrink=60.0 \
    --trainer.num-chunk-per-minibatch=256,128,64 \
    --trainer.optimization.momentum=0.0 \
    --egs.chunk-width=$chunk_width \
    --egs.chunk-left-context=0 \
    --egs.chunk-right-context=0 \
    --egs.chunk-left-context-initial=0 \
    --egs.chunk-right-context-final=0 \
    --egs.dir="$common_egs_dir" \
    --egs.opts="--frames-overlap-per-eg 0" \
    --cleanup.remove-egs=$remove_egs \
    --use-gpu=true \
    --reporting.email="$reporting_email" \
    --feat-dir=$train_data_dir \
    --tree-dir=$tree_dir \
    --lat-dir=$lat_dir \
    --dir=$dir || exit 1;
fi

if [ $stage -le 17 ]; then
  # The reason we are using data/lang here, instead of $lang, is just to
  # emphasize that it's not actually important to give mkgraph.sh the
  # lang directory with the matched topology (since it gets the
  # topology file from the model).  So you could give it a different
  # lang directory, one that contained a wordlist and LM of your choice,
  # as long as phones.txt was compatible.

  utils/lang/check_phones_compatible.sh \
    data/lang_test_tgpr/phones.txt $lang/phones.txt
  utils/mkgraph.sh \
    --self-loop-scale 1.0 data/lang_test_tgpr \
    $tree_dir $tree_dir/graph_tgpr || exit 1;

  utils/lang/check_phones_compatible.sh \
    data/lang_test_bd_tgpr/phones.txt $lang/phones.txt
  utils/mkgraph.sh \
    --self-loop-scale 1.0 data/lang_test_bd_tgpr \
    $tree_dir $tree_dir/graph_bd_tgpr || exit 1;
fi

if [ $stage -le 18 ]; then
  frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
  rm $dir/.error 2>/dev/null || true
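
  # chain models are decoded with --acwt 1.0; --post-decode-acwt 10.0 scales
  # the acoustic scores in the written lattices back up so that the standard
  # scoring scripts (which assume acoustic scales around 10) work unchanged.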

  for data in $test_sets; do
    (
      data_affix=$(echo $data | sed s/test_//)
      nspk=$(wc -l <data/${data}_hires/spk2utt)
      for lmtype in tgpr bd_tgpr; do
        steps/nnet3/decode.sh \
          --acwt 1.0 --post-decode-acwt 10.0 \
          --extra-left-context 0 --extra-right-context 0 \
          --extra-left-context-initial 0 \
          --extra-right-context-final 0 \
          --frames-per-chunk $frames_per_chunk \
          --nj $nspk --cmd "$decode_cmd" --num-threads 4 \
          --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
          $tree_dir/graph_${lmtype} data/${data}_hires ${dir}/decode_${lmtype}_${data_affix} || exit 1
      done
      steps/lmrescore.sh \
        --self-loop-scale 1.0 \
        --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
        data/${data}_hires ${dir}/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
    ) || touch $dir/.error &
  done
  wait
  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
fi

# Not testing the 'looped' decoding separately, because for
# TDNN systems it would give exactly the same results as the
# normal decoding.

if $test_online_decoding && [ $stage -le 19 ]; then
  # note: if the features change (e.g. you add pitch features), you will have to
  # change the options of the following command line.
  steps/online/nnet3/prepare_online_decoding.sh \
    --mfcc-config conf/mfcc_hires.conf \
    $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online

  rm $dir/.error 2>/dev/null || true

  for data in $test_sets; do
    (
      data_affix=$(echo $data | sed s/test_//)
      nspk=$(wc -l <data/${data}_hires/spk2utt)
      # note: we just give it "data/${data}" as it only uses the wav.scp; the
      # feature type does not matter.
      for lmtype in tgpr bd_tgpr; do
        steps/online/nnet3/decode.sh \
          --acwt 1.0 --post-decode-acwt 10.0 \
          --nj $nspk --cmd "$decode_cmd" \
          $tree_dir/graph_${lmtype} data/${data} ${dir}_online/decode_${lmtype}_${data_affix} || exit 1
      done
      steps/lmrescore.sh \
        --self-loop-scale 1.0 \
        --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
        data/${data}_hires ${dir}_online/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}_online/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
    ) || touch $dir/.error &
  done
  wait
  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
fi


exit 0;

7 comments on commit 0f2df80

@osadj (Contributor) commented on 0f2df80, Oct 5, 2017:

Dan, would you please provide a brief description of batchnorm and how it compares to renorm (improves speed, performance, both, etc.)? Thanks.

@danpovey (Contributor, Author) commented on 0f2df80, Oct 5, 2017 via email.

@osadj (Contributor) commented on 0f2df80, Oct 5, 2017 via email.

@danpovey (Contributor, Author) commented on 0f2df80, Oct 5, 2017 via email.

@osadj (Contributor) commented on 0f2df80, Oct 5, 2017 via email.

@danpovey (Contributor, Author) commented on 0f2df80, Oct 5, 2017 via email.

@galv (Contributor) commented on 0f2df80, Oct 10, 2017:

Batch norm is well-known in the machine learning literature. I'm sure you can google to easily find the original paper.
