forked from kaldi-asr/kaldi
Commit
Merge branch 'master' of https://github.com/kaldi-asr/kaldi
* 'master' of https://github.com/kaldi-asr/kaldi:
  [src] Cosmetic change: remove 'train.tra' from usage messages (kaldi-asr#1529)
  [src] cudamatrix: speed up AddColSumMat with transform reduce kernel template (kaldi-asr#1530)
  [build] Remove openfst check (kaldi-asr#1531)
  [build,src,doc] Modify get_version.sh to deal better with whitespace (avoid space in version); minor fixes (kaldi-asr#1526)
  [scripts,egs] Add options for using PCA instead of LDA+MLLT for ivectors used in ASR; results are reported in the default TDNN recipe in AMI. Update steps/online/nnet2/{train_diag_ubm.sh,train_ivector_extractor.sh} so that they now back up the contents of their destination directory if it already exists. (kaldi-asr#1514)
  [src] (minor) Add missing SetZero() to NaturalGradientAffineComponent::Scale() if scale==0.0 (kaldi-asr#1522)
  [src,doc] Fix several unrelated minor problems. Thanks: gaoxinglong
  [src] Add noexcept to hashing function objects (kaldi-asr#1519)
  [egs] Fix to egs/wsj/s5/run.sh (unset variable) (kaldi-asr#1517)
  [misc] Remove execute permissions where not needed (kaldi-asr#1515)
  [src,scripts] Several unrelated cosmetic changes
  [egs] Fixes to the Babel pipeline; thanks to Fred Richardson (kaldi-asr#1509)
  [src] Fix exit code of extract-rows.cc (kaldi-asr#1510)
Showing 121 changed files with 5,056 additions and 424 deletions.
@@ -1 +1 @@
-tuning/run_tdnn_1b.sh
+tuning/run_tdnn_1d.sh
@@ -0,0 +1,269 @@
#!/bin/bash

# Same as 1b, but uses PCA instead of LDA features for the ivector extractor.

# Results on 03/27/2017:
# local/chain/compare_wer_general.sh ihm tdnn1b_sp_bi tdnn1d_sp_bi
# System                     tdnn1b_sp_bi  tdnn1d_sp_bi
# WER on dev                 22.0          21.9
# WER on eval                22.2          22.3
# Final train prob           -0.0813472    -0.0807054
# Final valid prob           -0.132032     -0.133564
# Final train prob (xent)    -1.41543      -1.41951
# Final valid prob (xent)    -1.62316      -1.63021

set -e -o pipefail
# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
mic=ihm
nj=30
min_seg_len=1.55
use_ihm_ali=false
train_set=train_cleaned
gmm=tri3_cleaned  # the GMM for the target data
ihm_gmm=tri3      # the GMM for the IHM system (if --use-ihm-ali true).
num_threads_ubm=32
ivector_transform_type=pca
nnet3_affix=_cleaned  # cleanup affix for nnet3 and chain dirs, e.g. _cleaned

# The rest are configs specific to this script.  Most of the parameters
# are just hardcoded at this level, in the commands below.
train_stage=-10
tree_affix=      # affix for the tree directory, e.g. "a" or "b", in case we change the configuration.
tdnn_affix=1d    # affix for the TDNN directory, e.g. "a" or "b", in case we change the configuration.
common_egs_dir=  # you can set this to use previously dumped egs.

# End configuration section.
echo "$0 $@"  # Print the command line for logging

. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh

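# utils/parse_options.sh maps command-line options onto the variables declared
# above, so any of them can be overridden at invocation time without editing
# this file.  A hypothetical example (the option values here are illustrative,
# not from this commit):
#
#   local/chain/tuning/run_tdnn_1d.sh --stage 14 --mic sdm1 --nj 50
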
if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

local/nnet3/run_ivector_common.sh --stage $stage \
                                  --mic $mic \
                                  --nj $nj \
                                  --min-seg-len $min_seg_len \
                                  --train-set $train_set \
                                  --gmm $gmm \
                                  --num-threads-ubm $num_threads_ubm \
                                  --ivector-transform-type "$ivector_transform_type" \
                                  --nnet3-affix "$nnet3_affix"

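# The substantive change relative to run_tdnn_1b.sh is the
# --ivector-transform-type "pca" option above: run_ivector_common.sh then
# estimates a PCA transform on the high-resolution features, instead of
# LDA+MLLT, when training the diagonal UBM and the ivector extractor.  A
# hypothetical run keeping the 1b-style behavior (assuming the script also
# accepts "lda" for this option):
#
#   local/chain/tuning/run_tdnn_1d.sh --ivector-transform-type lda
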
# Note: the first stage of the following script is stage 8.
local/nnet3/prepare_lores_feats.sh --stage $stage \
                                   --mic $mic \
                                   --nj $nj \
                                   --min-seg-len $min_seg_len \
                                   --use-ihm-ali $use_ihm_ali \
                                   --train-set $train_set

if $use_ihm_ali; then
  gmm_dir=exp/ihm/${ihm_gmm}
  ali_dir=exp/${mic}/${ihm_gmm}_ali_${train_set}_sp_comb_ihmdata
  lores_train_data_dir=data/$mic/${train_set}_ihmdata_sp_comb
  tree_dir=exp/$mic/chain${nnet3_affix}/tree_bi${tree_affix}_ihmdata
  lat_dir=exp/$mic/chain${nnet3_affix}/${gmm}_${train_set}_sp_comb_lats_ihmdata
  dir=exp/$mic/chain${nnet3_affix}/tdnn${tdnn_affix}_sp_bi_ihmali
  # note: the distinction between when we use the 'ihmdata' suffix versus
  # 'ihmali' is pretty arbitrary.
else
  gmm_dir=exp/${mic}/$gmm
  ali_dir=exp/${mic}/${gmm}_ali_${train_set}_sp_comb
  lores_train_data_dir=data/$mic/${train_set}_sp_comb
  tree_dir=exp/$mic/chain${nnet3_affix}/tree_bi${tree_affix}
  lat_dir=exp/$mic/chain${nnet3_affix}/${gmm}_${train_set}_sp_comb_lats
  dir=exp/$mic/chain${nnet3_affix}/tdnn${tdnn_affix}_sp_bi
fi

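# With the defaults above (mic=ihm, use_ihm_ali=false, gmm=tri3_cleaned,
# nnet3_affix=_cleaned, tdnn_affix=1d, tree_affix empty), the 'else' branch
# expands to, for example:
#
#   gmm_dir=exp/ihm/tri3_cleaned
#   tree_dir=exp/ihm/chain_cleaned/tree_bi
#   dir=exp/ihm/chain_cleaned/tdnn1d_sp_bi
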
train_data_dir=data/$mic/${train_set}_sp_hires_comb
train_ivector_dir=exp/$mic/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires_comb
final_lm=`cat data/local/lm/final_lm`
LM=$final_lm.pr1-7

for f in $gmm_dir/final.mdl $lores_train_data_dir/feats.scp \
    $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp; do
  [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done

if [ $stage -le 11 ]; then
  if [ -f $ali_dir/ali.1.gz ]; then
    echo "$0: alignments in $ali_dir appear to already exist.  Please either remove them"
    echo " ... or use a later --stage option."
    exit 1
  fi
  echo "$0: aligning perturbed, short-segment-combined ${maybe_ihm}data"
  steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" \
    ${lores_train_data_dir} data/lang $gmm_dir $ali_dir
fi

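# steps/align_fmllr.sh writes the alignments as gzipped archives, one per job
# (ali.1.gz ... ali.$nj.gz), which is why the sanity check below only looks
# for ali.1.gz.
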
[ ! -f $ali_dir/ali.1.gz ] && echo "$0: expected $ali_dir/ali.1.gz to exist" && exit 1

if [ $stage -le 12 ]; then
  echo "$0: creating lang directory with one state per phone."
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file.  [note, it really has two states... the first one is only repeated
  # once, the second one has zero or more repeats.]
  if [ -d data/lang_chain ]; then
    if [ data/lang_chain/L.fst -nt data/lang/L.fst ]; then
      echo "$0: data/lang_chain already exists, not overwriting it; continuing"
    else
      echo "$0: data/lang_chain already exists and seems to be older than data/lang..."
      echo " ... not sure what to do.  Exiting."
      exit 1;
    fi
  else
    cp -r data/lang data/lang_chain
    silphonelist=$(cat data/lang_chain/phones/silence.csl) || exit 1;
    nonsilphonelist=$(cat data/lang_chain/phones/nonsilence.csl) || exit 1;
    # Use our special topology... note that later on we may have to tune this
    # topology.
    steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >data/lang_chain/topo
  fi
fi

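# For context: the 'chain' topology written by gen_topo.py lets each phone be
# traversed in as little as one frame, which is what makes it workable to
# evaluate the network at one third of the usual frame rate (see the
# --frame-subsampling-factor 3 options below).
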
if [ $stage -le 13 ]; then
  # Get the alignments as lattices (gives the chain training more freedom).
  # note: the number of jobs here (100) doesn't have to match the alignment
  # stage above.
  steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
    data/lang $gmm_dir $lat_dir
  rm $lat_dir/fsts.*.gz  # save space
fi

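# Unlike stage 11, which produced single best-path alignments, align_fmllr_lats.sh
# stores lattices (lat.*.gz); chain training uses these as numerator supervision,
# which leaves the model some freedom in the exact frame-level alignment.
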
if [ $stage -le 14 ]; then
  # Build a tree using our new topology.  We know we have alignments for the
  # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
  # those.
  if [ -f $tree_dir/final.mdl ]; then
    echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
    exit 1;
  fi
  steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
      --context-opts "--context-width=2 --central-position=1" \
      --leftmost-questions-truncate -1 \
      --cmd "$train_cmd" 4200 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir
fi

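# Notes on the options above: --frame-subsampling-factor 3 builds the tree at
# one third of the input frame rate (standard for chain models); 4200 is the
# target number of tree leaves, which later becomes $num_targets, the dimension
# of the network's output layers; and --context-width=2 --central-position=1
# gives a left-biphone tree (one phone of left context only).
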
xent_regularize=0.1

if [ $stage -le 15 ]; then
  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $tree_dir/tree | grep num-pdfs | awk '{print $2}')
  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=100 name=ivector
  input dim=40 name=input

  # please note that it is important to have the input layer with the name=input
  # as the layer immediately preceding the fixed-affine-layer, to enable
  # the use of the short notation for the descriptor.
  fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

  # the first splicing is moved before the lda layer, so no splicing here
  relu-renorm-layer name=tdnn1 dim=450
  relu-renorm-layer name=tdnn2 input=Append(-1,0,1) dim=450
  relu-renorm-layer name=tdnn3 input=Append(-1,0,1) dim=450
  relu-renorm-layer name=tdnn4 input=Append(-3,0,3) dim=450
  relu-renorm-layer name=tdnn5 input=Append(-3,0,3) dim=450
  relu-renorm-layer name=tdnn6 input=Append(-3,0,3) dim=450
  relu-renorm-layer name=tdnn7 input=Append(-3,0,3) dim=450

  ## adding the layers for the chain branch
  relu-renorm-layer name=prefinal-chain input=tdnn7 dim=450 target-rms=0.5
  output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5

  # adding the layers for the xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model.  We use
  # 0.5 / args.xent_regularize as the learning-rate factor; this value is
  # suitable because it means the xent final layer learns at a rate
  # independent of the regularization constant, and the 0.5 was tuned so as
  # to make the relative progress similar in the xent and regular final
  # layers.
  relu-renorm-layer name=prefinal-xent input=tdnn7 dim=450 target-rms=0.5
  output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF

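  # The Append(...) descriptors above define the TDNN's temporal splicing: each
  # layer sees a small window of frames from the layer below.  Adding up the
  # offsets (+-1 at the lda layer, tdnn2 and tdnn3; +-3 at tdnn4 through tdnn7)
  # gives a total acoustic context of 1+1+1+3+3+3+3 = 15 frames on each side.
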
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi

if [ $stage -le 16 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage
  fi

  mkdir -p $dir/egs  # make sure the directory exists before creating the marker file.
  touch $dir/egs/.nodelete  # keep egs around in case the run dies.

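  # (utils/create_split_dir.pl spreads the large egs archives over several
  # filesystems and symlinks them into $dir/egs/storage; the hostname test
  # above restricts this to the CLSP grid, where the /export/b0{5,6,7,8}
  # disks exist.)
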
  steps/nnet3/chain/train.py --stage $train_stage \
    --cmd "$decode_cmd" \
    --feat.online-ivector-dir $train_ivector_dir \
    --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient 0.1 \
    --chain.l2-regularize 0.00005 \
    --chain.apply-deriv-weights false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --egs.dir "$common_egs_dir" \
    --egs.opts "--frames-overlap-per-eg 0" \
    --egs.chunk-width 150 \
    --trainer.num-chunk-per-minibatch 128 \
    --trainer.frames-per-iter 1500000 \
    --trainer.num-epochs 4 \
    --trainer.optimization.num-jobs-initial 2 \
    --trainer.optimization.num-jobs-final 12 \
    --trainer.optimization.initial-effective-lrate 0.001 \
    --trainer.optimization.final-effective-lrate 0.0001 \
    --trainer.max-param-change 2.0 \
    --cleanup.remove-egs true \
    --feat-dir $train_data_dir \
    --tree-dir $tree_dir \
    --lat-dir $lat_dir \
    --dir $dir
fi

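# Training ramps from 2 parallel jobs at an effective learning rate of 0.001 to
# 12 jobs at 0.0001 over the 4 epochs; train.py adjusts both per iteration, so
# the learning rate decays smoothly rather than in steps.
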
graph_dir=$dir/graph_${LM}
if [ $stage -le 17 ]; then
  # Note: data/lang_${LM} might appear mismatched with the model as far as the
  # 'topo' is concerned, but that doesn't matter: mkgraph.sh doesn't read the
  # 'topo' from the lang directory (it gets the topology from the model).
  utils/mkgraph.sh --self-loop-scale 1.0 data/lang_${LM} $dir $graph_dir
fi

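# --self-loop-scale 1.0 matches the acoustic-scaling conventions of chain
# models; together with --acwt 1.0 / --post-decode-acwt 10.0 in the decoding
# stage below, it keeps the final lattice scores in the range the usual
# scoring scripts expect.
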
if [ $stage -le 18 ]; then
  rm $dir/.error 2>/dev/null || true
  for decode_set in dev eval; do
    (
      steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
        --nj $nj --cmd "$decode_cmd" \
        --online-ivector-dir exp/$mic/nnet3${nnet3_affix}/ivectors_${decode_set}_hires \
        --scoring-opts "--min-lmwt 5" \
        $graph_dir data/$mic/${decode_set}_hires $dir/decode_${decode_set} || exit 1;
    ) || touch $dir/.error &
  done
  wait
  if [ -f $dir/.error ]; then
    echo "$0: something went wrong in decoding"
    exit 1
  fi
fi
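# (The decode loop above runs the dev and eval decodes as background subshells;
# any failure touches $dir/.error, and the 'wait' plus the marker-file check
# turn a failed decode into a nonzero exit for the whole stage.)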
exit 0