diff --git a/tools/ASR_2ch_track/RESULTS b/tools/ASR_2ch_track/RESULTS new file mode 100644 index 0000000000000000000000000000000000000000..81c18cccf079e376729104841036d746b54cf3bc --- /dev/null +++ b/tools/ASR_2ch_track/RESULTS @@ -0,0 +1,49 @@ +# CHiME-4 2ch track results +# The result is based on Hori et al, "The MERL/SRI system for the 3rd CHiME challenge using beamforming, +# robust feature extraction, and advanced speech recognition," in Proc. ASRU'15, +# and please refer the paper if you think the baseline useful. +# Note that the following result is different from that in the paper since we don't include +# SRI's robust features and system combination + +GMM noisy multi-condition with beamformit +exp/tri3b_tr05_multi_noisy/best_wer_beamformit_2mics.result +------------------- +best overall dt05 WER 17.69% (language model weight = 11) +------------------- +dt05_simu WER: 19.15% (Average), 16.14% (BUS), 23.55% (CAFE), 15.49% (PEDESTRIAN), 21.42% (STREET) +------------------- +dt05_real WER: 16.22% (Average), 20.12% (BUS), 16.25% (CAFE), 12.35% (PEDESTRIAN), 16.18% (STREET) +------------------- + +DNN sMBR +exp/tri4a_dnn_tr05_multi_noisy_smbr_i1lats/best_wer_beamformit_2mics.result +------------------- +best overall dt05 WER 11.63% (language model weight = 11) + (Number of iterations = 4) +------------------- +dt05_simu WER: 12.36% (Average), 10.66% (BUS), 15.55% (CAFE), 9.87% (PEDESTRIAN), 13.36% (STREET) +------------------- +dt05_real WER: 10.90% (Average), 13.62% (BUS), 10.63% (CAFE), 7.69% (PEDESTRIAN), 11.65% (STREET) +------------------- + +5-gram rescoring +exp/tri4a_dnn_tr05_multi_noisy_smbr_lmrescore/best_wer_beamformit_2mics_5gkn_5k.result +------------------- +best overall dt05 WER 10.17% (language model weight = 11) +------------------- +dt05_simu WER: 10.72% (Average), 9.37% (BUS), 13.70% (CAFE), 8.07% (PEDESTRIAN), 11.73% (STREET) +------------------- +dt05_real WER: 9.63% (Average), 11.93% (BUS), 9.75% (CAFE), 6.46% (PEDESTRIAN), 10.37% (STREET) +------------------- + +RNNLM +exp/tri4a_dnn_tr05_multi_noisy_smbr_lmrescore/best_wer_beamformit_2mics_rnnlm_5k_h300_w0.5_n100.result +------------------- +best overall dt05 WER 8.86% (language model weight = 12) +------------------- +dt05_simu WER: 9.50% (Average), 8.19% (BUS), 12.15% (CAFE), 7.12% (PEDESTRIAN), 10.55% (STREET) +------------------- +dt05_real WER: 8.23% (Average), 10.90% (BUS), 7.96% (CAFE), 5.22% (PEDESTRIAN), 8.82% (STREET) +------------------- + + diff --git a/tools/ASR_2ch_track/cmd.sh b/tools/ASR_2ch_track/cmd.sh new file mode 100644 index 0000000000000000000000000000000000000000..2626a1a35b2cb347d0d59119432500ae8af79a19 --- /dev/null +++ b/tools/ASR_2ch_track/cmd.sh @@ -0,0 +1,21 @@ +# you can change cmd.sh depending on what type of queue you are using. +# If you have no queueing system and want to run on a local machine, you +# can change all instances 'queue.pl' to run.pl (but be careful and run +# commands one by one: most recipes will exhaust the memory on your +# machine). queue.pl works with GridEngine (qsub). slurm.pl works +# with slurm. Different queues are configured differently, with different +# queue names and different ways of specifying things like memory; +# to account for these differences you can create and edit the file +# conf/queue.conf to match your queue's configuration. Search for +# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information, +# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl. 
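+#
+# For example (a sketch only; whether queue.pl or slurm.pl applies, and which
+# memory limits or queue/partition names to use, is site-specific and an
+# assumption here, not part of the baseline), a SLURM cluster could use:
+#   export train_cmd="slurm.pl --mem 2G"
+#   export decode_cmd="slurm.pl --mem 4G"
+#   export cuda_cmd="slurm.pl --gpu 1"
+#   export mkgraph_cmd="slurm.pl --mem 8G"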
+
+#export train_cmd="queue.pl --mem 2G"
+#export decode_cmd="queue.pl --mem 4G"
+#export mkgraph_cmd="queue.pl --mem 8G"
+
+# run it locally...
+export train_cmd=run.pl
+export decode_cmd=run.pl
+export cuda_cmd=run.pl
+export mkgraph_cmd=run.pl
diff --git a/tools/ASR_2ch_track/conf/chime4.cfg b/tools/ASR_2ch_track/conf/chime4.cfg
new file mode
index 0000000000000000000000000000000000000000..70fdd8586514c3d40ab1f8a18917bc15612a23ef
--- /dev/null
+++ b/tools/ASR_2ch_track/conf/chime4.cfg
@@ -0,0 +1,50 @@
+#BeamformIt configuration file for CHiME-4, based on the sample config for AMI data (http://groups.inf.ed.ac.uk/ami/download/)
+
+# scrolling size to compute the delays
+scroll_size = 250
+
+# cross correlation computation window size
+window_size = 500
+
+#maximum number of cross-correlation points taken into account
+nbest_amount = 4
+
+#flag whether to apply automatic noise thresholding
+do_noise_threshold = 1
+
+#percentage of frames with the lowest xcorr taken as noisy
+noise_percent = 10
+
+######## acoustic modelling parameters
+
+#transition probabilities weight for multichannel decoding
+trans_weight_multi = 25
+trans_weight_nbest = 25
+
+###
+
+#flag whether to print the features after setting them, or not
+print_features = 1
+
+#flag whether to use the bad frames in the sum process
+do_avoid_bad_frames = 1
+
+#flag to use the best channel (SNR) as a reference
+#defined from command line
+do_compute_reference = 1
+
+#flag whether to use a uem file or not (if not, process the whole file)
+do_use_uem_file = 0
+
+#flag whether to use an adaptive weights scheme or fixed weights
+do_adapt_weights = 1
+
+#flag whether to output the sph files or just run the system to create the auxiliary files
+do_write_sph_files = 1
+
+####directories where to store/retrieve info####
+#channels_file = ./cfg-files/channels
+
+#show needs to be passed as argument normally, here a default one is given just in case
+#show_id = Ttmp
+
diff --git a/tools/ASR_2ch_track/conf/decode_dnn.config b/tools/ASR_2ch_track/conf/decode_dnn.config
new file mode
index 0000000000000000000000000000000000000000..89dd9929a626ea18ce13861ab70275b254aa54d2
--- /dev/null
+++ b/tools/ASR_2ch_track/conf/decode_dnn.config
@@ -0,0 +1,2 @@
+beam=18.0 # beam for decoding. Was 13.0 in the scripts.
+lattice_beam=10.0 # this has most effect on size of the lattices.
diff --git a/tools/ASR_2ch_track/conf/fbank.conf b/tools/ASR_2ch_track/conf/fbank.conf
new file mode
index 0000000000000000000000000000000000000000..5fc7774b31fd8acc3a024a9f7dc744913cd29809
--- /dev/null
+++ b/tools/ASR_2ch_track/conf/fbank.conf
@@ -0,0 +1,11 @@
+# Filterbank feature configuration (several non-default options below).
+--window-type=hamming # use the standard Hamming window instead of Kaldi's default (Povey) window
+--use-energy=false # only fbank outputs
+--sample-frequency=16000 # CHiME-4 data is sampled at 16kHz
+
+--low-freq=64 # typical setup from Frantisek Grezl
+--high-freq=8000
+--dither=1
+
+--num-mel-bins=40 # use 40 mel bins for 16kHz data
+--htk-compat=true # try to make it compatible with HTK
diff --git a/tools/ASR_2ch_track/conf/mfcc.conf b/tools/ASR_2ch_track/conf/mfcc.conf
new file mode
index 0000000000000000000000000000000000000000..7361509099fd2abc04afa64c2258cacb760ed2c3
--- /dev/null
+++ b/tools/ASR_2ch_track/conf/mfcc.conf
@@ -0,0 +1 @@
+--use-energy=false # only non-default option.
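+#
+# Illustrative note (not part of the original baseline): these conf files are
+# read by the standard Kaldi feature-extraction wrappers, e.g. something like
+#   steps/make_mfcc.sh --mfcc-config conf/mfcc.conf --nj 10 data/dt05_real_noisy exp/make_mfcc/dt05_real_noisy mfcc
+#   steps/make_fbank.sh --fbank-config conf/fbank.conf --nj 10 data-fbank/dt05_real_noisy exp/make_fbank/dt05_real_noisy fbank
+# (the log and output directory names above are hypothetical).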
diff --git a/tools/ASR_2ch_track/local/chime4_calc_wers.sh b/tools/ASR_2ch_track/local/chime4_calc_wers.sh new file mode 100644 index 0000000000000000000000000000000000000000..cab7f52b564c49c9cc14652f2be4800a653adfe3 --- /dev/null +++ b/tools/ASR_2ch_track/local/chime4_calc_wers.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright 2015 Mitsubishi Electric Research Laboratories (Author: Shinji Watanabe) +# Apache 2.0. + +set -e + +# Config: +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + printf "%s exp/tri3b_tr05_sr_noisy noisy exp/tri4a_dnn_tr05_sr_noisy/graph_tgpr_5k\n\n" `basename $0` + exit 1; +fi + +echo "$0 $@" # Print the command line for logging + +. path.sh + +dir=$1 +enhan=$2 +graph_dir=$3 + +echo "compute dt05 WER for each location" +echo "" +mkdir -p $dir/log +for a in `find $dir/decode_tgpr_5k_dt05_real_$enhan/ | grep "\/wer_" | awk -F'[/]' '{print $NF}' | sort`; do + echo -n "$a " + if [ -e $dir/decode_tgpr_5k_dt05_simu_$enhan ]; then + cat $dir/decode_tgpr_5k_dt05_{real,simu}_$enhan/$a | grep WER | awk '{err+=$4} {wrd+=$6} END{printf("%.2f\n",err/wrd*100)}' + else + cat $dir/decode_tgpr_5k_dt05_real_$enhan/$a | grep WER | awk '{err+=$4} {wrd+=$6} END{printf("%.2f\n",err/wrd*100)}' + fi +done | sort -n -k 2 | head -n 1 > $dir/log/best_wer_$enhan + +lmw=`cut -f 1 -d" " $dir/log/best_wer_$enhan | cut -f 2 -d"_"` +echo "-------------------" +printf "best overall dt05 WER %s" `cut -f 2 -d" " $dir/log/best_wer_$enhan` +echo -n "%" +printf " (language model weight = %s)\n" $lmw +echo "-------------------" +if $eval_flag; then + tasks="dt05 et05" +else + tasks="dt05" +fi +for e_d in $tasks; do + for task in simu real; do + rdir=$dir/decode_tgpr_5k_${e_d}_${task}_$enhan + if [ -e $rdir ]; then + for a in _BUS _CAF _PED _STR; do + grep $a $rdir/scoring/test_filt.txt \ + > $rdir/scoring/test_filt_$a.txt + cat $rdir/scoring/$lmw.tra \ + | utils/int2sym.pl -f 2- $graph_dir/words.txt \ + | sed s:\::g \ + | compute-wer --text --mode=present ark:$rdir/scoring/test_filt_$a.txt ark,p:- \ + 1> $rdir/${a}_wer_$lmw 2> /dev/null + done + echo -n "${e_d}_${task} WER: `grep WER $rdir/wer_$lmw | cut -f 2 -d" "`% (Average), " + echo -n "`grep WER $rdir/_BUS_wer_$lmw | cut -f 2 -d" "`% (BUS), " + echo -n "`grep WER $rdir/_CAF_wer_$lmw | cut -f 2 -d" "`% (CAFE), " + echo -n "`grep WER $rdir/_PED_wer_$lmw | cut -f 2 -d" "`% (PEDESTRIAN), " + echo -n "`grep WER $rdir/_STR_wer_$lmw | cut -f 2 -d" "`% (STREET)" + echo "" + echo "-------------------" + fi + done +done +echo "" + +for e_d in $tasks; do + echo "-----------------------------" + echo "1-best transcription for $e_d" + echo "-----------------------------" + for task in simu real; do + rdir=$dir/decode_tgpr_5k_${e_d}_${task}_$enhan + cat $rdir/scoring/$lmw.tra \ + | utils/int2sym.pl -f 2- $graph_dir/words.txt \ + | sed s:\::g + done +done diff --git a/tools/ASR_2ch_track/local/chime4_calc_wers_smbr.sh b/tools/ASR_2ch_track/local/chime4_calc_wers_smbr.sh new file mode 100644 index 0000000000000000000000000000000000000000..79267aeb4884752fc534248270236351d9625049 --- /dev/null +++ b/tools/ASR_2ch_track/local/chime4_calc_wers_smbr.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +# Copyright 2015 Mitsubishi Electric Research Laboratories (Author: Shinji Watanabe) +# Apache 2.0. + +set -e + +# Config: +eval_flag=true # make it true when the evaluation data are released + +. 
utils/parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + printf "%s exp/tri3b_tr05_sr_noisy noisy exp/tri4a_dnn_tr05_sr_noisy/graph_tgpr_5k\n\n" `basename $0` + exit 1; +fi + +echo "$0 $@" # Print the command line for logging + +. path.sh + +dir=$1 +enhan=$2 +graph_dir=$3 + +echo "compute WER for each location" +echo "" +mkdir -p $dir/log +# collect scores +for x in `find $dir/ -type d -name "*_it*" | awk -F "_it" '{print $NF}' | sort | uniq`; do + for y in `find $dir/*_${enhan}_it*/ | grep "\/wer_" | awk -F'[/]' '{print $NF}' | sort | uniq`; do + echo -n "${x}_$y " + cat $dir/decode_tgpr_5k_dt05_{real,simu}_${enhan}_it$x/$y | grep WER | awk '{err+=$4} {wrd+=$6} END{printf("%.2f\n",err/wrd*100)}' + done +done | sort -n -k 2 | head -n 1 > $dir/log/best_wer_$enhan + +lmw=`cut -f 1 -d" " $dir/log/best_wer_$enhan | awk -F'[_]' '{print $NF}'` +it=`cut -f 1 -d" " $dir/log/best_wer_$enhan | awk -F'[_]' '{print $1}'` +echo "-------------------" +printf "best overall dt05 WER %s" `cut -f 2 -d" " $dir/log/best_wer_$enhan` +echo -n "%" +printf " (language model weight = %s)\n" $lmw +printf " (Number of iterations = %s)\n" $it +echo "-------------------" +if $eval_flag; then + tasks="dt05 et05" +else + tasks="dt05" +fi +for e_d in $tasks; do + for task in simu real; do + rdir=$dir/decode_tgpr_5k_${e_d}_${task}_${enhan}_it$it + for a in _BUS _CAF _PED _STR; do + grep $a $rdir/scoring/test_filt.txt \ + > $rdir/scoring/test_filt_$a.txt + cat $rdir/scoring/$lmw.tra \ + | utils/int2sym.pl -f 2- $graph_dir/words.txt \ + | sed s:\::g \ + | compute-wer --text --mode=present ark:$rdir/scoring/test_filt_$a.txt ark,p:- \ + 1> $rdir/${a}_wer_$lmw 2> /dev/null + done + echo -n "${e_d}_${task} WER: `grep WER $rdir/wer_$lmw | cut -f 2 -d" "`% (Average), " + echo -n "`grep WER $rdir/_BUS_wer_$lmw | cut -f 2 -d" "`% (BUS), " + echo -n "`grep WER $rdir/_CAF_wer_$lmw | cut -f 2 -d" "`% (CAFE), " + echo -n "`grep WER $rdir/_PED_wer_$lmw | cut -f 2 -d" "`% (PEDESTRIAN), " + echo -n "`grep WER $rdir/_STR_wer_$lmw | cut -f 2 -d" "`% (STREET)" + echo "" + echo "-------------------" + done +done + +for e_d in $tasks; do + echo "-----------------------------" + echo "1-best transcription for $e_d" + echo "-----------------------------" + for task in simu real; do + rdir=$dir/decode_tgpr_5k_${e_d}_${task}_${enhan}_it$it + cat $rdir/scoring/$lmw.tra \ + | utils/int2sym.pl -f 2- $graph_dir/words.txt \ + | sed s:\::g + done +done diff --git a/tools/ASR_2ch_track/local/chime4_train_lms.sh b/tools/ASR_2ch_track/local/chime4_train_lms.sh new file mode 100644 index 0000000000000000000000000000000000000000..2b207513ef91150b9edf2ae80d2b37f264858fc1 --- /dev/null +++ b/tools/ASR_2ch_track/local/chime4_train_lms.sh @@ -0,0 +1,163 @@ +#!/bin/bash + +# Modified from the script for CHiME3 baseline +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori) + +# Config: +order=5 # n-gram order + +. utils/parse_options.sh || exit 1; + +. ./path.sh + +if [ $# -ne 1 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "Please specifies a Chime4 root directory" + echo "If you use kaldi scripts distributed in the Chime4 data," + echo "It would be `pwd`/../.." + exit 1; +fi + +# check data directories +chime4_data=$1 +wsj0_data=$chime4_data/data/WSJ0 # directory of WSJ0 in Chime4. You can also specify your WSJ0 corpus directory +if [ ! -d $chime4_data ]; then + echo "$chime4_data does not exist. Please specify chime4 data root correctly" && exit 1 +fi +if [ ! 
-d $wsj0_data ]; then + echo "$wsj0_data does not exist. Please specify WSJ0 corpus directory" && exit 1 +fi +lm_train=$wsj0_data/wsj0/doc/lng_modl/lm_train/np_data + +# check whether run_init is executed +if [ ! -d data/lang ]; then + echo "error, execute local/run_init.sh, first" + exit 1; +fi + +# lm directories +dir=data/local/local_lm +srcdir=data/local/nist_lm +mkdir -p $dir + +# check srilm ngram +! which ngram-count \ + && echo "SRILM tools not installed, which are required for LM training" && exit 1; + +# extract 5k vocabulary from a baseline language model +srclm=$srcdir/lm_tgpr_5k.arpa.gz +if [ -f $srclm ]; then + echo "Getting vocabulary from a baseline language model"; + gunzip -c $srclm | awk 'BEGIN{unig=0}{ + if(unig==0){ + if($1=="\\1-grams:"){unig=1}} + else { + if ($1 != "") { + if ($1=="\\2-grams:" || $1=="\\end\\") {exit} + else {print $2}} + }}' > $dir/vocab_5k.txt +else + echo "Language model $srclm does not exist" && exit 1; +fi + +# collect training data from WSJ0 +touch $dir/train.gz +if [ `du -m $dir/train.gz | cut -f 1` -eq 63 ]; then + echo "Not getting training data again [already exists]"; +else + echo "Collecting training data from $lm_train"; + gunzip -c $lm_train/{87,88,89}/*.z \ + | awk -v voc=$dir/vocab_5k.txt ' + BEGIN{ while((getline0) { invoc[$1]=1; }} + /^ "); } + } + printf("\n"); + }' | gzip -c > $dir/train.gz +fi + +# get validation data from Chime4 dev set +touch $dir/valid.gz +if [ `du -k $dir/valid.gz | cut -f 1` -eq 68 ]; then + echo "Not getting validation data again [already exists]"; +else + echo "Collecting validation data from $chime4_data/data/transcriptions"; + cut -d" " -f2- $chime4_data/data/transcriptions/dt05_real.trn_all \ + $chime4_data/data/transcriptions/dt05_simu.trn_all \ + |gzip -c > $dir/valid.gz +fi + +# train a large n-gram language model +lm_suffix=${order}gkn_5k +if [ -f $dir/lm_${lm_suffix}.arpa.gz ]; then + echo "A $order-gram language model aready exists and is not constructed again" + echo "To reconstruct, remove $dir/$dir/lm_${lm_suffix}.arpa.gz first" +else + echo "Training a $order-gram language model" + ngram-count -text $dir/train.gz -order $order \ + -vocab $dir/vocab_5k.txt -unk -map-unk "" \ + -gt2min 1 -gt3min 1 -gt4min 2 -gt5min 2 \ + -interpolate -kndiscount \ + -lm $dir/lm_${lm_suffix}.arpa.gz +fi +echo "Checking validation perplexity of $order-gram language model" +ngram -order $order -ppl $dir/valid.gz -lm $dir/lm_${lm_suffix}.arpa.gz +# e.g. 5-gram perplexity: +# file data/local/local_lm/valid.txt: 3280 sentences, 54239 words, 3 OOVs +# 0 zeroprobs, logprob= -96775.5 ppl= 48.1486 ppl1= 60.8611 + +# Next, create the corresponding FST and lang_test_* directory. +echo "Preparing language models for test" +tmpdir=data/local/lm_tmp +lexicon=data/local/lang_tmp/lexiconp.txt +mkdir -p $tmpdir + +test=data/lang_test_${lm_suffix} +mkdir -p $test +for f in phones.txt words.txt phones.txt L.fst L_disambig.fst \ + phones; do + cp -r data/lang/$f $test +done +gunzip -c $dir/lm_${lm_suffix}.arpa.gz | \ + utils/find_arpa_oovs.pl $test/words.txt > $tmpdir/oovs_${lm_suffix}.txt + +# grep -v ' ' because the LM seems to have some strange and useless +# stuff in it with multiple 's in the history. Encountered some other similar +# things in a LM from Geoff. Removing all "illegal" combinations of and , +# which are supposed to occur only at being/end of utt. These can cause +# determinization failures of CLG [ends up being epsilon cycles]. 
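+# In short, the pipeline below converts the ARPA LM into a text-format FST
+# (arpa2fst | fstprint), drops arcs for out-of-vocabulary words
+# (utils/remove_oovs.pl, using the OOV list computed above), replaces the
+# backoff epsilons on the input side with the #0 disambiguation symbol
+# (utils/eps2disambig.pl), maps the sentence-boundary symbols to epsilon
+# (utils/s2eps.pl), and finally compiles, epsilon-removes and arc-sorts the
+# result into $test/G.fst.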
+gunzip -c $dir/lm_${lm_suffix}.arpa.gz | \
+  grep -v '<s> <s>' | \
+  grep -v '</s> <s>' | \
+  grep -v '</s> </s>' | \
+  arpa2fst - | fstprint | \
+  utils/remove_oovs.pl $tmpdir/oovs_${lm_suffix}.txt | \
+  utils/eps2disambig.pl | utils/s2eps.pl | fstcompile --isymbols=$test/words.txt \
+    --osymbols=$test/words.txt --keep_isymbols=false --keep_osymbols=false | \
+  fstrmepsilon | fstarcsort --sort_type=ilabel > $test/G.fst
+fstisstochastic $test/G.fst
+# The output is like:
+# 9.14233e-05 -0.259833
+# we do expect the first of these 2 numbers to be close to zero (the second is
+# nonzero because the backoff weights make the states sum to >1).
+# Because of the <s> fiasco for these particular LMs, the first number is not
+# as close to zero as it could be.
+
+# Everything below is only for diagnostics.
+# Checking that G has no cycles with empty words on them (e.g. <s>, </s>);
+# this might cause determinization failure of CLG.
+# #0 is treated as an empty word.
+mkdir -p $tmpdir/g
+awk '{if(NF==1){ printf("0 0 %s %s\n", $1,$1); }} END{print "0 0 #0 #0"; print "0";}' \
+  < "$lexicon" >$tmpdir/g/select_empty.fst.txt
+fstcompile --isymbols=$test/words.txt --osymbols=$test/words.txt $tmpdir/g/select_empty.fst.txt | \
+  fstarcsort --sort_type=olabel | fstcompose - $test/G.fst > $tmpdir/g/empty_words.fst
+fstinfo $tmpdir/g/empty_words.fst | grep cyclic | grep -w 'y' &&
+  echo "Language model has cycles with empty words" && exit 1
+rm -r $tmpdir/g
+
+echo "Succeeded in preparing a large ${order}-gram LM"
+rm -r $tmpdir
+
diff --git a/tools/ASR_2ch_track/local/chime4_train_rnnlms.sh b/tools/ASR_2ch_track/local/chime4_train_rnnlms.sh
new file mode
index 0000000000000000000000000000000000000000..8324c8e06b1d448a40f39c95bf02a560d805bfdb
--- /dev/null
+++ b/tools/ASR_2ch_track/local/chime4_train_rnnlms.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori)
+
+# Config:
+hidden=300 # Num-hidden units
+class=200 # Num-classes
+rnnlm_ver=rnnlm-0.3e # version of RNNLM to use
+threads=1 # for RNNLM-HS
+bptt=4 # length of BPTT unfolding in RNNLM
+bptt_block=10 # block size for BPTT unfolding in RNNLM
+
+. utils/parse_options.sh || exit 1;
+
+. ./path.sh
+. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
+           ## This relates to the queue.
+
+if [ $# -ne 1 ]; then
+  printf "\nUSAGE: %s <Chime4 root directory>\n\n" `basename $0`
+  echo "Please specify a CHiME-4 root directory"
+  echo "If you use the Kaldi scripts distributed with the CHiME-4 data,"
+  echo "it would be `pwd`/../.."
+  exit 1;
+fi
+
+# check data directories
+chime4_data=$1
+wsj0_data=$chime4_data/data/WSJ0 # directory of WSJ0 in Chime4. You can also specify your WSJ0 corpus directory
+if [ ! -d $chime4_data ]; then
+  echo "$chime4_data does not exist. Please specify chime4 data root correctly" && exit 1
+fi
+if [ ! -d $wsj0_data ]; then
+  echo "$wsj0_data does not exist.
Please specify WSJ0 corpus directory" && exit 1 +fi +lm_train=$wsj0_data/wsj0/doc/lng_modl/lm_train/np_data + +# lm directories +dir=data/local/local_lm +srcdir=data/local/nist_lm +mkdir -p $dir + +# extract 5k vocabulary from a baseline language model +srclm=$srcdir/lm_tgpr_5k.arpa.gz +if [ -f $srclm ]; then + echo "Getting vocabulary from a baseline language model"; + gunzip -c $srclm | awk 'BEGIN{unig=0}{ + if(unig==0){ + if($1=="\\1-grams:"){unig=1}} + else { + if ($1 != "") { + if ($1=="\\2-grams:" || $1=="\\end\\") {exit} + else {print $2}} + }}' | sed "s///" > $dir/vocab_5k.rnn +else + echo "Language model $srclm does not exist" && exit 1; +fi + +# collect training data from WSJ0 +touch $dir/train.rnn +if [ `du -m $dir/train.rnn | cut -f 1` -eq 223 ]; then + echo "Not getting training data again [already exists]"; +else + echo "Collecting training data from $lm_train"; + gunzip -c $lm_train/{87,88,89}/*.z \ + | awk -v voc=$dir/vocab_5k.rnn ' + BEGIN{ while((getline0) { invoc[$1]=1; }} + /^ "); } + } + printf("\n"); + }' > $dir/train.rnn +fi + +# get validation data from Chime4 dev set +touch $dir/valid.rnn +if [ `cat $dir/valid.rnn | wc -w` -eq 54239 ]; then + echo "Not getting validation data again [already exists]"; +else + echo "Collecting validation data from $chime4_data/data/transcriptions"; + cut -d" " -f2- $chime4_data/data/transcriptions/dt05_real.trn_all \ + $chime4_data/data/transcriptions/dt05_simu.trn_all \ + > $dir/valid.rnn +fi + +# RNN language model traing +$KALDI_ROOT/tools/extras/check_for_rnnlm.sh "$rnnlm_ver" || exit 1 + +# train a RNN language model +rnnmodel=$dir/rnnlm_5k_h${hidden}_bptt${bptt} +if [ -f $rnnmodel ]; then + echo "A RNN language model aready exists and is not constructed again" + echo "To reconstruct, remove $rnnmodel first" +else + echo "Training a RNN language model with $rnnlm_ver" + echo "(runtime log is written to $dir/rnnlm.log)" + $train_cmd $dir/rnnlm.log \ + $KALDI_ROOT/tools/$rnnlm_ver/rnnlm -train $dir/train.rnn -valid $dir/valid.rnn \ + -rnnlm $rnnmodel -hidden $hidden -class $class \ + -rand-seed 1 -independent -debug 1 -bptt $bptt -bptt-block $bptt_block || exit 1; +fi + +# store in a RNNLM directory with necessary files +rnndir=data/lang_test_rnnlm_5k_h${hidden} +mkdir -p $rnndir +cp $rnnmodel $rnndir/rnnlm +grep -v -e "" -e "" $dir/vocab_5k.rnn > $rnndir/wordlist.rnn +touch $rnndir/unk.probs # make an empty file because we don't know unk-word probs. + diff --git a/tools/ASR_2ch_track/local/clean_chime4_format_data.sh b/tools/ASR_2ch_track/local/clean_chime4_format_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..3c8e038f93932cd66f52b7b0f53ef240ddd693a5 --- /dev/null +++ b/tools/ASR_2ch_track/local/clean_chime4_format_data.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright 2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# This script takes data prepared in a corpus-dependent way +# in data/local/, and converts it into the "canonical" form, +# in various subdirectories of data/, e.g. data/lang, data/lang_test_ug, +# data/train_si84, etc. + +# Modified from the script for Chime2 baseline +# Shinji Watanabe 02/13/2015 + +. 
./path.sh || exit 1; + +echo "Preparing train and test data" +srcdir=data/local/data +lmdir=data/local/nist_lm +tmpdir=data/local/lm_tmp +lexicon=data/local/lang_tmp/lexiconp.txt +mkdir -p $tmpdir + +for x in et05_orig_clean dt05_orig_clean tr05_orig_clean; do + mkdir -p data/$x + cp $srcdir/${x}_wav.scp data/$x/wav.scp || exit 1; + cp $srcdir/$x.txt data/$x/text || exit 1; + cp $srcdir/$x.spk2utt data/$x/spk2utt || exit 1; + cp $srcdir/$x.utt2spk data/$x/utt2spk || exit 1; + utils/filter_scp.pl data/$x/spk2utt $srcdir/spk2gender > data/$x/spk2gender || exit 1; +done + + +# Next, for each type of language model, create the corresponding FST +# and the corresponding lang_test_* directory. + +echo Preparing language models for test + +for lm_suffix in tgpr_5k; do + test=data/lang_test_${lm_suffix} + mkdir -p $test + for f in phones.txt words.txt phones.txt L.fst L_disambig.fst \ + phones; do + cp -r data/lang/$f $test + done + gunzip -c $lmdir/lm_${lm_suffix}.arpa.gz | \ + utils/find_arpa_oovs.pl $test/words.txt > $tmpdir/oovs_${lm_suffix}.txt + + # grep -v ' ' because the LM seems to have some strange and useless + # stuff in it with multiple 's in the history. Encountered some other similar + # things in a LM from Geoff. Removing all "illegal" combinations of and , + # which are supposed to occur only at being/end of utt. These can cause + # determinization failures of CLG [ends up being epsilon cycles]. + gunzip -c $lmdir/lm_${lm_suffix}.arpa.gz | \ + grep -v ' ' | \ + grep -v ' ' | \ + grep -v ' ' | \ + arpa2fst - | fstprint | \ + utils/remove_oovs.pl $tmpdir/oovs_${lm_suffix}.txt | \ + utils/eps2disambig.pl | utils/s2eps.pl | fstcompile --isymbols=$test/words.txt \ + --osymbols=$test/words.txt --keep_isymbols=false --keep_osymbols=false | \ + fstrmepsilon | fstarcsort --sort_type=ilabel > $test/G.fst + fstisstochastic $test/G.fst + # The output is like: + # 9.14233e-05 -0.259833 + # we do expect the first of these 2 numbers to be close to zero (the second is + # nonzero because the backoff weights make the states sum to >1). + # Because of the fiasco for these particular LMs, the first number is not + # as close to zero as it could be. + + # Everything below is only for diagnostic. + # Checking that G has no cycles with empty words on them (e.g. , ); + # this might cause determinization failure of CLG. + # #0 is treated as an empty word. + mkdir -p $tmpdir/g + awk '{if(NF==1){ printf("0 0 %s %s\n", $1,$1); }} END{print "0 0 #0 #0"; print "0";}' \ + < "$lexicon" >$tmpdir/g/select_empty.fst.txt + fstcompile --isymbols=$test/words.txt --osymbols=$test/words.txt $tmpdir/g/select_empty.fst.txt | \ + fstarcsort --sort_type=olabel | fstcompose - $test/G.fst > $tmpdir/g/empty_words.fst + fstinfo $tmpdir/g/empty_words.fst | grep cyclic | grep -w 'y' && + echo "Language model has cycles with empty words" && exit 1 + rm -r $tmpdir/g +done + +echo "Succeeded in formatting data." +rm -r $tmpdir diff --git a/tools/ASR_2ch_track/local/clean_wsj0_data_prep.sh b/tools/ASR_2ch_track/local/clean_wsj0_data_prep.sh new file mode 100644 index 0000000000000000000000000000000000000000..8c6989bc0b24abea6ca9025b15a2aa0c31a14f93 --- /dev/null +++ b/tools/ASR_2ch_track/local/clean_wsj0_data_prep.sh @@ -0,0 +1,152 @@ +#!/bin/bash +set -e + +# Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0. + +# This is modified from the script in standard Kaldi recipe to account +# for the way the WSJ data is structured on the Edinburgh systems. 
+# - Arnab Ghoshal, 29/05/12 + +# Modified from the script for CHiME2 baseline +# Shinji Watanabe 02/13/2015 + +if [ $# -ne 1 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "The argument should be a the top-level WSJ corpus directory." + echo "It is assumed that there will be a 'wsj0' and a 'wsj1' subdirectory" + echo "within the top-level corpus directory." + exit 1; +fi + +wsj0=$1 + +dir=`pwd`/data/local/data +lmdir=`pwd`/data/local/nist_lm +mkdir -p $dir $lmdir +local=`pwd`/local +utils=`pwd`/utils + +. ./path.sh # Needed for KALDI_ROOT +sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe +if [ ! -x $sph2pipe ]; then + echo "Could not find (or execute) the sph2pipe program at $sph2pipe"; + exit 1; +fi + +if [ -z $IRSTLM ] ; then + export IRSTLM=$KALDI_ROOT/tools/irstlm/ +fi +export PATH=${PATH}:$IRSTLM/bin +if ! command -v prune-lm >/dev/null 2>&1 ; then + echo "$0: Error: the IRSTLM is not available or compiled" >&2 + echo "$0: Error: We used to install it by default, but." >&2 + echo "$0: Error: this is no longer the case." >&2 + echo "$0: Error: To install it, go to $KALDI_ROOT/tools" >&2 + echo "$0: Error: and run extras/install_irstlm.sh" >&2 + exit 1 +fi + +cd $dir + +# This version for SI-84 +cat $wsj0/wsj0/doc/indices/train/tr_s_wv1.ndx \ + | $local/cstr_ndx2flist.pl $wsj0 | sort -u > tr05_orig_clean.flist + +# Now for the test sets. +# $wsj0/wsj1/doc/indices/readme.doc +# describes all the different test sets. +# Note: each test-set seems to come in multiple versions depending +# on different vocabulary sizes, verbalized vs. non-verbalized +# pronunciations, etc. We use the largest vocab and non-verbalized +# pronunciations. +# The most normal one seems to be the "baseline 60k test set", which +# is h1_p0. + +# Nov'92 (330 utts, 5k vocab) +cat $wsj0/wsj0/doc/indices/test/nvp/si_et_05.ndx | \ + $local/cstr_ndx2flist.pl $wsj0 | sort > et05_orig_clean.flist + +# Note: the ???'s below match WSJ and SI_DT, or wsj and si_dt. +# Sometimes this gets copied from the CD's with upcasing, don't know +# why (could be older versions of the disks). +find $wsj0/wsj0/si_dt_05 -print | grep -i ".wv1" | sort > dt05_orig_clean.flist + +# Finding the transcript files: +find -L $wsj0 -iname '*.dot' > dot_files.flist + +# Convert the transcripts into our format (no normalization yet) +# adding suffix to utt_id +# 0 for clean condition +for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do + $local/flist2scp.pl $x.flist | sort > ${x}_sph_tmp.scp + cat ${x}_sph_tmp.scp | awk '{print $1}' \ + | $local/find_transcripts.pl dot_files.flist > ${x}_tmp.trans1 + cat ${x}_sph_tmp.scp | awk '{printf("%s %s\n", $1, $2);}' > ${x}_sph.scp + cat ${x}_tmp.trans1 | awk '{printf("%s ", $1); for(i=2;i<=NF;i++) printf("%s ", $i); printf("\n");}' > ${x}.trans1 +done + +# Do some basic normalization steps. At this point we don't remove OOVs-- +# that will be done inside the training scripts, as we'd like to make the +# data-preparation stage independent of the specific lexicon used. +noiseword=""; +for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do + cat $x.trans1 | $local/normalize_transcript.pl $noiseword \ + | sort > $x.txt || exit 1; +done + +# Create scp's with wav's. (the wv1 in the distribution is not really wav, it is sph.) +for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do + awk '{printf("%s '$sph2pipe' -f wav %s |\n", $1, $2);}' < ${x}_sph.scp \ + > ${x}_wav.scp +done + +# Make the utt2spk and spk2utt files. 
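+# (The speaker ID is taken to be the first three characters of the utterance ID;
+# that is what the m:^...: match in the perl one-liner below extracts.)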
+for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do + cat ${x}_sph.scp | awk '{print $1}' \ + | perl -ane 'chop; m:^...:; print "$_ $&\n";' > $x.utt2spk + cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1; +done + +#in case we want to limit lm's on most frequent words, copy lm training word frequency list +cp $wsj0/wsj0/doc/lng_modl/vocab/wfl_64.lst $lmdir +chmod u+w $lmdir/*.lst # had weird permissions on source. + +# The 5K vocab language model without verbalized pronunciations. +# This is used for 3rd CHiME challenge +# trigram would be: !only closed vocabulary here! +cp $wsj0/wsj0/doc/lng_modl/base_lm/tcb05cnp.z $lmdir/lm_tg_5k.arpa.gz || exit 1; +chmod u+rw $lmdir/lm_tg_5k.arpa.gz +gunzip $lmdir/lm_tg_5k.arpa.gz +tail -n 4328839 $lmdir/lm_tg_5k.arpa | gzip -c -f > $lmdir/lm_tg_5k.arpa.gz +rm $lmdir/lm_tg_5k.arpa + +prune-lm --threshold=1e-7 $lmdir/lm_tg_5k.arpa.gz $lmdir/lm_tgpr_5k.arpa || exit 1; +gzip -f $lmdir/lm_tgpr_5k.arpa || exit 1; + + +if [ ! -f wsj0-train-spkrinfo.txt ] || [ `cat wsj0-train-spkrinfo.txt | wc -l` -ne 134 ]; then + rm -f wsj0-train-spkrinfo.txt + wget http://www.ldc.upenn.edu/Catalog/docs/LDC93S6A/wsj0-train-spkrinfo.txt \ + || ( echo "Getting wsj0-train-spkrinfo.txt from backup location" && \ + wget --no-check-certificate https://sourceforge.net/projects/kaldi/files/wsj0-train-spkrinfo.txt ); +fi + +if [ ! -f wsj0-train-spkrinfo.txt ]; then + echo "Could not get the spkrinfo.txt file from LDC website (moved)?" + echo "This is possibly omitted from the training disks; couldn't find it." + echo "Everything else may have worked; we just may be missing gender info" + echo "which is only needed for VTLN-related diagnostics anyway." + exit 1 +fi +# Note: wsj0-train-spkrinfo.txt doesn't seem to be on the disks but the +# LDC put it on the web. Perhaps it was accidentally omitted from the +# disks. + +cat $wsj0/wsj0/doc/spkrinfo.txt \ + ./wsj0-train-spkrinfo.txt | \ + perl -ane 'tr/A-Z/a-z/; m/^;/ || print;' | \ + awk '{print $1, $2}' | grep -v -- -- | sort | uniq > spk2gender + + +echo "Data preparation succeeded" diff --git a/tools/ASR_2ch_track/local/cstr_ndx2flist.pl b/tools/ASR_2ch_track/local/cstr_ndx2flist.pl new file mode 100644 index 0000000000000000000000000000000000000000..d19db421a9fbedfe7bf27c5ada4e37bc04d1e163 --- /dev/null +++ b/tools/ASR_2ch_track/local/cstr_ndx2flist.pl @@ -0,0 +1,54 @@ +#!/usr/bin/env perl + +# Copyright 2010-2011 Microsoft Corporation + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + +# This is modified from the script in standard Kaldi recipe to account +# for the way the WSJ data is structured on the Edinburgh systems. +# - Arnab Ghoshal, 12/1/12 + +# This program takes as its standard input an .ndx file from the WSJ corpus that looks +# like this: +#;; File: tr_s_wv1.ndx, updated 04/26/94 +#;; +#;; Index for WSJ0 SI-short Sennheiser training data +#;; Data is read WSJ sentences, Sennheiser mic. 
+#;; Contains 84 speakers X (~100 utts per speaker MIT/SRI and ~50 utts +#;; per speaker TI) = 7236 utts +#;; +#11_1_1:wsj0/si_tr_s/01i/01ic0201.wv1 +#11_1_1:wsj0/si_tr_s/01i/01ic0202.wv1 +#11_1_1:wsj0/si_tr_s/01i/01ic0203.wv1 + +# and as command-line argument it takes the names of the WSJ disk locations, e.g.: +# /group/corpora/public/wsjcam0/data on DICE machines. +# It outputs a list of absolute pathnames. + +$wsj_dir = $ARGV[0]; + +while(){ + if(m/^;/){ next; } # Comment. Ignore it. + else { + m/^([0-9_]+):\s*(\S+)$/ || die "Could not parse line $_"; + $filename = $2; # as a subdirectory of the distributed disk. + if ($filename !~ m/\.wv1$/) { $filename .= ".wv1"; } + $filename = "$wsj_dir/$filename"; + if (-e $filename) { + print "$filename\n"; + } else { + print STDERR "File $filename found in the index but not on disk\n"; + } + } +} diff --git a/tools/ASR_2ch_track/local/find_noisy_transcripts.pl b/tools/ASR_2ch_track/local/find_noisy_transcripts.pl new file mode 100644 index 0000000000000000000000000000000000000000..fdeb38d9444a9bf22a72b9824f9ef23a4ef1fddc --- /dev/null +++ b/tools/ASR_2ch_track/local/find_noisy_transcripts.pl @@ -0,0 +1,65 @@ +#!/usr/bin/env perl +# Copyright 2010-2011 Microsoft Corporation + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + + + +# This program takes on its standard input a list of utterance +# id's, one for each line. (e.g. 4k0c030a is a an utterance id). +# It takes as +# Extracts from the dot files the transcripts for a given +# dataset (represented by a file list). +# + +@ARGV == 1 || die "find_transcripts.pl dot_files_flist < utterance_ids > transcripts"; +$dot_flist = shift @ARGV; + +open(L, "<$dot_flist") || die "Opening file list of dot files: $dot_flist\n"; +while(){ + chop; + m:\S+/(\w{6})00.dot: || die "Bad line in dot file list: $_"; + $spk = $1; + $spk2dot{$spk} = $_; +} + + + +while(){ + chop; + $uttid_orig = $_; + $uttid = substr $uttid_orig, 0, 8; + $uttid =~ m:(\w{6})\w\w: || die "Bad utterance id $_"; + $spk = $1; + if($spk ne $curspk) { + %utt2trans = { }; # Don't keep all the transcripts in memory... 
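+    # New speaker: reset the transcript cache, then read all transcripts
+    # from this speaker's dot file into %utt2trans below.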
+ $curspk = $spk; + $dotfile = $spk2dot{$spk}; + defined $dotfile || die "No dot file for speaker $spk\n"; + open(F, "<$dotfile") || die "Error opening dot file $dotfile\n"; + while() { + $_ =~ m:(.+)\((\w{8})\)\s*$: || die "Bad line $_ in dot file $dotfile (line $.)\n"; + $trans = $1; + $utt = $2; + $utt2trans{$utt} = $trans; + } + } + if(!defined $utt2trans{$uttid}) { + print STDERR "No transcript for utterance $uttid (current dot file is $dotfile)\n"; + } else { + print "$uttid_orig $utt2trans{$uttid}\n"; + } +} + + diff --git a/tools/ASR_2ch_track/local/find_transcripts.pl b/tools/ASR_2ch_track/local/find_transcripts.pl new file mode 100644 index 0000000000000000000000000000000000000000..6429411b86429f8eb9e82ba8ccc210473adbdac7 --- /dev/null +++ b/tools/ASR_2ch_track/local/find_transcripts.pl @@ -0,0 +1,64 @@ +#!/usr/bin/env perl +# Copyright 2010-2011 Microsoft Corporation + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + + + +# This program takes on its standard input a list of utterance +# id's, one for each line. (e.g. 4k0c030a is a an utterance id). +# It takes as +# Extracts from the dot files the transcripts for a given +# dataset (represented by a file list). +# + +@ARGV == 1 || die "find_transcripts.pl dot_files_flist < utterance_ids > transcripts"; +$dot_flist = shift @ARGV; + +open(L, "<$dot_flist") || die "Opening file list of dot files: $dot_flist\n"; +while(){ + chop; + m:\S+/(\w{6})00.dot: || die "Bad line in dot file list: $_"; + $spk = $1; + $spk2dot{$spk} = $_; +} + + + +while(){ + chop; + $uttid = $_; + $uttid =~ m:(\w{6})\w\w: || die "Bad utterance id $_"; + $spk = $1; + if($spk ne $curspk) { + %utt2trans = { }; # Don't keep all the transcripts in memory... + $curspk = $spk; + $dotfile = $spk2dot{$spk}; + defined $dotfile || die "No dot file for speaker $spk\n"; + open(F, "<$dotfile") || die "Error opening dot file $dotfile\n"; + while() { + $_ =~ m:(.+)\((\w{8})\)\s*$: || die "Bad line $_ in dot file $dotfile (line $.)\n"; + $trans = $1; + $utt = $2; + $utt2trans{$utt} = $trans; + } + } + if(!defined $utt2trans{$uttid}) { + print STDERR "No transcript for utterance $uttid (current dot file is $dotfile)\n"; + } else { + print "$uttid $utt2trans{$uttid}\n"; + } +} + + diff --git a/tools/ASR_2ch_track/local/flist2scp.pl b/tools/ASR_2ch_track/local/flist2scp.pl new file mode 100644 index 0000000000000000000000000000000000000000..234e4add1edd557ad42b39d76be92d24dea18f13 --- /dev/null +++ b/tools/ASR_2ch_track/local/flist2scp.pl @@ -0,0 +1,31 @@ +#!/usr/bin/env perl +# Copyright 2010-2011 Microsoft Corporation + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + + +# takes in a file list with lines like +# /mnt/matylda2/data/WSJ1/13-16.1/wsj1/si_dt_20/4k0/4k0c030a.wv1 +# and outputs an scp in kaldi format with lines like +# 4k0c030a /mnt/matylda2/data/WSJ1/13-16.1/wsj1/si_dt_20/4k0/4k0c030a.wv1 +# (the first thing is the utterance-id, which is the same as the basename of the file. + + +while(<>){ + m:^\S+/(\w+)\.[wW][vV]1$: || die "Bad line $_"; + $id = $1; + $id =~ tr/A-Z/a-z/; # Necessary because of weirdness on disk 13-16.1 (uppercase filenames) + print "$id $_"; +} + diff --git a/tools/ASR_2ch_track/local/normalize_transcript.pl b/tools/ASR_2ch_track/local/normalize_transcript.pl new file mode 100644 index 0000000000000000000000000000000000000000..09cee06172ec63bb2de123f19bc0ac9efd997095 --- /dev/null +++ b/tools/ASR_2ch_track/local/normalize_transcript.pl @@ -0,0 +1,59 @@ +#!/usr/bin/env perl +# Copyright 2010-2011 Microsoft Corporation + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + + +# This takes data from the standard input that's unnormalized transcripts in the format +# 4k2c0308 Of course there isn\'t any guarantee the company will keep its hot hand [misc_noise] +# 4k2c030a [loud_breath] And new hardware such as the set of personal computers I\. B\. M\. introduced last week can lead to unexpected changes in the software business [door_slam] +# and outputs normalized transcripts. +# c.f. /mnt/matylda2/data/WSJ0/11-10.1/wsj0/transcrp/doc/dot_spec.doc + +@ARGV == 1 || die "usage: normalize_transcript.pl noise_word < transcript > transcript2"; +$noise_word = shift @ARGV; + +while() { + $_ =~ m:^(\S+) (.+): || die "bad line $_"; + $utt = $1; + $trans = $2; + print "$utt"; + foreach $w (split (" ",$trans)) { + $w =~ tr:a-z:A-Z:; # Upcase everything to match the CMU dictionary. . + $w =~ s:\\::g; # Remove backslashes. We don't need the quoting. + $w =~ s:^\%PERCENT$:PERCENT:; # Normalization for Nov'93 test transcripts. + $w =~ s:^\.POINT$:POINT:; # Normalization for Nov'93 test transcripts. + if($w =~ m:^\[\<\w+\]$: || # E.g. [\]$: || # E.g. [door_slam>], this means a door slammed in the next word. Delete. + $w =~ m:\[\w+/\]$: || # E.g. [phone_ring/], which indicates the start of this phenomenon. + $w =~ m:\[\/\w+]$: || # E.g. [/phone_ring], which indicates the end of this phenomenon. + $w eq "~" || # This is used to indicate truncation of an utterance. Not a word. + $w eq ".") { # "." is used to indicate a pause. Silence is optional anyway so not much + # point including this in the transcript. 
+ next; # we won't print this word. + } elsif($w =~ m:\[\w+\]:) { # Other noises, e.g. [loud_breath]. + print " $noise_word"; + } elsif($w =~ m:^\<([\w\']+)\>$:) { + # e.g. replace with and. (the <> means verbal deletion of a word).. but it's pronounced. + print " $1"; + } elsif($w eq "--DASH") { + print " -DASH"; # This is a common issue; the CMU dictionary has it as -DASH. +# } elsif($w =~ m:(.+)\-DASH$:) { # E.g. INCORPORATED-DASH... seems the DASH gets combined with previous word +# print " $1 -DASH"; + } else { + print " $w"; + } + } + print "\n"; +} diff --git a/tools/ASR_2ch_track/local/real_enhan_chime4_data_prep.sh b/tools/ASR_2ch_track/local/real_enhan_chime4_data_prep.sh new file mode 100644 index 0000000000000000000000000000000000000000..7d4f9c892a8c60d6477350bd6852aa311919b13c --- /dev/null +++ b/tools/ASR_2ch_track/local/real_enhan_chime4_data_prep.sh @@ -0,0 +1,103 @@ +#!/bin/bash +set -e + +# Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0. + +# This is modified from the script in standard Kaldi recipe to account +# for the way the WSJ data is structured on the Edinburgh systems. +# - Arnab Ghoshal, 29/05/12 + +# Modified from the script for CHiME2 baseline +# Shinji Watanabe 02/13/2015 + +# Config: +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +if [ $# -ne 2 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "The argument should be a the directory that only contains enhanced speech data." + exit 1; +fi + +echo "$0 $@" # Print the command line for logging + +enhan=$1 +audio_dir=$2 + +dir=`pwd`/data/local/data +mkdir -p $dir +local=`pwd`/local +utils=`pwd`/utils +odir=`pwd`/data + +. ./path.sh # Needed for KALDI_ROOT +export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin +sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe +if [ ! 
-x $sph2pipe ]; then + echo "Could not find (or execute) the sph2pipe program at $sph2pipe"; + exit 1; +fi + +if $eval_flag; then +list_set="tr05_real_$enhan dt05_real_$enhan et05_real_$enhan" +else +list_set="tr05_real_$enhan dt05_real_$enhan" +fi + +cd $dir + +find $audio_dir/ -name '*.wav' | grep 'tr05_bus_real\|tr05_caf_real\|tr05_ped_real\|tr05_str_real' | sort -u > tr05_real_$enhan.flist +find $audio_dir/ -name '*.wav' | grep 'dt05_bus_real\|dt05_caf_real\|dt05_ped_real\|dt05_str_real' | sort -u > dt05_real_$enhan.flist +if $eval_flag; then +find $audio_dir/ -name '*.wav' | grep 'et05_bus_real\|et05_caf_real\|et05_ped_real\|et05_str_real' | sort -u > et05_real_$enhan.flist +fi + +# make a scp file from file list +for x in $list_set; do + cat $x.flist | awk -F'[/]' '{print $NF}'| sed -e 's/\.wav/_REAL/' > ${x}_wav.ids + paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp +done + +#make a transcription from dot +cat tr05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_REAL"}'> tr05_real_$enhan.ids +cat tr05_real.dot | sed -e 's/(.*)//' > tr05_real_$enhan.txt +paste -d" " tr05_real_$enhan.ids tr05_real_$enhan.txt | sort -k 1 > tr05_real_$enhan.trans1 +cat dt05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_REAL"}'> dt05_real_$enhan.ids +cat dt05_real.dot | sed -e 's/(.*)//' > dt05_real_$enhan.txt +paste -d" " dt05_real_$enhan.ids dt05_real_$enhan.txt | sort -k 1 > dt05_real_$enhan.trans1 +if $eval_flag; then +cat et05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_REAL"}'> et05_real_$enhan.ids +cat et05_real.dot | sed -e 's/(.*)//' > et05_real_$enhan.txt +paste -d" " et05_real_$enhan.ids et05_real_$enhan.txt | sort -k 1 > et05_real_$enhan.trans1 +fi + +# Do some basic normalization steps. At this point we don't remove OOVs-- +# that will be done inside the training scripts, as we'd like to make the +# data-preparation stage independent of the specific lexicon used. +noiseword=""; +for x in $list_set;do + cat $x.trans1 | $local/normalize_transcript.pl $noiseword \ + | sort > $x.txt || exit 1; +done + +# Make the utt2spk and spk2utt files. +for x in $list_set; do + cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk + cat ${x}_wav.scp | awk '{print $1}' > $x.utt + paste -d" " $x.utt $x.spk > $x.utt2spk + cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1; +done + +# copying data to data/... +for x in $list_set; do + mkdir -p $odir/$x + cp ${x}_wav.scp $odir/$x/wav.scp || exit 1; + cp ${x}.txt $odir/$x/text || exit 1; + cp ${x}.spk2utt $odir/$x/spk2utt || exit 1; + cp ${x}.utt2spk $odir/$x/utt2spk || exit 1; +done + +echo "Data preparation succeeded" diff --git a/tools/ASR_2ch_track/local/real_noisy_chime4_data_prep.sh b/tools/ASR_2ch_track/local/real_noisy_chime4_data_prep.sh new file mode 100644 index 0000000000000000000000000000000000000000..fa68383e55e91db604264baaea2460028b69a823 --- /dev/null +++ b/tools/ASR_2ch_track/local/real_noisy_chime4_data_prep.sh @@ -0,0 +1,114 @@ +#!/bin/bash +set -e + +# Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0. + +# This is modified from the script in standard Kaldi recipe to account +# for the way the WSJ data is structured on the Edinburgh systems. +# - Arnab Ghoshal, 29/05/12 + +# Modified from the script for CHiME2 baseline +# Shinji Watanabe 02/13/2015 + +# Config: +eval_flag=true # make it true when the evaluation data are released + +. 
utils/parse_options.sh || exit 1; + +if [ $# -ne 1 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "The argument should be a the top-level Chime4 directory." + echo "It is assumed that there will be a 'data' subdirectory" + echo "within the top-level corpus directory." + exit 1; +fi + +echo "$0 $@" # Print the command line for logging + +audio_dir=$1/data/audio/16kHz/isolated +trans_dir=$1/data/transcriptions + +echo "extract 5th channel (CH5.wav, the center bottom edge in the front of the tablet) for noisy data" + +dir=`pwd`/data/local/data +lmdir=`pwd`/data/local/nist_lm +mkdir -p $dir $lmdir +local=`pwd`/local +utils=`pwd`/utils + +. ./path.sh # Needed for KALDI_ROOT +export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin +sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe +if [ ! -x $sph2pipe ]; then + echo "Could not find (or execute) the sph2pipe program at $sph2pipe"; + exit 1; +fi + +if $eval_flag; then +list_set="tr05_real_noisy dt05_real_noisy et05_real_noisy" +else +list_set="tr05_real_noisy dt05_real_noisy" +fi + +cd $dir + +find $audio_dir -name '*CH5.wav' | grep 'tr05_bus_real\|tr05_caf_real\|tr05_ped_real\|tr05_str_real' | sort -u > tr05_real_noisy.flist +find $audio_dir -name '*CH5.wav' | grep 'dt05_bus_real\|dt05_caf_real\|dt05_ped_real\|dt05_str_real' | sort -u > dt05_real_noisy.flist +if $eval_flag; then +find $audio_dir -name '*CH5.wav' | grep 'et05_bus_real\|et05_caf_real\|et05_ped_real\|et05_str_real' | sort -u > et05_real_noisy.flist +fi + +# make a dot format from json annotation files +cp $trans_dir/tr05_real.dot_all tr05_real.dot +cp $trans_dir/dt05_real.dot_all dt05_real.dot +if $eval_flag; then +cp $trans_dir/et05_real.dot_all et05_real.dot +fi + +# make a scp file from file list +for x in $list_set; do + cat $x.flist | awk -F'[/]' '{print $NF}'| sed -e 's/\.wav/_REAL/' > ${x}_wav.ids + paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp +done + +#make a transcription from dot +cat tr05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_REAL"}'> tr05_real_noisy.ids +cat tr05_real.dot | sed -e 's/(.*)//' > tr05_real_noisy.txt +paste -d" " tr05_real_noisy.ids tr05_real_noisy.txt | sort -k 1 > tr05_real_noisy.trans1 +cat dt05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_REAL"}'> dt05_real_noisy.ids +cat dt05_real.dot | sed -e 's/(.*)//' > dt05_real_noisy.txt +paste -d" " dt05_real_noisy.ids dt05_real_noisy.txt | sort -k 1 > dt05_real_noisy.trans1 +if $eval_flag; then +cat et05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_REAL"}'> et05_real_noisy.ids +cat et05_real.dot | sed -e 's/(.*)//' > et05_real_noisy.txt +paste -d" " et05_real_noisy.ids et05_real_noisy.txt | sort -k 1 > et05_real_noisy.trans1 +fi + +# Do some basic normalization steps. At this point we don't remove OOVs-- +# that will be done inside the training scripts, as we'd like to make the +# data-preparation stage independent of the specific lexicon used. +noiseword=""; +for x in $list_set;do + cat $x.trans1 | $local/normalize_transcript.pl $noiseword \ + | sort > $x.txt || exit 1; +done + +# Make the utt2spk and spk2utt files. +for x in $list_set; do + cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk + cat ${x}_wav.scp | awk '{print $1}' > $x.utt + paste -d" " $x.utt $x.spk > $x.utt2spk + cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1; +done + +# copying data to data/... 
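+# (We are currently in $dir = data/local/data, so ../../$x below resolves to
+# data/$x at the top level of the recipe.)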
+for x in $list_set; do + mkdir -p ../../$x + cp ${x}_wav.scp ../../$x/wav.scp || exit 1; + cp ${x}.txt ../../$x/text || exit 1; + cp ${x}.spk2utt ../../$x/spk2utt || exit 1; + cp ${x}.utt2spk ../../$x/utt2spk || exit 1; +done + +echo "Data preparation succeeded" diff --git a/tools/ASR_2ch_track/local/run_beamform_2ch_track.sh b/tools/ASR_2ch_track/local/run_beamform_2ch_track.sh new file mode 100644 index 0000000000000000000000000000000000000000..29d7ee0ff5ee620bb1ea9c9722793dac9acd1012 --- /dev/null +++ b/tools/ASR_2ch_track/local/run_beamform_2ch_track.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Shinji Watanabe) + +. ./cmd.sh +. ./path.sh + +# Config: +nj=10 +cmd=run.pl + +. utils/parse_options.sh || exit 1; + +if [ $# != 2 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: local/run_beamform_2ch_track.sh [options] " + echo "main options (for others, see top of script file)" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + exit 1; +fi + +sdir=$1 +odir=$2 + +wdir=data/beamforming_2ch_track + +if [ -z $BEAMFORMIT ] ; then + export BEAMFORMIT=$KALDI_ROOT/tools/BeamformIt +fi +export PATH=${PATH}:$BEAMFORMIT +! hash BeamformIt && echo "Missing BeamformIt, run 'cd ../../../tools/; make beamformit;'" && exit 1 + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +mkdir -p $odir +mkdir -p $wdir/log + +allwavs=`find $sdir/ | grep "\.wav" | tr ' ' '\n' | awk -F '/' '{print $(NF-1)"/"$NF}'` + +# wavfiles.list can be used as the name of the output files +output_wavfiles=$wdir/wavfiles.list +echo $allwavs | tr ' ' '\n' | awk -F '.' '{print $1}' | sort | uniq > $output_wavfiles + +# channel list +input_arrays=$wdir/channels +echo $allwavs | tr ' ' '\n' | sort | awk 'NR%2==1' > $wdir/channels.1st +echo $allwavs | tr ' ' '\n' | sort | awk 'NR%2==0' > $wdir/channels.2nd +paste -d" " $output_wavfiles $wdir/channels.1st $wdir/channels.2nd > $input_arrays + +# split the list for parallel processing +split_wavfiles="" +for n in `seq $nj`; do + split_wavfiles="$split_wavfiles $output_wavfiles.$n" +done +utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1; + +echo -e "Beamforming\n" +# making a shell script for each job +for n in `seq $nj`; do +cat << EOF > $wdir/log/beamform.$n.sh +while read line; do + $BEAMFORMIT/BeamformIt -s \$line -c $input_arrays \ + --config_file `pwd`/conf/chime4.cfg \ + --source_dir $sdir \ + --result_dir $odir +done < $output_wavfiles.$n +EOF +done +# making a subdirectory for the output wav files +for x in `awk -F '/' '{print $1}' $output_wavfiles | sort | uniq`; do + mkdir -p $odir/$x +done + +chmod a+x $wdir/log/beamform.*.sh +$cmd JOB=1:$nj $wdir/log/beamform.JOB.log \ + $wdir/log/beamform.JOB.sh + +echo "`basename $0` Done." diff --git a/tools/ASR_2ch_track/local/run_beamform_6ch_track.sh b/tools/ASR_2ch_track/local/run_beamform_6ch_track.sh new file mode 100644 index 0000000000000000000000000000000000000000..891465a330291c1ad00a685d6dbe0c153e6886d6 --- /dev/null +++ b/tools/ASR_2ch_track/local/run_beamform_6ch_track.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Shinji Watanabe) + +. ./cmd.sh +. ./path.sh + +# Config: +nj=10 +cmd=run.pl +bmf="1 3 4 5 6" +eval_flag=true # make it true when the evaluation data are released + +. 
utils/parse_options.sh || exit 1; + +if [ $# != 2 ]; then + echo "Wrong #arguments ($#, expected 2)" + echo "Usage: local/run_beamform_6ch_track.sh [options] " + echo "main options (for others, see top of script file)" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --bmf \"1 3 4 5 6\" # microphones used for beamforming (2th mic is omitted in default)" + exit 1; +fi + +sdir=$1 +odir=$2 +wdir=data/beamforming_`echo $bmf | tr ' ' '_'` + +if [ -z $BEAMFORMIT ] ; then + export BEAMFORMIT=$KALDI_ROOT/tools/BeamformIt +fi +export PATH=${PATH}:$BEAMFORMIT +! hash BeamformIt && echo "Missing BeamformIt, run 'cd ../../../tools/; make beamformit;'" && exit 1 + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +mkdir -p $odir +mkdir -p $wdir/log + +echo "Will use the following channels: $bmf" +# number of channels +numch=`echo $bmf | tr ' ' '\n' | wc -l` +echo "the number of channels: $numch" + +# wavfiles.list can be used as the name of the output files +# we only process dev and eval waves +output_wavfiles=$wdir/wavfiles.list +if $eval_flag; then + find $sdir/{dt,et}*{simu,real}/ | grep CH1.wav \ + | awk -F '/' '{print $(NF-1) "/" $NF}' | sed -e "s/\.CH1\.wav//" | sort > $output_wavfiles +else + find $sdir/dt*{simu,real}/ | grep CH1.wav \ + | awk -F '/' '{print $(NF-1) "/" $NF}' | sed -e "s/\.CH1\.wav//" | sort > $output_wavfiles +fi + +# this is an input file list of the microphones +# format: 1st_wav 2nd_wav ... nth_wav +input_arrays=$wdir/channels_$numch +for x in `cat $output_wavfiles`; do + echo -n "$x" + for ch in $bmf; do + echo -n " $x.CH$ch.wav" + done + echo "" +done > $input_arrays + +# split the list for parallel processing +split_wavfiles="" +for n in `seq $nj`; do + split_wavfiles="$split_wavfiles $output_wavfiles.$n" +done +utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1; + +echo -e "Beamforming\n" +# making a shell script for each job +for n in `seq $nj`; do +cat << EOF > $wdir/log/beamform.$n.sh +while read line; do + $BEAMFORMIT/BeamformIt -s \$line -c $input_arrays \ + --config_file `pwd`/conf/chime4.cfg \ + --source_dir $sdir \ + --result_dir $odir +done < $output_wavfiles.$n +EOF +done +# making a subdirectory for the output wav files +for x in `awk -F '/' '{print $1}' $output_wavfiles | sort | uniq`; do + mkdir -p $odir/$x +done + +chmod a+x $wdir/log/beamform.*.sh +$cmd JOB=1:$nj $wdir/log/beamform.JOB.log \ + $wdir/log/beamform.JOB.sh + +echo "`basename $0` Done." diff --git a/tools/ASR_2ch_track/local/run_dnn.sh b/tools/ASR_2ch_track/local/run_dnn.sh new file mode 100644 index 0000000000000000000000000000000000000000..5371c5aa4b204535c43f90230ced13f1a4972c13 --- /dev/null +++ b/tools/ASR_2ch_track/local/run_dnn.sh @@ -0,0 +1,230 @@ +#!/bin/bash + +# Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer) +# Inria (Emmanuel Vincent) +# Mitsubishi Electric Research Labs (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +# This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2 +# made by Chao Weng + +. ./path.sh +. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system. + ## This relates to the queue. + +# Config: +nj=30 +stage=0 # resume training with --stage=N +train=noisy +eval_flag=true # make it true when the evaluation data are released + +. 
utils/parse_options.sh || exit 1; + +# This is a shell script, but it's recommended that you run the commands one by +# one by copying and pasting into the shell. + +if [ $# -ne 1 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "First argument specifies a unique name for different enhancement method" + exit 1; +fi + +# set enhanced data +enhan=$1 + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +# check whether run_init is executed +if [ ! -d data/lang ]; then + echo "error, execute local/run_init.sh, first" + exit 1; +fi + +# check whether run_init is executed +if [ ! -d exp/tri3b_tr05_multi_${train} ]; then + echo "error, execute local/run_init.sh, first" + exit 1; +fi + +# get alignments +if [ $stage -le 0 ]; then + steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" \ + data/tr05_multi_${train} data/lang exp/tri3b_tr05_multi_${train} exp/tri3b_tr05_multi_${train}_ali + steps/align_fmllr.sh --nj 4 --cmd "$train_cmd" \ + data/dt05_multi_$enhan data/lang exp/tri3b_tr05_multi_${train} exp/tri3b_tr05_multi_${train}_ali_dt05 +fi + +# make fmllr feature for training multi = simu + real +gmmdir=exp/tri3b_tr05_multi_${train}_ali +data_fmllr=data-fmllr-tri3b +mkdir -p $data_fmllr +fmllrdir=fmllr-tri3b/${train} +if [ $stage -le 1 ]; then + for x in tr05_real_${train} tr05_simu_${train}; do + steps/nnet/make_fmllr_feats.sh --nj 4 --cmd "$train_cmd" \ + --transform-dir $gmmdir \ + $data_fmllr/$x data/$x $gmmdir exp/make_fmllr_tri3b/$x $fmllrdir + done +fi + +# make fmllr feature for dev and eval +gmmdir=exp/tri3b_tr05_multi_${train} +fmllrdir=fmllr-tri3b/$enhan +if [ $stage -le 2 ]; then + if $eval_flag; then + tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan" + else + tasks="dt05_real_$enhan dt05_simu_$enhan" + fi + for x in $tasks; do + steps/nnet/make_fmllr_feats.sh --nj 4 --cmd "$train_cmd" \ + --transform-dir $gmmdir/decode_tgpr_5k_$x \ + $data_fmllr/$x data/$x $gmmdir exp/make_fmllr_tri3b/$x $fmllrdir + done +fi + +# make mixed training set from real and simulation enhanced data +# multi = simu + real +if [ $stage -le 3 ]; then + utils/combine_data.sh $data_fmllr/tr05_multi_${train} $data_fmllr/tr05_simu_${train} $data_fmllr/tr05_real_${train} + utils/combine_data.sh $data_fmllr/dt05_multi_$enhan $data_fmllr/dt05_simu_$enhan $data_fmllr/dt05_real_$enhan + if $eval_flag; then + utils/combine_data.sh $data_fmllr/et05_multi_$enhan $data_fmllr/et05_simu_$enhan $data_fmllr/et05_real_$enhan + fi +fi + +# pre-train dnn +dir=exp/tri4a_dnn_pretrain_tr05_multi_${train} +if [ $stage -le 4 ]; then + $cuda_cmd $dir/_pretrain_dbn.log \ + steps/nnet/pretrain_dbn.sh --nn-depth 7 --rbm-iter 3 $data_fmllr/tr05_multi_${train} $dir +fi + +# train dnn +dir=exp/tri4a_dnn_tr05_multi_${train} +ali=exp/tri3b_tr05_multi_${train}_ali +ali_dev=exp/tri3b_tr05_multi_${train}_ali_dt05 +feature_transform=exp/tri4a_dnn_pretrain_tr05_multi_${train}/final.feature_transform +dbn=exp/tri4a_dnn_pretrain_tr05_multi_${train}/7.dbn +if [ $stage -le 5 ]; then + $cuda_cmd $dir/_train_nnet.log \ + steps/nnet/train.sh --feature-transform $feature_transform --dbn $dbn --hid-layers 0 --learn-rate 0.008 \ + $data_fmllr/tr05_multi_${train} $data_fmllr/dt05_multi_$enhan data/lang $ali $ali_dev $dir +fi + +# decode enhanced speech +if [ $stage -le 6 ]; then + utils/mkgraph.sh data/lang_test_tgpr_5k $dir $dir/graph_tgpr_5k + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" 
--acwt 0.10 --config conf/decode_dnn.config \ + $dir/graph_tgpr_5k $data_fmllr/dt05_real_$enhan $dir/decode_tgpr_5k_dt05_real_$enhan & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --acwt 0.10 --config conf/decode_dnn.config \ + $dir/graph_tgpr_5k $data_fmllr/dt05_simu_$enhan $dir/decode_tgpr_5k_dt05_simu_$enhan & + if $eval_flag; then + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --acwt 0.10 --config conf/decode_dnn.config \ + $dir/graph_tgpr_5k $data_fmllr/et05_real_$enhan $dir/decode_tgpr_5k_et05_real_$enhan & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --acwt 0.10 --config conf/decode_dnn.config \ + $dir/graph_tgpr_5k $data_fmllr/et05_simu_$enhan $dir/decode_tgpr_5k_et05_simu_$enhan & + fi + wait; +fi + +# Sequence training using sMBR criterion, we do Stochastic-GD +# with per-utterance updates. We use usually good acwt 0.1 +# Lattices are re-generated after 1st epoch, to get faster convergence. +dir=exp/tri4a_dnn_tr05_multi_${train}_smbr +srcdir=exp/tri4a_dnn_tr05_multi_${train} +acwt=0.1 + +# First we generate lattices and alignments: +# gawk must be installed to perform awk -v FS="/" '{ print gensub(".gz","","",$NF)" gunzip -c "$0" |"; }' in +# steps/nnet/make_denlats.sh +if [ $stage -le 7 ]; then + steps/nnet/align.sh --nj $nj --cmd "$train_cmd" \ + $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali + steps/nnet/make_denlats.sh --nj $nj --cmd "$decode_cmd" --config conf/decode_dnn.config --acwt $acwt \ + $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_denlats +fi + +# Re-train the DNN by 1 iteration of sMBR +if [ $stage -le 8 ]; then + steps/nnet/train_mpe.sh --cmd "$cuda_cmd" --num-iters 1 --acwt $acwt --do-smbr true \ + $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali ${srcdir}_denlats $dir +fi + +# Decode (reuse HCLG graph) +if [ $stage -le 9 ]; then + for ITER in 1; do + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_real_${enhan} $dir/decode_tgpr_5k_dt05_real_${enhan}_it${ITER} & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_simu_${enhan} $dir/decode_tgpr_5k_dt05_simu_${enhan}_it${ITER} & + if $eval_flag; then + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_real_${enhan} $dir/decode_tgpr_5k_et05_real_${enhan}_it${ITER} & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_simu_${enhan} $dir/decode_tgpr_5k_et05_simu_${enhan}_it${ITER} & + fi + done +fi + +# Re-generate lattices, run 4 more sMBR iterations +dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats +srcdir=exp/tri4a_dnn_tr05_multi_${train}_smbr +acwt=0.1 + +# Generate lattices and alignments: +if [ $stage -le 10 ]; then + steps/nnet/align.sh --nj $nj --cmd "$train_cmd" \ + $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali + steps/nnet/make_denlats.sh --nj $nj --cmd "$decode_cmd" --config conf/decode_dnn.config --acwt $acwt \ + $data_fmllr/tr05_multi_${train} data/lang $srcdir 
${srcdir}_denlats +fi + +# Re-train the DNN by 4 iterations of sMBR +if [ $stage -le 11 ]; then + steps/nnet/train_mpe.sh --cmd "$cuda_cmd" --num-iters 4 --acwt $acwt --do-smbr true \ + $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali ${srcdir}_denlats $dir || exit 1 +fi + +# Decode (reuse HCLG graph) +if [ $stage -le 12 ]; then + for ITER in 1 2 3 4; do + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_real_${enhan} $dir/decode_tgpr_5k_dt05_real_${enhan}_it${ITER} & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_simu_${enhan} $dir/decode_tgpr_5k_dt05_simu_${enhan}_it${ITER} & + if $eval_flag; then + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_real_${enhan} $dir/decode_tgpr_5k_et05_real_${enhan}_it${ITER} & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_simu_${enhan} $dir/decode_tgpr_5k_et05_simu_${enhan}_it${ITER} & + fi + done + wait +fi + +# scoring +if [ $stage -le 13 ]; then + # decoded results of enhanced speech using DNN AMs trained with enhanced data + local/chime4_calc_wers.sh exp/tri4a_dnn_tr05_multi_${train} $enhan exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k \ + > exp/tri4a_dnn_tr05_multi_${train}/best_wer_$enhan.result + head -n 15 exp/tri4a_dnn_tr05_multi_${train}/best_wer_$enhan.result + # decoded results of enhanced speech using sequence-training DNN + ./local/chime4_calc_wers_smbr.sh exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats ${enhan} exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k \ + > exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats/best_wer_${enhan}.result + head -n 15 exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats/best_wer_${enhan}.result +fi + +echo "`basename $0` Done." diff --git a/tools/ASR_2ch_track/local/run_dnn_recog.sh b/tools/ASR_2ch_track/local/run_dnn_recog.sh new file mode 100644 index 0000000000000000000000000000000000000000..5e6ade02387f24c8472eaa01aa76e323a4f06c33 --- /dev/null +++ b/tools/ASR_2ch_track/local/run_dnn_recog.sh @@ -0,0 +1,143 @@ +#!/bin/bash + +# Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer) +# Inria (Emmanuel Vincent) +# Mitsubishi Electric Research Labs (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +# This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2 +# made by Chao Weng + +. ./path.sh +. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system. + ## This relates to the queue. + +# Config: +nj=30 +stage=0 # resume training with --stage=N +train=noisy +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +# This is a shell script, but it's recommended that you run the commands one by +# one by copying and pasting into the shell. 
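+# For reference, a typical invocation mirrors the call in run.sh; the model
+# directory shown below is only an illustrative example path:
+#   local/run_dnn_recog.sh beamformit_2mics ../../tools/ASR_models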
+ +if [ $# -ne 2 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "First argument specifies a unique name for different enhancement method" + echo "Second argument specifies acoustic and language model directory" + exit 1; +fi + +# set enhanced data +enhan=$1 +# set model directory +mdir=$2 + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +# check data/loca/data +if [ ! -d $mdir/data/local/data ]; then + echo "error, set $mdir correctly" + exit 1; +elif [ ! -d data/local/data ]; then + echo "copy $mdir/data/local/data" + mkdir -p data/local + cp -r $mdir/data/local/data data/local/ +fi + +# check gmm model +if [ ! -d $mdir/exp/tri3b_tr05_multi_${train} ]; then + echo "error, set $mdir correctly" + exit 1; +elif [ ! -d exp/tri3b_tr05_multi_${train} ]; then + echo "copy $mdir/exp/tri3b_tr05_multi_${train}" + mkdir -p exp + cp -r $mdir/exp/tri3b_tr05_multi_${train} exp/ +fi + +# check dnn graph +if [ ! -d $mdir/exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then + echo "error, set $mdir correctly" + exit 1; +elif [ ! -d exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then + echo "copy $mdir/exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k" + mkdir -p exp/tri4a_dnn_tr05_multi_${train} + cp -r $mdir/exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k exp/tri4a_dnn_tr05_multi_${train}/ +fi + +# check dnn smbr model +if [ ! -d $mdir/exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats ]; then + echo "error, set $mdir correctly" + exit 1; +elif [ ! -d exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats ]; then + echo "copy $mdir/exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats" + mkdir -p exp + cp -r $mdir/exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats exp/ +fi + +# make fmllr feature for dev and eval +gmmdir=exp/tri3b_tr05_multi_${train} +data_fmllr=data-fmllr-tri3b +mkdir -p $data_fmllr +fmllrdir=fmllr-tri3b/$enhan +if [ $stage -le 4 ]; then + if $eval_flag; then + tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan" + else + tasks="dt05_real_$enhan dt05_simu_$enhan" + fi + for x in $tasks; do + steps/nnet/make_fmllr_feats.sh --nj 4 --cmd "$train_cmd" \ + --transform-dir $gmmdir/decode_tgpr_5k_$x \ + $data_fmllr/$x data/$x $gmmdir exp/make_fmllr_tri3b/$x $fmllrdir + done +fi + +# make mixed training set from real and simulation enhanced data +# multi = simu + real +if [ $stage -le 5 ]; then + utils/combine_data.sh $data_fmllr/dt05_multi_$enhan $data_fmllr/dt05_simu_$enhan $data_fmllr/dt05_real_$enhan + if $eval_flag; then + utils/combine_data.sh $data_fmllr/et05_multi_$enhan $data_fmllr/et05_simu_$enhan $data_fmllr/et05_real_$enhan + fi +fi + +# Re-generate lattices, run 4 more sMBR iterations +dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats +acwt=0.1 + +# Decode (reuse HCLG graph) +if [ $stage -le 6 ]; then + for ITER in 1 2 3 4; do + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_real_${enhan} $dir/decode_tgpr_5k_dt05_real_${enhan}_it${ITER} & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_simu_${enhan} $dir/decode_tgpr_5k_dt05_simu_${enhan}_it${ITER} & + if $eval_flag; then + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd 
"$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_real_${enhan} $dir/decode_tgpr_5k_et05_real_${enhan}_it${ITER} & + steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \ + --nnet $dir/${ITER}.nnet --acwt $acwt \ + exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_simu_${enhan} $dir/decode_tgpr_5k_et05_simu_${enhan}_it${ITER} & + fi + wait + done +fi + +# scoring +if [ $stage -le 7 ]; then + # decoded results of enhanced speech using sequence-training DNN + ./local/chime4_calc_wers_smbr.sh $dir ${enhan} exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k > $dir/best_wer_${enhan}.result + head -n 15 $dir/best_wer_${enhan}.result +fi + +echo "`basename $0` Done." diff --git a/tools/ASR_2ch_track/local/run_gmm.sh b/tools/ASR_2ch_track/local/run_gmm.sh new file mode 100644 index 0000000000000000000000000000000000000000..31c0a84de3b51a7c982143f69a8ef8c3986014b9 --- /dev/null +++ b/tools/ASR_2ch_track/local/run_gmm.sh @@ -0,0 +1,186 @@ +#!/bin/bash + +# Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer) +# Inria (Emmanuel Vincent) +# Mitsubishi Electric Research Labs (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +# This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2 +# made by Chao Weng + +. ./path.sh +. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system. + ## This relates to the queue. + +# Config: +nj=30 +stage=0 # resume training with --stage=N +train=noisy # noisy data multi-condition training +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +# This is a shell script, but it's recommended that you run the commands one by +# one by copying and pasting into the shell. + +if [ $# -ne 3 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "First argument specifies a unique name for different enhancement method" + echo "Second argument specifies the directory of enhanced wav files" + echo "Third argument specifies the CHiME4 root directory" + exit 1; +fi + +# set enhanced data +enhan=$1 +enhan_data=$2 +# set chime4 data +chime4_data=$3 + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +# check whether run_init is executed +if [ ! -d data/lang ]; then + echo "error, execute local/run_init.sh, first" + exit 1; +fi + +####################### +#### training ######### +if [ $stage -le 1 ]; then + # process for distant talking speech for real and simulation data + local/real_noisy_chime4_data_prep.sh $chime4_data + local/simu_noisy_chime4_data_prep.sh $chime4_data +fi + +# Now make MFCC features for clean, close, and noisy data +# mfccdir should be some place with a largish disk where you +# want to store MFCC features. 
+mfccdir=mfcc +if [ $stage -le 2 ]; then + if $eval_flag; then + tasks="tr05_real_${train} dt05_real_${train} tr05_simu_${train} dt05_simu_${train} et05_real_${train} et05_simu_${train}" + else + tasks="tr05_real_${train} dt05_real_${train} tr05_simu_${train} dt05_simu_${train}" + fi + for x in $tasks; do + steps/make_mfcc.sh --nj 8 --cmd "$train_cmd" \ + data/$x exp/make_mfcc/$x $mfccdir + steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir + done +fi + +# make mixed training set from real and simulation training data +# multi = simu + real +if [ $stage -le 3 ]; then + utils/combine_data.sh data/tr05_multi_${train} data/tr05_simu_${train} data/tr05_real_${train} + utils/combine_data.sh data/dt05_multi_${train} data/dt05_simu_${train} data/dt05_real_${train} + if $eval_flag; then + utils/combine_data.sh data/et05_multi_${train} data/et05_simu_${train} data/et05_real_${train} + fi +fi + +# training models for noisy data +if [ $stage -le 4 ]; then + nspk=`wc -l data/tr05_multi_${train}/spk2utt | awk '{print $1}'` + if [ $nj -gt $nspk ]; then + nj2=$nspk + else + nj2=$nj + fi + # training monophone model + steps/train_mono.sh --boost-silence 1.25 --nj $nj2 --cmd "$train_cmd" \ + data/tr05_multi_${train} data/lang exp/mono0a_tr05_multi_${train} + steps/align_si.sh --boost-silence 1.25 --nj $nj2 --cmd "$train_cmd" \ + data/tr05_multi_${train} data/lang exp/mono0a_tr05_multi_${train} exp/mono0a_ali_tr05_multi_${train} + + # training triphone model with lad mllt features + steps/train_deltas.sh --boost-silence 1.25 --cmd "$train_cmd" \ + 2000 10000 data/tr05_multi_${train} data/lang exp/mono0a_ali_tr05_multi_${train} exp/tri1_tr05_multi_${train} + steps/align_si.sh --nj $nj2 --cmd "$train_cmd" \ + data/tr05_multi_${train} data/lang exp/tri1_tr05_multi_${train} exp/tri1_ali_tr05_multi_${train} + + steps/train_lda_mllt.sh --cmd "$train_cmd" \ + --splice-opts "--left-context=3 --right-context=3" \ + 2500 15000 data/tr05_multi_${train} data/lang exp/tri1_ali_tr05_multi_${train} exp/tri2b_tr05_multi_${train} + steps/align_si.sh --nj $nj2 --cmd "$train_cmd" \ + --use-graphs true data/tr05_multi_${train} data/lang exp/tri2b_tr05_multi_${train} exp/tri2b_ali_tr05_multi_${train} + + steps/train_sat.sh --cmd "$train_cmd" \ + 2500 15000 data/tr05_multi_${train} data/lang exp/tri2b_ali_tr05_multi_${train} exp/tri3b_tr05_multi_${train} + utils/mkgraph.sh data/lang_test_tgpr_5k exp/tri3b_tr05_multi_${train} exp/tri3b_tr05_multi_${train}/graph_tgpr_5k +fi +#### training done #### +####################### + + +##################### +#### tsting ######### +# process for enhanced data +if [ $stage -le 5 ]; then + if [ ! -d data/dt05_real_$enhan ] || [ ! -d data/et05_real_$enhan ]; then + local/real_enhan_chime4_data_prep.sh $enhan $enhan_data + local/simu_enhan_chime4_data_prep.sh $enhan $enhan_data + fi +fi + +# Now make MFCC features for enhanced data +# mfccdir should be some place with a largish disk where you +# want to store MFCC features. +mfccdir=mfcc/$enhan +if [ $stage -le 6 ]; then + if $eval_flag; then + tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan" + else + tasks="dt05_real_$enhan dt05_simu_$enhan" + fi + for x in $tasks; do + if [ ! -e data/$x/feats.scp ]; then + steps/make_mfcc.sh --nj 8 --cmd "$train_cmd" \ + data/$x exp/make_mfcc/$x $mfccdir + steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir + fi + done +fi + +# make mixed training set from real and simulation enhanced data +# multi = simu + real +if [ $stage -le 7 ]; then + if [ ! 
-d data/dt05_multi_$enhan ]; then + utils/combine_data.sh data/dt05_multi_$enhan data/dt05_simu_$enhan data/dt05_real_$enhan + if $eval_flag; then + utils/combine_data.sh data/et05_multi_$enhan data/et05_simu_$enhan data/et05_real_$enhan + fi + fi +fi + +# decode enhanced speech using AMs trained with enhanced data +if [ $stage -le 8 ]; then + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_real_$enhan & + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_simu_$enhan & + if $eval_flag; then + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_real_$enhan & + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_simu_$enhan & + fi + wait; +fi + +# scoring +if [ $stage -le 9 ]; then + # decoded results of enhanced speech using AMs trained with enhanced data + local/chime4_calc_wers.sh exp/tri3b_tr05_multi_${train} $enhan exp/tri3b_tr05_multi_${train}/graph_tgpr_5k \ + > exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result + head -n 15 exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result +fi +#### tsting done #### +##################### + +echo "`basename $0` Done." diff --git a/tools/ASR_2ch_track/local/run_gmm_recog.sh b/tools/ASR_2ch_track/local/run_gmm_recog.sh new file mode 100644 index 0000000000000000000000000000000000000000..65db6d9aa4d61d2d8173923684e77950558f08fd --- /dev/null +++ b/tools/ASR_2ch_track/local/run_gmm_recog.sh @@ -0,0 +1,127 @@ +#!/bin/bash + +# Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer) +# Inria (Emmanuel Vincent) +# Mitsubishi Electric Research Labs (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +# This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2 +# made by Chao Weng + +. ./path.sh +. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system. + ## This relates to the queue. + +# Config: +nj=30 +stage=0 # resume training with --stage=N +train=noisy +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +# This is a shell script, but it's recommended that you run the commands one by +# one by copying and pasting into the shell. + +if [ $# -ne 3 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "First argument specifies a unique name for different enhancement method" + echo "Second argument specifies the directory of enhanced wav files" + echo "Third argument specifies acoustic and language model directory" + exit 1; +fi + +# set enhanced data +enhan=$1 +enhan_data=$2 +# set model directory +mdir=$3 + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +# check data/loca/data +if [ ! -d $mdir/data/local/data ]; then + echo "error, set $mdir correctly" + exit 1; +elif [ ! -d data/local/data ]; then + echo "copy $mdir/data/local/data" + mkdir -p data/local + cp -r $mdir/data/local/data data/local/ +fi + +# check gmm model +if [ ! 
-d $mdir/exp/tri3b_tr05_multi_${train} ]; then + echo "error, set $mdir correctly" + exit 1; +elif [ ! -d exp/tri3b_tr05_multi_${train} ]; then + echo "copy $mdir/exp/tri3b_tr05_multi_${train}" + mkdir -p exp + cp -r $mdir/exp/tri3b_tr05_multi_${train} exp/ +fi + +# process for enhanced data +if [ $stage -le 0 ]; then + if [ ! -d data/dt05_real_$enhan ] || [ ! -d data/et05_real_$enhan ]; then + local/real_enhan_chime4_data_prep.sh $enhan $enhan_data + local/simu_enhan_chime4_data_prep.sh $enhan $enhan_data + fi +fi + +# Now make MFCC features for enhanced data +# mfccdir should be some place with a largish disk where you +# want to store MFCC features. +mfccdir=mfcc/$enhan +if [ $stage -le 1 ]; then + if $eval_flag; then + tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan" + else + tasks="dt05_real_$enhan dt05_simu_$enhan" + fi + for x in $tasks; do + if [ ! -e data/$x/feats.scp ]; then + steps/make_mfcc.sh --nj 8 --cmd "$train_cmd" \ + data/$x exp/make_mfcc/$x $mfccdir + steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir + fi + done +fi + +# make mixed training set from real and simulation enhanced data +# multi = simu + real +if [ $stage -le 2 ]; then + if [ ! -d data/dt05_multi_$enhan ]; then + utils/combine_data.sh data/dt05_multi_$enhan data/dt05_simu_$enhan data/dt05_real_$enhan + if $eval_flag; then + utils/combine_data.sh data/et05_multi_$enhan data/et05_simu_$enhan data/et05_real_$enhan + fi + fi +fi + +# decode enhanced speech using AMs trained with enhanced data +if [ $stage -le 3 ]; then + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_real_$enhan & + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_simu_$enhan & + if $eval_flag; then + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_real_$enhan & + steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \ + exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_simu_$enhan & + fi + wait; +fi + +# scoring +if [ $stage -le 4 ]; then + # decoded results of enhanced speech using AMs trained with enhanced data + local/chime4_calc_wers.sh exp/tri3b_tr05_multi_${train} $enhan exp/tri3b_tr05_multi_${train}/graph_tgpr_5k \ + > exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result + head -n 15 exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result +fi + +echo "`basename $0` Done." diff --git a/tools/ASR_2ch_track/local/run_init.sh b/tools/ASR_2ch_track/local/run_init.sh new file mode 100644 index 0000000000000000000000000000000000000000..f8c4782cc48fbf921fcb6ae440b3e4034cc0ba15 --- /dev/null +++ b/tools/ASR_2ch_track/local/run_init.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer) +# Inria (Emmanuel Vincent) +# Mitsubishi Electric Research Labs (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +# Config: +nj=30 +stage=0 # resume training with --stage=N +eval_flag=true # make it true when the evaluation data are released + +. 
utils/parse_options.sh || exit 1; + +# This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2 +# made by Chao Weng + +. ./path.sh +. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system. + ## This relates to the queue. + +if [ $# -ne 1 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "Please specifies a CHiME4 root directory" + echo "If you use scripts distributed in the CHiME4 package," + echo "It would be `pwd`/../.." + exit 1; +fi + +# This is a shell script, but it's recommended that you run the commands one by +# one by copying and pasting into the shell. + +# clean data +chime4_data=$1 +wsj0_data=$chime4_data/data/WSJ0 # directory of WSJ0 in Chime4. You can also specify your WSJ0 corpus directory + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +if [ $stage -le 0 ]; then + # process for clean speech and making LMs etc. from original WSJ0 + # note that training on clean data means original WSJ0 data only (no booth data) + local/clean_wsj0_data_prep.sh $wsj0_data + local/wsj_prepare_dict.sh + utils/prepare_lang.sh data/local/dict "" data/local/lang_tmp data/lang + local/clean_chime4_format_data.sh +fi + +echo "`basename $0` Done." diff --git a/tools/ASR_2ch_track/local/run_lmrescore.sh b/tools/ASR_2ch_track/local/run_lmrescore.sh new file mode 100644 index 0000000000000000000000000000000000000000..58a19c6da25c3c71ef7beca348e946de8831738f --- /dev/null +++ b/tools/ASR_2ch_track/local/run_lmrescore.sh @@ -0,0 +1,136 @@ +#!/bin/bash + +# Copyright 2015 University of Sheffield (Jon Barker, Ricard Marxer) +# Inria (Emmanuel Vincent) +# Mitsubishi Electric Research Labs (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori) + +nj=12 +stage=1 +order=5 +hidden=300 +rnnweight=0.5 +nbest=100 +train=noisy +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +. ./path.sh +. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system. + ## This relates to the queue. + +# This is a shell script, but it's recommended that you run the commands one by +# one by copying and pasting into the shell. + +if [ $# -ne 2 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "First argument specifies a root directory of Chime4 data" + echo "Second argument specifies a unique name for different enhancement method" + exit 1; +fi + +# set language models +lm_suffix=${order}gkn_5k +rnnlm_suffix=rnnlm_5k_h${hidden} + +# data root +chime4_data=$1 +# enhan data +enhan=$2 + +# check data +if [ ! -d $chime4_data ]; then + echo "$chime4_data does not exist. Please specify chime4 data root correctly" && exit 1 +fi + +# check whether run_dnn is executed +srcdir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats +if [ ! -d $srcdir ]; then + echo "error, execute local/run_dnn.sh, first" + exit 1; +fi + +# train a high-order n-gram language model +if [ $stage -le 1 ]; then + local/chime4_train_lms.sh $chime4_data || exit 1; +fi + +# train a RNN language model +if [ $stage -le 2 ]; then + local/chime4_train_rnnlms.sh $chime4_data || exit 1; +fi + +# preparation +dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_lmrescore +mkdir -p $dir +# make a symbolic link to graph info +if [ ! -e $dir/graph_tgpr_5k ]; then + if [ ! 
-e exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then + echo "graph is missing, execute local/run_dnn.sh, correctly" + exit 1; + fi + pushd . ; cd $dir + ln -s ../tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k . + popd +fi + +# rescore lattices by a high-order N-gram +if [ $stage -le 3 ]; then + # check the best iteration + if [ ! -f $srcdir/log/best_wer_$enhan ]; then + echo "error, execute local/run_dnn.sh, first" + exit 1; + fi + it=`cut -f 1 -d" " $srcdir/log/best_wer_$enhan | awk -F'[_]' '{print $1}'` + # rescore lattices + if $eval_flag; then + tasks="dt05_simu dt05_real et05_simu et05_real" + else + tasks="dt05_simu dt05_real" + fi + for t in $tasks; do + steps/lmrescore.sh --mode 3 \ + data/lang_test_tgpr_5k \ + data/lang_test_${lm_suffix} \ + data-fmllr-tri3b/${t}_$enhan \ + $srcdir/decode_tgpr_5k_${t}_${enhan}_it$it \ + $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix} + done + # rescored results by high-order n-gram LM + mkdir -p $dir/log + local/chime4_calc_wers.sh $dir ${enhan}_${lm_suffix} $dir/graph_tgpr_5k \ + > $dir/best_wer_${enhan}_${lm_suffix}.result + head -n 15 $dir/best_wer_${enhan}_${lm_suffix}.result +fi + +# N-best rescoring using a RNNLM +if [ $stage -le 4 ]; then + # check the best lmw + if [ ! -f $dir/log/best_wer_${enhan}_${lm_suffix} ]; then + echo "error, rescoring with a high-order n-gram seems to be failed" + exit 1; + fi + lmw=`cut -f 1 -d" " $dir/log/best_wer_${enhan}_${lm_suffix} | awk -F'[_]' '{print $NF}'` + # rescore n-best list for all sets + if $eval_flag; then + tasks="dt05_simu dt05_real et05_simu et05_real" + else + tasks="dt05_simu dt05_real" + fi + for t in $tasks; do + steps/rnnlmrescore.sh --inv-acwt $lmw --N $nbest --use-phi true \ + $rnnweight \ + data/lang_test_${lm_suffix} \ + data/lang_test_${rnnlm_suffix} \ + data-fmllr-tri3b/${t}_$enhan \ + $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix} \ + $dir/decode_tgpr_5k_${t}_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest} + done + # calc wers for RNNLM results + local/chime4_calc_wers.sh $dir ${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest} $dir/graph_tgpr_5k \ + > $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result + head -n 15 $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result +fi diff --git a/tools/ASR_2ch_track/local/run_lmrescore_recog.sh b/tools/ASR_2ch_track/local/run_lmrescore_recog.sh new file mode 100644 index 0000000000000000000000000000000000000000..8b57585fda09826a028f3eea151dc37606fbda77 --- /dev/null +++ b/tools/ASR_2ch_track/local/run_lmrescore_recog.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +# Copyright 2015 University of Sheffield (Jon Barker, Ricard Marxer) +# Inria (Emmanuel Vincent) +# Mitsubishi Electric Research Labs (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori) + +nj=12 +stage=1 +order=5 +hidden=300 +rnnweight=0.5 +nbest=100 +train=noisy +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +. ./path.sh +. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system. + ## This relates to the queue. + +# This is a shell script, but it's recommended that you run the commands one by +# one by copying and pasting into the shell. 
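+# As in run.sh, this script is typically invoked with the enhancement name and
+# the pre-trained model directory (example path given only for illustration):
+#   local/run_lmrescore_recog.sh beamformit_2mics ../../tools/ASR_models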
+ +if [ $# -ne 2 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "First argument specifies a unique name for different enhancement method" + echo "Second argument specifies acoustic and language model directory" + exit 1; +fi + +# set language models +lm_suffix=${order}gkn_5k +rnnlm_suffix=rnnlm_5k_h${hidden} + +# enhan data +enhan=$1 +# set model directory +mdir=$2 +srcdir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats + +# check language models +if [ ! -d $mdir/data/lang ]; then + echo "error, set $mdir correctly" + exit 1; +fi + +# preparation +dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_lmrescore +mkdir -p $dir +# make a symbolic link to graph info +if [ ! -e $dir/graph_tgpr_5k ]; then + if [ ! -e exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then + echo "graph is missing, execute local/run_dnn.sh, correctly" + exit 1; + fi + pushd . ; cd $dir + ln -s ../tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k . + popd +fi + +# rescore lattices by a high-order N-gram +if [ $stage -le 3 ]; then + # check the best iteration + if [ ! -f $srcdir/log/best_wer_$enhan ]; then + echo "$0: error $srcdir/log/best_wer_$enhan not found. execute local/run_dnn.sh, first" + exit 1; + fi + it=`cut -f 1 -d" " $srcdir/log/best_wer_$enhan | awk -F'[_]' '{print $1}'` + # rescore lattices + if $eval_flag; then + tasks="dt05_simu dt05_real et05_simu et05_real" + else + tasks="dt05_simu dt05_real" + fi + for t in $tasks; do + steps/lmrescore.sh --mode 3 \ + $mdir/data/lang_test_tgpr_5k \ + $mdir/data/lang_test_${lm_suffix} \ + data-fmllr-tri3b/${t}_$enhan \ + $srcdir/decode_tgpr_5k_${t}_${enhan}_it$it \ + $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix} + done + # rescored results by high-order n-gram LM + mkdir -p $dir/log + local/chime4_calc_wers.sh $dir ${enhan}_${lm_suffix} $dir/graph_tgpr_5k \ + > $dir/best_wer_${enhan}_${lm_suffix}.result + head -n 15 $dir/best_wer_${enhan}_${lm_suffix}.result +fi + +# N-best rescoring using a RNNLM +if [ $stage -le 4 ]; then + # check the best lmw + if [ ! -f $dir/log/best_wer_${enhan}_${lm_suffix} ]; then + echo "error, rescoring with a high-order n-gram seems to be failed" + exit 1; + fi + lmw=`cut -f 1 -d" " $dir/log/best_wer_${enhan}_${lm_suffix} | awk -F'[_]' '{print $NF}'` + # rescore n-best list for all sets + if $eval_flag; then + tasks="dt05_simu dt05_real et05_simu et05_real" + else + tasks="dt05_simu dt05_real" + fi + for t in $tasks; do + steps/rnnlmrescore.sh --inv-acwt $lmw --N $nbest --use-phi true \ + $rnnweight \ + $mdir/data/lang_test_${lm_suffix} \ + $mdir/data/lang_test_${rnnlm_suffix} \ + data-fmllr-tri3b/${t}_$enhan \ + $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix} \ + $dir/decode_tgpr_5k_${t}_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest} + done + # calc wers for RNNLM results + local/chime4_calc_wers.sh $dir ${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest} $dir/graph_tgpr_5k \ + > $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result + head -n 15 $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result +fi diff --git a/tools/ASR_2ch_track/local/score.sh b/tools/ASR_2ch_track/local/score.sh new file mode 100644 index 0000000000000000000000000000000000000000..b18f350416da473dbc8b570145cd345f04ed3a1b --- /dev/null +++ b/tools/ASR_2ch_track/local/score.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +[ -f ./path.sh ] && . ./path.sh + +# begin configuration section. 
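+# The defaults below can be overridden on the command line via parse_options.sh,
+# e.g. (illustrative values): --min_lmwt 9 --max_lmwt 15 --word_ins_penalty 0.5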
+cmd=run.pl
+stage=0
+decode_mbr=true
+reverse=false
+word_ins_penalty=0.0
+min_lmwt=5
+max_lmwt=20
+#end configuration section.
+
+[ -f ./path.sh ] && . ./path.sh
+. parse_options.sh || exit 1;
+
+if [ $# -ne 3 ]; then
+ echo "Usage: local/score.sh [--cmd (run.pl|queue.pl...)] <data-dir> <lang-dir|graph-dir> <decode-dir>"
+ echo " Options:"
+ echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes."
+ echo " --stage (0|1|2) # start scoring script from part-way through."
+ echo " --decode_mbr (true/false) # minimum Bayes risk decoding (confusion network)."
+ echo " --min_lmwt # minimum LM-weight for lattice rescoring "
+ echo " --max_lmwt # maximum LM-weight for lattice rescoring "
+ echo " --reverse (true/false) # score with time reversed features "
+ exit 1;
+fi
+
+data=$1
+lang_or_graph=$2
+dir=$3
+
+symtab=$lang_or_graph/words.txt
+
+for f in $symtab $dir/lat.1.gz $data/text; do
+ [ ! -f $f ] && echo "score.sh: no such file $f" && exit 1;
+done
+
+mkdir -p $dir/scoring/log
+
+cat $data/text | sed 's:<NOISE>::g' | sed 's:<SPOKEN_NOISE>::g' > $dir/scoring/test_filt.txt
+
+$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/best_path.LMWT.log \
+ lattice-scale --inv-acoustic-scale=LMWT "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \
+ lattice-add-penalty --word-ins-penalty=$word_ins_penalty ark:- ark:- \| \
+ lattice-best-path --word-symbol-table=$symtab \
+ ark:- ark,t:$dir/scoring/LMWT.tra || exit 1;
+
+if $reverse; then
+ for lmwt in `seq $min_lmwt $max_lmwt`; do
+ mv $dir/scoring/$lmwt.tra $dir/scoring/$lmwt.tra.orig
+ awk '{ printf("%s ",$1); for(i=NF; i>1; i--){ printf("%s ",$i); } printf("\n"); }' \
+ <$dir/scoring/$lmwt.tra.orig >$dir/scoring/$lmwt.tra
+ done
+fi
+
+# Note: the double level of quoting for the sed command
+$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/score.LMWT.log \
+ cat $dir/scoring/LMWT.tra \| \
+ utils/int2sym.pl -f 2- $symtab \| sed 's:\<UNK\>::g' \| \
+ compute-wer --text --mode=present \
+ ark:$dir/scoring/test_filt.txt ark,p:- ">&" $dir/wer_LMWT || exit 1;
+
+exit 0;
diff --git a/tools/ASR_2ch_track/local/simu_enhan_chime4_data_prep.sh b/tools/ASR_2ch_track/local/simu_enhan_chime4_data_prep.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d6419fa90b94a42312802c224d68f4ea27626fb9
--- /dev/null
+++ b/tools/ASR_2ch_track/local/simu_enhan_chime4_data_prep.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+set -e
+
+# Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
+# Apache 2.0.
+
+# This is modified from the script in standard Kaldi recipe to account
+# for the way the WSJ data is structured on the Edinburgh systems.
+# - Arnab Ghoshal, 29/05/12
+
+# Modified from the script for CHiME2 baseline
+# Shinji Watanabe 02/13/2015
+
+# Config:
+eval_flag=true # make it true when the evaluation data are released
+
+. utils/parse_options.sh || exit 1;
+
+if [ $# -ne 2 ]; then
+ printf "\nUSAGE: %s \n\n" `basename $0`
+ echo "The arguments should be the enhancement method name and the directory that only contains enhanced speech data."
+ exit 1;
+fi
+
+echo "$0 $@" # Print the command line for logging
+
+enhan=$1
+audio_dir=$2
+
+dir=`pwd`/data/local/data
+mkdir -p $dir
+local=`pwd`/local
+utils=`pwd`/utils
+odir=`pwd`/data
+
+. ./path.sh # Needed for KALDI_ROOT
+export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin
+sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
+if [ !
-x $sph2pipe ]; then + echo "Could not find (or execute) the sph2pipe program at $sph2pipe"; + exit 1; +fi + +if $eval_flag; then +list_set="tr05_simu_$enhan dt05_simu_$enhan et05_simu_$enhan" +else +list_set="tr05_simu_$enhan dt05_simu_$enhan" +fi + +cd $dir + +find $audio_dir/ -name '*.wav' | grep 'tr05_bus_simu\|tr05_caf_simu\|tr05_ped_simu\|tr05_str_simu' | sort -u > tr05_simu_$enhan.flist +find $audio_dir/ -name '*.wav' | grep 'dt05_bus_simu\|dt05_caf_simu\|dt05_ped_simu\|dt05_str_simu' | sort -u > dt05_simu_$enhan.flist +if $eval_flag; then +find $audio_dir/ -name '*.wav' | grep 'et05_bus_simu\|et05_caf_simu\|et05_ped_simu\|et05_str_simu' | sort -u > et05_simu_$enhan.flist +fi + +# make a scp file from file list +for x in $list_set; do + cat $x.flist | awk -F'[/]' '{print $NF}'| sed -e 's/\.wav/_SIMU/' > ${x}_wav.ids + paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp +done + +# make a transcription from dot +# simulation training data extract dot file from original WSJ0 data +# since it is generated from these data +if [ ! -e dot_files.flist ]; then + echo "Could not find $dir/dot_files.flist files, first run local/clean_wsj0_data_prep.sh"; + exit 1; +fi +cat tr05_simu_${enhan}_wav.scp | awk -F'[_]' '{print $2}' | tr '[A-Z]' '[a-z]' \ + | $local/find_noisy_transcripts.pl dot_files.flist | cut -f 2- -d" " > tr05_simu_$enhan.txt +cat tr05_simu_${enhan}_wav.scp | cut -f 1 -d" " > tr05_simu_$enhan.ids +paste -d" " tr05_simu_$enhan.ids tr05_simu_$enhan.txt | sort -k 1 > tr05_simu_$enhan.trans1 +# dt05 and et05 simulation data are generated from the CHiME4 booth recording +# and we use CHiME4 dot files +cat dt05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_SIMU"}'> dt05_simu_$enhan.ids +cat dt05_simu.dot | sed -e 's/(.*)//' > dt05_simu_$enhan.txt +paste -d" " dt05_simu_$enhan.ids dt05_simu_$enhan.txt | sort -k 1 > dt05_simu_$enhan.trans1 +if $eval_flag; then +cat et05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_SIMU"}'> et05_simu_$enhan.ids +cat et05_simu.dot | sed -e 's/(.*)//' > et05_simu_$enhan.txt +paste -d" " et05_simu_$enhan.ids et05_simu_$enhan.txt | sort -k 1 > et05_simu_$enhan.trans1 +fi + +# Do some basic normalization steps. At this point we don't remove OOVs-- +# that will be done inside the training scripts, as we'd like to make the +# data-preparation stage independent of the specific lexicon used. +noiseword=""; +for x in $list_set;do + cat $x.trans1 | $local/normalize_transcript.pl $noiseword \ + | sort > $x.txt || exit 1; +done + +# Make the utt2spk and spk2utt files. +for x in $list_set; do + cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk + cat ${x}_wav.scp | awk '{print $1}' > $x.utt + paste -d" " $x.utt $x.spk > $x.utt2spk + cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1; +done + +# copying data to data/... 
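+# Each set below becomes a standard Kaldi data directory,
+# i.e. $odir/<set>/{wav.scp,text,spk2utt,utt2spk}, e.g. data/dt05_simu_<enhan>.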
+for x in $list_set; do + mkdir -p $odir/$x + cp ${x}_wav.scp $odir/$x/wav.scp || exit 1; + cp ${x}.txt $odir/$x/text || exit 1; + cp ${x}.spk2utt $odir/$x/spk2utt || exit 1; + cp ${x}.utt2spk $odir/$x/utt2spk || exit 1; +done + +echo "Data preparation succeeded" diff --git a/tools/ASR_2ch_track/local/simu_noisy_chime4_data_prep.sh b/tools/ASR_2ch_track/local/simu_noisy_chime4_data_prep.sh new file mode 100644 index 0000000000000000000000000000000000000000..70e4c756152ab817353a389b51756a2314fa4b8e --- /dev/null +++ b/tools/ASR_2ch_track/local/simu_noisy_chime4_data_prep.sh @@ -0,0 +1,122 @@ +#!/bin/bash +set -e + +# Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0. + +# This is modified from the script in standard Kaldi recipe to account +# for the way the WSJ data is structured on the Edinburgh systems. +# - Arnab Ghoshal, 29/05/12 + +# Modified from the script for CHiME2 baseline +# Shinji Watanabe 02/13/2015 + +# Config: +eval_flag=true # make it true when the evaluation data are released + +. utils/parse_options.sh || exit 1; + +if [ $# -ne 1 ]; then + printf "\nUSAGE: %s \n\n" `basename $0` + echo "The argument should be a the top-level Chime4 directory." + echo "It is assumed that there will be a 'data' subdirectory" + echo "within the top-level corpus directory." + exit 1; +fi + +echo "$0 $@" # Print the command line for logging + +audio_dir=$1/data/audio/16kHz/isolated +trans_dir=$1/data/transcriptions + +echo "extract 5th channel (CH5.wav, the center bottom edge in the front of the tablet) for noisy data" + +dir=`pwd`/data/local/data +lmdir=`pwd`/data/local/nist_lm +mkdir -p $dir $lmdir +local=`pwd`/local +utils=`pwd`/utils + +. ./path.sh # Needed for KALDI_ROOT +export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin +sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe +if [ ! -x $sph2pipe ]; then + echo "Could not find (or execute) the sph2pipe program at $sph2pipe"; + exit 1; +fi + +if $eval_flag; then +list_set="tr05_simu_noisy dt05_simu_noisy et05_simu_noisy" +else +list_set="tr05_simu_noisy dt05_simu_noisy" +fi + +cd $dir + +find $audio_dir -name '*CH5.wav' | grep 'tr05_bus_simu\|tr05_caf_simu\|tr05_ped_simu\|tr05_str_simu' | sort -u > tr05_simu_noisy.flist +find $audio_dir -name '*CH5.wav' | grep 'dt05_bus_simu\|dt05_caf_simu\|dt05_ped_simu\|dt05_str_simu' | sort -u > dt05_simu_noisy.flist +if $eval_flag; then +find $audio_dir -name '*CH5.wav' | grep 'et05_bus_simu\|et05_caf_simu\|et05_ped_simu\|et05_str_simu' | sort -u > et05_simu_noisy.flist +fi + +# make a dot format from json annotation files +cp $trans_dir/dt05_simu.dot_all dt05_simu.dot +if $eval_flag; then +cp $trans_dir/et05_simu.dot_all et05_simu.dot +fi + +# make a scp file from file list +for x in $list_set; do + cat $x.flist | awk -F'[/]' '{print $NF}'| sed -e 's/\.wav/_SIMU/' > ${x}_wav.ids + paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp +done + +# make a transcription from dot +# simulation training data extract dot file from original WSJ0 data +# since it is generated from these data +if [ ! 
-e dot_files.flist ]; then + echo "Could not find $dir/dot_files.flist files, first run local/clean_wsj0_data_prep.sh"; + exit 1; +fi +cat tr05_simu_noisy_wav.scp | awk -F'[_]' '{print $2}' | tr '[A-Z]' '[a-z]' \ + | $local/find_noisy_transcripts.pl dot_files.flist | cut -f 2- -d" " > tr05_simu_noisy.txt +cat tr05_simu_noisy_wav.scp | cut -f 1 -d" " > tr05_simu_noisy.ids +paste -d" " tr05_simu_noisy.ids tr05_simu_noisy.txt | sort -k 1 > tr05_simu_noisy.trans1 +# dt05 and et05 simulation data are generated from the CHiME4 booth recording +# and we use CHiME4 dot files +cat dt05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_SIMU"}'> dt05_simu_noisy.ids +cat dt05_simu.dot | sed -e 's/(.*)//' > dt05_simu_noisy.txt +paste -d" " dt05_simu_noisy.ids dt05_simu_noisy.txt | sort -k 1 > dt05_simu_noisy.trans1 +if $eval_flag; then +cat et05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_SIMU"}'> et05_simu_noisy.ids +cat et05_simu.dot | sed -e 's/(.*)//' > et05_simu_noisy.txt +paste -d" " et05_simu_noisy.ids et05_simu_noisy.txt | sort -k 1 > et05_simu_noisy.trans1 +fi + +# Do some basic normalization steps. At this point we don't remove OOVs-- +# that will be done inside the training scripts, as we'd like to make the +# data-preparation stage independent of the specific lexicon used. +noiseword=""; +for x in $list_set;do + cat $x.trans1 | $local/normalize_transcript.pl $noiseword \ + | sort > $x.txt || exit 1; +done + +# Make the utt2spk and spk2utt files. +for x in $list_set; do + cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk + cat ${x}_wav.scp | awk '{print $1}' > $x.utt + paste -d" " $x.utt $x.spk > $x.utt2spk + cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1; +done + +# copying data to data/... +for x in $list_set; do + mkdir -p ../../$x + cp ${x}_wav.scp ../../$x/wav.scp || exit 1; + cp ${x}.txt ../../$x/text || exit 1; + cp ${x}.spk2utt ../../$x/spk2utt || exit 1; + cp ${x}.utt2spk ../../$x/utt2spk || exit 1; +done + +echo "Data preparation succeeded" diff --git a/tools/ASR_2ch_track/local/wsj_prepare_dict.sh b/tools/ASR_2ch_track/local/wsj_prepare_dict.sh new file mode 100644 index 0000000000000000000000000000000000000000..6ddebd602937f8afb0752d2f1c5b593beed8c734 --- /dev/null +++ b/tools/ASR_2ch_track/local/wsj_prepare_dict.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# Copyright 2010-2012 Microsoft Corporation +# 2012-2014 Johns Hopkins University (Author: Daniel Povey) + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + +# Call this script from one level above, e.g. from the s3/ directory. It puts +# its output in data/local/. 
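+# Typical invocation from the recipe top directory (the script takes no
+# arguments), as done in local/run_init.sh:
+#   local/wsj_prepare_dict.sh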
+ +# The parts of the output of this that will be needed are +# [in data/local/dict/ ] +# lexicon.txt +# extra_questions.txt +# nonsilence_phones.txt +# optional_silence.txt +# silence_phones.txt + +# run this from ../ +dir=data/local/dict +mkdir -p $dir + + +# (1) Get the CMU dictionary +svn co https://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict \ + $dir/cmudict || exit 1; + +# can add -r 10966 for strict compatibility. + + +#(2) Dictionary preparation: + + +# Make phones symbol-table (adding in silence and verbal and non-verbal noises at this point). +# We are adding suffixes _B, _E, _S for beginning, ending, and singleton phones. + +# silence phones, one per line. +(echo SIL; echo SPN; echo NSN) > $dir/silence_phones.txt +echo SIL > $dir/optional_silence.txt + +# nonsilence phones; on each line is a list of phones that correspond +# really to the same base phone. +cat $dir/cmudict/cmudict.0.7a.symbols | perl -ane 's:\r::; print;' | \ + perl -e 'while(<>){ + chop; m:^([^\d]+)(\d*)$: || die "Bad phone $_"; + $phones_of{$1} .= "$_ "; } + foreach $list (values %phones_of) {print $list . "\n"; } ' \ + | sort > $dir/nonsilence_phones.txt || exit 1; + +# A few extra questions that will be added to those obtained by automatically clustering +# the "real" phones. These ask about stress; there's also one for silence. +cat $dir/silence_phones.txt| awk '{printf("%s ", $1);} END{printf "\n";}' > $dir/extra_questions.txt || exit 1; +cat $dir/nonsilence_phones.txt | perl -e 'while(<>){ foreach $p (split(" ", $_)) { + $p =~ m:^([^\d]+)(\d*)$: || die "Bad phone $_"; $q{$2} .= "$p "; } } foreach $l (values %q) {print "$l\n";}' \ + >> $dir/extra_questions.txt || exit 1; + +grep -v ';;;' $dir/cmudict/cmudict.0.7a | \ + perl -ane 'if(!m:^;;;:){ s:(\S+)\(\d+\) :$1 :; print; }' \ + > $dir/lexicon1_raw_nosil.txt || exit 1; + +# Add to cmudict the silences, noises etc. + +# the sort | uniq is to remove a duplicated pron from cmudict. +(echo '!SIL SIL'; echo ' SPN'; echo ' SPN'; echo ' NSN'; ) | \ + cat - $dir/lexicon1_raw_nosil.txt | sort | uniq > $dir/lexicon2_raw.txt || exit 1; + + +# lexicon.txt is without the _B, _E, _S, _I markers. +# This is the input to wsj_format_data.sh +cp $dir/lexicon2_raw.txt $dir/lexicon.txt + +rm $dir/lexiconp.txt 2>/dev/null + +echo "Dictionary preparation succeeded" + diff --git a/tools/ASR_2ch_track/path.sh b/tools/ASR_2ch_track/path.sh new file mode 100644 index 0000000000000000000000000000000000000000..926d5def836532af3c1f3a0ab4f594efc4dc9cee --- /dev/null +++ b/tools/ASR_2ch_track/path.sh @@ -0,0 +1,10 @@ +if [ -z $KALDI_ROOT ] ; then + echo "Environemnt variable KALDI_ROOT is unset. Setting it to default /data1/tools/kaldi" + export KALDI_ROOT=/data1/tools/kaldi +fi + +[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh +export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/tools/irstlm/bin/:$KALDI_ROOT/tools/kaldi_lm/:$PWD:$PATH +[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1 +. 
$KALDI_ROOT/tools/config/common_path.sh
+export LC_ALL=C
diff --git a/tools/ASR_2ch_track/run.sh b/tools/ASR_2ch_track/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..adeb25008277938a5f3a4ff99ce8f0ae63d2ef6b
--- /dev/null
+++ b/tools/ASR_2ch_track/run.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+# Kaldi ASR baseline for the CHiME-4 Challenge (2ch track: 2 channel track)
+#
+# Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer)
+# Inria (Emmanuel Vincent)
+# Mitsubishi Electric Research Labs (Shinji Watanabe)
+# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+. ./path.sh
+. ./cmd.sh
+
+# Config:
+stage=0 # resume training with --stage=N
+flatstart=false
+
+. utils/parse_options.sh || exit 1;
+
+# Set bash to 'debug' mode, it will exit on :
+# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+set -e
+set -u
+set -o pipefail
+
+#####check data and model paths################
+# Set a main root directory of the CHiME4 data
+# If you use scripts distributed in the CHiME4 package,
+chime4_data=`pwd`/../..
+# Otherwise, please specify it, e.g.,
+# chime4_data=/db/laputa1/data/original/public/CHiME4
+if [ ! -d $chime4_data ]; then
+ echo "$chime4_data does not exist. Please specify chime4 data root correctly" && exit 1
+fi
+# Set a model directory for the CHiME4 data.
+modeldir=$chime4_data/tools/ASR_models
+for d in $modeldir $modeldir/data/{lang,lang_test_tgpr_5k,lang_test_5gkn_5k,lang_test_rnnlm_5k_h300,local} \
+ $modeldir/exp/{tri3b_tr05_multi_noisy,tri4a_dnn_tr05_multi_noisy,tri4a_dnn_tr05_multi_noisy_smbr_i1lats}; do
+ [ ! -d $d ] && echo "$0: no such directory $d. specify models correctly or execute './run.sh --flatstart true' first" && exit 1;
+done
+#####check data and model paths finished#######
+
+
+#####main program start################
+# You can execute run_init.sh only "once"
+# This creates 3-gram LM, FSTs, and basic task files
+if [ $stage -le 0 ] && $flatstart; then
+ local/run_init.sh $chime4_data
+fi
+
+# Using Beamformit
+# See Hori et al, "The MERL/SRI system for the 3rd CHiME challenge using beamforming,
+# robust feature extraction, and advanced speech recognition," in Proc. ASRU'15
+# note that beamformed wav files are generated in the following directory
+enhancement_method=beamformit_2mics
+enhancement_data=`pwd`/enhan/$enhancement_method
+if [ $stage -le 1 ]; then
+ local/run_beamform_2ch_track.sh --cmd "$train_cmd" --nj 20 $chime4_data/data/audio/16kHz/isolated_2ch_track $enhancement_data
+fi
+
+# GMM based ASR experiment without "retraining"
+# Please set the directory of your speech enhancement method.
+# run_gmm_recog.sh can be run every time you change the speech enhancement technique.
+# The directory structure and audio files must follow the attached baseline enhancement directory
+if [ $stage -le 2 ]; then
+ if $flatstart; then
+ local/run_gmm.sh $enhancement_method $enhancement_data $chime4_data
+ else
+ local/run_gmm_recog.sh $enhancement_method $enhancement_data $modeldir
+ fi
+fi
+
+# DNN based ASR experiment
+# Since it takes time to evaluate DNN, we make the GMM and DNN scripts separately.
+# You may execute it after you have obtained promising results from the GMM-based ASR experiments
+if [ $stage -le 3 ]; then
+ if $flatstart; then
+ local/run_dnn.sh $enhancement_method
+ else
+ local/run_dnn_recog.sh $enhancement_method $modeldir
+ fi
+fi
+
+# LM-rescoring experiment with 5-gram and RNN LMs
+# It takes a few days to train a RNNLM.
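+# Like the other stages, this stage can be re-run on its own once the earlier
+# stages have completed, e.g.:
+#   ./run.sh --stage 4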
+if [ $stage -le 4 ]; then + if $flatstart; then + local/run_lmrescore.sh $chime4_data $enhancement_method + else + local/run_lmrescore_recog.sh $enhancement_method $modeldir + fi +fi + +echo "Done." diff --git a/tools/ASR_2ch_track/steps/align_basis_fmllr.sh b/tools/ASR_2ch_track/steps/align_basis_fmllr.sh new file mode 100644 index 0000000000000000000000000000000000000000..b3a2107a086cfae6b8054b37cece5fb494406403 --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_basis_fmllr.sh @@ -0,0 +1,153 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Copyright 2013 GoVivace Inc (Author: Nagendra Goel) +# Apache 2.0 + +# Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta) +# + fMLLR (probably with SAT models). +# It first computes an alignment with the final.alimdl (or the final.mdl if final.alimdl +# is not present), then does 2 iterations of fMLLR estimation. + +# If you supply the --use-graphs option, it will use the training +# graphs from the source directory (where the model is). In this +# case the number of jobs must match the source directory. + + +# Begin configuration section. +stage=0 +nj=4 +cmd=run.pl +use_graphs=false +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +boost_silence=1.5 # factor by which to boost silence during alignment. +fmllr_update_type=full +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: steps/align_fmllr.sh " + echo "e.g.: steps/align_fmllr.sh data/train data/lang exp/tri1 exp/tri1_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --fmllr-update-type (full|diag|offset|none) # default full." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 +graphdir=$dir + +oov=`cat $lang/oov.int` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +sdata=$data/split$nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +cp $srcdir/final.occs $dir; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` +cp $srcdir/delta_opts $dir 2>/dev/null + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +## Set up model and alignment model. 
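+# final.alimdl, when present, is the speaker-independent model used for the
+# first-pass alignment on SI features; final.mdl is the speaker-adapted model
+# used once the fMLLR transforms have been estimated.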
+mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; +alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/boost_phones.csl` $alimdl - |" +mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |" + +## Work out where we're getting the graphs from. +if $use_graphs; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1; + graphdir=$srcdir +else + graphdir=$dir + if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; + fi +fi + + +if [ $stage -le 1 ]; then + echo "$0: aligning data in $data using $alimdl and speaker-independent features." + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 2 ]; then + echo "$0: computing fMLLR transforms" + if [ "$alimdl" != "$mdl" ]; then + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \ + gmm-est-basis-fmllr-gpost --fmllr-min-count=22 --num-iters=10 \ + --size-scale=0.2 --step-size-iters=3 \ + --write-weights=ark:$dir/pre_wgt.JOB \ + $mdl $srcdir/fmllr.basis "$sifeats" ark,s,cs:- \ + ark:$dir/trans.JOB || exit 1; +# else +# $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ +# ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ +# weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ +# gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ +# --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \ +# ark,s,cs:- ark:$dir/trans.JOB || exit 1; + fi +fi + +feats="$sifeats transform-feats ark:$dir/pre_trans.JOB ark:- ark:- |" + +if [ $stage -le 3 ]; then + echo "$0: doing final alignment." + $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +#rm $dir/pre_ali.*.gz + +echo "$0: done aligning data." + +utils/summarize_warnings.pl $dir/log + +exit 0; diff --git a/tools/ASR_2ch_track/steps/align_fmllr.sh b/tools/ASR_2ch_track/steps/align_fmllr.sh new file mode 100644 index 0000000000000000000000000000000000000000..561963a3176e9f68ca67cc05e07cd17b0e238360 --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_fmllr.sh @@ -0,0 +1,153 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta) +# + fMLLR (probably with SAT models). +# It first computes an alignment with the final.alimdl (or the final.mdl if final.alimdl +# is not present), then does 2 iterations of fMLLR estimation. + +# If you supply the --use-graphs option, it will use the training +# graphs from the source directory (where the model is). 
In this +# case the number of jobs must match the source directory. + + +# Begin configuration section. +stage=0 +nj=4 +cmd=run.pl +use_graphs=false +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +careful=false +boost_silence=1.0 # factor by which to boost silence during alignment. +fmllr_update_type=full +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: steps/align_fmllr.sh " + echo "e.g.: steps/align_fmllr.sh data/train data/lang exp/tri1 exp/tri1_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --fmllr-update-type (full|diag|offset|none) # default full." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +oov=`cat $lang/oov.int` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +sdata=$data/split$nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +cp $srcdir/final.alimdl $dir 2>/dev/null +cp $srcdir/final.occs $dir; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` +cp $srcdir/delta_opts $dir 2>/dev/null + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + cp $srcdir/full.mat $dir 2>/dev/null + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +## Set up model and alignment model. +mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; +alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |" +mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |" + + +## Work out where we're getting the graphs from. +if $use_graphs; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1; + [ ! 
-f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1; + graphdir=$srcdir +else + graphdir=$dir + if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; + fi +fi + + +if [ $stage -le 1 ]; then + echo "$0: aligning data in $data using $alimdl and speaker-independent features." + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$alimdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 2 ]; then + echo "$0: computing fMLLR transforms" + if [ "$alimdl" != "$mdl" ]; then + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \ + gmm-est-fmllr-gpost --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \ + ark,s,cs:- ark:$dir/trans.JOB || exit 1; + else + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \ + ark,s,cs:- ark:$dir/trans.JOB || exit 1; + fi +fi + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +if [ $stage -le 3 ]; then + echo "$0: doing final alignment." + $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +rm $dir/pre_ali.*.gz + +echo "$0: done aligning data." + +utils/summarize_warnings.pl $dir/log + +exit 0; diff --git a/tools/ASR_2ch_track/steps/align_fmllr_lats.sh b/tools/ASR_2ch_track/steps/align_fmllr_lats.sh new file mode 100644 index 0000000000000000000000000000000000000000..12c2a6479e43846bf5b81b748821a8421da06bfb --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_fmllr_lats.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Copyright 2012-2015 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Version of align_fmllr.sh that generates lattices (lat.*.gz) with +# alignments of alternative pronunciations in them. Mainly intended +# as a precursor to CTC training for now. + +# Begin configuration section. +stage=0 +nj=4 +cmd=run.pl +# Begin configuration. +scale_opts="--transition-scale=1.0 --self-loop-scale=0.1" +acoustic_scale=0.1 +beam=10 +retry_beam=40 +final_beam=20 # For the lattice-generation phase there is no retry-beam. This + # is a limitation of gmm-latgen-faster. We just use an + # intermediate beam. We'll lose a little data and it will be + # slightly slower. (however, the min-active of 200 that + # gmm-latgen-faster defaults to may help.) +boost_silence=1.0 # factor by which to boost silence during alignment. +fmllr_update_type=full +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. 
parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: steps/align_fmllr_lats.sh " + echo "e.g.: steps/align_fmllr_lats.sh data/train data/lang exp/tri1 exp/tri1_lats" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --fmllr-update-type (full|diag|offset|none) # default full." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +oov=`cat $lang/oov.int` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +sdata=$data/split$nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +cp $srcdir/final.alimdl $dir 2>/dev/null +cp $srcdir/final.occs $dir; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` +cp $srcdir/delta_opts $dir 2>/dev/null + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + cp $srcdir/full.mat $dir 2>/dev/null + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +## Set up model and alignment model. +mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; +alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |" +mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |" + + +## because gmm-latgen-faster doesn't support adding the transition-probs to the +## graph itself, we need to bake them into the compiled graphs. This means we can't reuse previously compiled graphs, +## because the other scripts write them without transition probs. +if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $scale_opts $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; +fi + + +if [ $stage -le 1 ]; then + # Note: we need to set --transition-scale=0.0 --self-loop-scale=0.0 because, + # as explained above, we compiled the transition probs into the training + # graphs. + echo "$0: aligning data in $data using $alimdl and speaker-independent features." 
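+  # This first pass writes alignments to pre_ali.*.gz; they are only used to
+  # estimate the fMLLR transforms below and are deleted at the end of the script.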
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + gmm-align-compiled --transition-scale=0.0 --self-loop-scale=0.0 --acoustic-scale=$acoustic_scale \ + --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \ + "ark:gunzip -c $dir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 2 ]; then + echo "$0: computing fMLLR transforms" + if [ "$alimdl" != "$mdl" ]; then + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \ + gmm-est-fmllr-gpost --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \ + ark,s,cs:- ark:$dir/trans.JOB || exit 1; + else + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \ + ark,s,cs:- ark:$dir/trans.JOB || exit 1; + fi +fi + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +if [ $stage -le 3 ]; then + # Warning: gmm-latgen-faster doesn't support a retry-beam so you may get more + # alignment errors (however, it does have a default min-active=200 so this + # will tend to reduce alignment errors). + # --allow_partial=false makes sure we reach the end of the decoding graph. + # --word-determinize=false makes sure we retain the alternative pronunciations of + # words (including alternatives regarding optional silences). + # --lattice-beam=$beam keeps all the alternatives that were within the beam, + # it means we do no pruning of the lattice (lattices from a training transcription + # will be small anyway). + echo "$0: generating lattices containing alternate pronunciations." + $cmd JOB=1:$nj $dir/log/generate_lattices.JOB.log \ + gmm-latgen-faster --acoustic-scale=$acoustic_scale --beam=$final_beam \ + --lattice-beam=$final_beam --allow-partial=false --word-determinize=false \ + "$mdl_cmd" "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \ + "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1; +fi + +rm $dir/pre_ali.*.gz + +echo "$0: done generating lattices from training transcripts." + +utils/summarize_warnings.pl $dir/log + +exit 0; diff --git a/tools/ASR_2ch_track/steps/align_lvtln.sh b/tools/ASR_2ch_track/steps/align_lvtln.sh new file mode 100644 index 0000000000000000000000000000000000000000..1df2b5617bd6b5deaecbbc6925d1eaa7309e6bf5 --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_lvtln.sh @@ -0,0 +1,196 @@ +#!/bin/bash + +# Copyright 2014 Vimal Manohar + +# Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta) +# Will ignore fMLLR. +# Will estimate VTLN warping factors +# as a by product, which can be used to extract VTLN-warped features. + +# Begin configuration section +stage=0 +nj=4 +cmd=run.pl +use_graphs=false +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10.0 +retry_beam=40 +boost_silence=1.0 # factor by which to boost silence during alignment. +logdet_scale=1.0 +cleanup=false + +# End configuration section +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. 
parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Wrong #arguments ($#, expected 4)" + echo "Usage: steps/align_lvtln.sh [options] " + echo " e.g.: steps/align_lvtln.sh data/train data/lang exp/tri2c exp/tri2c_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --cmd # Command to run in parallel with" + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +oov=`cat $lang/oov.int` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +sdata=$data/split$nj + +if [ -f $data/spk2warp ]; then + echo "$0: file $data/spk2warp exists. This script expects non-VTLN features" + exit 1; +fi + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl,final.lvtln} $dir || exit 1; +cp $srcdir/final.occs $dir; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. + +## Set up the unadapted features "$sifeats" +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $srcdir/full.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +## Set up model and alignment model. +mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; +alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |" +mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |" + +## Work out where we're getting the graphs from. +if $use_graphs; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1; + graphdir=$srcdir +else + graphdir=$dir + if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; + fi +fi + +if [ $stage -le 1 ]; then + echo "$0: aligning data in $data using $alimdl and speaker-independent features." + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ -f $data/segments ]; then + subset_utts="ark:extract-segments scp:$sdata/JOB/wav.scp $sdata/JOB/segments ark:- |" +else + echo "$0 [info]: no segments file exists: using wav.scp directly." 
+ subset_utts="ark:wav-copy scp:$sdata/JOB/wav.scp ark:- |" +fi + +## Get the first-pass LVTLN transforms +if [ $stage -le 2 ]; then + echo "$0: computing first-pass LVTLN transforms." + $cmd JOB=1:$nj $dir/log/lvtln_pass1.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \ + gmm-est-lvtln-trans --verbose=1 --spk2utt=ark:$sdata/JOB/spk2utt --logdet-scale=$logdet_scale \ + $mdl $dir/final.lvtln "$sifeats" ark,s,cs:- ark:$dir/trans_pass1.JOB \ + ark,t:$dir/warp_pass1.JOB || exit 1; +fi +## + +feats1="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans_pass1.JOB ark:- ark:- |" + +## Do a second pass of estimating the LVTLN transform. + +if [ $stage -le 3 ]; then + echo "$0: realigning with transformed features" + $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats1" "ark:|gzip -c >$dir/ali_pass2.JOB.gz" || exit 1; +fi + +if [ $stage -le 4 ]; then + echo "$0: re-estimating LVTLN transforms" + $cmd JOB=1:$nj $dir/log/lvtln_pass1.JOB.log \ + ali-to-post "ark:gunzip -c $dir/ali_pass2.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-post-to-gpost $alimdl "$feats1" ark:- ark:- \| \ + gmm-est-lvtln-trans --verbose=1 --spk2utt=ark:$sdata/JOB/spk2utt --logdet-scale=$logdet_scale \ + $mdl $dir/final.lvtln "$sifeats" ark,s,cs:- ark:$dir/trans.JOB \ + ark,t:$dir/warp.JOB || exit 1; +fi + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +if [ $stage -le 5 ]; then + # This second alignment does not affect the transforms. + echo "$0: realigning with the second-pass LVTLN transforms" + $cmd JOB=1:$nj $dir/log/align.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +if [ -f $dir/warp.1 ]; then + for j in $(seq $nj); do cat $dir/warp_pass1.$j; done > $dir/0.warp || exit 1; + for j in $(seq $nj); do cat $dir/warp.$j; done > $dir/final.warp || exit 1; + ns1=$(cat $dir/0.warp | wc -l) + ns2=$(cat $dir/final.warp | wc -l) + ! [ "$ns1" == "$ns2" ] && echo "$0: Number of speakers differ pass1 vs pass2, $ns1 != $ns2" && exit 1; + + paste $dir/0.warp $dir/final.warp | awk '{x=$2 - $4; if ((x>0?x:-x) > 0.010001) { print $1, $2, $4; }}' > $dir/warp_changed + nc=$(cat $dir/warp_changed | wc -l) + echo "$0: For $nc speakers out of $ns1, warp changed pass1 vs pass2 by >0.01, see $dir/warp_changed for details" +fi + +if true; then # Diagnostics + if [ -f $data/spk2gender ]; then + # To make it easier to eyeball the male and female speakers' warps + # separately, separate them out. 
+ for g in m f; do # means: for gender in male female + cat $dir/final.warp | \ + utils/filter_scp.pl <(grep -w $g $data/spk2gender | awk '{print $1}') > $dir/final.warp.$g + echo -n "The last few warp factors for gender $g are: " + tail -n 10 $dir/final.warp.$g | awk '{printf("%s ", $2);}'; + echo + done + fi +fi + +if $cleanup; then + rm $dir/pre_ali.*.gz $dir/ali_pass?.*.gz $dir/trans_pass1.* $dir/warp_pass1.* $dir/warp.* +fi + +exit 0; + diff --git a/tools/ASR_2ch_track/steps/align_raw_fmllr.sh b/tools/ASR_2ch_track/steps/align_raw_fmllr.sh new file mode 100644 index 0000000000000000000000000000000000000000..0f2fe786bca96cb3b522dac4f47d1727037a32b0 --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_raw_fmllr.sh @@ -0,0 +1,146 @@ +#!/bin/bash +# Copyright 2012-2013 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta) +# + fMLLR (probably with SAT models). +# It first computes an alignment with the final.alimdl (or the final.mdl if final.alimdl +# is not present), then does 2 iterations of fMLLR estimation. + +# If you supply the --use-graphs option, it will use the training +# graphs from the source directory (where the model is). In this +# case the number of jobs must match the source directory. + + +# Begin configuration section. +stage=0 +nj=4 +cmd=run.pl +use_graphs=false +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +boost_silence=1.0 # factor by which to boost silence during alignment. +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: steps/align_fmllr.sh " + echo "e.g.: steps/align_fmllr.sh data/train data/lang exp/tri1 exp/tri1_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +oov=`cat $lang/oov.int` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +sdata=$data/split$nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +cp $srcdir/final.occs $dir; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. + +if [[ ! -f $srcdir/final.mat || ! -f $srcdir/full.mat ]]; then + echo "$0: we require final.mat and full.mat in the source directory $srcdir" +fi + +full_lda_mat="get-full-lda-mat --print-args=false $srcdir/final.mat $srcdir/full.mat -|" +cp $srcdir/full.mat $srcdir/final.mat $dir + +raw_dim=$(feat-to-dim scp:$data/feats.scp -) || exit 1; +! [ "$raw_dim" -gt 0 ] && echo "raw feature dim not set" && exit 1; + +splicedfeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- |" +sifeats="$splicedfeats transform-feats $srcdir/final.mat ark:- ark:- |" + +## Set up model and alignment model. 
+mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; +alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |" +mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |" + + +## Work out where we're getting the graphs from. +if $use_graphs; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1; + graphdir=$srcdir +else + graphdir=$dir + if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; + fi +fi + + +if [ $stage -le 1 ]; then + echo "$0: aligning data in $data using $alimdl and speaker-independent features." + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 2 ]; then + echo "$0: computing fMLLR transforms" + if [ "$alimdl" != "$mdl" ]; then + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \ + gmm-est-fmllr-raw-gpost --raw-feat-dim=$raw_dim --spk2utt=ark:$sdata/JOB/spk2utt \ + $mdl "$full_lda_mat" "$splicedfeats" ark,s,cs:- ark:$dir/raw_trans.JOB || exit 1; + else + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-est-fmllr-raw --raw-feat-dim=$raw_dim --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$full_lda_mat" \ + "$splicedfeats" ark,s,cs:- ark:$dir/raw_trans.JOB || exit 1; + fi +fi + +feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/raw_trans.JOB ark:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + +if [ $stage -le 3 ]; then + echo "$0: doing final alignment." + $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +rm $dir/pre_ali.*.gz + +echo "$0: done aligning data." + +utils/summarize_warnings.pl $dir/log + +exit 0; diff --git a/tools/ASR_2ch_track/steps/align_sgmm.sh b/tools/ASR_2ch_track/steps/align_sgmm.sh new file mode 100644 index 0000000000000000000000000000000000000000..3356f1ab333e91734234135c33e56cef38d05cfd --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_sgmm.sh @@ -0,0 +1,195 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Computes training alignments and (if needed) speaker-vectors, given an +# SGMM system. If the system is built on top of SAT, you should supply +# transforms with the --transform-dir option. 
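+# (If the SGMM was built on fMLLR features and --transform-dir is omitted, the
+# script below only prints a warning and aligns on unadapted features.)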
+ +# If you supply the --use-graphs option, it will use the training +# graphs from the source directory. + +# Begin configuration section. +stage=0 +nj=4 +cmd=run.pl +use_graphs=false # use graphs from srcdir +use_gselect=false # use gselect info from srcdir [regardless, we use + # Gaussian-selection info, we might have to compute it though.] +gselect=15 # Number of Gaussian-selection indices for SGMMs. +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +transform_dir= # directory to find fMLLR transforms in. +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: steps/align_sgmm.sh " + echo "e.g.: steps/align_sgmm.sh --transform-dir exp/tri3b data/train data/lang \\" + echo " exp/sgmm4a exp/sgmm5a_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --transform-dir # directory to find fMLLR transforms" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +oov=`cat $lang/oov.int` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +sdata=$data/split$nj + +mkdir -p $dir/log +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +[ -f $srcdir/final.alimdl ] && cp $srcdir/final.alimdl $dir +cp $srcdir/final.occs $dir; + +## Set up features. +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." && exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option during alignment." +fi +## + +## Set up model and alignment model. +mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; + +## Work out where we're getting the graphs from. 
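+# With --use-graphs true the fsts.*.gz from $srcdir are reused (so the number
+# of jobs must match); otherwise training graphs are compiled into $dir here.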
+if $use_graphs; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1; + graphdir=$srcdir + ln.pl $srcdir/fsts.*.gz $dir +else + graphdir=$dir + if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; + fi +fi + +## Work out where we're getting the Gaussian-selection info from +if $use_gselect; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-gselect true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/gselect.1.gz ] && echo "No gselect info in $srcdir" && exit 1; + graphdir=$srcdir + gselect_opt="--gselect=ark,s,cs:gunzip -c $srcdir/gselect.JOB.gz|" + ln.pl $srcdir/gselect.*.gz $dir +else + graphdir=$dir + if [ $stage -le 1 ]; then + echo "$0: computing Gaussian-selection info" + # Note: doesn't matter whether we use $alimdl or $mdl, they will + # have the same gselect info. + $cmd JOB=1:$nj $dir/log/gselect.JOB.log \ + sgmm-gselect --full-gmm-nbest=$gselect $alimdl \ + "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1; + fi + gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|" +fi + + +if [ $alimdl == $mdl ]; then + # Speaker-independent decoding-- just one pass. Not normal. + T=`sgmm-info $mdl | grep 'speaker vector space' | awk '{print $NF}'` || exit 1; + [ "$T" -ne 0 ] && echo "No alignment model, yet speaker vector space nonempty" && exit 1; + + if [ $stage -le 2 ]; then + echo "$0: aligning data in $data using model $mdl (no speaker-vectors)" + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + sgmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam $alimdl \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; + fi + echo "$0: done aligning data." + exit 0; +fi + +# Continue with system with speaker vectors. +if [ $stage -le 2 ]; then + echo "$0: aligning data in $data using model $alimdl" + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + sgmm-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam $alimdl \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 3 ]; then + echo "$0: computing speaker vectors (1st pass)" + $cmd JOB=1:$nj $dir/log/spk_vecs1.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + sgmm-post-to-gpost "$gselect_opt" $alimdl "$feats" ark:- ark:- \| \ + sgmm-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \ + $mdl "$feats" ark,s,cs:- ark:$dir/pre_vecs.JOB || exit 1; +fi + +if [ $stage -le 4 ]; then + echo "$0: computing speaker vectors (2nd pass)" + $cmd JOB=1:$nj $dir/log/spk_vecs2.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + sgmm-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" \ + --spk-vecs=ark:$dir/pre_vecs.JOB $mdl "$feats" ark,s,cs:- ark:$dir/vecs.JOB || exit 1; + rm $dir/pre_vecs.* +fi + +if [ $stage -le 5 ]; then + echo "$0: doing final alignment." 
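+  # Unlike pass 1, this pass uses the full model together with the per-speaker
+  # vectors estimated above (passed via --spk-vecs).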
+ $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \ + sgmm-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam \ + --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \ + $mdl "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +rm $dir/pre_ali.*.gz + +echo "$0: done aligning data." + +utils/summarize_warnings.pl $dir/log + +exit 0; diff --git a/tools/ASR_2ch_track/steps/align_sgmm2.sh b/tools/ASR_2ch_track/steps/align_sgmm2.sh new file mode 100644 index 0000000000000000000000000000000000000000..dec5d47a83661bb10bf9cdcb66f41045ba3a1d5f --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_sgmm2.sh @@ -0,0 +1,195 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Computes training alignments and (if needed) speaker-vectors, given an +# SGMM system. If the system is built on top of SAT, you should supply +# transforms with the --transform-dir option. + +# If you supply the --use-graphs option, it will use the training +# graphs from the source directory. + +# Begin configuration section. +stage=0 +nj=4 +cmd=run.pl +use_graphs=false # use graphs from srcdir +use_gselect=false # use gselect info from srcdir [regardless, we use + # Gaussian-selection info, we might have to compute it though.] +gselect=15 # Number of Gaussian-selection indices for SGMMs. +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +transform_dir= # directory to find fMLLR transforms in. +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: steps/align_sgmm.sh " + echo "e.g.: steps/align_sgmm.sh --transform-dir exp/tri3b data/train data/lang \\" + echo " exp/sgmm4a exp/sgmm5a_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --transform-dir # directory to find fMLLR transforms" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +oov=`cat $lang/oov.int` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +sdata=$data/split$nj + +mkdir -p $dir/log +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +[ -f $srcdir/final.alimdl ] && cp $srcdir/final.alimdl $dir +cp $srcdir/final.occs $dir; + +## Set up features. 
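+# The feature type is inferred from the source directory: LDA+MLLT features if
+# final.mat is present there, otherwise delta features.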
+if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." && exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option during alignment." +fi +## + +## Set up model and alignment model. +mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; + +## Work out where we're getting the graphs from. +if $use_graphs; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1; + graphdir=$srcdir + ln.pl $srcdir/fsts.*.gz $dir +else + graphdir=$dir + if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; + fi +fi + +## Work out where we're getting the Gaussian-selection info from +if $use_gselect; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-gselect true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/gselect.1.gz ] && echo "No gselect info in $srcdir" && exit 1; + graphdir=$srcdir + gselect_opt="--gselect=ark,s,cs:gunzip -c $srcdir/gselect.JOB.gz|" + ln.pl $srcdir/gselect.*.gz $dir +else + graphdir=$dir + if [ $stage -le 1 ]; then + echo "$0: computing Gaussian-selection info" + # Note: doesn't matter whether we use $alimdl or $mdl, they will + # have the same gselect info. + $cmd JOB=1:$nj $dir/log/gselect.JOB.log \ + sgmm2-gselect --full-gmm-nbest=$gselect $alimdl \ + "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1; + fi + gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|" +fi + + +if [ $alimdl == $mdl ]; then + # Speaker-independent decoding-- just one pass. Not normal. 
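+  # Sanity check: with no separate alignment model the speaker-vector dimension
+  # must be zero, i.e. the SGMM really is speaker-independent.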
+ T=`sgmm2-info $mdl | grep 'speaker vector space' | awk '{print $NF}'` || exit 1; + [ "$T" -ne 0 ] && echo "No alignment model, yet speaker vector space nonempty" && exit 1; + + if [ $stage -le 2 ]; then + echo "$0: aligning data in $data using model $mdl (no speaker-vectors)" + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + sgmm2-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam $alimdl \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; + fi + echo "$0: done aligning data." + exit 0; +fi + +# Continue with system with speaker vectors. +if [ $stage -le 2 ]; then + echo "$0: aligning data in $data using model $alimdl" + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + sgmm2-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam $alimdl \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 3 ]; then + echo "$0: computing speaker vectors (1st pass)" + $cmd JOB=1:$nj $dir/log/spk_vecs1.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + sgmm2-post-to-gpost "$gselect_opt" $alimdl "$feats" ark:- ark:- \| \ + sgmm2-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \ + $mdl "$feats" ark,s,cs:- ark:$dir/pre_vecs.JOB || exit 1; +fi + +if [ $stage -le 4 ]; then + echo "$0: computing speaker vectors (2nd pass)" + $cmd JOB=1:$nj $dir/log/spk_vecs2.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + sgmm2-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" \ + --spk-vecs=ark:$dir/pre_vecs.JOB $mdl "$feats" ark,s,cs:- ark:$dir/vecs.JOB || exit 1; + rm $dir/pre_vecs.* +fi + +if [ $stage -le 5 ]; then + echo "$0: doing final alignment." + $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \ + sgmm2-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam \ + --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \ + $mdl "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +rm $dir/pre_ali.*.gz + +echo "$0: done aligning data." + +utils/summarize_warnings.pl $dir/log + +exit 0; diff --git a/tools/ASR_2ch_track/steps/align_si.sh b/tools/ASR_2ch_track/steps/align_si.sh new file mode 100644 index 0000000000000000000000000000000000000000..ff53c773819e82f78d3b541cfdbf6f3050726730 --- /dev/null +++ b/tools/ASR_2ch_track/steps/align_si.sh @@ -0,0 +1,101 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Computes training alignments using a model with delta or +# LDA+MLLT features. + +# If you supply the "--use-graphs true" option, it will use the training +# graphs from the source directory (where the model is). In this +# case the number of jobs must match with the source directory. + + +# Begin configuration section. +nj=4 +cmd=run.pl +use_graphs=false +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +careful=false +boost_silence=1.0 # Factor by which to boost silence during alignment. +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. 
parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: steps/align_si.sh " + echo "e.g.: steps/align_si.sh data/train data/lang exp/tri1 exp/tri1_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + + +for f in $data/text $lang/oov.int $srcdir/tree $srcdir/final.mdl; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1; +done + +oov=`cat $lang/oov.int` || exit 1; +mkdir -p $dir/log +echo $nj > $dir/num_jobs +sdata=$data/split$nj +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` +cp $srcdir/delta_opts $dir 2>/dev/null + +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +cp $srcdir/final.occs $dir; + + + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $srcdir/full.mat $dir + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac + +echo "$0: aligning data in $data using model from $srcdir, putting alignments in $dir" + +mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/final.mdl - |" + +if $use_graphs; then + [ $nj != "`cat $srcdir/num_jobs`" ] && echo "$0: mismatch in num-jobs" && exit 1; + [ ! -f $srcdir/fsts.1.gz ] && echo "$0: no such file $srcdir/fsts.1.gz" && exit 1; + + $cmd JOB=1:$nj $dir/log/align.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" \ + "ark:gunzip -c $srcdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +else + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + # We could just use gmm-align in the next line, but it's less efficient as it compiles the + # training graphs one by one. + $cmd JOB=1:$nj $dir/log/align.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" ark:- \| \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" ark:- \ + "$feats" "ark,t:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +echo "$0: done aligning data." 
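+# Example invocation (illustrative only; the directory names are just those from
+# the usage message above, and --nj/--cmd values should match your setup):
+#   steps/align_si.sh --nj 20 --cmd "$train_cmd" data/train data/lang exp/tri1 exp/tri1_ali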
diff --git a/tools/ASR_2ch_track/steps/append_feats.sh b/tools/ASR_2ch_track/steps/append_feats.sh new file mode 100644 index 0000000000000000000000000000000000000000..abeee5aba23edfd87e2b433ec85a418615b34924 --- /dev/null +++ b/tools/ASR_2ch_track/steps/append_feats.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright 2014 Brno University of Technology (Author: Karel Vesely) +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 +# This script appends the features in two or more data directories. + +# To be run from .. (one directory up from here) +# see ../run.sh for example + +# Begin configuration section. +cmd=run.pl +nj=4 +length_tolerance=10 # length tolerance in frames (trim to shortest) +compress=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# -lt 5 ]; then + echo "usage: $0 [options] [] "; + echo "e.g.: $0 data/train_mfcc data/train_bottleneck data/train_combined exp/append_mfcc_plp mfcc" + echo "options: " + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data_src_arr=(${@:1:$(($#-3))}) #array of source data-dirs +data=${@: -3: 1} +logdir=${@: -2: 1} +ark_dir=${@: -1: 1} #last arg. + +data_src_first=${data_src_arr[0]} # get 1st src dir + +# make $ark_dir an absolute pathname. +ark_dir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $ark_dir ${PWD}` + +for data_src in ${data_src_arr[@]}; do + utils/split_data.sh $data_src $nj || exit 1; +done + +mkdir -p $ark_dir $logdir + +mkdir -p $data +cp $data_src_first/* $data/ 2>/dev/null # so we get the other files, such as utt2spk. +rm $data/cmvn.scp 2>/dev/null +rm $data/feats.scp 2>/dev/null + +# use "name" as part of name of the archive. +name=`basename $data` + +# get list of source scp's for pasting +data_src_args= +for data_src in ${data_src_arr[@]}; do + data_src_args="$data_src_args scp:$data_src/split$nj/JOB/feats.scp" +done + +for n in $(seq $nj); do + # the next command does nothing unless $arkdir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $arkdir/pasted_$name.$n.ark +done + +$cmd JOB=1:$nj $logdir/append.JOB.log \ + paste-feats --length-tolerance=$length_tolerance $data_src_args ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$ark_dir/pasted_$name.JOB.ark,$ark_dir/pasted_$name.JOB.scp || exit 1; + +# concatenate the .scp files together. 
+for ((n=1; n<=nj; n++)); do + cat $ark_dir/pasted_$name.$n.scp >> $data/feats.scp || exit 1; +done > $data/feats.scp || exit 1; + + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully processed ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi + +echo "Succeeded pasting features for $name into $data" diff --git a/tools/ASR_2ch_track/steps/cleanup/combine_short_segments.py b/tools/ASR_2ch_track/steps/cleanup/combine_short_segments.py new file mode 100644 index 0000000000000000000000000000000000000000..f51da6afa254ec30eaf1b57d36ed890e273a7b16 --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/combine_short_segments.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python + +# Copyright 2016 Vijayaditya Peddinti +# Apache 2.0 + +import argparse +import sys +import os +import subprocess +import errno +import copy +import shutil + +def GetArgs(): + # we add compulsary arguments as named arguments for readability + parser = argparse.ArgumentParser(description=""" + This script concatenates segments in the input_data_dir to ensure that""" + " the segments in the output_data_dir have a specified minimum length.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument("--minimum-duration", type=float, required = True, + help="Minimum duration of the segments in the output directory") + parser.add_argument("--input-data-dir", type=str, required = True) + parser.add_argument("--output-data-dir", type=str, required = True) + + print(' '.join(sys.argv)) + args = parser.parse_args() + return args + +def RunKaldiCommand(command, wait = True): + """ Runs commands frequently seen in Kaldi scripts. These are usually a + sequence of commands connected by pipes, so we use shell=True """ + p = subprocess.Popen(command, shell = True, + stdout = subprocess.PIPE, + stderr = subprocess.PIPE) + + if wait: + [stdout, stderr] = p.communicate() + if p.returncode is not 0: + raise Exception("There was an error while running the command {0}\n".format(command)+"-"*10+"\n"+stderr) + return stdout, stderr + else: + return p + +def MakeDir(dir): + try: + os.mkdir(dir) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise exc + raise Exception("Directory {0} already exists".format(dir)) + pass + +def CheckFiles(input_data_dir): + for file_name in ['spk2utt', 'text', 'utt2spk', 'feats.scp']: + file_name = '{0}/{1}'.format(input_data_dir, file_name) + if not os.path.exists(file_name): + raise Exception("There is no such file {0}".format(file_name)) + +def ParseFileToDict(file, assert2fields = False, value_processor = None): + if value_processor is None: + value_processor = lambda x: x[0] + + dict = {} + for line in open(file, 'r'): + parts = line.split() + if assert2fields: + assert(len(parts) == 2) + + dict[parts[0]] = value_processor(parts[1:]) + return dict + +def WriteDictToFile(dict, file_name): + file = open(file_name, 'w') + keys = dict.keys() + keys.sort() + for key in keys: + value = dict[key] + if type(value) in [list, tuple] : + if type(value) is tuple: + value = list(value) + value.sort() + value = ' '.join(value) + file.write('{0}\t{1}\n'.format(key, value)) + file.close() + + +def ParseDataDirInfo(data_dir): + data_dir_file = lambda file_name: '{0}/{1}'.format(data_dir, file_name) + + utt2spk = ParseFileToDict(data_dir_file('utt2spk')) + spk2utt = ParseFileToDict(data_dir_file('spk2utt'), value_processor = lambda x: x) + text = 
ParseFileToDict(data_dir_file('text'), value_processor = lambda x: " ".join(x)) + # we want to assert feats.scp has just 2 fields, as we don't know how + # to process it otherwise + feat = ParseFileToDict(data_dir_file('feats.scp'), assert2fields = True) + utt2dur = ParseFileToDict(data_dir_file('utt2dur'), value_processor = lambda x: float(x[0])) + utt2uniq = None + if os.path.exists(data_dir_file('utt2uniq')): + utt2uniq = ParseFileToDict(data_dir_file('utt2uniq')) + return utt2spk, spk2utt, text, feat, utt2dur, utt2uniq + + +def GetCombinedUttIndexRange(utt_index, utts, utt_durs, minimum_duration): + # We want the minimum number of concatenations + # to reach the minimum_duration. If two concatenations satisfy + # the minimum duration constraint we choose the shorter one. + left_index = utt_index - 1 + right_index = utt_index + 1 + num_remaining_segments = len(utts) - 1 + cur_utt_dur = utt_durs[utts[utt_index]] + + while num_remaining_segments > 0: + + left_utt_dur = 0 + if left_index >= 0: + left_utt_dur = utt_durs[utts[left_index]] + right_utt_dur = 0 + if right_index <= len(utts) - 1: + right_utt_dur = utt_durs[utts[right_index]] + + right_combined_utt_dur = cur_utt_dur + right_utt_dur + left_combined_utt_dur = cur_utt_dur + left_utt_dur + left_right_combined_utt_dur = cur_utt_dur + left_utt_dur + right_utt_dur + + combine_left_exit = False + combine_right_exit = False + if right_combined_utt_dur >= minimum_duration: + if left_combined_utt_dur >= minimum_duration: + if left_combined_utt_dur <= right_combined_utt_dur: + combine_left_exit = True + else: + combine_right_exit = True + else: + combine_right_exit = True + elif left_combined_utt_dur >= minimum_duration: + combine_left_exit = True + elif left_right_combined_utt_dur >= minimum_duration : + combine_left_exit = True + combine_right_exit = True + + if combine_left_exit and combine_right_exit: + cur_utt_dur = left_right_combined_utt_dur + break + elif combine_left_exit: + cur_utt_dur = left_combined_utt_dur + # move back the right_index as we don't need to combine it + right_index = right_index - 1 + break + elif combine_right_exit: + cur_utt_dur = right_combined_utt_dur + # move back the left_index as we don't need to combine it + left_index = left_index + 1 + break + + # couldn't satisfy minimum duration requirement so continue search + if left_index >= 0: + num_remaining_segments = num_remaining_segments - 1 + if right_index <= len(utts) - 1: + num_remaining_segments = num_remaining_segments - 1 + + left_index = left_index - 1 + right_index = right_index + 1 + + cur_utt_dur = left_right_combined_utt_dur + left_index = max(0, left_index) + right_index = min(len(utts)-1, right_index) + return left_index, right_index, cur_utt_dur + + +def WriteCombinedDirFiles(output_dir, utt2spk, spk2utt, text, feat, utt2dur, utt2uniq): + out_dir_file = lambda file_name: '{0}/{1}'.format(output_dir, file_name) + total_combined_utt_list = [] + for speaker in spk2utt.keys(): + utts = spk2utt[speaker] + for utt in utts: + if type(utt) is tuple: + #this is a combined utt + total_combined_utt_list.append((speaker, utt)) + + for speaker, combined_utt_tuple in total_combined_utt_list: + combined_utt_list = list(combined_utt_tuple) + combined_utt_list.sort() + new_utt_name = "-".join(combined_utt_list)+'-appended' + + # updating the utt2spk dict + for utt in combined_utt_list: + spk_name = utt2spk.pop(utt) + utt2spk[new_utt_name] = spk_name + + # updating the spk2utt dict + spk2utt[speaker].remove(combined_utt_tuple) + 
spk2utt[speaker].append(new_utt_name) + + # updating the text dict + combined_text = [] + for utt in combined_utt_list: + combined_text.append(text.pop(utt)) + text[new_utt_name] = ' '.join(combined_text) + + # updating the feat dict + combined_feat = [] + for utt in combined_utt_list: + combined_feat.append(feat.pop(utt)) + feat_command = "concat-feats --print-args=false {feats} - |".format(feats = " ".join(combined_feat)) + feat[new_utt_name] = feat_command + + # updating utt2dur + combined_dur = 0 + for utt in combined_utt_list: + combined_dur += utt2dur.pop(utt) + utt2dur[new_utt_name] = combined_dur + + # updating utt2uniq + if utt2uniq is not None: + combined_uniqs = [] + for utt in combined_utt_list: + combined_uniqs.append(utt2uniq.pop(utt)) + # utt2uniq file is used to map perturbed data to original unperturbed + # versions so that the training cross validation sets can avoid overlap + # of data however if perturbation changes the length of the utterance + # (e.g. speed perturbation) the utterance combinations in each + # perturbation of the original recording can be very different. So there + # is no good way to find the utt2uniq mappinng so that we can avoid + # overlap. + utt2uniq[new_utt_name] = combined_uniqs[0] + + + WriteDictToFile(utt2spk, out_dir_file('utt2spk')) + WriteDictToFile(spk2utt, out_dir_file('spk2utt')) + WriteDictToFile(feat, out_dir_file('feats.scp')) + WriteDictToFile(text, out_dir_file('text')) + if utt2uniq is not None: + WriteDictToFile(utt2uniq, out_dir_file('utt2uniq')) + WriteDictToFile(utt2dur, out_dir_file('utt2dur')) + + +def CombineSegments(input_dir, output_dir, minimum_duration): + utt2spk, spk2utt, text, feat, utt2dur, utt2uniq = ParseDataDirInfo(input_dir) + total_combined_utt_list = [] + + # copy the duration dictionary so that we can modify it + utt_durs = copy.deepcopy(utt2dur) + speakers = spk2utt.keys() + speakers.sort() + for speaker in speakers: + + utts = spk2utt[speaker] # this is an assignment of the reference + # In WriteCombinedDirFiles the values of spk2utt will have the list + # of combined utts which will be used as reference + + # we make an assumption that the sorted uttlist corresponds + # to contiguous segments. This is true only if utt naming + # is done according to accepted conventions + # this is an easily violatable assumption. Have to think of a better + # way to do this. 
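+        # Illustrative (hypothetical durations, --minimum-duration 3.0):
+        #   utt1 3.5s, utt2 1.0s, utt3 0.8s, utt4 4.0s
+        # utt2 is too short; merging it with utt1 (4.5s) already satisfies the
+        # constraint, so (utt1, utt2) is combined.  utt3 is also too short; of
+        # its two options, (utt3, utt4) at 4.8s is shorter than the 5.3s it
+        # would take with the merged pair on its left, so (utt3, utt4) is combined.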
+ utts.sort() + utt_index = 0 + while utt_index < len(utts): + if utt_durs[utts[utt_index]] < minimum_duration: + left_index, right_index, cur_utt_dur = GetCombinedUttIndexRange(utt_index, utts, utt_durs, minimum_duration) + if not cur_utt_dur >= minimum_duration: + # this is a rare occurrence, better make the user aware of this + # situation and let them deal with it + raise Exception('Speaker {0} does not have enough utterances to satisfy the minimum duration constraint'.format(speaker)) + + combined_duration = 0 + combined_utts = [] + # update the utts_dur dictionary + for utt in utts[left_index:right_index + 1]: + combined_duration += utt_durs.pop(utt) + if type(utt) is tuple: + for item in utt: + combined_utts.append(item) + else: + combined_utts.append(utt) + combined_utts = tuple(combined_utts) # converting to immutable type to use as dictionary key + assert(cur_utt_dur == combined_duration) + + # now modify the utts list + combined_indices = range(left_index, right_index + 1) + # start popping from the largest index so that the lower + # indexes are valid + for i in combined_indices[::-1]: + utts.pop(i) + utts.insert(left_index, combined_utts) + utt_durs[combined_utts] = combined_duration + utt_index = left_index + utt_index = utt_index + 1 + WriteCombinedDirFiles(output_dir, utt2spk, spk2utt, text, feat, utt2dur, utt2uniq) + +def Main(): + args = GetArgs() + + CheckFiles(args.input_data_dir) + MakeDir(args.output_data_dir) + feat_lengths = {} + segments_file = '{0}/segments'.format(args.input_data_dir) + + RunKaldiCommand("utils/data/get_utt2dur.sh {0}".format(args.input_data_dir)) + + CombineSegments(args.input_data_dir, args.output_data_dir, args.minimum_duration) + + RunKaldiCommand("utils/utt2spk_to_spk2utt.pl {od}/utt2spk > {od}/spk2utt".format(od = args.output_data_dir)) + if os.path.exists('{0}/cmvn.scp'.format(args.input_data_dir)): + shutil.copy('{0}/cmvn.scp'.format(args.input_data_dir), args.output_data_dir) + + RunKaldiCommand("utils/fix_data_dir.sh {0}".format(args.output_data_dir)) +if __name__ == "__main__": + Main() + + diff --git a/tools/ASR_2ch_track/steps/cleanup/create_segments_from_ctm.pl b/tools/ASR_2ch_track/steps/cleanup/create_segments_from_ctm.pl new file mode 100644 index 0000000000000000000000000000000000000000..5af5fd346628b27b23bb506aafdb22b23a352fbd --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/create_segments_from_ctm.pl @@ -0,0 +1,471 @@ +#!/usr/bin/env perl + +# Copyright 2014 Guoguo Chen; 2015 Nagendra Kumar Goel +# Apache 2.0 + +use strict; +use warnings; +use Getopt::Long; + +# $SIG{__WARN__} = sub { $DB::single = 1 }; + +my $Usage = <<'EOU'; +This script creates the segments file and text file for a data directory with +new segmentation. It takes a ctm file and an "alignment" file. The ctm file +corresponds to the audio that we want to make segmentations for, and is created +by decoding the audio using existing in-domain models. The "alignment" file is +generated by the binary align-text, and is Levenshtein alignment between the +original transcript and the decoded output. + +Internally, the script first tries to find silence regions (gaps in the CTM). +If a silence region is found, and the neighboring words are free of errors +according to the alignment file, then this silence region will be taken as +a split point, and new segment will be created. If the new segment we are going +to output is too long (longer than --max-seg-length), the script will split +the long segments into smaller pieces with length roughly --max-seg-length. 
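+
+For example (hypothetical times): if the CTM has one word ending at 7.10 sec
+and the next word starting at 7.80 sec, and the alignment marks the words on
+both sides of that 0.70 sec gap as correct, the gap can be used as a split
+point, provided it is at least --min-sil-length long and the resulting
+segments respect --min-seg-length and --max-seg-length.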
+If you are going to use --wer-cutoff to filter out segments with high WER, make +sure you set it to a reasonable value. If the value you set is higher than the +WER from your alignment file, then most of the segments will be filtered out. + +Usage: steps/cleanup/create_segments_from_ctm.pl [options] \ + + e.g.: steps/cleanup/create_segments_from_ctm.pl \ + train_si284_split.ctm train_si284_split.aligned.txt \ + data/train_si284_reseg/segments data/train_si284_reseg/text + +Allowed options: + --max-seg-length : Maximum length of new segments (default = 10.0) + --min-seg-length : Minimum length of new segments (default = 2.0) + --min-sil-length : Minimum length of silence as split point (default = 0.5) + --separator : Separator for aligned pairs (default = ";") + --special-symbol : Special symbol to aligned with inserted or deleted words + (default = "<***>") + --wer-cutoff : Ignore segments with WER higher than the specified value. + -1 means no segment will be ignored. (default = -1) + --use-silence-midpoints : Set to 1 if you want to use silence midpoints + instead of min_sil_length for silence overhang.(default 0) + --force-correct-boundary-words : Set to zero if the segments will not be + required to have boundary words to be correct. Default 1 + --aligned-ctm-filename : If set, the intermediate aligned ctm + is saved to this file +EOU + +my $max_seg_length = 10.0; +my $min_seg_length = 2.0; +my $min_sil_length = 0.5; +my $separator = ";"; +my $special_symbol = "<***>"; +my $wer_cutoff = -1; +my $use_silence_midpoints = 0; +my $force_correct_boundary_words = 1; +my $aligned_ctm_filename = ""; +GetOptions( + 'wer-cutoff=f' => \$wer_cutoff, + 'max-seg-length=f' => \$max_seg_length, + 'min-seg-length=f' => \$min_seg_length, + 'min-sil-length=f' => \$min_sil_length, + 'use-silence-midpoints=f' => \$use_silence_midpoints, + 'force-correct-boundary-words=f' => \$force_correct_boundary_words, + 'aligned-ctm-filename=s' => \$aligned_ctm_filename, + 'separator=s' => \$separator, + 'special-symbol=s' => \$special_symbol); + +if (@ARGV != 4) { + die $Usage; +} + +my ($ctm_in, $align_in, $segments_out, $text_out) = @ARGV; + +open(CI, "<$ctm_in") || die "Error: fail to open $ctm_in\n"; +open(AI, "<$align_in") || die "Error: fail to open $align_in\n"; +open(my $SO, ">$segments_out") || die "Error: fail to open $segments_out\n"; +open(my $TO, ">$text_out") || die "Error: fail to open $text_out\n"; +my $ACT= undef; +if ($aligned_ctm_filename ne "") { + open($ACT, ">$aligned_ctm_filename"); +} +# Prints the current segment to file. +sub PrintSegment { + my ($aligned_ctm, $wav_id, $min_sil_length, $min_seg_length, + $seg_start_index, $seg_end_index, $seg_count, $SO, $TO) = @_; + + if ($seg_start_index > $seg_end_index) { + return -1; + } + + # Removes the surrounding silence. + while ($seg_start_index < scalar(@{$aligned_ctm}) && + $aligned_ctm->[$seg_start_index]->[0] eq "") { + $seg_start_index += 1; + } + while ($seg_end_index >= 0 && + $aligned_ctm->[$seg_end_index]->[0] eq "") { + $seg_end_index -= 1; + } + if ($seg_start_index > $seg_end_index) { + return -1; + } + + # Filters out segments with high WER. + if ($wer_cutoff != -1) { + my $num_errors = 0; my $num_words = 0; + for (my $i = $seg_start_index; $i <= $seg_end_index; $i += 1) { + if ($aligned_ctm->[$i]->[0] ne "") { + $num_words += 1; + } + $num_errors += $aligned_ctm->[$i]->[3]; + } + if ($num_errors / $num_words > $wer_cutoff || $num_words < 1) { + return -1; + } + } + + # Works out the surrounding silence. 
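+  # The segment is widened into the neighbouring silence by at most half of
+  # that silence; unless --use-silence-midpoints is set (and no deletion
+  # borders the segment), the padding is further capped at min_sil_length / 2.
+  # E.g. (hypothetical) with 0.8 sec of silence before the first word and
+  # min_sil_length = 0.5, the segment start is moved 0.25 sec into the silence.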
+ my $index = $seg_start_index - 1; + while ($index >= 0 && $aligned_ctm->[$index]->[0] eq + "" && $aligned_ctm->[$index]->[3] == 0) { + $index -= 1; + } + my $left_of_segment_has_deletion = "false"; + $left_of_segment_has_deletion = "true" + if ($index > 0 && $aligned_ctm->[$index-1]->[0] ne "" + && $aligned_ctm->[$index-1]->[3] == 0); + + my $pad_start_sil = ($aligned_ctm->[$seg_start_index]->[1] - + $aligned_ctm->[$index + 1]->[1]) / 2.0; + if (($left_of_segment_has_deletion eq "true") || !$use_silence_midpoints) { + if ($pad_start_sil > $min_sil_length / 2.0) { + $pad_start_sil = $min_sil_length / 2.0; + } + } + my $right_of_segment_has_deletion = "false"; + $index = $seg_end_index + 1; + while ($index < scalar(@{$aligned_ctm}) && + $aligned_ctm->[$index]->[0] eq "" && + $aligned_ctm->[$index]->[3] == 0) { + $index += 1; + } + $right_of_segment_has_deletion = "true" + if ($index < scalar(@{$aligned_ctm})-1 && $aligned_ctm->[$index+1]->[0] ne + "" && $aligned_ctm->[$index - 1]->[3] > 0); + my $pad_end_sil = ($aligned_ctm->[$index - 1]->[1] + + $aligned_ctm->[$index - 1]->[2] - + $aligned_ctm->[$seg_end_index]->[1] - + $aligned_ctm->[$seg_end_index]->[2]) / 2.0; + if (($right_of_segment_has_deletion eq "true") || !$use_silence_midpoints) { + if ($pad_end_sil > $min_sil_length / 2.0) { + $pad_end_sil = $min_sil_length / 2.0; + } + } + + my $seg_start = $aligned_ctm->[$seg_start_index]->[1] - $pad_start_sil; + my $seg_end = $aligned_ctm->[$seg_end_index]->[1] + + $aligned_ctm->[$seg_end_index]->[2] + $pad_end_sil; + if ($seg_end - $seg_start < $min_seg_length) { + return -1; + } + + $seg_start = sprintf("%.2f", $seg_start); + $seg_end = sprintf("%.2f", $seg_end); + my $seg_id = $wav_id . "_" . sprintf("%05d", $seg_count); + print $SO "$seg_id $wav_id $seg_start $seg_end\n"; + + print $TO "$seg_id "; + for (my $x = $seg_start_index; $x <= $seg_end_index; $x += 1) { + if ($aligned_ctm->[$x]->[0] ne "") { + print $TO "$aligned_ctm->[$x]->[0] "; + } + } + print $TO "\n"; + return 0; +} + +# Computes split point. +sub GetSplitPoint { + my ($aligned_ctm, $seg_start_index, $seg_end_index, $max_seg_length) = @_; + + # Scan in the reversed order so we can maximize the length. + my $split_point = $seg_start_index; + for (my $x = $seg_end_index; $x > $seg_start_index; $x -= 1) { + my $current_seg_length = $aligned_ctm->[$x]->[1] + + $aligned_ctm->[$x]->[2] - + $aligned_ctm->[$seg_start_index]->[1]; + if ($current_seg_length <= $max_seg_length) { + $split_point = $x; + last; + } + } + return $split_point; +} + +# Computes segment length without surrounding silence. +sub GetSegmentLengthNoSil { + my ($aligned_ctm, $seg_start_index, $seg_end_index) = @_; + while ($seg_start_index < scalar(@{$aligned_ctm}) && + $aligned_ctm->[$seg_start_index]->[0] eq "") { + $seg_start_index += 1; + } + while ($seg_end_index >= 0 && + $aligned_ctm->[$seg_end_index]->[0] eq "") { + $seg_end_index -= 1; + } + if ($seg_start_index > $seg_end_index) { + return 0; + } + my $current_seg_length = $aligned_ctm->[$seg_end_index]->[1] + + $aligned_ctm->[$seg_end_index]->[2] - + $aligned_ctm->[$seg_start_index]->[1]; + return $current_seg_length; +} + +# Force splits long segments. +sub SplitLongSegment { + my ($aligned_ctm, $wav_id, $max_seg_length, $min_sil_length, + $seg_start_index, $seg_end_index, $current_seg_count, $SO, $TO) = @_; + # If the segment is too long, we manually split it. We make sure that the + # resulting segments are at least ($max_seg_length / 2) seconds long. 
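+  # E.g. (hypothetical) a 27 sec stretch with --max-seg-length 10 comes out as
+  # roughly 10 + 10 + 7 sec pieces; a remainder between max_seg_length and
+  # 1.5 * max_seg_length is instead split in half, so no piece ends up shorter
+  # than about max_seg_length / 2.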
+ my $current_seg_length = $aligned_ctm->[$seg_end_index]->[1] + + $aligned_ctm->[$seg_end_index]->[2] - + $aligned_ctm->[$seg_start_index]->[1]; + my $current_seg_index = $seg_start_index; + my $aligned_ctm_size = keys($aligned_ctm); + while ($current_seg_length > 1.5 * $max_seg_length && $current_seg_index < $aligned_ctm_size-1) { + my $split_point = GetSplitPoint($aligned_ctm, $current_seg_index, + $seg_end_index, $max_seg_length); + my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length, + $min_seg_length, $current_seg_index, $split_point, + $current_seg_count, $SO, $TO); + $current_seg_count += 1 if ($ans != -1); + $current_seg_index = $split_point + 1; + $current_seg_length = $aligned_ctm->[$seg_end_index]->[1] + + $aligned_ctm->[$seg_end_index]->[2] - + $aligned_ctm->[$current_seg_index]->[1]; + } + + if ($current_seg_index eq $aligned_ctm_size-1) { + my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length, + $min_seg_length, $current_seg_index, $current_seg_index, + $current_seg_count, $SO, $TO); + $current_seg_count += 1 if ($ans != -1); + return ($current_seg_count, $current_seg_index); + } + + if ($current_seg_length > $max_seg_length) { + my $split_point = GetSplitPoint($aligned_ctm, $current_seg_index, + $seg_end_index, + $current_seg_length / 2.0 + 0.01); + my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length, + $min_seg_length, $current_seg_index, $split_point, + $current_seg_count, $SO, $TO); + $current_seg_count += 1 if ($ans != -1); + $current_seg_index = $split_point + 1; + } + + my $split_point = GetSplitPoint($aligned_ctm, $current_seg_index, + $seg_end_index, $max_seg_length + 0.01); + my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length, + $min_seg_length, $current_seg_index, $split_point, + $current_seg_count, $SO, $TO); + $current_seg_count += 1 if ($ans != -1); + $current_seg_index = $split_point + 1; + + return ($current_seg_count, $current_seg_index); +} + +# Processes each wav file. +sub ProcessWav { + my ($max_seg_length, $min_seg_length, $min_sil_length, $special_symbol, + $current_ctm, $current_align, $SO, $TO, $ACT) = @_; + + my $wav_id = $current_ctm->[0]->[0]; + my $channel_id = $current_ctm->[0]->[1]; + defined($wav_id) || die "Error: empty wav section\n"; + + # First, we have to align the ctm file to the Levenshtein alignment. + # @aligned_ctm is a list of the following: + # [word, start_time, duration, num_errors] + my $ctm_index = 0; + my @aligned_ctm = (); + foreach my $entry (@{$current_align}) { + my $ref_word = $entry->[0]; + my $hyp_word = $entry->[1]; + if ($hyp_word eq $special_symbol) { + # Case 1: deletion, $hyp does not correspond to a word in the ctm file. + my $start = 0.0; my $dur = 0.0; + if (defined($aligned_ctm[-1])) { + $start = $aligned_ctm[-1]->[1] + $aligned_ctm[-1]->[2]; + } + push(@aligned_ctm, [$ref_word, $start, $dur, 1]); + } else { + # Case 2: non-deletion, now $hyp corresponds to a word in ctm file. + while ($current_ctm->[$ctm_index]->[4] eq "") { + # Case 2.1: ctm contains silence at the corresponding place. 
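+        # (hypothetical) 0.30 sec of silence starting at 3.62 sec is stored as
+        # a word-less entry with zero errors: ["", 3.62, 0.30, 0].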
+ push(@aligned_ctm, ["", $current_ctm->[$ctm_index]->[2], + $current_ctm->[$ctm_index]->[3], 0]); + $ctm_index += 1; + } + my $ctm_word = $current_ctm->[$ctm_index]->[4]; + $hyp_word eq $ctm_word || + die "Error: got word $hyp_word in alignment but $ctm_word in ctm\n"; + my $start = $current_ctm->[$ctm_index]->[2]; + my $dur = $current_ctm->[$ctm_index]->[3]; + if ($ref_word ne $ctm_word) { + if ($ref_word eq $special_symbol) { + # Case 2.2: insertion, we propagate the duration and error to the + # previous one. + if (defined($aligned_ctm[-1])) { + $aligned_ctm[-1]->[2] += $dur; + $aligned_ctm[-1]->[3] += 1; + } else { + push(@aligned_ctm, ["", $start, $dur, 1]); + } + } else { + # Case 2.3: substitution. + push(@aligned_ctm, [$ref_word, $start, $dur, 1]); + } + } else { + # Case 2.4: correct. + push(@aligned_ctm, [$ref_word, $start, $dur, 0]); + } + $ctm_index += 1; + } + } + + # Save the aligned CTM if needed + if(defined($ACT)){ + for (my $i = 0; $i <= $#aligned_ctm; $i++) { + print $ACT "$wav_id $channel_id $aligned_ctm[$i][1] $aligned_ctm[$i][2] "; + print $ACT "$aligned_ctm[$i][0] $aligned_ctm[$i][3]\n"; + } + } + + # Second, we create segments from @align_ctm, using simple greedy method. + my $current_seg_index = 0; + my $current_seg_count = 0; + for (my $x = 0; $x < @aligned_ctm; $x += 1) { + my $lcorrect = "true"; my $rcorrect = "true"; + $lcorrect = "false" if ($x > 0 && $aligned_ctm[$x - 1]->[3] > 0); + $rcorrect = "false" if ($x < @aligned_ctm - 1 && + $aligned_ctm[$x + 1]->[3] > 0); + + my $current_seg_length = GetSegmentLengthNoSil(\@aligned_ctm, + $current_seg_index, $x); + + # We split the audio, if the silence is longer than the requested silence + # length, and if there are no alignment error around it. We also make sure + # that segment contains actual words, instead of pure silence. + if ($aligned_ctm[$x]->[0] eq "" && + $aligned_ctm[$x]->[2] >= $min_sil_length + && (($force_correct_boundary_words && $lcorrect eq "true" && + $rcorrect eq "true") || !$force_correct_boundary_words)) { + if ($current_seg_length <= $max_seg_length && + $current_seg_length >= $min_seg_length) { + my $ans = PrintSegment(\@aligned_ctm, $wav_id, $min_sil_length, + $min_seg_length, $current_seg_index, $x, + $current_seg_count, $SO, $TO); + $current_seg_count += 1 if ($ans != -1); + $current_seg_index = $x + 1; + } elsif ($current_seg_length > $max_seg_length) { + ($current_seg_count, $current_seg_index) + = SplitLongSegment(\@aligned_ctm, $wav_id, $max_seg_length, + $min_sil_length, $current_seg_index, $x, + $current_seg_count, $SO, $TO); + } + } + } + + # Last segment. + if ($current_seg_index <= @aligned_ctm - 1) { + SplitLongSegment(\@aligned_ctm, $wav_id, $max_seg_length, $min_sil_length, + $current_seg_index, @aligned_ctm - 1, + $current_seg_count, $SO, $TO); + } +} + +# Insert as silence so the down stream process will be easier. 
Example: +# +# Input ctm: +# 011 A 3.39 0.23 SELL +# 011 A 3.62 0.18 OFF +# 011 A 3.83 0.45 ASSETS +# +# Output ctm: +# 011 A 3.39 0.23 SELL +# 011 A 3.62 0.18 OFF +# 011 A 3.80 0.03 +# 011 A 3.83 0.45 ASSETS +sub InsertSilence { + my ($ctm_in, $ctm_out) = @_; + for (my $x = 1; $x < @{$ctm_in}; $x += 1) { + push(@{$ctm_out}, $ctm_in->[$x - 1]); + + my $new_start = sprintf("%.2f", + $ctm_in->[$x - 1]->[2] + $ctm_in->[$x - 1]->[3]); + if ($new_start < $ctm_in->[$x]->[2]) { + my $new_dur = sprintf("%.2f", $ctm_in->[$x]->[2] - $new_start); + push(@{$ctm_out}, [$ctm_in->[$x - 1]->[0], $ctm_in->[$x - 1]->[1], + $new_start, $new_dur, ""]); + } + } + push(@{$ctm_out}, $ctm_in->[@{$ctm_in} - 1]); +} + +# Reads the alignment. +my %aligned = (); +while () { + chomp; + my @col = split; + @col >= 2 || die "Error: bad line $_\n"; + my $wav = shift @col; + my @pairs = split(" $separator ", join(" ", @col)); + for (my $x = 0; $x < @pairs; $x += 1) { + my @col1 = split(" ", $pairs[$x]); + @col1 == 2 || die "Error: bad pair $pairs[$x]\n"; + $pairs[$x] = \@col1; + } + ! defined($aligned{$wav}) || die "Error: $wav has already been processed\n"; + $aligned{$wav} = \@pairs; +} + +# Reads the ctm file and creates the segmentation. +my $previous_wav_id = ""; +my $previous_channel_id = ""; +my @current_wav = (); +while () { + chomp; + my @col = split; + @col >= 5 || die "Error: bad line $_\n"; + if ($previous_wav_id eq $col[0] && $previous_channel_id eq $col[1]) { + push(@current_wav, \@col); + } else { + if (@current_wav > 0) { + defined($aligned{$previous_wav_id}) || + die "Error: no alignment info for $previous_wav_id\n"; + my @current_wav_silence = (); + InsertSilence(\@current_wav, \@current_wav_silence); + ProcessWav($max_seg_length, $min_seg_length, $min_sil_length, + $special_symbol, \@current_wav_silence, + $aligned{$previous_wav_id}, $SO, $TO, $ACT); + } + @current_wav = (); + push(@current_wav, \@col); + $previous_wav_id = $col[0]; + $previous_channel_id = $col[1]; + } +} + +# The last wav file. +if (@current_wav > 0) { + defined($aligned{$previous_wav_id}) || + die "Error: no alignment info for $previous_wav_id\n"; + my @current_wav_silence = (); + InsertSilence(\@current_wav, \@current_wav_silence); + ProcessWav($max_seg_length, $min_seg_length, $min_sil_length, $special_symbol, + \@current_wav_silence, $aligned{$previous_wav_id}, $SO, $TO, $ACT); +} + +close(CI); +close(AI); +close($SO); +close($TO); +close($ACT) if defined($ACT); diff --git a/tools/ASR_2ch_track/steps/cleanup/debug_lexicon.sh b/tools/ASR_2ch_track/steps/cleanup/debug_lexicon.sh new file mode 100644 index 0000000000000000000000000000000000000000..cdf1ff3e5df74f48f34f46bfa742e443fde97a1e --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/debug_lexicon.sh @@ -0,0 +1,200 @@ +#!/bin/bash +# Copyright 2014 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# this script gets some stats that will help you debug the lexicon. + +# Begin configuration section. +stage=1 +remove_stress=false +nj=10 # number of jobs for various decoding-type things that we run. +cmd=run.pl +alidir= +# End configuration section + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 5 ]; then + echo "usage: $0 " + echo "e.g.: $0 data/train data/lang exp/tri4b data/local/dict/lexicon.txt exp/debug_lexicon" + echo "main options (for others, see top of script file)" + echo " --nj # number of parallel jobs" + echo " --cmd # command to run jobs, e.g. 
run.pl,queue.pl" + echo " --stage # use to control partial reruns." + echo " --remove-stress # if true, remove stress before printing analysis" + echo " # note: if you change this, you only have to rerun" + echo " # from stage 10." + echo " --alidir # if supplied, training-data alignments and transforms" + echo " # are obtained from here instead of being generated." + exit 1; +fi + +data=$1 +lang=$2 +src=$3 +srcdict=$4 +dir=$5 + +set -e + +for f in $data/feats.scp $lang/phones.txt $src/final.mdl $srcdict; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1; +done + +if [ -z $alidir ]; then + alidir=${src}_ali_$(basename $data) + if [ $stage -le 1 ]; then + steps/align_fmllr.sh --cmd "$cmd" --nj $nj $data $lang $src $alidir + fi +fi + +phone_lang=data/$(basename $lang)_phone_bg + +if [ $stage -le 2 ]; then + utils/make_phone_bigram_lang.sh $lang $alidir $phone_lang +fi + +if [ $stage -le 3 ]; then + utils/mkgraph.sh $phone_lang $src $src/graph_phone_bg +fi + +if [ $stage -le 4 ]; then + steps/decode_si.sh --skip-scoring true \ + --cmd "$cmd" --nj $nj --transform-dir $alidir \ + --acwt 0.25 --beam 10.0 --lattice-beam 5.0 --max-active 2500 \ + $src/graph_phone_bg $data $src/decode_$(basename $data)_phone_bg +fi + +if [ $stage -le 5 ]; then + steps/get_train_ctm.sh --print-silence true --use-segments false \ + --cmd "$cmd" $data $lang $alidir +fi + +if [ $stage -le 6 ]; then + steps/get_ctm.sh --use-segments false --cmd "$cmd" --min-lmwt 3 --max-lmwt 8 \ + $data $phone_lang $src/decode_$(basename $data)_phone_bg +fi + +if [ $stage -le 7 ]; then + mkdir -p $dir + # lmwt=4 corresponds to the scale we decoded at. + cp $src/decode_$(basename $data)_phone_bg/score_4/$(basename $data).ctm $dir/phone.ctm + + cp $alidir/ctm $dir/word.ctm +fi + +if [ $stage -le 8 ]; then +# we'll use 'sort' to do most of the heavy lifting when processing the data. +# suppose word.ctm has an entry like +# sw02054 A 213.32 0.24 and +# we'll convert it into two entries like this, with the start and end separately: +# sw02054-A 0021332 START and +# sw02054-A 0021356 END and +# +# and suppose phone.ctm has lines like +# sw02054 A 213.09 0.24 sil +# sw02054 A 213.33 0.13 ae_B +# we'll convert them into lines where the time is derived the midpoint of the phone, like +# sw02054 A 0021321 PHONE sil +# sw02054 A 0021340 PHONE ae_B +# and then we'll remove the optional-silence phones and, if needed, the word-boundary markers from +# the phones, to get just +# sw02054 A 0021340 PHONE ae +# then after sorting and merge-sorting the two ctm files we can easily +# work out for each word, what the phones were during that time. 
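+# For instance (hypothetical phone times), after the merge-sort the stream
+# could contain
+#   sw02054-A 0021332 START and
+#   sw02054-A 0021340 PHONE ae
+#   sw02054-A 0021347 PHONE n
+#   sw02054-A 0021352 PHONE d
+#   sw02054-A 0021356 END and
+# so the perl snippet in stage 9 can read off "and ae n d" as one observed
+# pronunciation of "and".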
+ + grep -v '' $phone_lang/phones.txt | awk '{print $1, $1}' | \ + sed 's/_B$//' | sed 's/_I$//' | sed 's/_E$//' | sed 's/_S$//' >$dir/phone_map.txt + + cat $dir/phone.ctm | utils/apply_map.pl -f 5 $dir/phone_map.txt > $dir/phone_text.ctm > $dir/phone_mapped.ctm + + export LC_ALL=C + + cat $dir/word.ctm | awk '{printf("%s-%s %09d START %s\n", $1, $2, 100*$3, $5); printf("%s-%s %09d END %s\n", $1, $2, 100*($3+$4), $5);}' | \ + sort >$dir/word_processed.ctm + + cat $dir/phone_mapped.ctm | awk '{printf("%s-%s %09d PHONE %s\n", $1, $2, 100*($3+(0.5*$4)), $5);}' | \ + sort >$dir/phone_processed.ctm + + # merge-sort both ctm's + sort -m $dir/word_processed.ctm $dir/phone_processed.ctm > $dir/combined.ctm + +fi + +if [ $stage -le 9 ]; then + awk '{print $3, $4}' $dir/combined.ctm | \ + perl -e ' while (<>) { chop; @A = split(" ", $_); ($a,$b) = @A; + if ($a eq "START") { $cur_word = $b; @phones = (); } + if ($a eq "END") { print $cur_word, " ", join(" ", @phones), "\n"; } + if ($a eq "PHONE") { push @phones, $b; }} ' | sort | uniq -c | sort -nr > $dir/prons.txt +fi + +if [ $stage -le 10 ]; then + if $remove_stress; then + perl -e 'while(<>) { @A=split(" ", $_); for ($n=1;$n<@A;$n++) { $A[$n] =~ s/[0-9]$//; } print join(" ", @A) . "\n"; } ' \ + <$srcdict >$dir/lexicon.txt + else + cp $srcdict $dir/lexicon.txt + fi + silphone=$(cat $phone_lang/phones/optional_silence.txt) + echo " $silphone" >> $dir/lexicon.txt + + awk '{count[$2] += $1;} END {for (w in count){print w, count[w];}}' \ + <$dir/prons.txt >$dir/counts.txt + + + + cat $dir/prons.txt | \ + if $remove_stress; then + perl -e 'while(<>) { @A=split(" ", $_); for ($n=1;$n<@A;$n++) { $A[$n] =~ s/[0-9]$//; } print join(" ", @A) . "\n"; } ' + else + cat + fi | perl -e ' + print ";; CORRECT|INCORRECT \n"; + open(D, "<$ARGV[0]") || die "opening dict file $ARGV[0]"; + # create a hash of all reference pronuncations, and for each word, record + # a list of the prons, separated by " | ". + while () { + @A = split(" ", $_); $is_pron{join(" ",@A)} = 1; + $w = shift @A; + if (!defined $prons{$w}) { $prons{$w} = join(" ", @A); } + else { $prons{$w} = $prons{$w} . " | " . join(" ", @A); } + } + open(C, "<$ARGV[1]") || die "opening counts file $ARGV[1];"; + while () { @A = split(" ", $_); $word_count{$A[0]} = $A[1]; } + while () { @A = split(" ", $_); + $count = shift @A; $word = $A[0]; $freq = sprintf("%0.2f", $count / $word_count{$word}); + $rank = ++$wcount{$word}; # 1 if top observed pron of word, 2 if second... + $str = (defined $is_pron{join(" ", @A)} ? "CORRECT" : "INCORRECT"); + shift @A; + print "$count $rank $freq $str $word \"" . join(" ", @A) . "\", ref = \"$prons{$word}\"\n"; + } ' $dir/lexicon.txt $dir/counts.txt >$dir/pron_info.txt + + grep -v '^;;' $dir/pron_info.txt | \ + awk '{ word=$5; count=$1; if (tot[word] == 0) { first_line[word] = $0; } + corr[word] += ($4 == "CORRECT" ? count : 0); tot[word] += count; } + END {for (w in tot) { printf("%s\t%s\t%s\t\t%s\n", tot[w], w, (corr[w]/tot[w]), first_line[w]); }} ' \ + | sort -k1 -nr | cat <( echo ';; ') - \ + > $dir/word_info.txt +fi + +if [ $stage -le 11 ]; then + echo "$0: some of the more interesting stuff in $dir/pron_info.txt follows." + echo "# grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | head -n 20" + + grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | head -n 20 + + echo "$0: here are some other interesting things.." 
+ echo "# grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | awk '\$3 > 0.4 && \$1 > 10' | head -n 20" + grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | awk '$3 > 0.4 && $1 > 10' | head -n 20 + + echo "$0: here are some high-frequency words whose reference pronunciations rarely show up." + echo "# awk '\$3 < 0.1' $dir/word_info.txt | head -n 20" + awk '$3 < 0.1 || $1 == ";;"' $dir/word_info.txt | head -n 20 + + +fi + diff --git a/tools/ASR_2ch_track/steps/cleanup/decode_segmentation.sh b/tools/ASR_2ch_track/steps/cleanup/decode_segmentation.sh new file mode 100644 index 0000000000000000000000000000000000000000..83bac07a1b7c459cdf7be81481058cb40632fc11 --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/decode_segmentation.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +# Copyright 2014 Guoguo Chen, 2015 GoVivace Inc. (Nagendra Goel) +# Apache 2.0 + +# Begin configuration section. +transform_dir= # this option won't normally be used, but it can be used if you + # want to supply existing fMLLR transforms when decoding. +iter= +model= # You can specify the model to use (e.g. if you want to use the .alimdl) +stage=0 +nj=4 +cmd=run.pl +max_active=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 # note: only really affects pruning (scoring is on lattices). +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +scoring_opts= +# note: there are no more min-lmwt and max-lmwt options, instead use +# e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20" +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "This is a special decoding script for segmentation where we use one" + echo "decoding graph for each segment. We assume a file HCLG.fsts.scp exists" + echo "which is the scp file of the graphs for each segment." + echo "" + echo "Usage: $0 [options] " + echo " e.g.: $0 exp/tri2b/graph_train_si284_split \\" + echo " data/train_si284_split exp/tri2b/decode_train_si284_split" + echo "" + echo "where is assumed to be a sub-directory of the directory" + echo "where the model is." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --model # which model to use (e.g. to" + echo " # specify the final.alimdl)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --transform-dir # dir to find fMLLR transforms " + echo " --acwt # acoustic scale used for lattice generation " + echo " --scoring-opts # options to local/score.sh" + echo " --num-threads # number of threads to use, default 1." + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$model" ]; then # if --model was not specified on the command line... + if [ -z $iter ]; then model=$srcdir/final.mdl; + else model=$srcdir/$iter.mdl; fi +fi + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fsts.scp; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +# Figures out the proper HCLG. 
+sort -k1,1 -u < $graphdir/HCLG.fsts.scp > $graphdir/HCLG.fsts.scp.sorted +mv $graphdir/HCLG.fsts.scp.sorted $graphdir/HCLG.fsts.scp +for x in `seq 1 $nj`; do + cat $graphdir/HCLG.fsts.scp |\ + utils/filter_scp.pl -f 1 $sdata/$x/feats.scp > $sdata/$x/graphs.scp + num_feats=`cat $sdata/$x/feats.scp | wc -l` + num_graphs=`cat $sdata/$x/graphs.scp | wc -l` + if [ $num_graphs -ne $num_feats ]; then + echo "$0: number of graphs is not consistent with number of features." + exit 1 + fi +done +HCLG="scp:$sdata/JOB/graphs.scp" + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ "`cat $transform_dir/num_jobs`" -ne $nj ] && \ + echo "Mismatch in number of jobs with $transform_dir"; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +fi + +if [ $stage -le 0 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $model"; exit 1; } + fi + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model "$HCLG" "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir || + { echo "$0: Scoring failedr. (ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/cleanup/find_bad_utts.sh b/tools/ASR_2ch_track/steps/cleanup/find_bad_utts.sh new file mode 100644 index 0000000000000000000000000000000000000000..80a71b0edc5999b769f885f7e6875dc7c53019f9 --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/find_bad_utts.sh @@ -0,0 +1,193 @@ +#!/bin/bash +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Computes training alignments using a model with delta or +# LDA+MLLT features. This version, rather than just using the +# text to align, computes mini-language models (unigram) from the text +# and a few common words in the LM. + +# Begin configuration section. +nj=4 +cmd=run.pl +use_graphs=false +# Begin configuration. 
+scale_opts="--transition-scale=1.0 --self-loop-scale=0.1" +acoustic_scale=0.1 +beam=15.0 +lattice_beam=8.0 +max_active=750 +transform_dir= # directory to find fMLLR transforms in. +top_n_words=100 # Number of common words that we compile into each graph (most frequent + # in $lang/text. +stage=-1 +cleanup=true +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: $0 " + echo "e.g.: $0 data/train data/lang exp/tri1 exp/tri1_debug" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +for f in $data/text $lang/oov.int $srcdir/tree $srcdir/final.mdl \ + $lang/L_disambig.fst $lang/phones/disambig.int; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1; +done + +oov=`cat $lang/oov.int` || exit 1; +mkdir -p $dir/log +echo $nj > $dir/num_jobs +sdata=$data/split$nj +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. + +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +cp $srcdir/final.occs $dir; + + +if [ $stage -le 0 ]; then + utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt <$data/text | \ + awk '{for(x=2;x<=NF;x++) print $x;}' | sort | uniq -c | \ + sort -rn > $dir/word_counts.int || exit 1; + num_words=$(awk '{x+=$1} END{print x}' < $dir/word_counts.int) || exit 1; + # print top-n words with their unigram probabilities. + + head -n $top_n_words $dir/word_counts.int | awk -v tot=$num_words '{print $1/tot, $2;}' >$dir/top_words.int + utils/int2sym.pl -f 2 $lang/words.txt <$dir/top_words.int >$dir/top_words.txt +fi + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $srcdir/full.mat $dir + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ -z "$transform_dir" ] && [ -f $srcdir/trans.1 ]; then + transform_dir=$srcdir +fi +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + nj_orig=$(cat $transform_dir/num_jobs) + if [ $nj -ne $nj_orig ]; then + # Copy the transforms into an archive with an index. + for n in $(seq $nj_orig); do cat $transform_dir/trans.$n; done | \ + copy-feats ark:- ark,scp:$dir/trans.ark,$dir/trans.scp || exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/trans.scp ark:- ark:- |" + else + # number of jobs matches with alignment dir. 
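+    # e.g. (with nj unchanged) job 1 applies ark:$transform_dir/trans.1,
+    # job 2 applies trans.2, and so on, with no re-indexing needed.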
+ feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" + fi +elif [ -f $srcdir/final.alimdl ]; then + echo "$0: **WARNING**: you seem to be using an fMLLR system as input," + echo " but you are not providing the --transform-dir option during alignment." +fi + + +if [ $stage -le 1 ]; then + echo "$0: decoding $data using utterance-specific decoding graphs using model from $srcdir, output in $dir" + + rm $dir/edits.*.txt $dir/aligned_ref.*.txt 2>/dev/null + + $cmd JOB=1:$nj $dir/log/decode.JOB.log \ + utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text \| \ + steps/cleanup/make_utterance_fsts.pl $dir/top_words.int \| \ + compile-train-graphs-fsts $scale_opts --read-disambig-syms=$lang/phones/disambig.int \ + $dir/tree $dir/final.mdl $lang/L_disambig.fst ark:- ark:- \| \ + gmm-latgen-faster --acoustic-scale=$acoustic_scale --beam=$beam \ + --max-active=$max_active --lattice-beam=$lattice_beam \ + --word-symbol-table=$lang/words.txt \ + $dir/final.mdl ark:- "$feats" ark:- \| \ + lattice-oracle ark:- "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|" \ + ark,t:- ark,t:$dir/edits.JOB.txt \| \ + utils/int2sym.pl -f 2- $lang/words.txt '>' $dir/aligned_ref.JOB.txt || exit 1; +fi + + +if [ $stage -le 2 ]; then + if [ -f $dir/edits.1.txt ]; then + # the awk commands below are to ensure that partially-written files don't confuse us. + for x in $(seq $nj); do cat $dir/edits.$x.txt; done | awk '{if(NF==2){print;}}' > $dir/edits.txt + for x in $(seq $nj); do cat $dir/aligned_ref.$x.txt; done | awk '{if(NF>=1){print;}}' > $dir/aligned_ref.txt + else + echo "$0: warning: no file $dir/edits.1.txt, using previously concatenated file if present." + fi + + # in case any utterances failed to align, get filtered copy of $data/text + utils/filter_scp.pl $dir/edits.txt < $data/text > $dir/text + cat $dir/text | awk '{print $1, (NF-1);}' > $dir/length.txt + + n1=$(wc -l < $dir/edits.txt) + n2=$(wc -l < $dir/aligned_ref.txt) + n3=$(wc -l < $dir/text) + n4=$(wc -l < $dir/length.txt) + if [ $n1 -ne $n2 ] || [ $n2 -ne $n3 ] || [ $n3 -ne $n4 ]; then + echo "$0: mismatch in lengths of files:" + wc $dir/edits.txt $dir/aligned_ref.txt $dir/text $dir/length.txt + exit 1; + fi + + # note: the format of all_info.txt is: + # + # with the fields separated by tabs, e.g. 
+ # adg04_sr009_trn 1 12 SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED AT SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED + + paste $dir/edits.txt \ + <(awk '{print $2}' $dir/length.txt) \ + <(awk '{$1="";print;}' <$dir/aligned_ref.txt) \ + <(awk '{$1="";print;}' <$dir/text) > $dir/all_info.txt + + sort -nr -k2 $dir/all_info.txt > $dir/all_info.sorted.txt + + if $cleanup; then + rm $dir/edits.*.txt $dir/aligned_ref.*.txt + fi + +fi + +if [ $stage -le 3 ]; then + ### + # These stats might help people figure out what is wrong with the data + # a)human-friendly and machine-parsable alignment in the file per_utt_details.txt + # b)evaluation of per-speaker performance to possibly find speakers with + # distinctive accents/speech disorders and similar + # c)Global analysis on (Ins/Del/Sub) operation, which might be used to figure + # out if there is systematic issue with lexicon, pronunciation or phonetic confusability + + mkdir -p $dir/analysis + align-text --special-symbol="***" ark:$dir/text ark:$dir/aligned_ref.txt ark,t:- | \ + utils/scoring/wer_per_utt_details.pl --special-symbol "***" > $dir/analysis/per_utt_details.txt + + cat $dir/analysis/per_utt_details.txt | \ + utils/scoring/wer_per_spk_details.pl $data/utt2spk > $dir/analysis/per_spk_details.txt + + cat $dir/analysis/per_utt_details.txt | \ + utils/scoring/wer_ops_details.pl --special-symbol "***" | \ + sort -i -b -k1,1 -k4,4nr -k2,2 -k3,3 > $dir/analysis/ops_details.txt + +fi + diff --git a/tools/ASR_2ch_track/steps/cleanup/find_bad_utts_nnet.sh b/tools/ASR_2ch_track/steps/cleanup/find_bad_utts_nnet.sh new file mode 100644 index 0000000000000000000000000000000000000000..42c768f9a2d5e9e5053c746652e196b79bbb0edd --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/find_bad_utts_nnet.sh @@ -0,0 +1,162 @@ +#!/bin/bash +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey) +# 2016 Ilya Platonov +# Apache 2.0 +# +# Tweaked version of find_bad_utts.sh to work with nnet2 baseline models. +# +# Begin configuration section. +nj=32 +cmd=run.pl +use_graphs=false +# Begin configuration. +scale_opts="--transition-scale=1.0 --self-loop-scale=0.1" +acoustic_scale=0.1 +beam=15.0 +lattice_beam=8.0 +max_active=750 +transform_dir= # directory to find fMLLR transforms in. +top_n_words=100 # Number of common words that we compile into each graph (most frequent + # in $lang/text. +stage=-1 +cleanup=true +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "usage: $0 " + echo "e.g.: $0 data/train data/lang exp/tri1 exp/tri1_debug" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --use-graphs true # use graphs in src-dir" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +for f in $data/text $lang/oov.int $srcdir/tree $srcdir/final.mdl \ + $lang/L_disambig.fst $lang/phones/disambig.int; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1; +done + +oov=`cat $lang/oov.int` || exit 1; +mkdir -p $dir/log +echo $nj > $dir/num_jobs +sdata=$data/split$nj +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. 
+cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. + +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; + + +if [ $stage -le 0 ]; then + utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt <$data/text | \ + awk '{for(x=2;x<=NF;x++) print $x;}' | sort | uniq -c | \ + sort -rn > $dir/word_counts.int || exit 1; + num_words=$(awk '{x+=$1} END{print x}' < $dir/word_counts.int) || exit 1; + # print top-n words with their unigram probabilities. + + head -n $top_n_words $dir/word_counts.int | awk -v tot=$num_words '{print $1/tot, $2;}' >$dir/top_words.int + utils/int2sym.pl -f 2 $lang/words.txt <$dir/top_words.int >$dir/top_words.txt +fi + +echo "$0: feature type is raw" + +feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |"; + +if [ $stage -le 1 ]; then + echo "$0: decoding $data using utterance-specific decoding graphs using model from $srcdir, output in $dir" + + rm $dir/edits.*.txt $dir/aligned_ref.*.txt 2>/dev/null + + $cmd JOB=1:$nj $dir/log/decode.JOB.log \ + utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text \| \ + steps/cleanup/make_utterance_fsts.pl $dir/top_words.int \| \ + compile-train-graphs-fsts $scale_opts --read-disambig-syms=$lang/phones/disambig.int \ + $dir/tree $dir/final.mdl $lang/L_disambig.fst ark:- ark:- \| \ + nnet-latgen-faster --acoustic-scale=$acoustic_scale --beam=$beam \ + --max-active=$max_active --lattice-beam=$lattice_beam \ + --word-symbol-table=$lang/words.txt \ + $dir/final.mdl ark:- "$feats" ark:- \| \ + lattice-oracle ark:- "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|" \ + ark,t:- ark,t:$dir/edits.JOB.txt \| \ + utils/int2sym.pl -f 2- $lang/words.txt '>' $dir/aligned_ref.JOB.txt || exit 1; +fi + + +if [ $stage -le 2 ]; then + if [ -f $dir/edits.1.txt ]; then + # the awk commands below are to ensure that partially-written files don't confuse us. + for x in $(seq $nj); do cat $dir/edits.$x.txt; done | awk '{if(NF==2){print;}}' > $dir/edits.txt + for x in $(seq $nj); do cat $dir/aligned_ref.$x.txt; done | awk '{if(NF>=1){print;}}' > $dir/aligned_ref.txt + else + echo "$0: warning: no file $dir/edits.1.txt, using previously concatenated file if present." + fi + + # in case any utterances failed to align, get filtered copy of $data/text + utils/filter_scp.pl $dir/edits.txt < $data/text > $dir/text + cat $dir/text | awk '{print $1, (NF-1);}' > $dir/length.txt + + n1=$(wc -l < $dir/edits.txt) + n2=$(wc -l < $dir/aligned_ref.txt) + n3=$(wc -l < $dir/text) + n4=$(wc -l < $dir/length.txt) + if [ $n1 -ne $n2 ] || [ $n2 -ne $n3 ] || [ $n3 -ne $n4 ]; then + echo "$0: mismatch in lengths of files:" + wc $dir/edits.txt $dir/aligned_ref.txt $dir/text $dir/length.txt + exit 1; + fi + + # note: the format of all_info.txt is: + # + # with the fields separated by tabs, e.g. 
+ # adg04_sr009_trn 1 12 SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED AT SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED + + paste $dir/edits.txt \ + <(awk '{print $2}' $dir/length.txt) \ + <(awk '{$1="";print;}' <$dir/aligned_ref.txt) \ + <(awk '{$1="";print;}' <$dir/text) > $dir/all_info.txt + + sort -nr -k2 $dir/all_info.txt > $dir/all_info.sorted.txt + + if $cleanup; then + rm $dir/edits.*.txt $dir/aligned_ref.*.txt + fi + +fi + +if [ $stage -le 3 ]; then + ### + # These stats migh help people figure out what is wrong with the data + # a)human-friendly and machine-parsable alignment in the file per_utt_details.txt + # b)evaluation of per-speaker performance to possibly find speakers with + # distinctive accents/speech disorders and similar + # c)Global analysis on (Ins/Del/Sub) operation, which might be used to figure + # out if there is systematic issue with lexicon, pronunciation or phonetic confusability + + mkdir -p $dir/analysis + align-text --special-symbol="***" ark:$dir/text ark:$dir/aligned_ref.txt ark,t:- | \ + utils/scoring/wer_per_utt_details.pl --special-symbol "***" > $dir/analysis/per_utt_details.txt + + cat $dir/analysis/per_utt_details.txt | \ + utils/scoring/wer_per_spk_details.pl $data/utt2spk > $dir/analysis/per_spk_details.txt + + cat $dir/analysis/per_utt_details.txt | \ + utils/scoring/wer_ops_details.pl --special-symbol "***" | \ + sort -i -b -k1,1 -k4,4nr -k2,2 -k3,3 > $dir/analysis/ops_details.txt + +fi + diff --git a/tools/ASR_2ch_track/steps/cleanup/make_segmentation_data_dir.sh b/tools/ASR_2ch_track/steps/cleanup/make_segmentation_data_dir.sh new file mode 100644 index 0000000000000000000000000000000000000000..62e8dc7de455b12d4548b1a0b31d1a5f5e673e29 --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/make_segmentation_data_dir.sh @@ -0,0 +1,206 @@ +#!/bin/bash + +# Copyright 2014 Guoguo Chen +# Apache 2.0 + +# Begin configuration section. +max_seg_length=10 +min_seg_length=2 +min_sil_length=0.5 +time_precision=0.05 +special_symbol="<***>" +separator=";" +wer_cutoff=-1 +# End configuration section. + +set -e + +echo "$0 $@" + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "This script takes the ctm file that corresponds to the data directory" + echo "created by steps/cleanup/split_long_utterance.sh, works out a new" + echo "segmentation and creates a new data directory for the new segmentation." + echo "" + echo "Usage: $0 [options] " + echo " e.g.: $0 train_si284_split.ctm \\" + echo " data/train_si284_split data/train_si284_reseg" + echo "Options:" + echo " --wer-cutoff # ignore segments with WER higher than the" + echo " # specified value. -1 means no segment will" + echo " # be ignored." + echo " --max-seg-length # maximum length of new segments" + echo " --min-seg-length # minimum length of new segments" + echo " --min-sil-length # minimum length of silence as split point" + echo " --time-precision # precision for determining \"same time\"" + echo " --special-symbol # special symbol to be aligned with" + echo " # inserted or deleted words" + echo " --separator # separator for aligned pairs" + exit 1; +fi + +ctm=$1 +old_data_dir=$2 +new_data_dir=$3 + +for f in $ctm $old_data_dir/text.orig $old_data_dir/utt2spk \ + $old_data_dir/wav.scp $old_data_dir/segments; do + if [ ! 
-f $f ]; then + echo "$0: expected $f to exist" + exit 1; + fi +done + +mkdir -p $new_data_dir/tmp/ +cp -f $old_data_dir/wav.scp $new_data_dir +[ -f old_data_dir/spk2gender ] && cp -f $old_data_dir/spk2gender $new_data_dir + +# Removes the overlapping region (in utils/split_long_utterance.sh we create +# the segmentation with overlapping region). +# +# Note that for each audio file, we expect its segments have been sorted in time +# ascending order (if we ignore the overlap). +cat $ctm | perl -e ' + $precision = $ARGV[0]; + @ctm = (); + %processed_ids = (); + $previous_id = ""; + while () { + chomp; + my @current = split; + @current >= 5 || die "Error: bad line $_\n"; + $id = join("_", ($current[0], $current[1])); + @previous = @{$ctm[-1]}; + + # Start of a new audio file. + if ($previous_id ne $id) { + # Prints existing information. + if (@ctm > 0) { + foreach $line (@ctm) { + print "$line->[0] $line->[1] $line->[2] $line->[3] $line->[4]\n"; + } + } + + # Checks if the ctm file is sorted. + if (defined($processed_ids{$id})) { + die "Error: \"$current[0] $current[1]\" has already been processed\n"; + } else { + $processed_ids{$id} = 1; + } + + @ctm = (); + push(@ctm, \@current); + $previous_id = $id; + next; + } + + $new_start = sprintf("%.2f", $previous[2] + $previous[3]); + + if ($new_start > $current[2]) { + # Case 2: scans for a splice point. + $index = -1; + while (defined($ctm[$index]) + && $ctm[$index]->[2] + $ctm[$index]->[3] > $current[2]) { + if ($ctm[$index]->[4] eq $current[4] + && abs($ctm[$index]->[2] - $current[2]) < $precision + && abs($ctm[$index]->[3] - $current[3]) < $precision) { + pop @ctm for 2..abs($index); + last; + } else { + $index -= 1; + } + } + } else { + push(@ctm, \@current); + } + } + + if (@ctm > 0) { + foreach $line (@ctm) { + print "$line->[0] $line->[1] $line->[2] $line->[3] $line->[4]\n"; + } + }' $time_precision > $new_data_dir/tmp/ctm + +# Creates a text file from the ctm, which will be used in Levenshtein alignment. +# Note that we remove in the text file. +cat $new_data_dir/tmp/ctm | perl -e ' + $previous_wav = ""; + $previous_channel = ""; + $text = ""; + while () { + chomp; + @col = split; + @col >= 5 || die "Error: bad line $_\n"; + if ($previous_wav eq $col[0]) { + $previous_channel eq $col[1] || + die "Error: more than one channels detected\n"; + if ($col[4] ne "") { + $text .= " $col[4]"; + } + } else { + if ($text ne "") { + print "$previous_wav $text\n"; + } + $text = $col[4]; + $previous_wav = $col[0]; + $previous_channel = $col[1]; + } + } + if ($text ne "") { + print "$previous_wav $text\n"; + }' > $new_data_dir/tmp/text + +# Computes the Levenshtein alignment. +align-text --special-symbol=$special_symbol --separator=$separator \ + ark:$old_data_dir/text.orig ark:$new_data_dir/tmp/text \ + ark,t:$new_data_dir/tmp/aligned.txt + +# Creates new segmentation. +steps/cleanup/create_segments_from_ctm.pl \ + --max-seg-length $max_seg_length --min-seg-length $min_seg_length \ + --min-sil-length $min_sil_length \ + --separator $separator --special-symbol $special_symbol \ + --wer-cutoff $wer_cutoff \ + $new_data_dir/tmp/ctm $new_data_dir/tmp/aligned.txt \ + $new_data_dir/segments $new_data_dir/text + +# Now creates the new utt2spk and spk2utt file. 
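+# Speakers are recovered via the wav file: the old segments file tells us which
+# wav each old utterance (and therefore its speaker) came from, and the new
+# segments file tells us which wav each new utterance was cut from.  E.g.
+# (hypothetical) if old utterance spk1_utt01 of speaker spk1 came from wav
+# 440c0401, every new segment cut from 440c0401 is assigned speaker spk1.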
+cat $old_data_dir/utt2spk | perl -e ' + ($old_seg_file, $new_seg_file, $utt2spk_file_out) = @ARGV; + open(OS, "<$old_seg_file") || die "Error: fail to open $old_seg_file\n"; + open(NS, "<$new_seg_file") || die "Error: fail to open $new_seg_file\n"; + open(UO, ">$utt2spk_file_out") || + die "Error: fail to open $utt2spk_file_out\n"; + while () { + chomp; + @col = split; + @col == 2 || die "Error: bad line $_\n"; + $utt2spk{$col[0]} = $col[1]; + } + while () { + chomp; + @col = split; + @col == 4 || die "Error: bad line $_\n"; + if (defined($wav2spk{$col[1]})) { + $wav2spk{$col[1]} == $utt2spk{$col[0]} || + die "Error: multiple speakers detected for wav file $col[1]\n"; + } else { + $wav2spk{$col[1]} = $utt2spk{$col[0]}; + } + } + while () { + chomp; + @col = split; + @col == 4 || die "Error: bad line $_\n"; + defined($wav2spk{$col[1]}) || + die "Error: could not find speaker for wav file $col[1]\n"; + print UO "$col[0] $wav2spk{$col[1]}\n"; + } ' $old_data_dir/segments $new_data_dir/segments $new_data_dir/utt2spk +utils/utt2spk_to_spk2utt.pl $new_data_dir/utt2spk > $new_data_dir/spk2utt + +utils/fix_data_dir.sh $new_data_dir + +exit 0; diff --git a/tools/ASR_2ch_track/steps/cleanup/make_segmentation_graph.sh b/tools/ASR_2ch_track/steps/cleanup/make_segmentation_graph.sh new file mode 100644 index 0000000000000000000000000000000000000000..22551716fcfdb31d42209f80a762c82cc177fbd7 --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/make_segmentation_graph.sh @@ -0,0 +1,142 @@ +#!/bin/bash + +# Copyright 2014 Guoguo Chen +# Apache 2.0 + +# Begin configuration section. +nj=4 +cmd=run.pl +tscale=1.0 # transition scale. +loopscale=0.1 # scale for self-loops. +cleanup=true +ngram_order=1 +srilm_options="-wbdiscount" # By default, use Witten-Bell discounting in SRILM +# End configuration section. + +set -e + +echo "$0 $@" + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 4 ]; then + echo "This script builds one decoding graph for each truncated utterance in" + echo "segmentation. It first calls steps/cleanup/make_utterance_graph.sh to" + echo "build one decoding graph for each original utterance, which will be" + echo "shared by the truncated utterances from the same original utterance." + echo "We assign the decoding graph to each truncated utterance using the scp" + echo "file so that we can avoid duplicating the graphs on the disk." + echo "" + echo "Usage: $0 [options] " + echo " e.g.: $0 data/train_si284_split/ \\" + echo " data/lang exp/tri2b/ exp/tri2b/graph_train_si284_split" + echo "" + echo "Options:" + echo " --ngram-order # order of n-gram language model" + echo " --srilm-options # options for ngram-count in SRILM tool" + echo " --tscale # transition scale" + echo " --loopscale # scale for self-loops" + echo " --cleanup # if true, removes the intermediate files" + exit 1; +fi + +data=$1 +lang=$2 +model_dir=$3 +graph_dir=$4 + +for f in $data/text.orig $data/orig2utt $lang/L_disambig.fst \ + $lang/words.txt $lang/oov.int $model_dir/final.mdl $model_dir/tree; do + if [ ! -f $f ]; then + echo "$0: expected $f to exist" + exit 1; + fi +done + +# If --ngram-order is larger than 1, we will have to use SRILM +if [ $ngram_order -gt 1 ]; then + ngram_count=`which ngram-count`; + if [ -z $ngram_count ]; then + if uname -a | grep 64 >/dev/null; then # some kind of 64 bit... 
+ sdir=`pwd`/../../../tools/srilm/bin/i686-m64 + else + sdir=`pwd`/../../../tools/srilm/bin/i686 + fi + if [ -f $sdir/ngram-count ]; then + echo Using SRILM tools from $sdir + export PATH=$PATH:$sdir + else + echo You appear to not have SRILM tools installed, either on your path, + echo or installed in $sdir. See tools/install_srilm.sh for installation + echo instructions. + exit 1 + fi + fi +fi + +# Creates one graph for each transcript. We parallelize the process a little +# bit. +num_lines=`cat $data/text.orig | wc -l` +if [ $nj -gt $num_lines ]; then + nj=$num_lines + echo "$0: Too many number of jobs, using $nj instead" +fi + +mkdir -p $graph_dir/split$nj +mkdir -p $graph_dir/log + +split_texts="" +for n in $(seq $nj); do + mkdir -p $graph_dir/split$nj/$n + split_texts="$split_texts $graph_dir/split$nj/$n/text" +done +utils/split_scp.pl $data/text.orig $split_texts + +$cmd JOB=1:$nj $graph_dir/log/make_utterance_graph.JOB.log \ + steps/cleanup/make_utterance_graph.sh --cleanup $cleanup \ + --tscale $tscale --loopscale $loopscale \ + --ngram-order $ngram_order --srilm-options "$srilm_options" \ + $graph_dir/split$nj/JOB/text $lang \ + $model_dir $graph_dir/split$nj/JOB || exit 1; + +# Copies files from lang directory. +mkdir -p $graph_dir +cp -r $lang/* $graph_dir + +am-info --print-args=false $model_dir/final.mdl |\ + grep pdfs | awk '{print $NF}' > $graph_dir/num_pdfs + +# Creates the graph table. +cat $graph_dir/split$nj/*/HCLG.fsts.scp > $graph_dir/split$nj/HCLG.fsts.scp +fstcopy scp:$graph_dir/split$nj/HCLG.fsts.scp \ + "ark,scp:$graph_dir/HCLG.fsts,$graph_dir/tmp.HCLG.fsts.scp" + +# The graphs we created above were indexed by the old utterance id. We have to +# duplicate them for the new utterance id. We do this in the scp file so we do +# not have to store the duplicated graphs on the disk. +cat $graph_dir/tmp.HCLG.fsts.scp | perl -e ' + open(O2U, "<$ARGV[0]") || die "Error: fail to open $ARGV[0]\n"; + while () { + chomp; + @col = split; + @col == 2 || die "Error: bad line $_\n"; + $scp{$col[0]} = $col[1]; + } + while () { + chomp; + @col = split; + @col >= 2 || die "Error: bad line $_\n"; + defined($scp{$col[0]}) || + die "Error: $col[0] not defined in original scp file\n"; + for ($i = 1; $i < @col; $i += 1) { + print "$col[$i] $scp{$col[0]}\n" + } + }' $data/orig2utt > $graph_dir/HCLG.fsts.scp +rm $graph_dir/tmp.HCLG.fsts.scp + +if $cleanup; then + rm -r $graph_dir/split$nj +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/cleanup/make_utterance_fsts.pl b/tools/ASR_2ch_track/steps/cleanup/make_utterance_fsts.pl new file mode 100644 index 0000000000000000000000000000000000000000..f457e52f1137697f34c45c783493df8bcd09ba67 --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/make_utterance_fsts.pl @@ -0,0 +1,46 @@ +#!/usr/bin/env perl +use warnings; #sed replacement for -w perl parameter + +# makes unigram decoding-graph FSTs specific to each utterances, where the +# supplied top-n-words list together with the supervision text of the utterance are +# combined. + +if (@ARGV != 1) { + print STDERR "Usage: make_utterance_fsts.pl top-words-file.txt < text-archive > fsts-archive\n" . + "e.g.: utils/sym2int.pl -f 2- data/lang/words.txt data/train/text | \\\n" . + " make_utterance_fsts.pl exp/foo/top_words.int | compile-train-graphs-fsts ... 
\n";
+}
+
+($top_words_file) = @ARGV;
+
+open(F, "<$top_words_file") || die "opening $top_words_file";
+
+%top_word_probs = ( );
+
+while(<F>) {
+  @A = split;
+  (@A == 2 && $A[0] > 0.0) || die "Bad line $_ in $top_words_file";
+  $A[1] =~ m/^[0-9]+$/ || die "Expecting numeric word-ids in $top_words_file: $_\n";
+  $top_word_probs{$A[1]} += $A[0];
+}
+
+while (<STDIN>) {
+  @A = split;
+  $utterance_id = shift @A;
+  print "$utterance_id\n";
+  $num_words = @A + 0; # length of array @A
+  %word_probs = %top_word_probs;
+  foreach $w (@A) {
+    $w =~ m/^[0-9]+$/ || die "Expecting numeric word-ids on stdin: $_";
+    $word_probs{$w} += 1.0 / $num_words;
+  }
+  foreach $w (keys %word_probs) {
+    $prob = $word_probs{$w};
+    $prob > 0.0 || die "Word $w with bad probability $prob, utterance-id = $utterance_id\n";
+    $cost = -log($prob);
+    print "0 0 $w $w $cost\n";
+  }
+  $final_cost = -log(1.0 / $num_words);
+  print "0 $final_cost\n";
+  print "\n"; # Empty line terminates the FST in the text-archive format.
+}
diff --git a/tools/ASR_2ch_track/steps/cleanup/make_utterance_graph.sh b/tools/ASR_2ch_track/steps/cleanup/make_utterance_graph.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a3b1e2af70a7c75a57c4699ae2a69a74753bd1f6
--- /dev/null
+++ b/tools/ASR_2ch_track/steps/cleanup/make_utterance_graph.sh
@@ -0,0 +1,172 @@
+#!/bin/bash
+
+# Copyright 2014 Guoguo Chen
+# Apache 2.0
+
+# Begin configuration section.
+tscale=1.0 # transition scale.
+loopscale=0.1 # scale for self-loops.
+cleanup=true
+ngram_order=1
+srilm_options="-wbdiscount" # By default, use Witten-Bell discounting in SRILM
+# End configuration section.
+
+set -e
+
+echo "$0 $@"
+
+[ -f ./path.sh ] && . ./path.sh
+. parse_options.sh || exit 1;
+
+if [ $# -ne 4 ]; then
+  echo "This script builds one decoding graph for each utterance using the"
+  echo "corresponding text in the given file. If --ngram-order is 1,"
+  echo "then utils/make_unigram_grammar.pl will be used to build the unigram"
+  echo "language model. Otherwise SRILM will be used instead. You are supposed"
+  echo "to have SRILM installed if --ngram-order is larger than 1. The format"
+  echo "of the given file is the same as the transcript text files in the data"
+  echo "directory."
+  echo ""
+  echo "Usage: $0 [options] <text> <lang-dir> <model-dir> <graph-dir>"
+  echo " e.g.: $0 data/train_si284_split/text \\"
+  echo "          data/lang exp/tri2b/ exp/tri2b/graph_train_si284_split"
+  echo ""
+  echo "Options:"
+  echo "    --ngram-order         # order of n-gram language model"
+  echo "    --srilm-options       # options for ngram-count in SRILM tool"
+  echo "    --tscale              # transition scale"
+  echo "    --loopscale           # scale for self-loops"
+  echo "    --cleanup             # if true, removes the intermediate files"
+  exit 1;
+fi
+
+text=$1
+lang=$2
+model_dir=$3
+graph_dir=$4
+
+for f in $lang/L_disambig.fst $lang/words.txt $lang/oov.int \
+  $model_dir/final.mdl $model_dir/tree; do
+  if [ ! -f $f ]; then
+    echo "$0: expected $f to exist"
+    exit 1;
+  fi
+done
+
+mkdir -p $graph_dir/sub_graphs
+
+# If --ngram-order is larger than 1, we will have to use SRILM
+if [ $ngram_order -gt 1 ]; then
+  ngram_count=`which ngram-count`;
+  if [ -z $ngram_count ]; then
+    if uname -a | grep 64 >/dev/null; then # some kind of 64 bit...
+      sdir=`pwd`/../../../tools/srilm/bin/i686-m64
+    else
+      sdir=`pwd`/../../../tools/srilm/bin/i686
+    fi
+    if [ -f $sdir/ngram-count ]; then
+      echo Using SRILM tools from $sdir
+      export PATH=$PATH:$sdir
+    else
+      echo You appear to not have SRILM tools installed, either on your path,
+      echo or installed in $sdir.
See tools/install_srilm.sh for installation + echo instructions. + exit 1 + fi + fi +fi + +# Maps OOV words to the oov symbol. +oov=`cat $lang/oov.int` +oov_txt=`cat $lang/oov.txt` + +N=`tree-info --print-args=false $model_dir/tree |\ + grep "context-width" | awk '{print $NF}'` +P=`tree-info --print-args=false $model_dir/tree |\ + grep "central-position" | awk '{print $NF}'` + +# Loops over all utterances. +if [ -f $graph_dir/sub_graphs/HCLG.fsts.scp ]; then + rm $graph_dir/sub_graphs/HCLG.fsts.scp +fi +while read line; do + uttid=`echo $line | cut -d ' ' -f 1` + words=`echo $line | cut -d ' ' -f 2-` + + echo "$0: processing utterance $uttid." + + wdir=$graph_dir/sub_graphs/$uttid + mkdir -p $wdir + + # Compiles G.fst + if [ $ngram_order -eq 1 ]; then + echo $words > $wdir/text + cat $wdir/text | utils/sym2int.pl --map-oov $oov -f 1- $lang/words.txt | \ + utils/make_unigram_grammar.pl | fstcompile |\ + fstarcsort --sort_type=ilabel > $wdir/G.fst || exit 1; + else + echo $words | awk -v voc=$lang/words.txt -v oov="$oov_txt" ' + BEGIN{ while((getline0) { invoc[$1]=1; } } { + for (x=1;x<=NF;x++) { + if (invoc[$x]) { printf("%s ", $x); } else { printf("%s ", oov); } } + printf("\n"); }' > $wdir/text + ngram-count -text $wdir/text -order $ngram_order "$srilm_options" -lm - |\ + arpa2fst --disambig-symbol=#0 \ + --read-symbol-table=$lang/words.txt - $wdir/G.fst || exit 1; + fi + fstisstochastic $wdir/G.fst || echo "$0: $uttid/G.fst not stochastic." + + # Builds LG.fst + fsttablecompose $lang/L_disambig.fst $wdir/G.fst |\ + fstdeterminizestar --use-log=true | fstminimizeencoded |\ + fstarcsort --sort_type=ilabel > $wdir/LG.fst || exit 1; + fstisstochastic $wdir/LG.fst || echo "$0: $uttid/LG.fst not stochastic." + + # Builds CLG.fst + clg=$wdir/CLG_${N}_${P}.fst + fstcomposecontext --context-size=$N --central-position=$P \ + --read-disambig-syms=$lang/phones/disambig.int \ + --write-disambig-syms=$wdir/disambig_ilabels_${N}_${P}.int \ + $wdir/ilabels_${N}_${P} < $wdir/LG.fst | fstdeterminize > $wdir/CLG.fst + fstisstochastic $wdir/CLG.fst || echo "$0: $uttid/CLG.fst not stochastic." + + make-h-transducer --disambig-syms-out=$wdir/disambig_tid.int \ + --transition-scale=$tscale $wdir/ilabels_${N}_${P} \ + $model_dir/tree $model_dir/final.mdl > $wdir/Ha.fst + + # Builds HCLGa.fst + fsttablecompose $wdir/Ha.fst $wdir/CLG.fst | \ + fstdeterminizestar --use-log=true | \ + fstrmsymbols $wdir/disambig_tid.int | fstrmepslocal | \ + fstminimizeencoded > $wdir/HCLGa.fst + fstisstochastic $wdir/HCLGa.fst ||\ + echo "$0: $uttid/HCLGa.fst is not stochastic" + + add-self-loops --self-loop-scale=$loopscale --reorder=true \ + $model_dir/final.mdl < $wdir/HCLGa.fst > $wdir/HCLG.fst + + if [ $tscale == 1.0 -a $loopscale == 1.0 ]; then + fstisstochastic $wdir/HCLG.fst ||\ + echo "$0: $uttid/HCLG.fst is not stochastic." + fi + + echo "$uttid $wdir/HCLG.fst" >> $graph_dir/sub_graphs/HCLG.fsts.scp + echo +done < $text + +# Copies files from lang directory. +mkdir -p $graph_dir +cp -r $lang/* $graph_dir + +am-info --print-args=false $model_dir/final.mdl |\ + grep pdfs | awk '{print $NF}' > $graph_dir/num_pdfs + +# Creates the graph table. 
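+# (fstcopy writes all per-utterance graphs into a single archive $graph_dir/HCLG.fsts plus an
+# index HCLG.fsts.scp, so graphs can later be looked up by utterance id.)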
+fstcopy scp:$graph_dir/sub_graphs/HCLG.fsts.scp \ + "ark,scp:$graph_dir/HCLG.fsts,$graph_dir/HCLG.fsts.scp" + +if $cleanup; then + rm -r $graph_dir/sub_graphs +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/cleanup/split_long_utterance.sh b/tools/ASR_2ch_track/steps/cleanup/split_long_utterance.sh new file mode 100644 index 0000000000000000000000000000000000000000..f2328ea1897f2f30e453f20223826c53f5ae9d02 --- /dev/null +++ b/tools/ASR_2ch_track/steps/cleanup/split_long_utterance.sh @@ -0,0 +1,146 @@ +#!/bin/bash + +# Copyright 2014 Guoguo Chen +# Apache 2.0 + +# Begin configuration section. +seg_length=30 +min_seg_length=10 +overlap_length=5 +# End configuration section. + +echo "$0 $@" + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 2 ]; then + echo "This script truncates the long audio into smaller overlapping segments" + echo "" + echo "Usage: $0 [options] " + echo " e.g.: $0 data/train_si284_long data/train_si284_split" + echo "" + echo "Options:" + echo " --min-seg-length # minimal segment length" + echo " --seg-length # length of segments in seconds." + echo " --overlap-length # length of overlap in seconds." + exit 1; +fi + +input_dir=$1 +output_dir=$2 + +for f in spk2utt text utt2spk wav.scp; do + [ ! -f $input_dir/$f ] && echo "$0: no such file $input_dir/$f" && exit 1; +done + +[ ! $seg_length -gt $overlap_length ] \ + && echo "$0: --seg-length should be longer than --overlap-length." && exit 1; + +# Checks if sox is on the path. +sox=`which sox` +[ $? -ne 0 ] && echo "$0: sox command not found." && exit 1; +sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe +[ ! -x $sph2pipe ] && echo "$0: sph2pipe command not found." && exit 1; + +mkdir -p $output_dir +cp -f $input_dir/spk2gender $output_dir/spk2gender 2>/dev/null +cp -f $input_dir/text $output_dir/text.orig +cp -f $input_dir/wav.scp $output_dir/wav.scp + +# We assume the audio length in header is correct and get it from there. It is +# a little bit annoying that old version of sox does not support the following: +# $audio_cmd | sox --i -D +# we have to put it in the following format for the old versions: +# $sox --i -D "|$audio_cmd" +# Another way is to count all the samples to get the duration, but it takes +# longer time, so we do not use it here.. The command is: +# $audio_cmd | sox -t wav - -n stat | grep -P "^Length" | awk '{print $1;}' +# +# Note: in the wsj example the process takes couple of minutes because of the +# audio file concatenation; in a real case it should be much faster since +# it just reads the header. +cat $output_dir/wav.scp | perl -e ' + $no_orig_seg = "false"; # Original segment file may or may not exist. + ($u2s_in, $u2s_out, $seg_in, + $seg_out, $orig2utt, $sox, $slen, $mslen, $olen) = @ARGV; + open(UI, "<$u2s_in") || die "Error: fail to open $u2s_in\n"; + open(UO, ">$u2s_out") || die "Error: fail to open $u2s_out\n"; + open(SI, "<$seg_in") || ($no_orig_seg = "true"); + open(SO, ">$seg_out") || die "Error: fail to open $seg_out\n"; + open(UMAP, ">$orig2utt") || die "Error: fail to open $orig2utt\n"; + # If the original segment file exists, we have to work out the segment + # duration from the segment file. Otherwise we work that out from the wav.scp + # file. 
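+  # For illustration (hypothetical entries), a wav.scp line is either
+  #   utt01 /path/to/utt01.wav
+  # or a command pipe ending in |, e.g.
+  #   utt01 sph2pipe -f wav /path/to/utt01.sph |
+  # in which case the duration is obtained by running $sox --i -D on the piped command.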
+ if ($no_orig_seg eq "false") { + while () { + chomp; + @col = split; + @col == 4 || die "Error: bad line $_\n"; + ($seg_id, $wav_id, $seg_start, $seg_end) = @col; + $seg2wav{$seg_id} = $wav_id; + $seg_start{$seg_id} = $seg_start; + $seg_end{$seg_id} = $seg_end; + } + } else { + while () { + chomp; + @col = split; + @col >= 2 || "bad line $_\n"; + if ((@col > 2) && ($col[-1] eq "|")) { + $wav_id = shift @col; pop @col; + $audio_cmd = join(" ", @col); + $duration = `$sox --i -D '\''|$audio_cmd'\''`; + } else { + @col == 2 || die "Error: bad line $_\n in wav.scp"; + $wav_id = $col[0]; + $audio_file = $col[1]; + $duration = `$sox --i -D $audio_file`; + } + chomp($duration); + $seg2wav{$wav_id} = $wav_id; + $seg_start{$wav_id} = 0; + $seg_end{$wav_id} = $duration; + } + } + while () { + chomp; + @col = split; + @col == 2 || die "Error: bad line $_\n"; + $utt2spk{$col[0]} = $col[1]; + } + foreach $seg (keys %seg2wav) { + $index = 0; + $step = $slen - $olen; + print UMAP "$seg"; + while ($seg_start{$seg} + $index * $step < $seg_end{$seg}) { + $new_seg = $seg . "_" . sprintf("%05d", $index); + $start = $seg_start{$seg} + $index * $step; + $end = $start + $slen; + defined($utt2spk{$seg}) || die "Error: speaker not found for $seg\n"; + print UO "$new_seg $utt2spk{$seg}\n"; + print UMAP " $new_seg"; + $index += 1; + if ($end - $olen + $mslen >= $seg_end{$seg}) { + # last segment will have at least $mslen seconds. + $end = $seg_end{$seg}; + print SO "$new_seg $seg2wav{$seg} $start $end\n"; + last; + } else { + print SO "$new_seg $seg2wav{$seg} $start $end\n"; + } + } + print UMAP "\n"; + }' $input_dir/utt2spk $output_dir/utt2spk \ + $input_dir/segments $output_dir/segments $output_dir/orig2utt \ + $sox $seg_length $min_seg_length $overlap_length + +# CAVEAT: We are not dealing with channels here. Each channel should have a +# unique file name in wav.scp. +paste -d ' ' <(cut -d ' ' -f 1 $output_dir/wav.scp) \ + <(cut -d ' ' -f 1 $output_dir/wav.scp) | awk '{print $1" "$2" A";}' \ + > $output_dir/reco2file_and_channel + +utils/fix_data_dir.sh $output_dir + +exit 0; diff --git a/tools/ASR_2ch_track/steps/compute_cmvn_stats.sh b/tools/ASR_2ch_track/steps/compute_cmvn_stats.sh new file mode 100644 index 0000000000000000000000000000000000000000..938609df6892caa9bd74eca67552a124a1db374a --- /dev/null +++ b/tools/ASR_2ch_track/steps/compute_cmvn_stats.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 +# To be run from .. (one directory up from here) +# see ../run.sh for example + +# Compute cepstral mean and variance statistics per speaker. +# We do this in just one job; it's fast. +# This script takes no options. +# +# Note: there is no option to do CMVN per utterance. The idea is +# that if you did it per utterance it would not make sense to do +# per-speaker fMLLR on top of that (since you'd be doing fMLLR on +# top of different offsets). Therefore what would be the use +# of the speaker information? In this case you should probably +# make the speaker-ids identical to the utterance-ids. The +# speaker information does not have to correspond to actual +# speakers, it's just the level you want to adapt at. + +echo "$0 $@" # Print the command line for logging + +fake=false # If specified, can generate fake/dummy CMVN stats (that won't normalize) +fake_dims= # as the "fake" option, but you can generate "fake" stats only for certain + # dimensions. 
+two_channel=false + +if [ "$1" == "--fake" ]; then + fake=true + shift +fi +if [ "$1" == "--fake-dims" ]; then + fake_dims=$2 + shift + shift +fi +if [ "$1" == "--two-channel" ]; then + two_channel=true + shift +fi + +if [ $# != 3 ]; then + echo "Usage: $0 [options] "; + echo "e.g.: $0 data/train exp/make_mfcc/train mfcc" + echo "Options:" + echo " --fake gives you fake cmvn stats that do no normalization." + echo " --two-channel is for two-channel telephone data, there must be no segments " + echo " file and reco2file_and_channel must be present. It will take" + echo " only frames that are louder than the other channel." + echo " --fake-dims Generate stats that won't cause normalization for these" + echo " dimensions (e.g. 13:14:15)" + exit 1; +fi + +if [ -f path.sh ]; then . ./path.sh; fi + +data=$1 +logdir=$2 +cmvndir=$3 + +# make $cmvndir an absolute pathname. +cmvndir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $cmvndir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $cmvndir || exit 1; +mkdir -p $logdir || exit 1; + + +required="$data/feats.scp $data/spk2utt" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_cmvn.sh: no such file $f" + exit 1; + fi +done + +if $fake; then + dim=`feat-to-dim scp:$data/feats.scp -` + ! cat $data/spk2utt | awk -v dim=$dim '{print $1, "["; for (n=0; n < dim; n++) { printf("0 "); } print "1"; + for (n=0; n < dim; n++) { printf("1 "); } print "0 ]";}' | \ + copy-matrix ark:- ark,scp:$cmvndir/cmvn_$name.ark,$cmvndir/cmvn_$name.scp && \ + echo "Error creating fake CMVN stats" && exit 1; +elif $two_channel; then + ! compute-cmvn-stats-two-channel $data/reco2file_and_channel scp:$data/feats.scp \ + ark,scp:$cmvndir/cmvn_$name.ark,$cmvndir/cmvn_$name.scp \ + 2> $logdir/cmvn_$name.log && echo "Error computing CMVN stats (using two-channel method)" && exit 1; +elif [ ! -z "$fake_dims" ]; then + ! compute-cmvn-stats --spk2utt=ark:$data/spk2utt scp:$data/feats.scp ark:- | \ + modify-cmvn-stats "$fake_dims" ark:- ark,scp:$cmvndir/cmvn_$name.ark,$cmvndir/cmvn_$name.scp && \ + echo "Error computing (partially fake) CMVN stats" && exit 1; +else + ! compute-cmvn-stats --spk2utt=ark:$data/spk2utt scp:$data/feats.scp ark,scp:$cmvndir/cmvn_$name.ark,$cmvndir/cmvn_$name.scp \ + 2> $logdir/cmvn_$name.log && echo "Error computing CMVN stats" && exit 1; +fi + +cp $cmvndir/cmvn_$name.scp $data/cmvn.scp || exit 1; + +nc=`cat $data/cmvn.scp | wc -l` +nu=`cat $data/spk2utt | wc -l` +if [ $nc -ne $nu ]; then + echo "$0: warning: it seems not all of the speakers got cmvn stats ($nc != $nu);" + [ $nc -eq 0 ] && exit 1; +fi + +echo "Succeeded creating CMVN stats for $name" diff --git a/tools/ASR_2ch_track/steps/conf/append_eval_to_ctm.py b/tools/ASR_2ch_track/steps/conf/append_eval_to_ctm.py new file mode 100644 index 0000000000000000000000000000000000000000..3a35f5a9281a9f327a4129c4af77e55fbb8fc40a --- /dev/null +++ b/tools/ASR_2ch_track/steps/conf/append_eval_to_ctm.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +# Copyright 2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 + +import sys,operator + +# Append Levenshtein alignment of 'hypothesis' and 'reference' into 'CTM': +# (i.e. 
the output of 'align-text' post-processed by 'wer_per_utt_details.pl') + +# The tags in the appended column are: +# 'C' = correct +# 'S' = substitution +# 'I' = insertion +# 'U' = unknown (not part of scored segment) + +if len(sys.argv) != 4: + print 'Usage: %s eval-in ctm-in ctm-eval-out' % __file__ + sys.exit(1) +dummy, eval_in, ctm_in, ctm_eval_out = sys.argv + +if ctm_eval_out == '-': ctm_eval_out = '/dev/stdout' + +# Read the evalutation, +eval_vec = dict() +with open(eval_in, 'r') as f: + while True: + # Reading 4 lines encoding one utterance, + ref = f.readline() + hyp = f.readline() + op = f.readline() + csid = f.readline() + if not ref: break + # Parse the input, + utt,tag,hyp_vec = hyp.split(' ',2) + assert(tag == 'hyp') + utt,tag,op_vec = op.split(' ',2) + assert(tag == 'op') + hyp_vec = hyp_vec.split() + op_vec = op_vec.split() + # Fill create eval vector with symbols 'C', 'S', 'I', + assert(utt not in eval_vec) + eval_vec[utt] = [] + for op,hyp in zip(op_vec, hyp_vec): + if hyp != '': eval_vec[utt].append(op) + +# Load the 'ctm' into dictionary, +ctm = dict() +with open(ctm_in) as f: + for l in f: + utt, ch, beg, dur, wrd, conf = l.split() + if not utt in ctm: ctm[utt] = [] + ctm[utt].append((utt, ch, float(beg), float(dur), wrd, float(conf))) + +# Build the 'ctm' with 'eval' column added, +ctm_eval = [] +for utt,ctm_part in ctm.iteritems(): + ctm_part.sort(key = operator.itemgetter(2)) # Sort by 'beg' time, + # extending the 'tuple' by '+': + merged = [ tup + (evl,) for tup,evl in zip(ctm_part,eval_vec[utt]) ] + ctm_eval.extend(merged) + +# Sort again, +ctm_eval.sort(key = operator.itemgetter(0,1,2)) + +# Store, +with open(ctm_eval_out,'w') as f: + for tup in ctm_eval: + f.write('%s %s %f %f %s %f %s\n' % tup) + diff --git a/tools/ASR_2ch_track/steps/conf/append_prf_to_ctm.py b/tools/ASR_2ch_track/steps/conf/append_prf_to_ctm.py new file mode 100644 index 0000000000000000000000000000000000000000..547b6176c9ff3bb2aa83b1a775aa2230d048ddbf --- /dev/null +++ b/tools/ASR_2ch_track/steps/conf/append_prf_to_ctm.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python + +# Copyright 2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 + +import sys + +# Append Levenshtein alignment of 'hypothesis' and 'reference' into 'CTM': +# (parsed from the 'prf' output of 'sclite') + +# The tags in appended column are: +# 'C' = correct +# 'S' = substitution +# 'I' = insertion +# 'U' = unknown (not part of scored segment) + +# Parse options, +if len(sys.argv) != 4: + print "Usage: %s prf ctm_in ctm_out" % __file__ + sys.exit(1) +prf_file, ctm_file, ctm_out_file = sys.argv[1:] + +if ctm_out_file == '-': ctm_out_file = '/dev/stdout' + +# Load the prf file, +prf = [] +with open(prf_file) as f: + for l in f: + # Store the data, + if l[:5] == 'File:': + file_id = l.split()[1] + if l[:8] == 'Channel:': + chan = l.split()[1] + if l[:5] == 'H_T1:': + h_t1 = l + if l[:5] == 'Eval:': + evl = l + prf.append((file_id,chan,h_t1,evl)) + +# Parse the prf records into dictionary, +prf_dict = dict() +for (f,c,t,e) in prf: + t_pos = 0 # position in the 't' string, + while t_pos < len(t): + t1 = t[t_pos:].split(' ',1)[0] # get 1st token at 't_pos' + try: + # get word evaluation letter 'C,S,I', + evl = e[t_pos] if e[t_pos] != ' ' else 'C' + # add to dictionary, + key='%s,%s' % (f,c) # file,channel + if key not in prf_dict: prf_dict[key] = dict() + prf_dict[key][float(t1)] = evl + except ValueError: + pass + t_pos += len(t1)+1 # advance position for parsing, + +# Load the ctm file (with confidences), +with 
open(ctm_file) as f: + ctm = [ l.split() for l in f ] + +# Append the sclite alignment tags to ctm, +ctm_out = [] +for f, chan, beg, dur, wrd, conf in ctm: + # U = unknown, C = correct, S = substitution, I = insertion, + sclite_tag = 'U' + try: + sclite_tag = prf_dict[('%s,%s'%(f,chan)).lower()][float(beg)] + except KeyError: + pass + ctm_out.append([f,chan,beg,dur,wrd,conf,sclite_tag]) + +# Save the augmented ctm file, +with open(ctm_out_file, 'w') as f: + f.writelines([' '.join(ctm_record)+'\n' for ctm_record in ctm_out]) + diff --git a/tools/ASR_2ch_track/steps/conf/apply_calibration.sh b/tools/ASR_2ch_track/steps/conf/apply_calibration.sh new file mode 100644 index 0000000000000000000000000000000000000000..c1a22e274b80b921f619b759b81d777f9e40f21f --- /dev/null +++ b/tools/ASR_2ch_track/steps/conf/apply_calibration.sh @@ -0,0 +1,91 @@ +#!/bin/bash +# Copyright 2015, Brno University of Technology (Author: Karel Vesely). Apache 2.0. + +# Trains logistic regression, which calibrates the per-word confidences, +# which are extracted by the Minimum Bayes Risk decoding. + +# begin configuration section. +cmd= +stage=0 +# end configuration section. + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 5 ]; then + echo "Usage: $0 [opts] " + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + exit 1; +fi + +set -euo pipefail + +data=$1 +lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied. +latdir=$3 +caldir=$4 +dir=$5 + +model=$latdir/../final.mdl # assume model one level up from decoding dir. +calibration=$caldir/calibration.mdl +word_feats=$caldir/word_feats +word_categories=$caldir/word_categories + +for f in $lang/words.txt $word_feats $word_categories $latdir/lat.1.gz $calibration $model; do + [ ! 
-f $f ] && echo "$0: Missing file $f" && exit 1 +done +[ -z "$cmd" ] && echo "$0: Missing --cmd '...'" && exit 1 + +[ -d $dir/log ] || mkdir -p $dir/log +nj=$(cat $latdir/num_jobs) +lmwt=$(cat $caldir/lmwt) +decode_mbr=$(cat $caldir/decode_mbr) + +# Store the setup, +echo $lmwt >$dir/lmwt +echo $decode_mbr >$dir/decode_mbr +cp $calibration $dir/calibration.mdl +cp $word_feats $dir/word_feats +cp $word_categories $dir/word_categories + +# Create the ctm with raw confidences, +# - we keep the timing relative to the utterance, +if [ $stage -le 0 ]; then + $cmd JOB=1:$nj $dir/log/get_ctm.JOB.log \ + lattice-scale --inv-acoustic-scale=$lmwt "ark:gunzip -c $latdir/lat.JOB.gz|" ark:- \| \ + lattice-limit-depth ark:- ark:- \| \ + lattice-push --push-strings=false ark:- ark:- \| \ + lattice-align-words-lexicon --max-expand=10.0 \ + $lang/phones/align_lexicon.int $model ark:- ark:- \| \ + lattice-to-ctm-conf --decode-mbr=$decode_mbr ark:- - \| \ + utils/int2sym.pl -f 5 $lang/words.txt \ + '>' $dir/JOB.ctm + # Merge and clean, + for ((n=1; n<=nj; n++)); do cat $dir/${n}.ctm; done > $dir/ctm + rm $dir/*.ctm + cat $dir/ctm | utils/sym2int.pl -f 5 $lang/words.txt >$dir/ctm_int +fi + +# Compute lattice-depth, +latdepth=$dir/lattice_frame_depth.ark +if [ $stage -le 1 ]; then + [ -e $latdepth ] || steps/conf/lattice_depth_per_frame.sh --cmd "$cmd" $latdir $dir +fi + +# Create the forwarding data for logistic regression, +if [ $stage -le 2 ]; then + steps/conf/prepare_calibration_data.py --conf-feats $dir/forward_feats.ark \ + --lattice-depth $latdepth $dir/ctm_int $word_feats $word_categories +fi + +# Apply calibration model to dev, +if [ $stage -le 3 ]; then + logistic-regression-eval --apply-log=false $calibration \ + ark:$dir/forward_feats.ark ark,t:- | \ + awk '{ key=$1; p_corr=$4; sub(/,.*/,"",key); gsub(/\^/," ",key); print key,p_corr }' | \ + utils/int2sym.pl -f 5 $lang/words.txt \ + >$dir/ctm_calibrated +fi + +exit 0 diff --git a/tools/ASR_2ch_track/steps/conf/convert_ctm_to_tra.py b/tools/ASR_2ch_track/steps/conf/convert_ctm_to_tra.py new file mode 100644 index 0000000000000000000000000000000000000000..276d14b88f86606c14d5de810af6347f69446841 --- /dev/null +++ b/tools/ASR_2ch_track/steps/conf/convert_ctm_to_tra.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# Copyright 2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 + +import sys, operator + +# This scripts loads a 'ctm' file and converts it into the 'tra' format: +# "utt-key word1 word2 word3 ... wordN" +# The 'utt-key' is the 1st column in the CTM. + +# Typically the CTM contains: +# - utterance-relative timimng (i.e. 
prepared without 'utils/convert_ctm.pl')
+# - confidences
+
+if len(sys.argv) != 3:
+    print 'Usage: %s ctm-in tra-out' % __file__
+    sys.exit(1)
+dummy, ctm_in, tra_out = sys.argv
+
+if ctm_in == '-': ctm_in = '/dev/stdin'
+if tra_out == '-': tra_out = '/dev/stdout'
+
+# Load the 'ctm' into dictionary,
+tra = dict()
+with open(ctm_in) as f:
+    for l in f:
+        utt, ch, beg, dur, wrd, conf = l.split()
+        if not utt in tra: tra[utt] = []
+        tra[utt].append((float(beg),wrd))
+
+# Store the result in 'tra' format,
+with open(tra_out,'w') as f:
+    for utt,tuples in tra.iteritems():
+        tuples.sort(key = operator.itemgetter(0)) # Sort by 'beg' time,
+        f.write('%s %s\n' % (utt,' '.join([t[1] for t in tuples])))
+
diff --git a/tools/ASR_2ch_track/steps/conf/lattice_depth_per_frame.sh b/tools/ASR_2ch_track/steps/conf/lattice_depth_per_frame.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7167bd970bb8731bd1131f91228da36de6d59d16
--- /dev/null
+++ b/tools/ASR_2ch_track/steps/conf/lattice_depth_per_frame.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Copyright 2015 Brno University of Technology (Author: Karel Vesely)
+# Licensed under the Apache License, Version 2.0 (the "License")
+
+# Extract lattice-depth for each frame.
+
+# Begin configuration
+cmd=run.pl
+# End configuration
+
+echo "$0 $@" # Print the command line for logging
+
+[ -f path.sh ] && . ./path.sh # source the path.
+. parse_options.sh || exit 1;
+
+if [ $# != 2 ]; then
+  echo "usage: $0 [opts] <decode-dir> <output-dir>"
+  echo "main options (for others, see top of script file)"
+  echo "  --config <config-file>  # config containing options"
+  echo "  --cmd"
+  exit 1;
+fi
+
+set -euo pipefail
+
+latdir=$1
+dir=$2
+
+[ ! -f $latdir/lat.1.gz ] && echo "Missing $latdir/lat.1.gz" && exit 1
+nj=$(cat $latdir/num_jobs)
+
+# Get the per-frame lattice depths,
+$cmd JOB=1:$nj $dir/log/lattice_depth_per_frame.JOB.log \
+  lattice-depth-per-frame "ark:gunzip -c $latdir/lat.JOB.gz |" ark,t:$dir/lattice_frame_depth.JOB.ark
+# Merge,
+for ((n=1; n<=nj; n++)); do cat $dir/lattice_frame_depth.${n}.ark; done >$dir/lattice_frame_depth.ark
+rm $dir/lattice_frame_depth.*.ark
+
+# Done!
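+# Each line of $dir/lattice_frame_depth.ark holds an utterance-id followed by one integer
+# depth per frame, e.g. (hypothetical) "utt_0001 1 1 2 3 3 2 1 ..."; this is the format
+# consumed by steps/conf/prepare_calibration_data.py.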
diff --git a/tools/ASR_2ch_track/steps/conf/parse_arpa_unigrams.py b/tools/ASR_2ch_track/steps/conf/parse_arpa_unigrams.py
new file mode 100644
index 0000000000000000000000000000000000000000..1be32d4c4d736ea90cdb57542ace82661fb992db
--- /dev/null
+++ b/tools/ASR_2ch_track/steps/conf/parse_arpa_unigrams.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Brno University of Technology (author: Karel Vesely)
+# Apache 2.0
+
+import sys, gzip, re
+
+# Parse options,
+if len(sys.argv) != 4:
+    print "Usage: %s <words.txt> <arpa-gz> <unigrams-out>" % __file__
+    sys.exit(0)
+words_txt, arpa_gz, unigrams_out = sys.argv[1:]
+
+if arpa_gz == '-': arpa_gz = '/dev/stdin'
+if unigrams_out == '-': unigrams_out = '/dev/stdout'
+
+# Load the words.txt,
+words = [ l.split() for l in open(words_txt) ]
+
+# Load the log10 unigram probabilities from the ARPA file,
+wrd_log10 = dict()
+with gzip.open(arpa_gz,'r') as f:
+    read = False
+    for l in f:
+        if l.strip() == '\\1-grams:': read = True
+        if l.strip() == '\\2-grams:': break
+        if read and len(l.split())>=2:
+            log10_p_unigram, wrd = re.split('[\t ]+',l.strip(),2)[:2]
+            wrd_log10[wrd] = float(log10_p_unigram)
+
+# Create list, 'wrd id log_p_unigram',
+words_unigram = [[wrd, id, (wrd_log10[wrd] if wrd in wrd_log10 else -99)] for wrd,id in words ]
+
+print >>sys.stderr, words_unigram[0]
+# Store,
+with open(unigrams_out,'w') as f:
+    f.writelines(['%s %s %g\n' % (w,i,p) for (w,i,p) in words_unigram])
+
diff --git a/tools/ASR_2ch_track/steps/conf/prepare_calibration_data.py b/tools/ASR_2ch_track/steps/conf/prepare_calibration_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc8f92a2f7ff0878871251989257046fe7df2104
--- /dev/null
+++ b/tools/ASR_2ch_track/steps/conf/prepare_calibration_data.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Brno University of Technology (author: Karel Vesely)
+# Apache 2.0
+
+import sys, math
+
+from optparse import OptionParser
+desc = """
+Prepare input features and training targets for logistic regression,
+which calibrates the Minimum Bayes Risk posterior confidences.
+
+The logistic-regression input features are:
+- posteriors from 'ctm' transformed by logit,
+- logarithm of word-length in letters,
+- base-10 logarithm of the unigram probability of a word from the language model,
+- logarithm of average lattice-depth at the position of the word (optional),
+
+The logistic-regression targets are:
+- 1 for correct word,
+- 0 for incorrect word (substitution, insertion),
+
+The input 'ctm' is augmented by per-word tags (or 'U' is added if no tags),
+'C' = correct
+'S' = substitution
+'I' = insertion
+'U' = unknown (not part of scored segment)
+
+The script can be used either to prepare the training data,
+or to prepare input features for forwarding through a trained model.
+"""
+usage = "%prog [opts] ctm word-filter word-length unigrams depth-per-frame-ascii.ark word-categories"
+parser = OptionParser(usage=usage, description=desc)
+parser.add_option("--conf-targets", help="Targets file for logistic regression (no targets generated if '') [default %default]", default='')
+parser.add_option("--conf-feats", help="Feature file for logistic regression. [default %default]", default='')
+parser.add_option("--lattice-depth", help="Per-frame lattice depths, ascii-ark (optional).
[default %default]", default='') +(o, args) = parser.parse_args() + +if len(args) != 3: + parser.print_help() + sys.exit(1) +ctm_file, word_feats_file, word_categories_file = args + +assert(o.conf_feats != '') + +# Load the ctm (optionally add eval colmn with 'U'): +ctm = [ l.split() for l in open(ctm_file) ] +if len(ctm[0]) == 6: [ l.append('U') for l in ctm ] +assert(len(ctm[0]) == 7) + +# Load the word-features, the format: "wrd wrd_id filter length other_feats" +# (typically 'other_feats' are unigram log-probabilities), +word_feats = [ l.split(None,4) for l in open(word_feats_file) ] + +# Prepare filtering dict, +word_filter = { wrd_id:bool(int(filter)) for (wrd,wrd_id,filter,length,other_feats) in word_feats } +# Prepare the lenght dict, +word_length = { wrd_id:float(length) for (wrd,wrd_id,filter,length,other_feats) in word_feats } +# Prepare other_feats dict, +other_feats = { wrd_id:other_feats.strip() for (wrd,wrd_id,filter,length,other_feats) in word_feats } + +# Build the targets, +if o.conf_targets != '': + with open(o.conf_targets,'w') as f: + for (utt, chan, beg, dur, wrd_id, conf, score_tag) in ctm: + # Skip the words we don't know if being correct, + if score_tag == 'U': continue + # Some words are excluded from training (partial words, hesitations, etc.), + # (Value: 1 == keep word, 0 == exclude word from the targets), + if not word_filter[wrd_id]: continue + # Build the key, + key = "%s^%s^%s^%s^%s,%s,%s" % (utt, chan, beg, dur, wrd_id, conf, score_tag) + # Build the target, + tgt = 1 if score_tag == 'C' else 0 # Correct = 1, else 0, + # Write, + f.write('%s %d\n' % (key,tgt)) + +# Load the per-frame lattice-depth, +# - we assume, the 1st column in 'ctm' is the 'utterance-key' in depth file, +# - if the 'ctm' and 'ark' keys don't match, we leave this feature out, +if o.lattice_depth: + depths = dict() + for l in open(o.lattice_depth): + utt,d = l.split(' ',1) + depths[utt] = map(int,d.split()) + +# Load the 'word_categories' mapping for categorical input features derived from 'lang/words.txt', +wrd_to_cat = [ l.split() for l in open(word_categories_file) ] +wrd_to_cat = { wrd_id:int(category) for wrd,wrd_id,category in wrd_to_cat } +wrd_cat_num = max(wrd_to_cat.values()) + 1 + +# Build the input features, +with open(o.conf_feats,'w') as f: + for (utt, chan, beg, dur, wrd_id, conf, score_tag) in ctm: + # Build the key, same as previously, + key = "%s^%s^%s^%s^%s,%s,%s" % (utt, chan, beg, dur, wrd_id, conf, score_tag) + + # Build input features, + # - logit of MBR posterior, + damper = 0.001 # avoid -inf,+inf from log, + logit = math.log(float(conf)+damper) - math.log(1.0 - float(conf)+damper) + # - log of word-length, + log_word_length = math.log(word_length[wrd_id]) # i.e. 
number of phones in a word, + # - categorical distribution of words (with frequency higher than min-count), + wrd_1_of_k = [0]*wrd_cat_num; + wrd_1_of_k[wrd_to_cat[wrd_id]] = 1; + + # Compose the input feature vector, + feats = [ logit, log_word_length, other_feats[wrd_id] ] + wrd_1_of_k + + # Optionally add average-depth of lattice at the word position, + if o.lattice_depth != '': + depth_slice = depths[utt][int(round(100.0*float(beg))):int(round(100.0*(float(beg)+float(dur))))] + log_avg_depth = math.log(float(sum(depth_slice))/len(depth_slice)) + feats += [ log_avg_depth ] + + # Store the input features, + f.write(key + ' [ ' + ' '.join(map(str,feats)) + ' ]\n') + diff --git a/tools/ASR_2ch_track/steps/conf/prepare_word_categories.py b/tools/ASR_2ch_track/steps/conf/prepare_word_categories.py new file mode 100644 index 0000000000000000000000000000000000000000..3b758001c5a9640778ea0a3ee850fc5a004791ed --- /dev/null +++ b/tools/ASR_2ch_track/steps/conf/prepare_word_categories.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +# Copyright 2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 + +import sys + +from optparse import OptionParser +desc = """ +Prepare mapping of words into categories. Each word with minimal frequency +has its own category, the rest is merged into single class. +""" +usage = "%prog [opts] words.txt ctm category_mapping" +parser = OptionParser(usage=usage, description=desc) +parser.add_option("--min-count", help="Minimum word-count to have a single word category. [default %default]", type='int', default=20) +(o, args) = parser.parse_args() + +if len(args) != 3: + parser.print_help() + sys.exit(1) +words_file, text_file, category_mapping_file = args + +if text_file == '-': text_file = '/dev/stdin' +if category_mapping_file == '-': category_mapping_file = '/dev/stdout' + +# Read the words from the 'tra' file, +with open(text_file) as f: + text_words = [ l.split()[1:] for l in f ] + +# Flatten the array of arrays of words, +import itertools +text_words = list(itertools.chain.from_iterable(text_words)) + +# Count the words (regardless if correct or incorrect), +word_counts = dict() +for w in text_words: + if w not in word_counts: word_counts[w] = 0 + word_counts[w] += 1 + +# Read the words.txt, +with open(words_file) as f: + word_id = [ l.split() for l in f ] + +# Append the categories, +n=1 +word_id_cat=[] +for word, idx in word_id: + cat = 0 + if word in word_counts: + if word_counts[word] > o.min_count: + cat = n; n += 1 + word_id_cat.append([word, idx, str(cat)]) + +# Store the mapping, +with open(category_mapping_file,'w') as f: + f.writelines([' '.join(record)+'\n' for record in word_id_cat]) diff --git a/tools/ASR_2ch_track/steps/conf/train_calibration.sh b/tools/ASR_2ch_track/steps/conf/train_calibration.sh new file mode 100644 index 0000000000000000000000000000000000000000..c2aca05056ef94c311990bb90b0cb11cd6e3e6f4 --- /dev/null +++ b/tools/ASR_2ch_track/steps/conf/train_calibration.sh @@ -0,0 +1,125 @@ +#!/bin/bash +# Copyright 2015, Brno University of Technology (Author: Karel Vesely). Apache 2.0. + +# Trains logistic regression, which calibrates the per-word confidences in 'CTM'. +# The 'raw' confidences are obtained by Minimum Bayes Risk decoding. 
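+# (Calibration maps each raw confidence onto an estimated probability that the word is
+# correct; the logistic regression is trained on the input features and 0/1 targets
+# described below.)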
+ +# The input features of logistic regression are: +# - logit of Minumum Bayer Risk posterior +# - log of word-length in characters +# - log of average-depth depth of a lattice at words' position +# - log of frames per character ratio +# (- categorical distribution of 'lang/words.txt', DISABLED) + +# begin configuration section. +cmd= +lmwt=12 +decode_mbr=true +word_min_count=10 # Minimum word-count for single-word category, +normalizer=0.0025 # L2 regularization constant, +category_text= # Alternative corpus for counting words to get word-categories (by default using 'ctm'), +stage=0 +# end configuration section. + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 5 ]; then + echo "Usage: $0 [opts] " + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + echo " --lmwt # scaling for confidence extraction" + echo " --decode-mbr # use Minimum Bayes Risk decoding" + echo " --grep-filter # remove words from calibration targets" + exit 1; +fi + +set -euo pipefail + +data=$1 +lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied. +word_feats=$3 +latdir=$4 +dir=$5 + +model=$latdir/../final.mdl # assume model one level up from decoding dir. + +for f in $data/text $lang/words.txt $word_feats $latdir/lat.1.gz; do + [ ! -f $f ] && echo "$0: Missing file $f" && exit 1 +done +[ -z "$cmd" ] && echo "$0: Missing --cmd '...'" && exit 1 + +[ -d $dir/log ] || mkdir -p $dir/log +nj=$(cat $latdir/num_jobs) + +# Store the setup, +echo $lmwt >$dir/lmwt +echo $decode_mbr >$dir/decode_mbr +cp $word_feats $dir/word_feats + +# Create the ctm with raw confidences, +# - we keep the timing relative to the utterance, +if [ $stage -le 0 ]; then + $cmd JOB=1:$nj $dir/log/get_ctm.JOB.log \ + lattice-scale --inv-acoustic-scale=$lmwt "ark:gunzip -c $latdir/lat.JOB.gz|" ark:- \| \ + lattice-limit-depth ark:- ark:- \| \ + lattice-push --push-strings=false ark:- ark:- \| \ + lattice-align-words-lexicon --max-expand=10.0 \ + $lang/phones/align_lexicon.int $model ark:- ark:- \| \ + lattice-to-ctm-conf --decode-mbr=$decode_mbr ark:- - \| \ + utils/int2sym.pl -f 5 $lang/words.txt \ + '>' $dir/JOB.ctm + # Merge and clean, + for ((n=1; n<=nj; n++)); do cat $dir/${n}.ctm; done > $dir/ctm + rm $dir/*.ctm +fi + +# Get evaluation of the 'ctm' using the 'text' reference, +if [ $stage -le 1 ]; then + steps/conf/convert_ctm_to_tra.py $dir/ctm - | \ + align-text --special-symbol="" ark:$data/text ark:- ark,t:- | \ + utils/scoring/wer_per_utt_details.pl --special-symbol "" \ + >$dir/align_text + # Append alignment to ctm, + steps/conf/append_eval_to_ctm.py $dir/align_text $dir/ctm $dir/ctm_aligned + # Convert words to 'ids', + cat $dir/ctm_aligned | utils/sym2int.pl -f 5 $lang/words.txt >$dir/ctm_aligned_int +fi + +# Prepare word-categories (based on wotd frequencies in 'ctm'), +if [ -z "$category_text" ]; then + steps/conf/convert_ctm_to_tra.py $dir/ctm - | \ + steps/conf/prepare_word_categories.py --min-count $word_min_count $lang/words.txt - $dir/word_categories +else + steps/conf/prepare_word_categories.py --min-count $word_min_count $lang/words.txt "$category_text" $dir/word_categories +fi + +# Compute lattice-depth, +latdepth=$dir/lattice_frame_depth.ark +if [ $stage -le 2 ]; then + [ -e $latdepth ] || steps/conf/lattice_depth_per_frame.sh --cmd "$cmd" $latdir $dir +fi + +# Create the training data for logistic regression, +if [ $stage -le 3 ]; then + steps/conf/prepare_calibration_data.py \ + --conf-targets 
$dir/train_targets.ark --conf-feats $dir/train_feats.ark \ + --lattice-depth $latdepth $dir/ctm_aligned_int $word_feats $dir/word_categories +fi + +# Train the logistic regression, +if [ $stage -le 4 ]; then + logistic-regression-train --binary=false --normalizer=$normalizer ark:$dir/train_feats.ark \ + ark:$dir/train_targets.ark $dir/calibration.mdl 2>$dir/log/logistic-regression-train.log +fi + +# Apply calibration model to dev, +if [ $stage -le 5 ]; then + logistic-regression-eval --apply-log=false $dir/calibration.mdl \ + ark:$dir/train_feats.ark ark,t:- | \ + awk '{ key=$1; p_corr=$4; sub(/,.*/,"",key); gsub(/\^/," ",key); print key,p_corr }' | \ + utils/int2sym.pl -f 5 $lang/words.txt \ + >$dir/ctm_calibrated_int +fi + +exit 0 diff --git a/tools/ASR_2ch_track/steps/decode.sh b/tools/ASR_2ch_track/steps/decode.sh new file mode 100644 index 0000000000000000000000000000000000000000..f2bc1d367fddf0362ff10c924b531baa0133dfe2 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode.sh @@ -0,0 +1,136 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Begin configuration section. +transform_dir= # this option won't normally be used, but it can be used if you want to + # supply existing fMLLR transforms when decoding. +iter= +model= # You can specify the model to use (e.g. if you want to use the .alimdl) +stage=0 +nj=4 +cmd=run.pl +max_active=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 # note: only really affects pruning (scoring is on lattices). +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +scoring_opts= +# note: there are no more min-lmwt and max-lmwt options, instead use +# e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20" +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/decode.sh [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the model is." + echo "e.g.: steps/decode.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr" + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --model # which model to use (e.g. to" + echo " # specify the final.alimdl)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --transform-dir # dir to find fMLLR transforms " + echo " --acwt # acoustic scale used for lattice generation " + echo " --scoring-opts # options to local/score.sh" + echo " --num-threads # number of threads to use, default 1." + echo " --parallel-opts # ignored now, present for historical reasons." + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$model" ]; then # if --model was not specified on the command line... 
+ if [ -z $iter ]; then model=$srcdir/final.mdl; + else model=$srcdir/$iter.mdl; fi +fi + +if [ $(basename $model) != final.alimdl ] ; then + # Do not use the $srcpath -- look at the path where the model is + if [ -f $(dirname $model)/final.alimdl ] && [ -z "$transform_dir" ]; then + echo -e '\n\n' + echo $0 'WARNING: Running speaker independent system decoding using a SAT model!' + echo $0 'WARNING: This is OK if you know what you are doing...' + echo -e '\n\n' + fi +fi + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "decode.sh: no such file $f" && exit 1; +done + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode.sh: feature type is $feat_type"; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ ! -s $transform_dir/num_jobs ] && \ + echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1; + nj_orig=$(cat $transform_dir/num_jobs) + if [ $nj -ne $nj_orig ]; then + # Copy the transforms into an archive with an index. + echo "$0: num-jobs for transforms mismatches, so copying them." + for n in $(seq $nj_orig); do cat $transform_dir/trans.$n; done | \ + copy-feats ark:- ark,scp:$dir/trans.ark,$dir/trans.scp || exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/trans.scp ark:- ark:- |" + else + # number of jobs matches with alignment dir. + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" + fi +fi + +if [ $stage -le 0 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $model"; exit 1; } + fi + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir || + { echo "$0: Scoring failed. 
(ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_basis_fmllr.sh b/tools/ASR_2ch_track/steps/decode_basis_fmllr.sh new file mode 100644 index 0000000000000000000000000000000000000000..afb914e7f0d95d1dc2febed043721e0dc14ff255 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_basis_fmllr.sh @@ -0,0 +1,234 @@ +#!/bin/bash + +# Copyright 2012 Carnegie Mellon University (Author: Yajie Miao) +# Johns Hopkins University (Author: Daniel Povey) +# 2014 David Snyder + +# Decoding script that does basis fMLLR. This can be on top of delta+delta-delta, +# or LDA+MLLT features. + +# There are 3 models involved potentially in this script, +# and for a standard, speaker-independent system they will all be the same. +# The "alignment model" is for the 1st-pass decoding and to get the +# Gaussian-level alignments for the "adaptation model" the first time we +# do fMLLR. The "adaptation model" is used to estimate fMLLR transforms +# and to generate state-level lattices. The lattices are then rescored +# with the "final model". +# +# The following table explains where we get these 3 models from. +# Note: $srcdir is one level up from the decoding directory. +# +# Model Default source: +# +# "alignment model" $srcdir/final.alimdl --alignment-model +# (or $srcdir/final.mdl if alimdl absent) +# "adaptation model" $srcdir/final.mdl --adapt-model +# "final model" $srcdir/final.mdl --final-model + + +# Begin configuration section +first_beam=10.0 # Beam used in initial, speaker-indep. pass +first_max_active=2000 # max-active used in initial pass. +alignment_model= +adapt_model= +final_model= +stage=0 +acwt=0.083333 # Acoustic weight used in getting fMLLR transforms, and also in + # lattice generation. + +# Parameters in alignment of training data +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +align_beam=10 +retry_beam=40 + +max_active=7000 +beam=13.0 +lattice_beam=6.0 +nj=4 +silence_weight=0.01 +cmd=run.pl +si_dir= +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored, present for historical reasons. +skip_scoring=false +scoring_opts= +# End configuration section + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/decode_basis_fmllr.sh [options] " + echo " e.g.: steps/decode_basis_fmllr.sh exp/tri2b/graph_tgpr data/train_si84 data/test_dev93 exp/tri2b/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --adapt-model # Model to compute transforms with" + echo " --alignment-model # Model to get Gaussian-level alignments for" + echo " # 1st pass of transform computation." + echo " --final-model # Model to finally decode with" + echo " --si-dir # use this to skip 1st pass of decoding" + echo " # Caution-- must be with same tree" + echo " --acwt # default 0.08333 ... used to get posteriors" + echo " --scoring-opts # options to local/score.sh" + echo " --num-threads # number of threads to use, default 1." + echo " --parallel-opts # ignored, present for historical reasons." + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=`echo $3 | sed 's:/$::g'` # remove any trailing slash. + +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. 
+sdata=$data/split$nj; + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +silphonelist=`cat $graphdir/phones/silence.csl` || exit 1; + +# Some checks. Note: we don't need $srcdir/tree but we expect +# it should exist, given the current structure of the scripts. +for f in $graphdir/HCLG.fst $data/feats.scp $srcdir/tree $srcdir/fmllr.basis; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; +## + +## Do the speaker-independent decoding, if --si-dir option not present. ## +if [ -z "$si_dir" ]; then # we need to do the speaker-independent decoding pass. + si_dir=${dir}.si # Name it as our decoding dir, but with suffix ".si". + if [ $stage -le 0 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $alignment_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $alignment_model"; exit 1; } + fi + + steps/decode.sh --scoring-opts "$scoring_opts" \ + --num-threads $num_threads --skip-scoring $skip_scoring \ + --acwt $acwt --nj $nj --cmd "$cmd" --beam $first_beam \ + --model $alignment_model --max-active \ + $first_max_active $graphdir $data $si_dir || exit 1; + fi +fi +## + +## Some checks, and setting of defaults for variables. +[ "$nj" -ne "`cat $si_dir/num_jobs`" ] && echo "Mismatch in #jobs with si-dir" && exit 1; +[ ! -f "$si_dir/lat.1.gz" ] && echo "No such file $si_dir/lat.1.gz" && exit 1; +[ -z "$adapt_model" ] && adapt_model=$srcdir/final.mdl +[ -z "$final_model" ] && final_model=$srcdir/final.mdl +for f in $adapt_model $final_model; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done +## + +## Set up the unadapted features "$sifeats" for testing set +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +## + +## Now get the first-pass fMLLR transforms. +## We give all the default parameters in gmm-est-basis-fmllr +if [ $stage -le 1 ]; then + echo "$0: getting first-pass fMLLR transforms." 
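+  # (The speaker-independent lattices are converted to posteriors, silence frames are
+  # down-weighted, and gmm-est-basis-fmllr-gpost estimates one transform per speaker
+  # from the basis in $srcdir/fmllr.basis.)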
+ $cmd JOB=1:$nj $dir/log/fmllr_pass1.JOB.log \ + gunzip -c $si_dir/lat.JOB.gz \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $alignment_model ark:- ark:- \| \ + gmm-post-to-gpost $alignment_model "$sifeats" ark:- ark:- \| \ + gmm-est-basis-fmllr-gpost --spk2utt=ark:$sdata/JOB/spk2utt \ + --fmllr-min-count=200 --num-iters=10 --size-scale=0.2 \ + --step-size-iters=3 --write-weights=ark:$dir/pre_wgt.JOB \ + $adapt_model $srcdir/fmllr.basis "$sifeats" ark,s,cs:- \ + ark:$dir/pre_trans.JOB || exit 1; +fi +## + +pass1feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/pre_trans.JOB ark:- ark:- |" + +## Do the main lattice generation pass. Note: we don't determinize the lattices at +## this stage, as we're going to use them in acoustic rescoring with the larger +## model, and it's more correct to store the full state-level lattice for this purpose. +if [ $stage -le 2 ]; then + echo "$0: doing main lattice generation phase" + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $adapt_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $adapt_model"; exit 1; } + fi + $cmd JOB=1:$nj --num-threads $num_threads $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt \ + --determinize-lattice=false --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $adapt_model $graphdir/HCLG.fst "$pass1feats" "ark:|gzip -c > $dir/lat.tmp.JOB.gz" \ + || exit 1; +fi +## + +## Do a second pass of estimating the transform-- this time with the lattices +## generated from the alignment model. Compose the transforms to get +## $dir/trans.1, etc. +if [ $stage -le 3 ]; then + echo "$0: estimating fMLLR transforms a second time." + $cmd JOB=1:$nj $dir/log/fmllr_pass2.JOB.log \ + lattice-determinize-pruned$thread_string --acoustic-scale=$acwt --beam=4.0 \ + "ark:gunzip -c $dir/lat.tmp.JOB.gz|" ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $adapt_model ark:- ark:- \| \ + gmm-est-basis-fmllr --fmllr-min-count=200 \ + --spk2utt=ark:$sdata/JOB/spk2utt --write-weights=ark:$dir/trans_tmp_wgt.JOB \ + $adapt_model $srcdir/fmllr.basis "$pass1feats" ark,s,cs:- ark:$dir/trans_tmp.JOB '&&' \ + compose-transforms --b-is-affine=true ark:$dir/trans_tmp.JOB ark:$dir/pre_trans.JOB \ + ark:$dir/trans.JOB || exit 1; +fi +## + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +# Rescore the state-level lattices with the final adapted features, and the final model +# (which by default is $srcdir/final.mdl, but which may be specified on the command line, +# useful in case of discriminatively trained systems). +# At this point we prune and determinize the lattices and write them out, ready for +# language model rescoring. + +if [ $stage -le 4 ]; then + echo "$0: doing a final pass of acoustic rescoring." + $cmd JOB=1:$nj $dir/log/acoustic_rescore.JOB.log \ + gmm-rescore-lattice $final_model "ark:gunzip -c $dir/lat.tmp.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned$thread_string --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" '&&' rm $dir/lat.tmp.JOB.gz || exit 1; +fi + +if ! $skip_scoring ; then + [ ! 
-x local/score.sh ] && \ + echo "$0: not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } +fi + +rm $dir/{trans_tmp,pre_trans}.* + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_biglm.sh b/tools/ASR_2ch_track/steps/decode_biglm.sh new file mode 100644 index 0000000000000000000000000000000000000000..0663391430dbd80be788d3ababa81e1ae439fc0b --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_biglm.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Begin configuration. +nj=4 +cmd=run.pl +maxactive=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 +skip_scoring=false +# End configuration. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 5 ]; then + echo "Usage: steps/decode_si_biglm.sh [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the model is." + echo "e.g.: steps/decode_si.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr" + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + + +graphdir=$1 +oldlm_fst=$2 +newlm_fst=$3 +data=$4 +dir=$5 + +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $srcdir/final.mdl $graphdir/HCLG.fst $oldlm_fst $newlm_fst; do + [ ! -f $f ] && echo "decode_si.sh: no such file $f" && exit 1; +done + + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode_si.sh: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +[ -f `dirname $oldlm_fst`/words.txt ] && ! cmp `dirname $oldlm_fst`/words.txt $graphdir/words.txt && \ + echo "Warning: old LM words.txt does not match with that in $graphdir .. probably will not work."; +[ -f `dirname $newlm_fst`/words.txt ] && ! cmp `dirname $oldlm_fst`/words.txt $graphdir/words.txt && \ + echo "Warning: new LM words.txt does not match with that in $graphdir .. probably will not work."; + +# fstproject replaces the disambiguation symbol #0, which only appears on the +# input side, with the that appears in the corresponding arcs on the output side. 
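In other words, projecting the LM acceptor onto its output labels turns the input-side disambiguation symbol #0 into the epsilon found on the output side, and fstarcsort makes the result composable on the fly; gmm-latgen-biglm-faster then subtracts the old LM scores (the ones compiled into HCLG) and adds the new LM scores during lattice generation, so HCLG never has to be rebuilt for the larger LM. A hypothetical invocation (directory names are examples only), rescoring a pruned-trigram graph with a full trigram:

  # illustrative only; graph, LM and data paths are placeholders
  steps/decode_biglm.sh --nj 4 --cmd run.pl \
    exp/tri3b/graph_tgpr data/lang_test_tgpr/G.fst data/lang_test_tg/G.fst \
    data/dt05_real exp/tri3b/decode_dt05_real_tg_biglm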
+oldlm_cmd="fstproject --project_output=true $oldlm_fst | fstarcsort --sort_type=ilabel |" +newlm_cmd="fstproject --project_output=true $newlm_fst | fstarcsort --sort_type=ilabel |" + +$cmd JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-biglm-faster --max-active=$maxactive --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $srcdir/final.mdl $graphdir/HCLG.fst "$oldlm_cmd" "$newlm_cmd" "$feats" \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_combine.sh b/tools/ASR_2ch_track/steps/decode_combine.sh new file mode 100644 index 0000000000000000000000000000000000000000..e2926ee0e3afd3082ba57a17958b48885b66080f --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_combine.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# Combine two decoding directories by composing the lattices (we +# apply a weight to each of the original weights, by default 0.5 each). +# Note, this is not the only combination method, or the most normal combination +# method. See also egs/wsj/s5/local/score_combine.sh. + +# Begin configuration section. +weight1=0.5 # Weight on 1st set of lattices. +cmd=run.pl +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 5 ]; then + echo "Usage: steps/decode_combine.sh [options] " + echo " e.g.: steps/decode_combine.sh data/lang data/test exp/dir1/decode exp/dir2/decode exp/combine_1_2/decode" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --cmd # Command to run in parallel with" + echo " --weight1 # Weight on 1st set of lattices (default 0.5)" + exit 1; +fi + +data=$1 +lang_or_graphdir=$2 +srcdir1=$3 +srcdir2=$4 +dir=$5 + +for f in $data/utt2spk $lang_or_graphdir/phones.txt $srcdir1/lat.1.gz $srcdir2/lat.1.gz; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +nj1=`cat $srcdir1/num_jobs` || exit 1; +nj2=`cat $srcdir2/num_jobs` || exit 1; +[ $nj1 -ne $nj2 ] && echo "$0: mismatch in number of jobs $nj1 versus $nj2" && exit 1; +nj=$nj1 + +mkdir -p $dir/log +echo $nj > $dir/num_jobs + +# The lattice-interp command does the score interpolation (with composition), +# and the lattice-copy-backoff replaces the result with the 1st lattice, in +# cases where the composed result was empty. +$cmd JOB=1:$nj $dir/log/interp.JOB.log \ + lattice-interp --alpha=$weight1 "ark:gunzip -c $srcdir1/lat.JOB.gz|" \ + "ark,s,cs:gunzip -c $srcdir2/lat.JOB.gz|" ark:- \| \ + lattice-copy-backoff "ark,s,cs:gunzip -c $srcdir1/lat.JOB.gz|" ark,s,cs:- \ + "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1; + +cp $srcdir1/final.mdl $dir/final.mdl + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $data $lang_or_graphdir $dir || + { echo "$0: Scoring failed. 
(ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_fmllr.sh b/tools/ASR_2ch_track/steps/decode_fmllr.sh new file mode 100644 index 0000000000000000000000000000000000000000..a5ca6c9ef33fbcf6bd7c9ca7dc06df7cf4808b4b --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_fmllr.sh @@ -0,0 +1,225 @@ +#!/bin/bash + +# Copyright 2012-2015 Johns Hopkins University (Author: Daniel Povey) + +# Decoding script that does fMLLR. This can be on top of delta+delta-delta, or +# LDA+MLLT features. + +# There are 3 models involved potentially in this script, +# and for a standard, speaker-independent system they will all be the same. +# The "alignment model" is for the 1st-pass decoding and to get the +# Gaussian-level alignments for the "adaptation model" the first time we +# do fMLLR. The "adaptation model" is used to estimate fMLLR transforms +# and to generate state-level lattices. The lattices are then rescored +# with the "final model". +# +# The following table explains where we get these 3 models from. +# Note: $srcdir is one level up from the decoding directory. +# +# Model Default source: +# +# "alignment model" $srcdir/final.alimdl --alignment-model +# (or $srcdir/final.mdl if alimdl absent) +# "adaptation model" $srcdir/final.mdl --adapt-model +# "final model" $srcdir/final.mdl --final-model + + +# Begin configuration section +first_beam=10.0 # Beam used in initial, speaker-indep. pass +first_max_active=2000 # max-active used in initial pass. +alignment_model= +adapt_model= +final_model= +stage=0 +acwt=0.083333 # Acoustic weight used in getting fMLLR transforms, and also in + # lattice generation. +max_active=7000 +beam=13.0 +lattice_beam=6.0 +nj=4 +silence_weight=0.01 +cmd=run.pl +si_dir= +fmllr_update_type=full +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +skip_scoring=false +scoring_opts= +max_fmllr_jobs=25 # I've seen the fMLLR jobs overload NFS badly if the decoding + # was started with a lot of many jobs, so we limit the number of + # parallel jobs to 25 by default. End configuration section +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: steps/decode_fmllr.sh [options] " + echo " e.g.: steps/decode_fmllr.sh exp/tri2b/graph_tgpr data/test_dev93 exp/tri2b/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --adapt-model # Model to compute transforms with" + echo " --alignment-model # Model to get Gaussian-level alignments for" + echo " # 1st pass of transform computation." + echo " --final-model # Model to finally decode with" + echo " --si-dir # use this to skip 1st pass of decoding" + echo " # Caution-- must be with same tree" + echo " --acwt # default 0.08333 ... used to get posteriors" + echo " --num-threads # number of threads to use, default 1." + echo " --scoring-opts # options to local/score.sh" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=`echo $3 | sed 's:/$::g'` # remove any trailing slash. + +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. 
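The three model options map onto the table in the header comment: by default all of them resolve to files in $srcdir, but they can point at different models as long as the tree matches. A hedged example (paths are illustrative, not from this recipe): first a plain fMLLR decode of a tri3b system, then a second decode that re-uses its speaker-independent pass while adapting and rescoring with a discriminatively trained model built on the same tree:

  # hypothetical commands; exp/tri3b and exp/tri3b_mmi are placeholders
  steps/decode_fmllr.sh --nj 8 --cmd run.pl \
    exp/tri3b/graph_tgpr data/dt05_real exp/tri3b/decode_dt05_real
  steps/decode_fmllr.sh --nj 8 --cmd run.pl \
    --si-dir exp/tri3b/decode_dt05_real.si \
    --adapt-model exp/tri3b/final.mdl --final-model exp/tri3b_mmi/final.mdl \
    exp/tri3b/graph_tgpr data/dt05_real exp/tri3b_mmi/decode_dt05_real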
+sdata=$data/split$nj; + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + + +mkdir -p $dir/log +split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +silphonelist=`cat $graphdir/phones/silence.csl` || exit 1; + +# Some checks. Note: we don't need $srcdir/tree but we expect +# it should exist, given the current structure of the scripts. +for f in $graphdir/HCLG.fst $data/feats.scp $srcdir/tree; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; +## + +## Do the speaker-independent decoding, if --si-dir option not present. ## +if [ -z "$si_dir" ]; then # we need to do the speaker-independent decoding pass. + si_dir=${dir}.si # Name it as our decoding dir, but with suffix ".si". + if [ $stage -le 0 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $alignment_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $alignment_model"; exit 1; } + fi + steps/decode.sh --scoring-opts "$scoring_opts" \ + --num-threads $num_threads --skip-scoring $skip_scoring \ + --acwt $acwt --nj $nj --cmd "$cmd" --beam $first_beam \ + --model $alignment_model --max-active \ + $first_max_active $graphdir $data $si_dir || exit 1; + fi +fi +## + +## Some checks, and setting of defaults for variables. +[ "$nj" -ne "`cat $si_dir/num_jobs`" ] && echo "Mismatch in #jobs with si-dir" && exit 1; +[ ! -f "$si_dir/lat.1.gz" ] && echo "No such file $si_dir/lat.1.gz" && exit 1; +[ -z "$adapt_model" ] && adapt_model=$srcdir/final.mdl +[ -z "$final_model" ] && final_model=$srcdir/final.mdl +for f in $adapt_model $final_model; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done +## + +## Set up the unadapted features "$sifeats" +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +## + +## Now get the first-pass fMLLR transforms. +if [ $stage -le 1 ]; then + echo "$0: getting first-pass fMLLR transforms." 
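In the stage below, the SI-pass lattices are converted to per-frame posteriors, silence is down-weighted, gmm-post-to-gpost turns them into Gaussian-level posteriors under the alignment model, and gmm-est-fmllr-gpost accumulates the per-speaker statistics under the adaptation model; the result is one affine transform (a d x (d+1) matrix) per speaker in pre_trans.JOB. After the stage has run, the transforms can be eyeballed with the generic matrix copier, for example (the decode directory name is a placeholder):

  # print the first per-speaker fMLLR transforms in text form; path is hypothetical
  copy-matrix ark:exp/tri3b/decode_dt05_real/pre_trans.1 ark,t:- | head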
+ $cmd --max-jobs-run $max_fmllr_jobs JOB=1:$nj $dir/log/fmllr_pass1.JOB.log \ + gunzip -c $si_dir/lat.JOB.gz \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $alignment_model ark:- ark:- \| \ + gmm-post-to-gpost $alignment_model "$sifeats" ark:- ark:- \| \ + gmm-est-fmllr-gpost --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $adapt_model "$sifeats" ark,s,cs:- \ + ark:$dir/pre_trans.JOB || exit 1; +fi +## + +pass1feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/pre_trans.JOB ark:- ark:- |" + +## Do the main lattice generation pass. Note: we don't determinize the lattices at +## this stage, as we're going to use them in acoustic rescoring with the larger +## model, and it's more correct to store the full state-level lattice for this purpose. +if [ $stage -le 2 ]; then + echo "$0: doing main lattice generation phase" + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $adapt_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $adapt_model"; exit 1; } + fi + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --determinize-lattice=false \ + --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $adapt_model $graphdir/HCLG.fst "$pass1feats" "ark:|gzip -c > $dir/lat.tmp.JOB.gz" \ + || exit 1; +fi +## + +## Do a second pass of estimating the transform-- this time with the lattices +## generated from the alignment model. Compose the transforms to get +## $dir/trans.1, etc. +if [ $stage -le 3 ]; then + echo "$0: estimating fMLLR transforms a second time." + $cmd --max-jobs-run $max_fmllr_jobs JOB=1:$nj $dir/log/fmllr_pass2.JOB.log \ + lattice-determinize-pruned$thread_string --acoustic-scale=$acwt --beam=4.0 \ + "ark:gunzip -c $dir/lat.tmp.JOB.gz|" ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $adapt_model ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $adapt_model "$pass1feats" \ + ark,s,cs:- ark:$dir/trans_tmp.JOB '&&' \ + compose-transforms --b-is-affine=true ark:$dir/trans_tmp.JOB ark:$dir/pre_trans.JOB \ + ark:$dir/trans.JOB || exit 1; +fi +## + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +# Rescore the state-level lattices with the final adapted features, and the final model +# (which by default is $srcdir/final.mdl, but which may be specified on the command line, +# useful in case of discriminatively trained systems). +# At this point we prune and determinize the lattices and write them out, ready for +# language model rescoring. + +if [ $stage -le 4 ]; then + echo "$0: doing a final pass of acoustic rescoring." + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/acoustic_rescore.JOB.log \ + gmm-rescore-lattice $final_model "ark:gunzip -c $dir/lat.tmp.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned$thread_string --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" '&&' rm $dir/lat.tmp.JOB.gz || exit 1; +fi + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "$0: not scoring because local/score.sh does not exist or not executable." 
&& exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir +fi + +rm $dir/{trans_tmp,pre_trans}.* + +exit 0; + diff --git a/tools/ASR_2ch_track/steps/decode_fmllr_extra.sh b/tools/ASR_2ch_track/steps/decode_fmllr_extra.sh new file mode 100644 index 0000000000000000000000000000000000000000..04d4c2ae343a1652f572ee94e7ba82333e60a1b1 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_fmllr_extra.sh @@ -0,0 +1,260 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) + +# Decoding script that does fMLLR. This can be on top of delta+delta-delta, or +# LDA+MLLT features. +# This script does an extra pass of lattice generation over and above what the original +# script did-- it's for robustness in the case where your original cepstral mean +# normalization was way off. +# We also added a new option --distribute=true (by default) to +# weight-silence-post. This weights the silence frames in a different way, +# weighting all posteriors on the frame rather than just the silence ones, which +# removes a particular kind of bias that the old approach suffered from. + +# There are 3 models involved potentially in this script, +# and for a standard, speaker-independent system they will all be the same. +# The "alignment model" is for the 1st-pass decoding and to get the +# Gaussian-level alignments for the "adaptation model" the first time we +# do fMLLR. The "adaptation model" is used to estimate fMLLR transforms +# and to generate state-level lattices. The lattices are then rescored +# with the "final model". +# +# The following table explains where we get these 3 models from. +# Note: $srcdir is one level up from the decoding directory. +# +# Model Default source: +# +# "alignment model" $srcdir/final.alimdl --alignment-model +# (or $srcdir/final.mdl if alimdl absent) +# "adaptation model" $srcdir/final.mdl --adapt-model +# "final model" $srcdir/final.mdl --final-model + + +# Begin configuration section +first_beam=10.0 # Beam used in initial, speaker-indep. pass +first_max_active=2000 # max-active used in first two passes. +first_lattice_beam=4.0 # lattice pruning beam for si decode and first-pass fMLLR decode. + # the different spelling from lattice_beam is unfortunate; these scripts + # have a history. +alignment_model= +adapt_model= +final_model= +cleanup=true +stage=0 +acwt=0.083333 # Acoustic weight used in getting fMLLR transforms, and also in + # lattice generation. +max_active=7000 +max_mem=50000000 +beam=13.0 +lattice_beam=6.0 +nj=4 +silence_weight=0.01 +distribute=true # option to weight-silence-post. +cmd=run.pl +si_dir= +fmllr_update_type=full +skip_scoring=false +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +scoring_opts= + +# End configuration section + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/decode_fmllr.sh [options] " + echo " e.g.: steps/decode_fmllr.sh exp/tri2b/graph_tgpr data/test_dev93 exp/tri2b/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --adapt-model # Model to compute transforms with" + echo " --alignment-model # Model to get Gaussian-level alignments for" + echo " # 1st pass of transform computation." 
+ echo " --final-model # Model to finally decode with" + echo " --si-dir # use this to skip 1st pass of decoding" + echo " # Caution-- must be with same tree" + echo " --acwt # default 0.08333 ... used to get posteriors" + echo " --num-threads # number of threads to use, default 1." + echo " --scoring-opts # options to local/score.sh" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=`echo $3 | sed 's:/$::g'` # remove any trailing slash. + +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. +sdata=$data/split$nj; + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +silphonelist=`cat $graphdir/phones/silence.csl` || exit 1; + +# Some checks. Note: we don't need $srcdir/tree but we expect +# it should exist, given the current structure of the scripts. +for f in $graphdir/HCLG.fst $data/feats.scp $srcdir/tree; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; +## + +## Do the speaker-independent decoding, if --si-dir option not present. ## +if [ -z "$si_dir" ]; then # we need to do the speaker-independent decoding pass. + si_dir=${dir}.si # Name it as our decoding dir, but with suffix ".si". + if [ $stage -le 0 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $alignment_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $alignment_model" exit 1; } + fi + steps/decode.sh --acwt $acwt --nj $nj --cmd "$cmd" --beam $first_beam --model $alignment_model\ + --max-active $first_max_active --num-threads $num_threads\ + --skip-scoring true $graphdir $data $si_dir || exit 1; + fi +fi +## + +## Some checks, and setting of defaults for variables. +[ "$nj" -ne "`cat $si_dir/num_jobs`" ] && echo "Mismatch in #jobs with si-dir" && exit 1; +[ ! -f "$si_dir/lat.1.gz" ] && echo "No such file $si_dir/lat.1.gz" && exit 1; +[ -z "$adapt_model" ] && adapt_model=$srcdir/final.mdl +[ -z "$final_model" ] && final_model=$srcdir/final.mdl +for f in $adapt_model $final_model; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done +## + +## Set up the unadapted features "$sifeats" +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +## + +## Now get the first-pass fMLLR transforms. +if [ $stage -le 1 ]; then + echo "$0: getting first-pass fMLLR transforms." 
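For orientation: the stages below estimate transforms three times (trans1, then trans2, then the final trans), each time on features already adapted by the previous estimate, and compose-transforms folds each new estimate into the running per-speaker transform. Concretely, if the first pass gives x -> A1*x + b1 and the next pass, estimated on those transformed features, gives y -> A2*y + b2, the composed transform applied to the raw features is x -> (A2*A1)*x + (A2*b1 + b2); this is what compose-transforms computes when its second argument is flagged as affine with --b-is-affine=true.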
+ $cmd JOB=1:$nj $dir/log/fmllr_pass1.JOB.log \ + gunzip -c $si_dir/lat.JOB.gz \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post --distribute=$distribute $silence_weight $silphonelist $alignment_model ark:- ark:- \| \ + gmm-post-to-gpost $alignment_model "$sifeats" ark:- ark:- \| \ + gmm-est-fmllr-gpost --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $adapt_model "$sifeats" ark,s,cs:- \ + ark:$dir/trans1.JOB || exit 1; +fi +## + +pass1feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans1.JOB ark:- ark:- |" + +## Do the first adapted lattice generation pass. +if [ $stage -le 2 ]; then + echo "$0: doing first adapted lattice generation phase" + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $adapt_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $adapt_model" exit 1; } + fi + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode1.JOB.log\ + gmm-latgen-faster$thread_string --max-active=$first_max_active --max-mem=$max_mem --beam=$first_beam --lattice-beam=$first_lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $adapt_model $graphdir/HCLG.fst "$pass1feats" "ark:|gzip -c > $dir/lat1.JOB.gz" \ + || exit 1; +fi + + +## Do a second pass of estimating the transform. Compose the transforms to get +## $dir/trans2.*. +if [ $stage -le 3 ]; then + echo "$0: estimating fMLLR transforms a second time." + $cmd JOB=1:$nj $dir/log/fmllr_pass2.JOB.log \ + lattice-to-post --acoustic-scale=$acwt "ark:gunzip -c $dir/lat1.JOB.gz|" ark:- \| \ + weight-silence-post --distribute=$distribute $silence_weight $silphonelist $adapt_model ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $adapt_model "$pass1feats" \ + ark,s,cs:- ark:$dir/trans1b.JOB '&&' \ + compose-transforms --b-is-affine=true ark:$dir/trans1b.JOB ark:$dir/trans1.JOB \ + ark:$dir/trans2.JOB || exit 1; + if $cleanup; then + rm $dir/trans1b.* $dir/trans1.* $dir/lat1.*.gz + fi +fi +## + +pass2feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans2.JOB ark:- ark:- |" + +# Generate a 3rd set of lattices, with the "adaptation model"; we'll use these +# to adapt a 3rd time, and we'll rescore them. Since we should be close to the final +# fMLLR, we don't bother dumping un-determinized lattices to disk. + +## Do the final lattice generation pass (but we'll rescore these lattices +## after another stage of adaptation.) +if [ $stage -le 4 ]; then + echo "$0: doing final lattice generation phase" + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode2.JOB.log\ + gmm-latgen-faster$thread_string --max-active=$max_active --max-mem=$max_mem --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $adapt_model $graphdir/HCLG.fst "$pass2feats" "ark:|gzip -c > $dir/lat2.JOB.gz" \ + || exit 1; +fi + + +## Do a third pass of estimating the transform. Compose the transforms to get +## $dir/trans.*. +if [ $stage -le 5 ]; then + echo "$0: estimating fMLLR transforms a third time." 
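The per-speaker trans.JOB archives produced by this last estimation stage (together with the num_jobs file) can also be consumed by other decoding scripts in this directory through their --transform-dir option, provided the number of jobs matches. A hypothetical example with the fMMI decoder added later in this patch (all paths are placeholders):

  # reuse the adapted transforms when decoding an fMMI system built on the same tree
  steps/decode_fmmi.sh --nj 8 --cmd run.pl \
    --transform-dir exp/tri3b/decode_dt05_real \
    exp/tri3b_fmmi/graph_tgpr data/dt05_real exp/tri3b_fmmi/decode_dt05_real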
+ $cmd JOB=1:$nj $dir/log/fmllr_pass3.JOB.log \ + lattice-to-post --acoustic-scale=$acwt "ark:gunzip -c $dir/lat2.JOB.gz|" ark:- \| \ + weight-silence-post --distribute=$distribute $silence_weight $silphonelist $adapt_model ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $adapt_model "$pass2feats" \ + ark,s,cs:- ark:$dir/trans2b.JOB '&&' \ + compose-transforms --b-is-affine=true ark:$dir/trans2b.JOB ark:$dir/trans2.JOB \ + ark:$dir/trans.JOB || exit 1; + if $cleanup; then + rm $dir/trans2b.* $dir/trans2.* + fi +fi +## + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +if [ $stage -le 6 ]; then + echo "$0: doing a final pass of acoustic rescoring." + $cmd JOB=1:$nj $dir/log/acoustic_rescore.JOB.log \ + gmm-rescore-lattice $final_model "ark:gunzip -c $dir/lat2.JOB.gz|" "$feats" \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + if $cleanup; then + rm $dir/lat2.*.gz + fi +fi + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "$0: not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_fmmi.sh b/tools/ASR_2ch_track/steps/decode_fmmi.sh new file mode 100644 index 0000000000000000000000000000000000000000..5460d37ff285924e60a1ddbd5b28cf58f30cf440 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_fmmi.sh @@ -0,0 +1,118 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 +# Decoding of fMMI or fMPE models (feature-space discriminative training). +# If transform-dir supplied, expects e.g. fMLLR transforms in that dir. + +# Begin configuration section. +stage=1 +iter=final +nj=4 +cmd=run.pl +maxactive=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 # note: only really affects pruning (scoring is on lattices). +ngselect=2; # Just use the 2 top Gaussians for fMMI/fMPE. Should match train. +transform_dir= +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +scoring_opts= +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/decode_fmmi.sh [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the model is." + echo "e.g.: steps/decode_fmmi.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr" + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "You can also use fMLLR features-- you have to supply --transform-dir option." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --acwt # acoustic scale used for lattice generation " + echo " --transform-dir # where to find fMLLR transforms." + echo " --scoring-opts # options to local/score.sh" + echo " # speaker-adapted decoding" + echo " --num-threads # number of threads to use, default 1." 
+ exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +model=$srcdir/$iter.mdl + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "decode_fmmi.sh: no such file $f" && exit 1; +done + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode_fmmi.sh: feature type is $feat_type"; + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ "`cat $transform_dir/num_jobs`" -ne $nj ] && \ + echo "Mismatch in number of jobs with $transform_dir"; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +fi + +fmpefeats="$feats fmpe-apply-transform $srcdir/$iter.fmpe ark:- 'ark,s,cs:gunzip -c $dir/gselect.JOB.gz|' ark:- |" + +if [ $stage -le 1 ]; then + # Get Gaussian selection info. + $cmd JOB=1:$nj $dir/log/gselect.JOB.log \ + gmm-gselect --n=$ngselect $srcdir/$iter.fmpe "$feats" \ + "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1; +fi + +if [ $stage -le 2 ]; then + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$maxactive --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst "$fmpefeats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +if [ $stage -le 3 ]; then + if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + + local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } + fi +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_fromlats.sh b/tools/ASR_2ch_track/steps/decode_fromlats.sh new file mode 100644 index 0000000000000000000000000000000000000000..ee719c0e1323b1352bfe6e51f95a02916df058a7 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_fromlats.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Decode, limited to the word-sequences that were present in a set +# of lattices on disk. The other lattices do not have to be built +# with the same tree or the same context size-- however, you do +# have to be using the same vocabulary (words.txt)-- if not you'd +# have to map the vocabulary somehow. 
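A typical call (directory names here are illustrative) re-decodes a test set with a second acoustic model while constraining the search to the word sequences present in an earlier system's lattices:

  # hypothetical: tri2b produced the original lattices, tri2a is the model being constrained
  steps/decode_fromlats.sh --cmd run.pl \
    data/dt05_real data/lang_test_tgpr exp/tri2b/decode_dt05_real \
    exp/tri2a/decode_dt05_real_fromlats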
+ +# Note: if the trees are identical, you can use gmm-rescore-lattice. + +# Mechanism: create an unweighted acceptor (on words) for each utterance, +# compose that with G, determinize, and then use compile-train-graphs-fsts +# to compile a graph for each utterance, to decode with. + +# Begin configuration. +cmd=run.pl +maxactive=7000 +beam=20.0 +lattice_beam=7.0 +acwt=0.083333 +batch_size=75 # Limits memory blowup in compile-train-graphs-fsts +scale_opts="--transition-scale=1.0 --self-loop-scale=0.1" +skip_scoring=false +# End configuration. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + + + +if [ $# != 4 ]; then + echo "Usage: steps/decode_si_fromlats.sh [options] " + echo "e.g.: steps/decode_si_fromlats.sh data/test_dev93 data/lang_test_tg exp/tri2b/decode_tgpr_dev93 exp/tri2a/decode_tgpr_dev93_fromlats" + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + + +data=$1 +lang=$2 +olddir=$3 +dir=$4 +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. + +mkdir -p $dir/log + +nj=`cat $olddir/num_jobs` || exit 1; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +sdata=$data/split$nj +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj >$dir/num_jobs + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $srcdir/final.mdl $olddir/lat.1.gz \ + $srcdir/tree $lang/L_disambig.fst $lang/phones.txt; do + [ ! -f $f ] && echo "decode_si_fromlats.sh: no such file $f" && exit 1; +done + + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode_si.sh: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + + +$cmd JOB=1:$nj $dir/log/decode_lats.JOB.log \ + lattice-to-fst "ark:gunzip -c $olddir/lat.JOB.gz|" ark:- \| \ + fsttablecompose "fstproject --project_output=true $lang/G.fst | fstarcsort |" ark:- ark:- \| \ + fstdeterminizestar ark:- ark:- \| \ + compile-train-graphs-fsts --read-disambig-syms=$lang/phones/disambig.int \ + --batch-size=$batch_size $scale_opts $srcdir/tree $srcdir/final.mdl $lang/L_disambig.fst ark:- ark:- \| \ + gmm-latgen-faster --max-active=$maxactive --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --allow-partial=true --word-symbol-table=$lang/words.txt \ + $srcdir/final.mdl ark:- "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $data $lang $dir || + { echo "$0: Scoring failed. 
(ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_fwdbwd.sh b/tools/ASR_2ch_track/steps/decode_fwdbwd.sh new file mode 100644 index 0000000000000000000000000000000000000000..f0e36227251a5ff42b336e6f0a9cb7eaa0cbab3d --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_fwdbwd.sh @@ -0,0 +1,132 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey), BUT (Author: Mirko Hannemann) +# Apache 2.0 + +# Begin configuration section. +transform_dir= +first_pass= +iter= +model= # You can specify the model to use (e.g. if you want to use the .alimdl) +nj=4 +reverse=false +cmd=run.pl +max_active=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 # note: only really affects pruning (scoring is on lattices). +extra_beam=0.0 # small additional beam over varying beam +max_beam=100.0 # maximum of varying beam +scoring_opts= +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/decode_fwdbwd.sh [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the model is." + echo "e.g.: steps/decode.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr" + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --first_pass # decoding dir of first pass" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --model # which model to use (e.g. to" + echo " # specify the final.alimdl)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --transform_dir # dir to find fMLLR transforms " + echo " # speaker-adapted decoding" + echo " --scoring-opts # options to local/score.sh" + echo " --reverse [true/false] # time reversal of features" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$model" ]; then # if --model was not specified on the command line... + if [ -z $iter ]; then model=$srcdir/final.mdl; + else model=$srcdir/$iter.mdl; fi +fi + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fst $graphdir/words.txt; do + [ ! -f $f ] && echo "decode_fwdbwd.sh: no such file $f" && exit 1; +done + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode_fwdbwd.sh: feature type is $feat_type"; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! 
-z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ "`cat $transform_dir/num_jobs`" -ne $nj ] && \ + echo "Mismatch in number of jobs with $transform_dir"; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +fi +if $reverse; then + feats="$feats reverse-feats ark:- ark:- |" +fi + +if [ -f $first_pass/lat.1.gz ]; then + echo "converting first pass lattice to graph arc acceptor" + $cmd JOB=1:$nj $dir/log/arc_graph.JOB.log \ + time lattice-arcgraph $model $graphdir/HCLG.fst \ + "ark:gunzip -c $first_pass/lat.JOB.gz|" ark,t:$dir/lat.JOB.arcs || exit 1; + # --write-lattices=ark,t:$dir/lat.det + # --acoustic-scale=$acwt --lattice-beam=$lattice_beam --prune=false \ + + echo "decode with tracking first pass lattice" + $cmd JOB=1:$nj $dir/log/decode_fwdbwd.JOB.log \ + gmm-latgen-tracking --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true \ + --extra-beam=$extra_beam --max-beam=$max_beam \ + --word-symbol-table=$graphdir/words.txt --verbose=2 \ + $model $graphdir/HCLG.fst "$feats" ark:$dir/lat.JOB.arcs "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + +else + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $model"; exit 1; } + fi + $cmd JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true \ + --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" --reverse $reverse $scoring_opts $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } +fi + +echo "Decoding done." +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_lvtln.sh b/tools/ASR_2ch_track/steps/decode_lvtln.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac58b2ee04b66a79188802d7e28431e96c81fc63 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_lvtln.sh @@ -0,0 +1,189 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Copyright 2014 Vimal Manohar + +# Decoding script for LVTLN models. Will estimate VTLN warping factors +# as a by product, which can be used to extract VTLN-warped features. + +# Begin configuration section +stage=0 +acwt=0.083333 +max_active=3000 # Have a smaller than normal max-active, to limit decoding time. +beam=13.0 +lattice_beam=6.0 +nj=4 +silence_weight=0.0 +logdet_scale=0.0 +cmd=run.pl +skip_scoring=false +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +scoring_opts= +cleanup=true +# End configuration section +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. 
parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: steps/decode_lvtln.sh [options] " + echo " e.g.: steps/decode_lvtln.sh exp/tri2d/graph_tgpr data/test_dev93 exp/tri2d/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --acwt # default 0.08333 ... used to get posteriors" + echo " --scoring-opts # options to local/score.sh" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=`echo $3 | sed 's:/$::g'` # remove any trailing slash. + +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. +sdata=$data/split$nj; + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +if [ -f $data/spk2warp ]; then + echo "$0: file $data/spk2warp exists. This script expects non-VTLN features" + exit 1; +fi + + +mkdir -p $dir/log +split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` + +silphonelist=`cat $graphdir/phones/silence.csl` || exit 1; + +# Some checks. Note: we don't need $srcdir/tree but we expect +# it should exist, given the current structure of the scripts. +for f in $graphdir/HCLG.fst $data/feats.scp $srcdir/tree $srcdir/final.mdl \ + $srcdir/final.alimdl $srcdir/final.lvtln; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +## Set up the unadapted features "$sifeats" +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + + +## Generate lattices. +if [ $stage -le 0 ]; then + echo "$0: doing main lattice generation phase" + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $srcdir/final.alimdl | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $srcdir/final.alimdl"; exit 1; } + fi + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $srcdir/final.alimdl $graphdir/HCLG.fst "$sifeats" "ark:|gzip -c > $dir/lat_pass1.JOB.gz" \ + || exit 1; +fi + + +## Get the first-pass LVTLN transforms +if [ $stage -le 1 ]; then + echo "$0: getting first-pass LVTLN transforms." 
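gmm-est-lvtln-trans, used in the two estimation stages below, chooses for each speaker the linear-VTLN class that maximises the auxiliary function and writes both the linear transform (trans*.JOB) and the warp factor it approximates (warp*.JOB, later concatenated into $dir/final.warp). A common follow-up, sketched here only as an assumption about the surrounding recipe (it relies on steps/make_mfcc.sh honouring a spk2warp file in the data directory), is to re-extract genuinely VTLN-warped features from those factors:

  # hypothetical follow-up; data/exp paths and the make_mfcc.sh behaviour are assumptions
  utils/copy_data_dir.sh data/dt05_real data/dt05_real_vtln
  cp exp/tri2d/decode_dt05_real/final.warp data/dt05_real_vtln/spk2warp
  steps/make_mfcc.sh --nj 8 data/dt05_real_vtln exp/make_mfcc/dt05_real_vtln mfcc_vtln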
+ if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $srcdir/final.mdl | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $srcdir/final.mdl"; exit 1; } + fi + $cmd JOB=1:$nj $dir/log/lvtln_pass1.JOB.log \ + gunzip -c $dir/lat_pass1.JOB.gz \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $srcdir/final.alimdl ark:- ark:- \| \ + gmm-post-to-gpost $srcdir/final.alimdl "$sifeats" ark:- ark:- \| \ + gmm-est-lvtln-trans --logdet-scale=$logdet_scale --verbose=1 --spk2utt=ark:$sdata/JOB/spk2utt \ + $srcdir/final.mdl $srcdir/final.lvtln "$sifeats" ark,s,cs:- ark:$dir/trans_pass1.JOB \ + ark,t:$dir/warp_pass1.JOB || exit 1; +fi +## + +feats1="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans_pass1.JOB ark:- ark:- |" + +## Do a second pass of estimating the LVTLN transform. + +if [ $stage -le 3 ]; then + echo "$0: rescoring the lattices with first-pass LVTLN transforms" + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/rescore.JOB.log \ + gmm-rescore-lattice $srcdir/final.mdl "ark:gunzip -c $dir/lat_pass1.JOB.gz|" "$feats1" \ + "ark:|gzip -c > $dir/lat_pass2.JOB.gz" || exit 1; +fi + +if [ $stage -le 4 ]; then + echo "$0: re-estimating LVTLN transforms" + $cmd JOB=1:$nj $dir/log/lvtln_pass2.JOB.log \ + gunzip -c $dir/lat_pass2.JOB.gz \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + gmm-post-to-gpost $srcdir/final.mdl "$feats1" ark:- ark:- \| \ + gmm-est-lvtln-trans --logdet-scale=$logdet_scale --verbose=1 --spk2utt=ark:$sdata/JOB/spk2utt \ + $srcdir/final.mdl $srcdir/final.lvtln "$sifeats" ark,s,cs:- ark:$dir/trans.JOB \ + ark,t:$dir/warp.JOB || exit 1; +fi + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +if [ $stage -le 5 ]; then + # This second rescoring is only really necessary for scoring purposes, + # it does not affect the transforms. + echo "$0: rescoring the lattices with second-pass LVTLN transforms" + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/rescore.JOB.log \ + gmm-rescore-lattice $srcdir/final.mdl "ark:gunzip -c $dir/lat_pass2.JOB.gz|" "$feats" \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +if [ -f $dir/warp.1 ]; then + for j in $(seq $nj); do cat $dir/warp_pass1.$j; done > $dir/0.warp || exit 1; + for j in $(seq $nj); do cat $dir/warp.$j; done > $dir/final.warp || exit 1; + ns1=$(cat $dir/0.warp | wc -l) + ns2=$(cat $dir/final.warp | wc -l) + ! [ "$ns1" == "$ns2" ] && echo "$0: Number of speakers differ pass1 vs pass2, $ns1 != $ns2" && exit 1; + + paste $dir/0.warp $dir/final.warp | awk '{x=$2 - $4; if ((x>0?x:-x) > 0.010001) { print $1, $2, $4; }}' > $dir/warp_changed + nc=$(cat $dir/warp_changed | wc -l) + echo "$0: For $nc speakers out of $ns1, warp changed pass1 vs pass2 by >0.01, see $dir/warp_changed for details" +fi + +if true; then # Diagnostics + if [ -f $data/spk2gender ]; then + # To make it easier to eyeball the male and female speakers' warps + # separately, separate them out. + for g in m f; do # means: for gender in male female + cat $dir/final.warp | \ + utils/filter_scp.pl <(grep -w $g $data/spk2gender | awk '{print $1}') > $dir/final.warp.$g + echo -n "The last few warp factors for gender $g are: " + tail -n 10 $dir/final.warp.$g | awk '{printf("%s ", $2);}'; + echo + done + fi +fi + +if ! $skip_scoring ; then + [ ! 
-x local/score.sh ] && \ + echo "$0: not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir +fi + +if $cleanup; then + rm $dir/lat_pass?.*.gz $dir/trans_pass1.* $dir/warp_pass1.* $dir/warp.* +fi + + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_nnet.sh b/tools/ASR_2ch_track/steps/decode_nnet.sh new file mode 100644 index 0000000000000000000000000000000000000000..2ede0e074027f714fe04ed5a5547b82567b77a26 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_nnet.sh @@ -0,0 +1,160 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely), Daniel Povey +# Apache 2.0 + +# Begin configuration section. +nnet= # non-default location of DNN (optional) +feature_transform= # non-default location of feature_transform (optional) +model= # non-default location of transition model (optional) +class_frame_counts= # non-default location of PDF counts (optional) +srcdir= # non-default location of DNN-dir (decouples model dir from decode dir) +ivector= # rx-specifier with i-vectors (ark-with-vectors), + +blocksoftmax_dims= # 'csl' with block-softmax dimensions: dim1,dim2,dim3,... +blocksoftmax_active= # '1' for the 1st block, + +stage=0 # stage=1 skips lattice generation +nj=4 +cmd=run.pl + +acwt=0.10 # note: only really affects pruning (scoring is on lattices). +beam=13.0 +lattice_beam=8.0 +min_active=200 +max_active=7000 # limit of active tokens +max_mem=50000000 # approx. limit to memory consumption during minimization in bytes +nnet_forward_opts="--no-softmax=true --prior-scale=1.0" + +skip_scoring=false +scoring_opts="--min-lmwt 4 --max-lmwt 15" + +num_threads=1 # if >1, will use latgen-faster-parallel +parallel_opts= # Ignored now. +use_gpu="no" # yes|no|optionaly +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 3 ]; then + echo "Usage: $0 [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the DNN and transition model is." + echo "e.g.: $0 exp/dnn1/graph_tgpr data/test exp/dnn1/decode_tgpr" + echo "" + echo "This script works on plain or modified features (CMN,delta+delta-delta)," + echo "which are then sent through feature-transform. It works out what type" + echo "of features you used from content of srcdir." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo "" + echo " --nnet # non-default location of DNN (opt.)" + echo " --srcdir # non-default dir with DNN/models, can be different" + echo " # from parent dir of ' (opt.)" + echo "" + echo " --acwt # select acoustic scale for decoding" + echo " --scoring-opts # options forwarded to local/score.sh" + echo " --num-threads # N>1: run multi-threaded decoder" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +[ -z $srcdir ] && srcdir=`dirname $dir`; # Default model directory one level up from decoding directory. 
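By default the script resolves final.nnet, final.mdl, the feature transform and the prior counts inside $srcdir (the parent of the decode directory); each of these can be overridden individually, or --srcdir can decouple the model directory from the decode directory altogether. A minimal invocation, following the usage example above with illustrative directory names:

  # hypothetical: decode a nnet1 hybrid system; 0.10 is the script's default acoustic scale
  steps/decode_nnet.sh --nj 4 --cmd run.pl --acwt 0.10 \
    exp/dnn1/graph_tgpr data/test exp/dnn1/decode_tgpr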
+sdata=$data/split$nj; + +mkdir -p $dir/log + +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +# Select default locations to model files (if not already set externally) +[ -z "$nnet" ] && nnet=$srcdir/final.nnet +[ -z "$model" ] && model=$srcdir/final.mdl +[ -z "$feature_transform" -a -e $srcdir/final.feature_transform ] && feature_transform=$srcdir/final.feature_transform +# +[ -z "$class_frame_counts" -a -f $srcdir/prior_counts ] && class_frame_counts=$srcdir/prior_counts # priority, +[ -z "$class_frame_counts" ] && class_frame_counts=$srcdir/ali_train_pdf.counts + +# Check that files exist, +for f in $sdata/1/feats.scp $nnet $model $feature_transform $class_frame_counts $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "$0: missing file $f" && exit 1; +done + +# Possibly use multi-threaded decoder +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + + +# PREPARE FEATURE EXTRACTION PIPELINE +# import config, +cmvn_opts= +delta_opts= +D=$srcdir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# apply-cmvn (optional), +[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector --print-args=false "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +# select a block from blocksoftmax, +if [ ! -z "$blocksoftmax_dims" ]; then + # blocksoftmax_active is a csl! dim1,dim2,dim3,... 
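The block-softmax handling that follows covers DNNs trained with a multi-task <BlockSoftmax> output layer: at decode time only one block is wanted, so a tiny network that copies the active block and applies an ordinary softmax is generated and concatenated onto the trained nnet after its last component has been stripped. A minimal sketch of that construction, assuming the standard nnet1 component syntax and, purely for illustration, two blocks of 2000 and 1500 outputs with block 2 active:

  # illustrative values; in the script dim_total/dim_block/offset are derived from --blocksoftmax-dims
  dim_total=3500; dim_block=1500; offset=2000
  nnet-initialize <(echo "<Copy> <InputDim> $dim_total <OutputDim> $dim_block <BuildVector> $((offset+1)):$((offset+dim_block)) </BuildVector>
                          <Softmax> <InputDim> $dim_block <OutputDim> $dim_block") copy_and_softmax.nnet
  nnet="nnet-concat 'nnet-copy --remove-last-components=1 final.nnet - |' copy_and_softmax.nnet - |"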
+ [ -z "$blocksoftmax_active" ] && echo "$0 Missing option --blocksoftmax-active N" && exit 1 + # getting dims, + dim_total=$(awk -F'[:,]' '{ for(i=1;i<=NF;i++) { sum += $i }; print sum; }' <(echo $blocksoftmax_dims)) + dim_block=$(awk -F'[:,]' -v active=$blocksoftmax_active '{ print $active; }' <(echo $blocksoftmax_dims)) + offset=$(awk -F'[:,]' -v active=$blocksoftmax_active '{ sum=0; for(i=1;i $dim_total $dim_block $((1+offset)):$((offset+dim_block)) "; + echo " $dim_block $dim_block") $dir/copy_and_softmax.nnet + # nnet is assembled on-the fly, is removed, while + is added, + nnet="nnet-concat 'nnet-copy --remove-last-components=1 $nnet - |' $dir/copy_and_softmax.nnet - |" +fi + +# Run the decoding in the queue, +if [ $stage -le 0 ]; then + $cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \ + nnet-forward $nnet_forward_opts --feature-transform=$feature_transform --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu "$nnet" "$feats" ark:- \| \ + latgen-faster-mapped$thread_string --min-active=$min_active --max-active=$max_active --max-mem=$max_mem --beam=$beam \ + --lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +# Run the scoring +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir || exit 1; +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_nolats.sh b/tools/ASR_2ch_track/steps/decode_nolats.sh new file mode 100644 index 0000000000000000000000000000000000000000..9c05d3eea30d5d74f50697ad9eb1fbbbe56817d1 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_nolats.sh @@ -0,0 +1,126 @@ +#!/bin/bash + +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey) +# Vimal Manohar +# Apache 2.0 + +##Changes +# Vimal Manohar (Jan 2014): +# Added options to boost silence probabilities in the model before +# decoding. This can help in favoring the silence phones when +# some silence regions are wrongly decoded as speech phones like glottal stops + +# Begin configuration section. +transform_dir= +iter= +model= # You can specify the model to use (e.g. if you want to use the .alimdl) +boost_silence=1.0 # Boost silence pdfs in the model by this factor before decoding +silence_phones_list= # List of silence phones that would be boosted before decoding +stage=0 +nj=4 +cmd=run.pl +max_active=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 # note: only really affects pruning (scoring is on lattices). +write_alignments=false +write_words=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +[ -z $silence_phones_list ] && boost_silence=1.0 + +if [ $# != 3 ]; then + echo "Usage: $0 [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the model is. 
This version produces just linear output, no lattices" + echo "" + echo "e.g.: steps/decode.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr" + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --model # which model to use (e.g. to" + echo " # specify the final.alimdl)" + echo " --write-alignments # if true, output ali.*.gz" + echo " --write-words # if true, output words.*.gz" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --transform-dir # dir to find fMLLR transforms " + echo " --acwt # acoustic scale used for lattice generation " + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$model" ]; then # if --model was not specified on the command line... + if [ -z $iter ]; then model=$srcdir/final.mdl; + else model=$srcdir/$iter.mdl; fi +fi + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "decode.sh: no such file $f" && exit 1; +done + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode.sh: feature type is $feat_type"; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ "`cat $transform_dir/num_jobs`" -ne $nj ] && \ + echo "Mismatch in number of jobs with $transform_dir"; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +fi + +if [ $stage -le 0 ]; then + if $write_alignments; then + ali="ark:|gzip -c > $dir/ali.JOB.gz" + else + ali="ark:/dev/null" + fi + if $write_words; then + words="ark:|gzip -c > $dir/words.JOB.gz" + else + words="ark:/dev/null" + fi + + [ ! 
-z "$silence_phones_list" ] && \ + model="gmm-boost-silence --boost=$boost_silence $silence_phones_list $model - |" + + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $model"; exit 1; } + fi + $cmd JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-decode-faster --max-active=$max_active --beam=$beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + "$model" $graphdir/HCLG.fst "$feats" "$words" "$ali" || exit 1; +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_raw_fmllr.sh b/tools/ASR_2ch_track/steps/decode_raw_fmllr.sh new file mode 100644 index 0000000000000000000000000000000000000000..069dc84a711d300d039d2b29051d27528fac5657 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_raw_fmllr.sh @@ -0,0 +1,234 @@ +#!/bin/bash + +# Copyright 2012-2013 Johns Hopkins University (Author: Daniel Povey) + +# This decoding script is like decode_fmllr.sh, but it does the fMLLR on +# the raw cepstra, using the model in the LDA+MLLT space +# +# Decoding script that does fMLLR. This can be on top of delta+delta-delta, or +# LDA+MLLT features. + +# There are 3 models involved potentially in this script, +# and for a standard, speaker-independent system they will all be the same. +# The "alignment model" is for the 1st-pass decoding and to get the +# Gaussian-level alignments for the "adaptation model" the first time we +# do fMLLR. The "adaptation model" is used to estimate fMLLR transforms +# and to generate state-level lattices. The lattices are then rescored +# with the "final model". +# +# The following table explains where we get these 3 models from. +# Note: $srcdir is one level up from the decoding directory. +# +# Model Default source: +# +# "alignment model" $srcdir/final.alimdl --alignment-model +# (or $srcdir/final.mdl if alimdl absent) +# "adaptation model" $srcdir/final.mdl --adapt-model +# "final model" $srcdir/final.mdl --final-model + + +# Begin configuration section +first_beam=10.0 # Beam used in initial, speaker-indep. pass +first_max_active=2000 # max-active used in initial pass. +alignment_model= +adapt_model= +final_model= +stage=0 +acwt=0.083333 # Acoustic weight used in getting fMLLR transforms, and also in + # lattice generation. +max_active=7000 +use_normal_fmllr=false +beam=13.0 +lattice_beam=6.0 +nj=4 +silence_weight=0.01 +cmd=run.pl +si_dir= +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +skip_scoring=false +scoring_opts= +# End configuration section +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: steps/decode_fmllr.sh [options] " + echo " e.g.: steps/decode_fmllr.sh exp/tri2b/graph_tgpr data/test_dev93 exp/tri2b/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --adapt-model # Model to compute transforms with" + echo " --alignment-model # Model to get Gaussian-level alignments for" + echo " # 1st pass of transform computation." 
+ echo " --final-model # Model to finally decode with" + echo " --si-dir # use this to skip 1st pass of decoding" + echo " # Caution-- must be with same tree" + echo " --acwt # default 0.08333 ... used to get posteriors" + echo " --num-threads # number of threads to use, default 1." + echo " --scoring-opts # options to local/score.sh" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=`echo $3 | sed 's:/$::g'` # remove any trailing slash. + +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. +sdata=$data/split$nj; + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + + +mkdir -p $dir/log +split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +raw_dim=$(feat-to-dim scp:$data/feats.scp -) || exit 1; +! [ "$raw_dim" -gt 0 ] && echo "raw feature dim not set" && exit 1; + +silphonelist=`cat $graphdir/phones/silence.csl` || exit 1; + +# Some checks. Note: we don't need $srcdir/tree but we expect +# it should exist, given the current structure of the scripts. +for f in $graphdir/HCLG.fst $data/feats.scp $srcdir/tree; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; +## + +## Do the speaker-independent decoding, if --si-dir option not present. ## +if [ -z "$si_dir" ]; then # we need to do the speaker-independent decoding pass. + si_dir=${dir}.si # Name it as our decoding dir, but with suffix ".si". + if [ $stage -le 0 ]; then + steps/decode.sh --scoring-opts "$scoring_opts" \ + --num-threads $num_threads --skip-scoring $skip_scoring \ + --acwt $acwt --nj $nj --cmd "$cmd" --beam $first_beam \ + --model $alignment_model --max-active \ + $first_max_active $graphdir $data $si_dir || exit 1; + fi +fi +## + +## Some checks, and setting of defaults for variables. +[ "$nj" -ne "`cat $si_dir/num_jobs`" ] && echo "Mismatch in #jobs with si-dir" && exit 1; +[ ! -f "$si_dir/lat.1.gz" ] && echo "No such file $si_dir/lat.1.gz" && exit 1; +[ -z "$adapt_model" ] && adapt_model=$srcdir/final.mdl +[ -z "$final_model" ] && final_model=$srcdir/final.mdl +for f in $adapt_model $final_model; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done +## + +if [[ ! -f $srcdir/final.mat || ! -f $srcdir/full.mat ]]; then + echo "$0: we require final.mat and full.mat in the source directory $srcdir" +fi + +splicedfeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- |" +sifeats="$splicedfeats transform-feats $srcdir/final.mat ark:- ark:- |" + +full_lda_mat="get-full-lda-mat --print-args=false $srcdir/final.mat $srcdir/full.mat -|" + +## + +## Now get the first-pass fMLLR transforms. +if [ $stage -le 1 ]; then + echo "$0: getting first-pass raw-fMLLR transforms." 
+ $cmd JOB=1:$nj $dir/log/fmllr_pass1.JOB.log \ + gunzip -c $si_dir/lat.JOB.gz \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $alignment_model ark:- ark:- \| \ + gmm-post-to-gpost $alignment_model "$sifeats" ark:- ark:- \| \ + gmm-est-fmllr-raw-gpost --raw-feat-dim=$raw_dim --spk2utt=ark:$sdata/JOB/spk2utt $adapt_model "$full_lda_mat" \ + "$splicedfeats" ark,s,cs:- ark:$dir/pre_trans.JOB || exit 1; +fi +## + +pass1splicedfeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/pre_trans.JOB ark:- ark:- | splice-feats $splice_opts ark:- ark:- |" +pass1feats="$pass1splicedfeats transform-feats $srcdir/final.mat ark:- ark:- |" + +## Do the main lattice generation pass. Note: we don't determinize the lattices at +## this stage, as we're going to use them in acoustic rescoring with the larger +## model, and it's more correct to store the full state-level lattice for this purpose. +if [ $stage -le 2 ]; then + echo "$0: doing main lattice generation phase" + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --determinize-lattice=false \ + --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $adapt_model $graphdir/HCLG.fst "$pass1feats" "ark:|gzip -c > $dir/lat.tmp.JOB.gz" \ + || exit 1; +fi +## + +## Do a second pass of estimating the transform-- this time with the lattices +## generated from the alignment model. Compose the transforms to get +## $dir/trans.1, etc. +if [ $stage -le 3 ]; then + echo "$0: estimating raw-fMLLR transforms a second time." 
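# Annotation (clarifying comment, not part of the original recipe): the second-pass transform
# below is estimated on top of the pass-1 adapted features, so the two per-speaker transforms
# are combined into a single affine transform, roughly raw_trans(x) = trans_tmp(pre_trans(x));
# that is what the compose-transforms --b-is-affine=true step at the end of the pipeline
# produces.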
+ $cmd JOB=1:$nj $dir/log/fmllr_pass2.JOB.log \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=4.0 \ + "ark:gunzip -c $dir/lat.tmp.JOB.gz|" ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $adapt_model ark:- ark:- \| \ + gmm-est-fmllr-raw --raw-feat-dim=$raw_dim --spk2utt=ark:$sdata/JOB/spk2utt \ + $adapt_model "$full_lda_mat" "$pass1splicedfeats" ark,s,cs:- ark:$dir/trans_tmp.JOB '&&' \ + compose-transforms --b-is-affine=true ark:$dir/trans_tmp.JOB ark:$dir/pre_trans.JOB \ + ark:$dir/raw_trans.JOB || exit 1; +fi +## + +feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/raw_trans.JOB ark:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + +if [ $stage -le 4 ] && $use_normal_fmllr; then + echo "$0: estimating normal fMLLR transforms" + $cmd JOB=1:$nj $dir/log/fmllr_pass3.JOB.log \ + gmm-rescore-lattice $final_model "ark:gunzip -c $dir/lat.tmp.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=4.0 ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post $silence_weight $silphonelist $adapt_model ark:- ark:- \| \ + gmm-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt \ + $adapt_model "$feats" ark,s,cs:- ark:$dir/trans.JOB || exit 1; +fi + +if $use_normal_fmllr; then + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" +fi + +# Rescore the state-level lattices with the final adapted features, and the final model +# (which by default is $srcdir/final.mdl, but which may be specified on the command line, +# useful in case of discriminatively trained systems). +# At this point we prune and determinize the lattices and write them out, ready for +# language model rescoring. + +if [ $stage -le 5 ]; then + echo "$0: doing a final pass of acoustic rescoring." + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/acoustic_rescore.JOB.log \ + gmm-rescore-lattice $final_model "ark:gunzip -c $dir/lat.tmp.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned$thread_string --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" '&&' rm $dir/lat.tmp.JOB.gz || exit 1; +fi + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "$0: not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir +fi + +#rm $dir/{trans_tmp,pre_trans}.* + +exit 0; + diff --git a/tools/ASR_2ch_track/steps/decode_sgmm.sh b/tools/ASR_2ch_track/steps/decode_sgmm.sh new file mode 100644 index 0000000000000000000000000000000000000000..ba8d89d9a7127b79b246ad81dfd666ed8b55a43f --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_sgmm.sh @@ -0,0 +1,266 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script does decoding with an SGMM system, with speaker vectors. +# If the SGMM system was +# built on top of fMLLR transforms from a conventional system, you should +# provide the --transform-dir option. + +# Begin configuration section. +stage=1 +alignment_model= +transform_dir= # dir to find fMLLR transforms. +nj=4 # number of decoding jobs. +acwt=0.1 # Just a default value, used for adaptation and beam-pruning.. 
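# Annotation (example only, not part of the original recipe): the acoustic scale is roughly the
# inverse of the language-model weight used at scoring time, so acwt=0.1 corresponds to an LM
# weight of about 10 (the GMM scripts above use 0.083333, i.e. an LM weight of 12).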
+cmd=run.pl +beam=15.0 +gselect=15 # Number of Gaussian-selection indices for SGMMs. [Note: + # the first_pass_gselect variable is used for the 1st pass of + # decoding and can be tighter. +first_pass_gselect=3 # Use a smaller number of Gaussian-selection indices in + # the 1st pass of decoding (lattice generation). +max_active=7000 + +#WARNING: This option is renamed lattice_beam (it was renamed to follow the naming +# in the other scripts +lattice_beam=6.0 # Beam we use in lattice generation. +vecs_beam=4.0 # Beam we use to prune lattices while getting posteriors for + # speaker-vector computation. Can be quite tight (actually we could + # probably just do best-path. +use_fmllr=false +fmllr_iters=10 +fmllr_min_count=1000 +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: steps/decode_sgmm.sh [options] " + echo " e.g.: steps/decode_sgmm.sh --transform-dir exp/tri3b/decode_dev93_tgpr \\" + echo " exp/sgmm3a/graph_tgpr data/test_dev93 exp/sgmm3a/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --alignment-model # Model for the first-pass decoding." + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --beam # Decoding beam; default 13.0" + exit 1; +fi + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. + +for f in $graphdir/HCLG.fst $data/feats.scp $srcdir/final.mdl; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +sdata=$data/split$nj; +silphonelist=`cat $graphdir/phones/silence.csl` || exit 1 +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|" +gselect_opt_1stpass="$gselect_opt copy-gselect --n=$first_pass_gselect ark:- ark:- |" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + + +## Set up features. +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." 
&& exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." +fi +## + +## Calculate FMLLR pre-transforms if needed. We are doing this here since this +## step is requried by models both with and without speaker vectors +if $use_fmllr; then + if [ ! -f $srcdir/final.fmllr_mdl ] || [ $srcdir/final.fmllr_mdl -ot $srcdir/final.mdl ]; then + echo "$0: computing pre-transform for fMLLR computation." + sgmm-comp-prexform $srcdir/final.mdl $srcdir/final.occs $srcdir/final.fmllr_mdl || exit 1; + fi +fi + +## Save Gaussian-selection info to disk. +# Note: we can use final.mdl regardless of whether there is an alignment model-- +# they use the same UBM. +if [ $stage -le 1 ]; then + $cmd JOB=1:$nj $dir/log/gselect.JOB.log \ + sgmm-gselect --full-gmm-nbest=$gselect $srcdir/final.mdl \ + "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1; +fi + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; + +# Generate state-level lattice which we can rescore. This is done with the +# alignment model and no speaker-vectors. +if [ $stage -le 2 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $alignment_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $alignment_model"; exit 1; } + fi + $cmd JOB=1:$nj $dir/log/decode_pass1.JOB.log \ + sgmm-latgen-faster --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --determinize-lattice=false --allow-partial=true \ + --word-symbol-table=$graphdir/words.txt "$gselect_opt_1stpass" $alignment_model \ + $graphdir/HCLG.fst "$feats" "ark:|gzip -c > $dir/pre_lat.JOB.gz" || exit 1; +fi + +## Check if the model has speaker vectors +spkdim=`sgmm-info $srcdir/final.mdl | grep 'speaker vector' | awk '{print $NF}'` + +if [ $spkdim -gt 0 ]; then ### For models with speaker vectors: + +# Estimate speaker vectors (1st pass). Prune before determinizing +# because determinization can take a while on un-pruned lattices. +# Note: the sgmm-post-to-gpost stage is necessary because we have +# a separate alignment-model and final model, otherwise we'd skip it +# and use sgmm-est-spkvecs. + if [ $stage -le 3 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass1.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $alignment_model ark:- ark:- \| \ + sgmm-post-to-gpost "$gselect_opt" $alignment_model "$feats" ark:- ark:- \| \ + sgmm-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/pre_vecs.JOB" || exit 1; + fi + +# Estimate speaker vectors (2nd pass). Since we already have spk vectors, +# at this point we need to rescore the lattice to get the correct posteriors. 
+ if [ $stage -le 4 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass2.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm-rescore-lattice --speedup=true --spk-vecs=ark:$dir/pre_vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/pre_vecs.JOB \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/vecs.JOB" || exit 1; + fi + rm $dir/pre_vecs.* + + if $use_fmllr; then + # Estimate fMLLR transforms (note: these may be on top of any + # fMLLR transforms estimated with the baseline GMM system. + if [ $stage -le 5 ]; then # compute fMLLR transforms. + echo "$0: computing fMLLR transforms." + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm-rescore-lattice --speedup=true --spk-vecs=ark:$dir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/vecs.JOB \ + --fmllr-iters=$fmllr_iters --fmllr-min-count=$fmllr_min_count \ + $srcdir/final.fmllr_mdl "$feats" ark,s,cs:- "ark:$dir/trans.JOB" || exit 1; + fi + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" + fi + +# Now rescore the state-level lattices with the adapted features and the +# corresponding model. Prune and determinize the lattices to limit +# their size. + if [ $stage -le 6 ]; then + $cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm-rescore-lattice "$gselect_opt" --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \ + $srcdir/final.mdl "ark:gunzip -c $dir/pre_lat.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + fi + rm $dir/pre_lat.*.gz + +else ### For models without speaker vectors: + + if $use_fmllr; then + # Estimate fMLLR transforms (note: these may be on top of any + # fMLLR transforms estimated with the baseline GMM system. + if [ $stage -le 5 ]; then # compute fMLLR transforms. + echo "$0: computing fMLLR transforms." 
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm-rescore-lattice --speedup=true --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" \ + --fmllr-iters=$fmllr_iters --fmllr-min-count=$fmllr_min_count \ + $srcdir/final.fmllr_mdl "$feats" ark,s,cs:- "ark:$dir/trans.JOB" || exit 1; + fi + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" + fi + +# Now rescore the state-level lattices with the adapted features and the +# corresponding model. Prune and determinize the lattices to limit +# their size. + if [ $stage -le 6 ] && $use_fmllr; then + $cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm-rescore-lattice "$gselect_opt" --utt2spk=ark:$sdata/JOB/utt2spk \ + $srcdir/final.mdl "ark:gunzip -c $dir/pre_lat.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + rm $dir/pre_lat.*.gz + else # If no adaptation needed, determinize the lattice. + $cmd JOB=1:$nj $dir/log/determinize.JOB.log \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$lattice_beam \ + "ark:gunzip -c $dir/pre_lat.JOB.gz|" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + rm $dir/pre_lat.*.gz + fi + +fi + +# The output of this script is the files "lat.*.gz"-- we'll rescore this at +# different acoustic scales to get the final output. + + +if [ $stage -le 7 ]; then + if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + echo "score best paths" + local/score.sh --cmd "$cmd" $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } + #echo "score confidence and timing with sclite" + #local/score_sclite_conf.sh --cmd "$cmd" --language turkish $data $graphdir $dir + fi +fi +echo "Decoding done." +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_sgmm2.sh b/tools/ASR_2ch_track/steps/decode_sgmm2.sh new file mode 100644 index 0000000000000000000000000000000000000000..99f422308121f4aa071fe0bc7c66b5a8c0272c50 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_sgmm2.sh @@ -0,0 +1,225 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script does decoding with an SGMM system, with speaker vectors. +# If the SGMM system was +# built on top of fMLLR transforms from a conventional system, you should +# provide the --transform-dir option. + +# Begin configuration section. +stage=1 +transform_dir= # dir to find fMLLR transforms. +nj=4 # number of decoding jobs. +acwt=0.1 # Just a default value, used for adaptation and beam-pruning.. +cmd=run.pl +beam=13.0 +gselect=15 # Number of Gaussian-selection indices for SGMMs. [Note: + # the first_pass_gselect variable is used for the 1st pass of + # decoding and can be tighter. +first_pass_gselect=3 # Use a smaller number of Gaussian-selection indices in + # the 1st pass of decoding (lattice generation). 
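# Annotation (clarifying comment, not part of the original recipe): Gaussian selection limits
# SGMM likelihood evaluation to the top-scoring UBM Gaussian indices per frame. The full lists
# (gselect=15) are written once to $dir/gselect.JOB.gz, and the first decoding pass keeps only
# the top first_pass_gselect=3 of them, following the same pattern as the pipeline used below:
#   gunzip -c $dir/gselect.1.gz | copy-gselect --n=3 ark:- ark:- | ...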
+max_active=7000 +max_mem=50000000 +#WARNING: This option is renamed lattice_beam (it was renamed to follow the naming +# in the other scripts +lattice_beam=6.0 # Beam we use in lattice generation. +vecs_beam=4.0 # Beam we use to prune lattices while getting posteriors for + # speaker-vector computation. Can be quite tight (actually we could + # probably just do best-path. +use_fmllr=false +fmllr_iters=10 +fmllr_min_count=1000 +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +skip_scoring=false +scoring_opts= +# note: there are no more min-lmwt and max-lmwt options, instead use +# e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20" +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: steps/decode_sgmm2.sh [options] " + echo " e.g.: steps/decode_sgmm2.sh --transform-dir exp/tri3b/decode_dev93_tgpr \\" + echo " exp/sgmm3a/graph_tgpr data/test_dev93 exp/sgmm3a/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --beam # Decoding beam; default 13.0" + exit 1; +fi + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. + +for f in $graphdir/HCLG.fst $data/feats.scp $srcdir/final.mdl; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +sdata=$data/split$nj; +silphonelist=`cat $graphdir/phones/silence.csl` || exit 1 +gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|" +gselect_opt_1stpass="$gselect_opt copy-gselect --n=$first_pass_gselect ark:- ark:- |" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +## Set up features. +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." 
&& exit 1; + if [ -f $transform_dir/trans.1 ]; then + echo "$0: using transforms from $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" + elif [ -f $transform_dir/raw_trans.1 ]; then + feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/raw_trans.JOB ark:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + else + echo "$0: no such file $transform_dir/trans.1 or $transform_dir/raw_trans.1, invalid --transform-dir option?" + exit 1; + fi +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." +fi +## + +## Save Gaussian-selection info to disk. +# Note: we can use final.mdl regardless of whether there is an alignment model-- +# they use the same UBM. + +if [ $stage -le 1 ]; then + $cmd JOB=1:$nj $dir/log/gselect.JOB.log \ + sgmm2-gselect --full-gmm-nbest=$gselect $srcdir/final.mdl \ + "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1; +fi + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; + + +# Generate state-level lattice which we can rescore. This is done with the alignment +# model and no speaker-vectors. +if [ $stage -le 2 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $alignment_model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $alignment_model"; exit 1; } + fi + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode_pass1.JOB.log \ + sgmm2-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --determinize-lattice=false --allow-partial=true \ + --word-symbol-table=$graphdir/words.txt --max-mem=$max_mem "$gselect_opt_1stpass" $alignment_model \ + $graphdir/HCLG.fst "$feats" "ark:|gzip -c > $dir/pre_lat.JOB.gz" || exit 1; +fi + +# Estimate speaker vectors (1st pass). Prune before determinizing +# because determinization can take a while on un-pruned lattices. +# Note: the sgmm2-post-to-gpost stage is necessary because we have +# a separate alignment-model and final model, otherwise we'd skip it +# and use sgmm2-est-spkvecs. +if [ $stage -le 3 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass1.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $alignment_model ark:- ark:- \| \ + sgmm2-post-to-gpost "$gselect_opt" $alignment_model "$feats" ark:- ark:- \| \ + sgmm2-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/pre_vecs.JOB" || exit 1; +fi + +# Estimate speaker vectors (2nd pass). Since we already have spk vectors, +# at this point we need to rescore the lattice to get the correct posteriors. 
+if [ $stage -le 4 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass2.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm2-rescore-lattice --speedup=true --spk-vecs=ark:$dir/pre_vecs.JOB \ + --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm2-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/pre_vecs.JOB \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/vecs.JOB" || exit 1; +fi +rm $dir/pre_vecs.* + +if $use_fmllr; then + # Estimate fMLLR transforms (note: these may be on top of any + # fMLLR transforms estimated with the baseline GMM system. + if [ $stage -le 5 ]; then # compute fMLLR transforms. + echo "$0: computing fMLLR transforms." + if [ ! -f $srcdir/final.fmllr_mdl ] || [ $srcdir/final.fmllr_mdl -ot $srcdir/final.mdl ]; then + echo "$0: computing pre-transform for fMLLR computation." + sgmm2-comp-prexform $srcdir/final.mdl $srcdir/final.occs $srcdir/final.fmllr_mdl || exit 1; + fi + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm2-rescore-lattice --speedup=true --spk-vecs=ark:$dir/vecs.JOB \ + --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm2-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/vecs.JOB \ + --fmllr-iters=$fmllr_iters --fmllr-min-count=$fmllr_min_count \ + $srcdir/final.fmllr_mdl "$feats" ark,s,cs:- "ark:$dir/trans.JOB" || exit 1; + fi + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" +fi + +# Now rescore the state-level lattices with the adapted features and the +# corresponding model. Prune and determinize the lattices to limit +# their size. +if [ $stage -le 6 ]; then + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm2-rescore-lattice "$gselect_opt" --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \ + $srcdir/final.mdl "ark:gunzip -c $dir/pre_lat.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned$thread_string --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi +#rm $dir/pre_lat.*.gz ##TEMP! + +# The output of this script is the files "lat.*.gz"-- we'll rescore this at different +# acoustic scales to get the final output. + + +if [ $stage -le 7 ]; then + if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." 
&& exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir + fi +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_sgmm2_fromlats.sh b/tools/ASR_2ch_track/steps/decode_sgmm2_fromlats.sh new file mode 100644 index 0000000000000000000000000000000000000000..7a3a4f6bd48eef4d533e0e61c3b0372c3cf36584 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_sgmm2_fromlats.sh @@ -0,0 +1,272 @@ +#!/bin/bash + +# Copyright 2012-2013 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script does decoding with an SGMM2 system, with speaker vectors. If the +# SGMM2 system was built on top of fMLLR transforms from a conventional system, +# you should provide the --transform-dir option. + +# This script does not use a decoding graph, but instead you provide +# a previous decoding directory with lattices in it. This script will only +# make use of the word sequences in the lattices; it limits the decoding +# to those sequences. You should also provide a "lang" directory from +# which this script will use the G.fst and L.fst. + +# Begin configuration section. +stage=1 +alignment_model= +transform_dir= # dir to find fMLLR transforms. +acwt=0.08333 # Just a default value, used for adaptation and beam-pruning.. +batch_size=75 # Limits memory blowup in compile-train-graphs-fsts +cmd=run.pl +beam=20.0 +gselect=15 # Number of Gaussian-selection indices for SGMMs. [Note: + # the first_pass_gselect variable is used for the 1st pass of + # decoding and can be tighter. +first_pass_gselect=3 # Use a smaller number of Gaussian-selection indices in + # the 1st pass of decoding (lattice generation). +max_active=7000 +lattice_beam=8.0 # Beam we use in lattice generation. +vecs_beam=4.0 # Beam we use to prune lattices while getting posteriors for + # speaker-vector computation. Can be quite tight (actually we could + # probably just do best-path. +use_fmllr=false +fmllr_iters=10 +fmllr_min_count=1000 +scale_opts="--transition-scale=1.0 --self-loop-scale=0.1" +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 4 ]; then + echo "Usage: steps/decode_sgmm_fromlats.sh [options] " + echo "" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --alignment-model # Model for the first-pass decoding." + echo " --config # config containing options" + echo " --cmd # Command to run in parallel with" + echo " --beam # Decoding beam; default 13.0" + exit 1; +fi + +data=$1 +lang=$2 +olddir=$3 +dir=$4 +srcdir=`dirname $dir` + +for f in $data/feats.scp $lang/G.fst $lang/L_disambig.fst $lang/phones/disambig.int \ + $srcdir/final.mdl $srcdir/tree $olddir/lat.1.gz; do + [ ! 
-f $f ] && echo "$0: no such file $f" && exit 1; +done + +nj=`cat $olddir/num_jobs` || exit 1; +sdata=$data/split$nj; +silphonelist=`cat $lang/phones/silence.csl` || exit 1 +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|" +gselect_opt_1stpass="$gselect_opt copy-gselect --n=$first_pass_gselect ark:- ark:- |" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + + +## Set up features + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" +if [ -z "$transform_dir" ] && [ -f $olddir/trans.1 ]; then + transform_dir=$olddir +fi + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." && exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." +fi + +## Calculate FMLLR pre-transforms if needed. We are doing this here since this +## step is requried by models both with and without speaker vectors +if $use_fmllr; then + if [ ! -f $srcdir/final.fmllr_mdl ] || [ $srcdir/final.fmllr_mdl -ot $srcdir/final.mdl ]; then + echo "$0: computing pre-transform for fMLLR computation." + sgmm2-comp-prexform $srcdir/final.mdl $srcdir/final.occs $srcdir/final.fmllr_mdl || exit 1; + fi +fi + +## Save Gaussian-selection info to disk. +# Note: we can use final.mdl regardless of whether there is an alignment model-- +# they use the same UBM. +if [ $stage -le 1 ]; then + $cmd JOB=1:$nj $dir/log/gselect.JOB.log \ + sgmm2-gselect --full-gmm-nbest=$gselect $srcdir/final.mdl \ + "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1; +fi + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; + +# Generate state-level lattice which we can rescore. This is done with the +# alignment model and no speaker-vectors. 
+if [ $stage -le 2 ]; then + $cmd JOB=1:$nj $dir/log/decode_pass1.JOB.log \ + lattice-to-fst "ark:gunzip -c $olddir/lat.JOB.gz|" ark:- \| \ + fsttablecompose "fstproject --project_output=true $lang/G.fst | fstarcsort |" ark:- ark:- \| \ + fstdeterminizestar ark:- ark:- \| \ + compile-train-graphs-fsts --read-disambig-syms=$lang/phones/disambig.int \ + --batch-size=$batch_size $scale_opts \ + $srcdir/tree $srcdir/final.mdl $lang/L_disambig.fst ark:- ark:- \| \ + sgmm2-latgen-faster --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --determinize-lattice=false --allow-partial=true \ + --word-symbol-table=$lang/words.txt "$gselect_opt_1stpass" $alignment_model \ + "ark:-" "$feats" "ark:|gzip -c > $dir/pre_lat.JOB.gz" || exit 1; +fi + +## Check if the model has speaker vectors +spkdim=`sgmm2-info $srcdir/final.mdl | grep 'speaker vector' | awk '{print $NF}'` + +if [ $spkdim -gt 0 ]; then ### For models with speaker vectors: + +# Estimate speaker vectors (1st pass). Prune before determinizing +# because determinization can take a while on un-pruned lattices. +# Note: the sgmm2-post-to-gpost stage is necessary because we have +# a separate alignment-model and final model, otherwise we'd skip it +# and use sgmm2-est-spkvecs. + if [ $stage -le 3 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass1.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $alignment_model ark:- ark:- \| \ + sgmm2-post-to-gpost "$gselect_opt" $alignment_model "$feats" ark:- ark:- \| \ + sgmm2-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/pre_vecs.JOB" || exit 1; + fi + +# Estimate speaker vectors (2nd pass). Since we already have spk vectors, +# at this point we need to rescore the lattice to get the correct posteriors. + if [ $stage -le 4 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass2.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm2-rescore-lattice --spk-vecs=ark:$dir/pre_vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm2-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/pre_vecs.JOB \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/vecs.JOB" || exit 1; + fi + rm $dir/pre_vecs.* + + if $use_fmllr; then + # Estimate fMLLR transforms (note: these may be on top of any + # fMLLR transforms estimated with the baseline GMM system. + if [ $stage -le 5 ]; then # compute fMLLR transforms. + echo "$0: computing fMLLR transforms." 
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm2-rescore-lattice --spk-vecs=ark:$dir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm2-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/vecs.JOB \ + --fmllr-iters=$fmllr_iters --fmllr-min-count=$fmllr_min_count \ + $srcdir/final.fmllr_mdl "$feats" ark,s,cs:- "ark:$dir/trans.JOB" || exit 1; + fi + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" + fi + +# Now rescore the state-level lattices with the adapted features and the +# corresponding model. Prune and determinize the lattices to limit +# their size. + if [ $stage -le 6 ]; then + $cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm2-rescore-lattice "$gselect_opt" --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \ + $srcdir/final.mdl "ark:gunzip -c $dir/pre_lat.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + fi + rm $dir/pre_lat.*.gz + +else ### For models without speaker vectors: + + if $use_fmllr; then + # Estimate fMLLR transforms (note: these may be on top of any + # fMLLR transforms estimated with the baseline GMM system. + if [ $stage -le 5 ]; then # compute fMLLR transforms. + echo "$0: computing fMLLR transforms." + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm2-rescore-lattice --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm2-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" \ + --fmllr-iters=$fmllr_iters --fmllr-min-count=$fmllr_min_count \ + $srcdir/final.fmllr_mdl "$feats" ark,s,cs:- "ark:$dir/trans.JOB" || exit 1; + fi + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" + fi + +# Now rescore the state-level lattices with the adapted features and the +# corresponding model. Prune and determinize the lattices to limit +# their size. + if [ $stage -le 6 ] && $use_fmllr; then + $cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm2-rescore-lattice "$gselect_opt" --utt2spk=ark:$sdata/JOB/utt2spk \ + $srcdir/final.mdl "ark:gunzip -c $dir/pre_lat.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + rm $dir/pre_lat.*.gz + else # Already done with decoding if no adaptation needed. + for n in `seq 1 $nj`; do + mv $dir/pre_lat.${n}.gz $dir/lat.${n}.gz + done + fi + +fi + +# The output of this script is the files "lat.*.gz"-- we'll rescore this at +# different acoustic scales to get the final output. + + +if [ $stage -le 7 ]; then + if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." 
&& exit 1; + echo "score best paths" + local/score.sh --cmd "$cmd" $data $lang $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } + fi +fi +echo "Decoding done." +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_sgmm2_rescore.sh b/tools/ASR_2ch_track/steps/decode_sgmm2_rescore.sh new file mode 100644 index 0000000000000000000000000000000000000000..a37a47350d7cbd0b552342950a3445432c54c18a --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_sgmm2_rescore.sh @@ -0,0 +1,113 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script does decoding with an SGMM system, by rescoring lattices +# generated from a previous SGMM system. The directory with the lattices +# is assumed to contain speaker vectors, if used. Basically it rescores +# the lattices one final time, using the same setup as the final decoding +# pass of the source dir. The assumption is that the model may have +# been discriminatively trained. + +# If the system was built on top of fMLLR transforms from a conventional system, +# you should provide the --transform-dir option. + +# Begin configuration section. +transform_dir= # dir to find fMLLR transforms. +cmd=run.pl +iter=final +skip_scoring=false +scoring_opts= +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 4 ]; then + echo "Usage: steps/decode_sgmm_rescore.sh [options] " + echo " e.g.: steps/decode_sgmm_rescore.sh --transform-dir exp/tri3b/decode_dev93_tgpr \\" + echo " exp/sgmm3a/graph_tgpr data/test_dev93 exp/sgmm3a/decode_dev93_tgpr exp/sgmm3a_mmi/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --config # config containing options" + echo " --cmd # Command to run in parallel with" + echo " --iter # iteration of model to use (default: final)" + exit 1; +fi + +graphdir=$1 +data=$2 +olddir=$3 +dir=$4 +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. + +for f in $graphdir/words.txt $data/feats.scp $olddir/lat.1.gz $olddir/gselect.1.gz \ + $srcdir/$iter.mdl; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +nj=`cat $olddir/num_jobs` || exit 1; +sdata=$data/split$nj; +gselect_opt="--gselect=ark,s,cs:gunzip -c $olddir/gselect.JOB.gz|" +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -f $olddir/vecs.1 ]; then + echo "$0: using speaker vectors from $olddir" + spkvecs_opt="--spk-vecs=ark:$olddir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk" +else + echo "$0: no speaker vectors found." + spkvecs_opt= +fi + + +## Set up features. 
+if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." && exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." +fi + +if [ -f $olddir/trans.1 ]; then + echo "$0: using (in addition to any previous transforms) transforms from $olddir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$olddir/trans.JOB ark:- ark:- |" +fi +## + +# Rescore the state-level lattices with the model provided. Just +# one command in this script. +echo "$0: rescoring lattices with SGMM model in $srcdir/$iter.mdl" +$cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm2-rescore-lattice "$gselect_opt" $spkvecs_opt \ + $srcdir/$iter.mdl "ark:gunzip -c $olddir/lat.JOB.gz|" "$feats" \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_sgmm2_rescore_project.sh b/tools/ASR_2ch_track/steps/decode_sgmm2_rescore_project.sh new file mode 100644 index 0000000000000000000000000000000000000000..277d72fbeba81507c2330d45ea62c538d15834f5 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_sgmm2_rescore_project.sh @@ -0,0 +1,177 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script does decoding with an SGMM system, by rescoring lattices +# generated from a previous SGMM system. This version does the "predictive" +# SGMM, where we subtract some constant times the log-prob of the left +# few spliced frames, and the same for the right few. +# The directory with the lattices +# is assumed to contain any speaker vectors, if used. This script just +# adds into the acoustic scores, (some constant, default -0.25) times +# the acoustic score of the left model, and the same for the right model. + +# the lattices one final time, using the same setup as the final decoding +# pass of the source dir. The assumption is that the model may have +# been discriminatively trained. + +# If the system was built on top of fMLLR transforms from a conventional system, +# you should provide the --transform-dir option. + +# Begin configuration section. +stage=0 +transform_dir= # dir to find fMLLR transforms. 
+cmd=run.pl +iter=final +prob_scale=-0.25 +dimensions=0:13:104:117 +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 5 ]; then + echo "Usage: steps/decode_sgmm_rescore_project.sh [options] " + echo " e.g.: steps/decode_sgmm_rescore_project.sh --transform-dir exp/tri3b/decode_dev93_tgpr \\" + echo " exp/tri2b/full.mat exp/sgmm3a/graph_tgpr data/test_dev93 exp/sgmm3a/decode_dev93_tgpr exp/sgmm3a/decode_dev93_tgpr_predict" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --config # config containing options" + echo " --cmd # Command to run in parallel with" + echo " --prob-scale # Default -0.25, scale on left and right models." + exit 1; +fi + +full_lda_mat=$1 +graphdir=$2 +data=$3 +olddir=$4 +dir=$5 +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. + +for f in $full_lda_mat $graphdir/words.txt $data/feats.scp $olddir/lat.1.gz \ + $olddir/gselect.1.gz $srcdir/$iter.mdl; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +nj=`cat $olddir/num_jobs` || exit 1; +sdata=$data/split$nj; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -f $olddir/vecs.1 ]; then + echo "$0: using speaker vectors from $olddir" + spkvecs_opt="--spk-vecs=ark:$olddir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk" +else + echo "$0: no speaker vectors found." + spkvecs_opt= +fi + +if [ $stage -le 0 ]; then + # Get full LDA+MLLT mat and its inverse. Note: the full LDA+MLLT mat is + # the LDA+MLLT mat, plus the "rejected" rows of the LDA matrix. + $cmd $dir/log/get_full_lda.log \ + get-full-lda-mat $srcdir/final.mat $full_lda_mat $dir/full.mat $dir/full_inv.mat || exit 1; +fi + +if [ $stage -le 1 ]; then + left_start=`echo $dimensions | cut '-d:' -f 1`; + left_end=`echo $dimensions | cut '-d:' -f 2`; + right_start=`echo $dimensions | cut '-d:' -f 3`; + right_end=`echo $dimensions | cut '-d:' -f 4`; + + # Prepare left and right models. For now, the dimensions are hardwired (e.g., 13 MFCCs and splice 9 frames). + # Note: the choice of dividing by the prob of the left 4 and the right 4 frames is a bit arbitrary and + # we could investigate different configurations. + $cmd $dir/log/left.log \ + sgmm2-project --start-dim=$left_start --end-dim=$left_end $srcdir/final.mdl $dir/full.mat $dir/left.mdl $dir/left.mat || exit 1; + $cmd $dir/log/right.log \ + sgmm2-project --start-dim=$right_start --end-dim=$right_end $srcdir/final.mdl $dir/full.mat $dir/right.mdl $dir/right.mat || exit 1; +fi + + +# we apply the scaling on the new acoustic probs by adding the inverse +# of that to the old acoustic probs, and then later inverting again. +# this has to do with limitations in sgmm2-rescore-lattice: we can only +# scale the *old* acoustic probs, not the new ones. +inverse_prob_scale=`perl -e "print (1.0 / $prob_scale);"` +cur_lats="ark:gunzip -c $olddir/lat.JOB.gz | lattice-scale --acoustic-scale=$inverse_prob_scale ark:- ark:- |" + +## Set up features. Note: we only support LDA+MLLT features, this +## is inherent in the method, we could not support deltas. 
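# Annotation (worked example, not part of the original recipe): the net effect of the scaling
# trick described above, written for one arc's acoustic log-likelihood a0 (left/right are the
# new models' scores):
#   a0' = a0 * (1/prob_scale)              # lattice-scale with $inverse_prob_scale
#   a1  = a0' + left + right               # two sgmm2-rescore-lattice passes, --old-acoustic-scale=1.0
#   a   = prob_scale * a1 = a0 + prob_scale * (left + right)
# so with the default prob_scale=-0.25, a quarter of the left- and right-context model scores
# is subtracted from the original acoustic scores.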
+ +for model_type in left right; do + + feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- |" # spliced features. + if [ ! -z "$transform_dir" ]; then # using speaker-specific transforms. + # we want to transform in the sequence: $dir/full.mat, then the result of + # (extend-transform-dim $transform_dir/trans.JOB), then $dir/full_inv.mat to + # get back to the spliced space, then the left.mat or right.mat. But + # note that compose-transforms operates in matrix-multiplication order, + # which is opposite from the "order of applying the transforms" order. + new_dim=$[`copy-matrix --binary=false $dir/full.mat - | wc -l` - 1]; # 117 in normal case. + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk 'ark:extend-transform-dim --new-dimension=$new_dim ark:$transform_dir/trans.JOB ark:- | compose-transforms ark:- $dir/full.mat ark:- | compose-transforms $dir/full_inv.mat ark:- ark:- | compose-transforms $dir/${model_type}.mat ark:- ark:- |' ark:- ark:- |" + else # else, we transform with the "left" or "right" matrix; these transform from the + # spliced space. + feats="$feats transform-feats $dir/${model_type}.mat |" + # If we don't have the --transform-dir option, make sure the model was + # trained in the same way. + if grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." + fi + fi + if [ -f $olddir/trans.1 ]; then + echo "$0: warning: not using transforms in $olddir (this is just a " + echo " limitation of the script right now, and could be fixed)." + fi + + if [ $stage -le 2 ]; then + echo "Getting gselect info for $model_type model." + $cmd JOB=1:$nj $dir/log/gselect.$model_type.JOB.log \ + sgmm2-gselect $dir/$model_type.mdl "$feats" \ + "ark,t:|gzip -c >$dir/gselect.$model_type.JOB.gz" || exit 1; + fi + gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.$model_type.JOB.gz|" + + + # Rescore the state-level lattices with the model provided. Just + # one command in this script. + # The --old-acoustic-scale=1.0 option means we just add the scores + # to the old scores. + if [ $stage -le 3 ]; then + echo "$0: rescoring lattices with $model_type model" + $cmd JOB=1:$nj $dir/log/rescore.${model_type}.JOB.log \ + sgmm2-rescore-lattice --old-acoustic-scale=1.0 "$gselect_opt" $spkvecs_opt \ + $dir/$model_type.mdl "$cur_lats" "$feats" \ + "ark:|gzip -c > $dir/lat.${model_type}.JOB.gz" || exit 1; + fi + cur_lats="ark:gunzip -c $dir/lat.${model_type}.JOB.gz |" +done + +if [ $stage -le 4 ]; then + echo "$0: getting final lattices." + $cmd JOB=1:$nj $dir/log/scale_lats.JOB.log \ + lattice-scale --acoustic-scale=$prob_scale "$cur_lats" "ark:|gzip -c >$dir/lat.JOB.gz" \ + || exit 1; +fi + +rm $dir/lat.{left,right}.*.gz 2>/dev/null # note: if these still exist, it will + # confuse the scoring script. + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $data $graphdir $dir || + { echo "$0: Scoring failed. 
(ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_sgmm_fromlats.sh b/tools/ASR_2ch_track/steps/decode_sgmm_fromlats.sh new file mode 100644 index 0000000000000000000000000000000000000000..bb1dacd113f338c95cfb5de21b8e20ec5a0fba70 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_sgmm_fromlats.sh @@ -0,0 +1,277 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script does decoding with an SGMM system, with speaker vectors. +# If the SGMM system was +# built on top of fMLLR transforms from a conventional system, you should +# provide the --transform-dir option. +# This script does not use a decoding graph, but instead you provide +# a previous decoding directory with lattices in it. This script will only +# make use of the word sequences in the lattices; it limits the decoding +# to those sequences. You should also provide a "lang" directory from +# which this script will use the G.fst and L.fst. + +# Begin configuration section. +stage=1 +alignment_model= +transform_dir= # dir to find fMLLR transforms. +acwt=0.08333 # Just a default value, used for adaptation and beam-pruning.. +batch_size=75 # Limits memory blowup in compile-train-graphs-fsts +cmd=run.pl +beam=20.0 +gselect=15 # Number of Gaussian-selection indices for SGMMs. [Note: + # the first_pass_gselect variable is used for the 1st pass of + # decoding and can be tighter. +first_pass_gselect=3 # Use a smaller number of Gaussian-selection indices in + # the 1st pass of decoding (lattice generation). +max_active=7000 + +#WARNING: This option is renamed lattice_beam (it was renamed to follow the naming +# in the other scripts +lattice_beam=8.0 # Beam we use in lattice generation. +vecs_beam=4.0 # Beam we use to prune lattices while getting posteriors for + # speaker-vector computation. Can be quite tight (actually we could + # probably just do best-path. +use_fmllr=false +fmllr_iters=10 +fmllr_min_count=1000 +scale_opts="--transition-scale=1.0 --self-loop-scale=0.1" +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 4 ]; then + echo "Usage: steps/decode_sgmm_fromlats.sh [options] " + echo "" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --alignment-model # Model for the first-pass decoding." + echo " --config # config containing options" + echo " --cmd # Command to run in parallel with" + echo " --beam # Decoding beam; default 13.0" + exit 1; +fi + +data=$1 +lang=$2 +olddir=$3 +dir=$4 +srcdir=`dirname $dir` + +for f in $data/feats.scp $lang/G.fst $lang/L_disambig.fst $lang/phones/disambig.int \ + $srcdir/final.mdl $srcdir/tree $olddir/lat.1.gz; do + [ ! 
-f $f ] && echo "$0: no such file $f" && exit 1; +done + +nj=`cat $olddir/num_jobs` || exit 1; +sdata=$data/split$nj; +silphonelist=`cat $lang/phones/silence.csl` || exit 1 +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|" +gselect_opt_1stpass="$gselect_opt copy-gselect --n=$first_pass_gselect ark:- ark:- |" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + + +## Set up features + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" +if [ -z "$transform_dir" ] && [ -f $olddir/trans.1 ]; then + transform_dir=$olddir +fi + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." && exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." +fi + +## Calculate FMLLR pre-transforms if needed. We are doing this here since this +## step is requried by models both with and without speaker vectors +if $use_fmllr; then + if [ ! -f $srcdir/final.fmllr_mdl ] || [ $srcdir/final.fmllr_mdl -ot $srcdir/final.mdl ]; then + echo "$0: computing pre-transform for fMLLR computation." + sgmm-comp-prexform $srcdir/final.mdl $srcdir/final.occs $srcdir/final.fmllr_mdl || exit 1; + fi +fi + +## Save Gaussian-selection info to disk. +# Note: we can use final.mdl regardless of whether there is an alignment model-- +# they use the same UBM. +if [ $stage -le 1 ]; then + $cmd JOB=1:$nj $dir/log/gselect.JOB.log \ + sgmm-gselect --full-gmm-nbest=$gselect $srcdir/final.mdl \ + "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1; +fi + +## Work out name of alignment model. ## +if [ -z "$alignment_model" ]; then + if [ -f "$srcdir/final.alimdl" ]; then alignment_model=$srcdir/final.alimdl; + else alignment_model=$srcdir/final.mdl; fi +fi +[ ! -f "$alignment_model" ] && echo "$0: no alignment model $alignment_model " && exit 1; + +# Generate state-level lattice which we can rescore. This is done with the +# alignment model and no speaker-vectors. 
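+# In outline, the per-job pipeline below does, roughly:
+#   lattice-to-fst              : turn the old lattices into word-sequence FSTs
+#   fsttablecompose ... G.fst   : re-weight those word sequences with the LM
+#   fstdeterminizestar          : merge duplicate word sequences
+#   compile-train-graphs-fsts   : expand them into HMM-level decoding graphs
+#   sgmm-latgen-faster          : decode with the SGMM, restricted to those graphs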
+if [ $stage -le 2 ]; then + $cmd JOB=1:$nj $dir/log/decode_pass1.JOB.log \ + lattice-to-fst "ark:gunzip -c $olddir/lat.JOB.gz|" ark:- \| \ + fsttablecompose "fstproject --project_output=true $lang/G.fst | fstarcsort |" ark:- ark:- \| \ + fstdeterminizestar ark:- ark:- \| \ + compile-train-graphs-fsts --read-disambig-syms=$lang/phones/disambig.int \ + --batch-size=$batch_size $scale_opts \ + $srcdir/tree $srcdir/final.mdl $lang/L_disambig.fst ark:- ark:- \| \ + sgmm-latgen-faster --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --determinize-lattice=false --allow-partial=true \ + --word-symbol-table=$lang/words.txt "$gselect_opt_1stpass" $alignment_model \ + "ark:-" "$feats" "ark:|gzip -c > $dir/pre_lat.JOB.gz" || exit 1; +fi + +## Check if the model has speaker vectors +spkdim=`sgmm-info $srcdir/final.mdl | grep 'speaker vector' | awk '{print $NF}'` + +if [ $spkdim -gt 0 ]; then ### For models with speaker vectors: + +# Estimate speaker vectors (1st pass). Prune before determinizing +# because determinization can take a while on un-pruned lattices. +# Note: the sgmm-post-to-gpost stage is necessary because we have +# a separate alignment-model and final model, otherwise we'd skip it +# and use sgmm-est-spkvecs. + if [ $stage -le 3 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass1.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $alignment_model ark:- ark:- \| \ + sgmm-post-to-gpost "$gselect_opt" $alignment_model "$feats" ark:- ark:- \| \ + sgmm-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/pre_vecs.JOB" || exit 1; + fi + +# Estimate speaker vectors (2nd pass). Since we already have spk vectors, +# at this point we need to rescore the lattice to get the correct posteriors. + if [ $stage -le 4 ]; then + $cmd JOB=1:$nj $dir/log/vecs_pass2.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm-rescore-lattice --spk-vecs=ark:$dir/pre_vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/pre_vecs.JOB \ + $srcdir/final.mdl "$feats" ark,s,cs:- "ark:$dir/vecs.JOB" || exit 1; + fi + rm $dir/pre_vecs.* + + if $use_fmllr; then + # Estimate fMLLR transforms (note: these may be on top of any + # fMLLR transforms estimated with the baseline GMM system. + if [ $stage -le 5 ]; then # compute fMLLR transforms. + echo "$0: computing fMLLR transforms." 
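+      # The pipeline below rescores the pre-lattices with the current speaker
+      # vectors, prunes and determinizes them, converts them to posteriors,
+      # down-weights silence frames, and finally estimates one fMLLR transform
+      # per speaker with sgmm-est-fmllr.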
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm-rescore-lattice --spk-vecs=ark:$dir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" --spk-vecs=ark:$dir/vecs.JOB \ + --fmllr-iters=$fmllr_iters --fmllr-min-count=$fmllr_min_count \ + $srcdir/final.fmllr_mdl "$feats" ark,s,cs:- "ark:$dir/trans.JOB" || exit 1; + fi + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" + fi + +# Now rescore the state-level lattices with the adapted features and the +# corresponding model. Prune and determinize the lattices to limit +# their size. + if [ $stage -le 6 ]; then + $cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm-rescore-lattice "$gselect_opt" --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \ + $srcdir/final.mdl "ark:gunzip -c $dir/pre_lat.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + fi + rm $dir/pre_lat.*.gz + +else ### For models without speaker vectors: + + if $use_fmllr; then + # Estimate fMLLR transforms (note: these may be on top of any + # fMLLR transforms estimated with the baseline GMM system. + if [ $stage -le 5 ]; then # compute fMLLR transforms. + echo "$0: computing fMLLR transforms." + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + gunzip -c $dir/pre_lat.JOB.gz \| \ + sgmm-rescore-lattice --utt2spk=ark:$sdata/JOB/utt2spk \ + "$gselect_opt" $srcdir/final.mdl ark:- "$feats" ark:- \| \ + lattice-prune --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$vecs_beam ark:- ark:- \| \ + lattice-to-post --acoustic-scale=$acwt ark:- ark:- \| \ + weight-silence-post 0.0 $silphonelist $srcdir/final.mdl ark:- ark:- \| \ + sgmm-est-fmllr --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" \ + --fmllr-iters=$fmllr_iters --fmllr-min-count=$fmllr_min_count \ + $srcdir/final.fmllr_mdl "$feats" ark,s,cs:- "ark:$dir/trans.JOB" || exit 1; + fi + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" + fi + +# Now rescore the state-level lattices with the adapted features and the +# corresponding model. Prune and determinize the lattices to limit +# their size. + if [ $stage -le 6 ] && $use_fmllr; then + $cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm-rescore-lattice "$gselect_opt" --utt2spk=ark:$sdata/JOB/utt2spk \ + $srcdir/final.mdl "ark:gunzip -c $dir/pre_lat.JOB.gz|" "$feats" ark:- \| \ + lattice-determinize-pruned --acoustic-scale=$acwt --beam=$lattice_beam ark:- \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + rm $dir/pre_lat.*.gz + else # Already done with decoding if no adaptation needed. + for n in `seq 1 $nj`; do + mv $dir/pre_lat.${n}.gz $dir/lat.${n}.gz + done + fi + +fi + +# The output of this script is the files "lat.*.gz"-- we'll rescore this at +# different acoustic scales to get the final output. + + +if [ $stage -le 7 ]; then + if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." 
&& exit 1; + echo "score best paths" + local/score.sh --cmd "$cmd" $data $lang $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } + # echo "score confidence and timing with sclite" + # local/score_sclite_conf.sh --cmd "$cmd" --language turkish $data $lang $dir + fi +fi +echo "Decoding done." +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_sgmm_rescore.sh b/tools/ASR_2ch_track/steps/decode_sgmm_rescore.sh new file mode 100644 index 0000000000000000000000000000000000000000..398c8931e7f2c8246aa28db04d5f3e010ff154d5 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_sgmm_rescore.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script does decoding with an SGMM system, by rescoring lattices +# generated from a previous SGMM system. The directory with the lattices +# is assumed to contain speaker vectors, if used. Basically it rescores +# the lattices one final time, using the same setup as the final decoding +# pass of the source dir. The assumption is that the model may have +# been discriminatively trained. + +# If the system was built on top of fMLLR transforms from a conventional system, +# you should provide the --transform-dir option. + +# Begin configuration section. +transform_dir= # dir to find fMLLR transforms. +cmd=run.pl +iter=final +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 4 ]; then + echo "Usage: steps/decode_sgmm_rescore.sh [options] " + echo " e.g.: steps/decode_sgmm_rescore.sh --transform-dir exp/tri3b/decode_dev93_tgpr \\" + echo " exp/sgmm3a/graph_tgpr data/test_dev93 exp/sgmm3a/decode_dev93_tgpr exp/sgmm3a_mmi/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --config # config containing options" + echo " --cmd # Command to run in parallel with" + echo " --iter # iteration of model to use (default: final)" + exit 1; +fi + +graphdir=$1 +data=$2 +olddir=$3 +dir=$4 +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. + +for f in $graphdir/words.txt $data/feats.scp $olddir/lat.1.gz $olddir/gselect.1.gz \ + $srcdir/$iter.mdl; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +nj=`cat $olddir/num_jobs` || exit 1; +sdata=$data/split$nj; +gselect_opt="--gselect=ark,s,cs:gunzip -c $olddir/gselect.JOB.gz|" +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -f $olddir/vecs.1 ]; then + echo "$0: using speaker vectors from $olddir" + spkvecs_opt="--spk-vecs=ark:$olddir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk" +else + echo "$0: no speaker vectors found." + spkvecs_opt= +fi + + +## Set up features. 
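+## The feature type is inferred from the source dir: a final.mat there means the
+## model was trained on spliced LDA+MLLT features, otherwise on delta features.
+## Illustrative check, with an assumed model dir:
+##   [ -f exp/sgmm4a_mmi/final.mat ] && echo lda || echo delta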
+if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1; + [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \ + && echo "$0: #jobs mismatch with transform-dir." && exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" +elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then + echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." +fi + +if [ -f $olddir/trans.1 ]; then + echo "$0: using (in addition to any previous transforms) transforms from $olddir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$olddir/trans.JOB ark:- ark:- |" +fi +## + +# Rescore the state-level lattices with the model provided. Just +# one command in this script. +echo "$0: rescoring lattices with SGMM model in $srcdir/$iter.mdl" +$cmd JOB=1:$nj $dir/log/rescore.JOB.log \ + sgmm-rescore-lattice "$gselect_opt" $spkvecs_opt \ + $srcdir/$iter.mdl "ark:gunzip -c $olddir/lat.JOB.gz|" "$feats" \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; + +[ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; +local/score.sh --cmd "$cmd" $data $graphdir $dir + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_si.sh b/tools/ASR_2ch_track/steps/decode_si.sh new file mode 100644 index 0000000000000000000000000000000000000000..f2bc1d367fddf0362ff10c924b531baa0133dfe2 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_si.sh @@ -0,0 +1,136 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Begin configuration section. +transform_dir= # this option won't normally be used, but it can be used if you want to + # supply existing fMLLR transforms when decoding. +iter= +model= # You can specify the model to use (e.g. if you want to use the .alimdl) +stage=0 +nj=4 +cmd=run.pl +max_active=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 # note: only really affects pruning (scoring is on lattices). +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +scoring_opts= +# note: there are no more min-lmwt and max-lmwt options, instead use +# e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20" +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/decode.sh [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the model is." 
+ echo "e.g.: steps/decode.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr" + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --model # which model to use (e.g. to" + echo " # specify the final.alimdl)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --transform-dir # dir to find fMLLR transforms " + echo " --acwt # acoustic scale used for lattice generation " + echo " --scoring-opts # options to local/score.sh" + echo " --num-threads # number of threads to use, default 1." + echo " --parallel-opts # ignored now, present for historical reasons." + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$model" ]; then # if --model was not specified on the command line... + if [ -z $iter ]; then model=$srcdir/final.mdl; + else model=$srcdir/$iter.mdl; fi +fi + +if [ $(basename $model) != final.alimdl ] ; then + # Do not use the $srcpath -- look at the path where the model is + if [ -f $(dirname $model)/final.alimdl ] && [ -z "$transform_dir" ]; then + echo -e '\n\n' + echo $0 'WARNING: Running speaker independent system decoding using a SAT model!' + echo $0 'WARNING: This is OK if you know what you are doing...' + echo -e '\n\n' + fi +fi + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "decode.sh: no such file $f" && exit 1; +done + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode.sh: feature type is $feat_type"; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ ! -s $transform_dir/num_jobs ] && \ + echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1; + nj_orig=$(cat $transform_dir/num_jobs) + if [ $nj -ne $nj_orig ]; then + # Copy the transforms into an archive with an index. + echo "$0: num-jobs for transforms mismatches, so copying them." 
+ for n in $(seq $nj_orig); do cat $transform_dir/trans.$n; done | \ + copy-feats ark:- ark,scp:$dir/trans.ark,$dir/trans.scp || exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/trans.scp ark:- ark:- |" + else + # number of jobs matches with alignment dir. + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" + fi +fi + +if [ $stage -le 0 ]; then + if [ -f "$graphdir/num_pdfs" ]; then + [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \ + { echo "Mismatch in number of pdfs with $model"; exit 1; } + fi + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/decode_with_map.sh b/tools/ASR_2ch_track/steps/decode_with_map.sh new file mode 100644 index 0000000000000000000000000000000000000000..ab507debd118c4b2cd399a2b7ec1d0590e07f9f2 --- /dev/null +++ b/tools/ASR_2ch_track/steps/decode_with_map.sh @@ -0,0 +1,119 @@ +#!/bin/bash + +# Copyright 2012 Neha Agrawal, Cisco Systems; +# Johns Hopkins University (Author: Daniel Povey); +# +# Apache 2.0 + +# Begin configuration section. +transform_dir= +iter= +model= # You can specify the model to use (e.g. if you want to use the .alimdl) +nj=4 +cmd=run.pl +max_active=7000 +beam=13.0 +lattice_beam=6.0 +acwt=0.083333 # note: only really affects pruning (scoring is on lattices). +mean_tau=20 +weight_tau=10 +flags=mw # could also contain "v" for variance; the default + # tau for that is 50. +stage=1 +skip_scoring=false +# End configuration section. + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; +if [ $# != 3 ]; then + echo "Usage: steps/decode.sh [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the model is." + echo "e.g.: steps/decode.sh exp/mono/graph_tgpr data/test_dev93 exp/mono/decode_dev93_tgpr" + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --model # which model to use (e.g. to" + echo " # specify the final.alimdl)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --transform-dir # dir to find fMLLR transforms " + echo " # speaker-adapted decoding" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$model" ]; then # if --model was not specified on the command line... 
+ if [ -z $iter ]; then model=$srcdir/final.mdl; + else model=$srcdir/$iter.mdl; fi +fi + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "decode.sh: no such file $f" && exit 1; +done + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "decode.sh: feature type is $feat_type"; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ "`cat $transform_dir/num_jobs`" -ne $nj ] && \ + echo "Mismatch in number of jobs with $transform_dir"; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +fi + +if [ $stage -le 1 ]; then + echo "Doing first-pass decoding before MAP decoding." + $cmd JOB=1:$nj $dir/log/decode_pass1.JOB.log \ + gmm-decode-faster --max-active=$max_active --beam=$beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst "$feats" ark:$dir/tmp.JOB.tra ark:$dir/pass1_decode.JOB.ali || exit 1; +fi + +if [ $stage -le 2 ]; then + echo "Computing MAP stats and doing MAP-adapted decoding" + $cmd JOB=1:$nj $dir/log/decode_pass2.JOB.log \ + ali-to-post ark:$dir/pass1_decode.JOB.ali ark:- \| \ + gmm-adapt-map --mean-tau=$mean_tau --weight-tau=$weight_tau \ + --update-flags=$flags --spk2utt=ark:$sdata/JOB/spk2utt \ + $model "$feats" ark:- ark:- \| \ + gmm-latgen-map --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --utt2spk=ark:$sdata/JOB/utt2spk --max-active=$max_active --beam=$beam \ + --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model ark,s,cs:- $graphdir/HCLG.fst "$feats" "ark:|gzip -c >$dir/lat.JOB.gz" +fi +#rm -f $dir/pass1_decode.*.ali +#rm -f $dir/tmp.*.tra + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $data $graphdir $dir || + { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; } +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/get_ctm.sh b/tools/ASR_2ch_track/steps/get_ctm.sh new file mode 100644 index 0000000000000000000000000000000000000000..2f2f6794e3d63af4ee906cc76427e6b02808d4af --- /dev/null +++ b/tools/ASR_2ch_track/steps/get_ctm.sh @@ -0,0 +1,88 @@ +#!/bin/bash +# Copyright Johns Hopkins University (Author: Daniel Povey) 2012. Apache 2.0. + +# This script produces CTM files from a decoding directory that has lattices +# present. + + +# begin configuration section. +cmd=run.pl +stage=0 +frame_shift=0.01 +min_lmwt=5 +max_lmwt=20 +use_segments=true # if we have a segments file, use it to convert + # the segments to be relative to the original files. +#end configuration section. 
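+# Example invocation (hypothetical paths), for lattices generated with a 30 ms
+# frame shift:
+#   steps/get_ctm.sh --frame-shift 0.03 data/dev data/lang exp/tri4a/decode_dev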
+ +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: $0 [options] " + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + echo " --stage (0|1|2) # start scoring script from part-way through." + echo " --use-segments (true|false) # use segments and reco2file_and_channel files " + echo " # to produce a ctm relative to the original audio" + echo " # files, with channel information (typically needed" + echo " # for NIST scoring)." + echo " --frame-shift (default=0.01) # specify this if your lattices have a frame-shift" + echo " # not equal to 0.01 seconds" + echo "e.g.:" + echo "$0 data/train data/lang exp/tri4a/decode/" + echo "See also: steps/get_train_ctm.sh" + exit 1; +fi + +data=$1 +lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied. +dir=$3 + +model=$dir/../final.mdl # assume model one level up from decoding dir. + + +for f in $lang/words.txt $model $dir/lat.1.gz; do + [ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1; +done + +name=`basename $data`; # e.g. eval2000 + +mkdir -p $dir/scoring/log + +if [ $stage -le 0 ]; then + if [ -f $data/segments ] && $use_segments; then + f=$data/reco2file_and_channel + [ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1; + filter_cmd="utils/convert_ctm.pl $data/segments $data/reco2file_and_channel" + else + filter_cmd=cat + fi + + if [ -f $lang/phones/word_boundary.int ]; then + $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/get_ctm.LMWT.log \ + set -o pipefail '&&' mkdir -p $dir/score_LMWT/ '&&' \ + lattice-1best --lm-scale=LMWT "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \ + lattice-align-words $lang/phones/word_boundary.int $model ark:- ark:- \| \ + nbest-to-ctm --frame-shift=$frame_shift ark:- - \| \ + utils/int2sym.pl -f 5 $lang/words.txt \| \ + $filter_cmd '>' $dir/score_LMWT/$name.ctm || exit 1; + else + if [ ! -f $lang/phones/align_lexicon.int ]; then + echo "$0: neither $lang/phones/word_boundary.int nor $lang/phones/align_lexicon.int exists: cannot align." + exit 1; + fi + + $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/get_ctm.LMWT.log \ + set -o pipefail '&&' mkdir -p $dir/score_LMWT/ '&&' \ + lattice-1best --lm-scale=LMWT "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \ + lattice-align-words-lexicon $lang/phones/align_lexicon.int $model ark:- ark:- \| \ + nbest-to-ctm --frame-shift=$frame_shift ark:- - \| \ + utils/int2sym.pl -f 5 $lang/words.txt \| \ + $filter_cmd '>' $dir/score_LMWT/$name.ctm || exit 1; + fi +fi + + diff --git a/tools/ASR_2ch_track/steps/get_fmllr_basis.sh b/tools/ASR_2ch_track/steps/get_fmllr_basis.sh new file mode 100644 index 0000000000000000000000000000000000000000..a05c6f2c01a2ae1a8c705834a378a2cf2430f37f --- /dev/null +++ b/tools/ASR_2ch_track/steps/get_fmllr_basis.sh @@ -0,0 +1,94 @@ +#!/bin/bash + +# Copyright 2012 Carnegie Mellon University (Author: Yajie Miao) +# Johns Hopkins University (Author: Daniel Povey) + +# Decoding script that computes basis for basis-fMLLR (see decode_fmllr_basis.sh). +# This can be on top of delta+delta-delta, or LDA+MLLT features. + +stage=0 +# Parameters in alignment of training data +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +per_utt=true # If true, then treat each utterance as a separate speaker for purposes of + # basis training... 
this is recommended if the number of actual speakers in your + # training set is less than (feature-dim) * (feature-dim+1). +silence_weight=0.01 +cmd=run.pl +# End configuration section + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/get_fmllr_basis.sh [options] " + echo " e.g.: steps/decode_basis_fmllr.sh data/train_si84 data/lang exp/tri3b/" + echo "Note: we currently assume that this is the same data you trained the model with." + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --cmd # Command to run in parallel with" + exit 1; +fi + +data=$1 +lang=$2 +dir=$3 + +nj=`cat $dir/num_jobs` || exit 1; +sdata=$data/split$nj; +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +splice_opts=`cat $dir/splice_opts 2>/dev/null` # frame-splicing options. +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` + +silphonelist=`cat $lang/phones/silence.csl` || exit 1; + +for f in $data/feats.scp $dir/final.mdl $dir/ali.1.gz; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +# Set up the unadapted features "$sifeats". +if [ -f $dir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +# Set up the adapted features "$feats" for training set. +if [ -f $srcdir/trans.1 ]; then + feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$srcdir/trans.JOB ark:- ark:- |"; +else + feats="$sifeats"; +fi + + +if $per_utt; then + spk2utt_opt= # treat each utterance as separate speaker when computing basis. + echo "Doing per-utterance adaptation for purposes of computing the basis." +else + echo "Doing per-speaker adaptation for purposes of computing the basis." + [ `cat $sdata/spk2utt | wc -l` -lt $[41*40] ] && \ + echo "Warning: number of speakers is small, might be better to use --per-utt=true." + spk2utt_opt="--spk2utt=ark:$sdata/JOB/spk2utt" +fi + +# Note: we get Gaussian level alignments with the "final.mdl" and the +# speaker adapted features. +$cmd JOB=1:$nj $dir/log/basis_acc.JOB.log \ + ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ + weight-silence-post $silence_weight $silphonelist $dir/final.mdl ark:- ark:- \| \ + gmm-post-to-gpost $dir/final.mdl "$feats" ark:- ark:- \| \ + gmm-basis-fmllr-accs-gpost $spk2utt_opt \ + $dir/final.mdl "$sifeats" ark,s,cs:- $dir/basis.acc.JOB || exit 1; + +# Compute the basis matrices. 
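+# gmm-basis-fmllr-training reads all the per-job accumulators and writes the
+# basis to $dir/fmllr.basis, which decode_fmllr_basis.sh uses at decode time.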
+$cmd $dir/log/basis_training.log \ + gmm-basis-fmllr-training $dir/final.mdl $dir/fmllr.basis $dir/basis.acc.* || exit 1; +rm $dir/basis.acc.* 2>/dev/null + +exit 0; + diff --git a/tools/ASR_2ch_track/steps/get_lexicon_probs.sh b/tools/ASR_2ch_track/steps/get_lexicon_probs.sh new file mode 100644 index 0000000000000000000000000000000000000000..9d065a14daa9a243e0f09302a0e0def8774c839e --- /dev/null +++ b/tools/ASR_2ch_track/steps/get_lexicon_probs.sh @@ -0,0 +1,227 @@ +#!/bin/bash +# Copyright 2013 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + + +# From a training or alignment directory, and an original lexicon.txt and lang/ +# directory, obtain a new lexicon with pronunciation probabilities. +# Note: this script is currently deprecated, the recipes are using a different +# script in utils/dict_dir_add_pronprobs.sh. + + +# Begin configuration section. +stage=0 +smooth_count=1.0 # Amount of count to add corresponding to each original lexicon entry; + # this corresponds to add-one smoothing of the pron-probs. +max_one=true # If true, normalize the pron-probs so the maximum value for each word is 1.0, + # rather than summing to one. This is quite standard. + +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 6 ]; then + echo "Usage: steps/get_lexicon_probs.sh " + echo "e.g.: steps/get_lexicon_probs.sh data/train data/lang exp/tri5 data/local/lexicon.txt \\" + echo " exp/tri5_lexprobs data/local_withprob/lexicon.txt" + echo "Note: we assume you ran using word-position-dependent phones but both the old and new lexicon will not have" + echo "these markings. We also assume the new lexicon will have pron-probs but the old one does not; this limitation" + echo "of the script can be removed later." + echo "Main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --stage # used to control partial re-running." + echo " --max-one # If true, normalize so max prob of each" + echo " # word is one. Default: true" + echo " --smooth # Amount to smooth each count by (default: 1.0)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +old_lexicon=$4 +dir=$5 +new_lexicon=$6 + +oov=`cat $lang/oov.int` || exit 1; +nj=`cat $srcdir/num_jobs` || exit 1; + +for f in $data/text $lang/L.fst $lang/phones/word_boundary.int $srcdir/ali.1.gz $old_lexicon; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +mkdir -p $dir/log +utils/split_data.sh $data $nj # Make sure split data-dir exists. 
+sdata=$data/split$nj + + +mkdir -p $dir/log + +if [ $stage -le 0 ]; then + + ( ( for n in `seq $nj`; do gunzip -c $srcdir/ali.$n.gz; done ) | \ + linear-to-nbest ark:- "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $data/text |" '' '' ark:- | \ + lattice-align-words $lang/phones/word_boundary.int $srcdir/final.mdl ark:- ark:- | \ + lattice-to-phone-lattice --replace-words=false $srcdir/final.mdl ark:- ark,t:- | \ + awk '{ if (NF == 4) { word_phones = sprintf("%s %s", $3, $4); count[word_phones]++; } } + END { for(key in count) { print count[key], key; } }' | \ + sed s:0,0,:: | awk '{print $2, $1, $3;}' | sed 's/_/ /g' | \ + utils/int2sym.pl -f 3- $lang/phones.txt | \ + sed -E 's/_I( |$)/ /g' | sed -E 's/_E( |$)/ /g' | sed -E 's/_B( |$)/ /g' | sed -E 's/_S( |$)/ /g' | \ + utils/int2sym.pl -f 1 $lang/words.txt > $dir/lexicon_counts.txt + ) 2>&1 | tee $dir/log/get_fsts.log + +fi + +cat $old_lexicon | awk '{if (!($2 > 0.0 && $2 < 1.0)) { exit(1); }}' && \ + echo "Error: old lexicon $old_lexicon appears to have pron-probs; we don't expect this." && \ + exit 1; + +mkdir -p `dirname $new_lexicon` || exit 1; + +if [ $stage -le 1 ]; then + grep -v -w '^' $dir/lexicon_counts.txt | \ + perl -e ' ($old_lexicon, $smooth_count, $max_one) = @ARGV; + ($smooth_count >= 0) || die "Invalid smooth_count $smooth_count"; + ($max_one eq "true" || $max_one eq "false") || die "Invalid max_one variable $max_one"; + open(O, "<$old_lexicon")||die "Opening old-lexicon file $old_lexicon"; + while() { + $_ =~ m/(\S+)\s+(.+)/ || die "Bad old-lexicon line $_"; + $word = $1; + $orig_pron = $2; + # Remember the mapping from canonical prons to original prons: in the case of + # syllable based systems we want to remember the locations of tabs in + # the original lexicon. + $pron = join(" ", split(" ", $orig_pron)); + $orig_pron{$word,$pron} = $orig_pron; + $count{$word,$pron} += $smooth_count; + $tot_count{$word} += $smooth_count; + } + while () { + $_ =~ m/(\S+)\s+(\S+)\s+(.+)/ || die "Bad new-lexicon line $_"; + $word = $1; + $this_count = $2; + $pron = join(" ", split(" ", $3)); + $count{$word,$pron} += $this_count; + $tot_count{$word} += $this_count; + } + if ($max_one eq "true") { # replace $tot_count{$word} with max count + # of any pron. + %tot_count = {}; # set to empty assoc array. + foreach $key (keys %count) { + ($word, $pron) = split($; , $key); # $; is separator for strings that index assoc. arrays. + $this_count = $count{$key}; + if (!defined $tot_count{$word} || $this_count > $tot_count{$word}) { + $tot_count{$word} = $this_count; + } + } + } + foreach $key (keys %count) { + ($word, $pron) = split($; , $key); # $; is separator for strings that index assoc. arrays. + $this_orig_pron = $orig_pron{$key}; + if (!defined $this_orig_pron) { die "Word $word and pron $pron did not appear in original lexicon."; } + if (!defined $tot_count{$word}) { die "Tot-count not defined for word $word."; } + $prob = $count{$key} / $tot_count{$word}; + print "$word\t$prob\t$this_orig_pron\n"; # Output happens here. + } ' $old_lexicon $smooth_count $max_one > $new_lexicon || exit 1; +fi + +exit 0; + +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; +cp $srcdir/final.occs $dir; +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options. +cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options. 
+ + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +## Set up model and alignment model. +mdl=$srcdir/final.mdl +if [ -f $srcdir/final.alimdl ]; then + alimdl=$srcdir/final.alimdl +else + alimdl=$srcdir/final.mdl +fi +[ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1; +alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |" +mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |" + + +## Work out where we're getting the graphs from. +if $use_graphs; then + [ "$nj" != "`cat $srcdir/num_jobs`" ] && \ + echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1; + [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1; + graphdir=$srcdir +else + graphdir=$dir + if [ $stage -le 0 ]; then + echo "$0: compiling training graphs" + tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; + fi +fi + + +if [ $stage -le 1 ]; then + echo "$0: aligning data in $data using $alimdl and speaker-independent features." + $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 2 ]; then + echo "$0: computing fMLLR transforms" + if [ "$alimdl" != "$mdl" ]; then + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \ + gmm-est-fmllr-gpost --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \ + ark,s,cs:- ark:$dir/trans.JOB || exit 1; + else + $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \ + ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \ + gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ + --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \ + ark,s,cs:- ark:$dir/trans.JOB || exit 1; + fi +fi + +feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" + +if [ $stage -le 3 ]; then + echo "$0: doing final alignment." + $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \ + gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \ + "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +rm $dir/pre_ali.*.gz + +echo "$0: done aligning data." 
+ +utils/summarize_warnings.pl $dir/log + +exit 0; diff --git a/tools/ASR_2ch_track/steps/get_prons.sh b/tools/ASR_2ch_track/steps/get_prons.sh new file mode 100644 index 0000000000000000000000000000000000000000..e02b3d84d6d4bf65ed73f3dd4bfec8dafc93846f --- /dev/null +++ b/tools/ASR_2ch_track/steps/get_prons.sh @@ -0,0 +1,191 @@ +#!/bin/bash +# Copyright 2014 Johns Hopkins University (Author: Daniel Povey) +# 2014 Guoguo Chen +# Apache 2.0 + +# Begin configuration section. +cmd=run.pl +stage=1 +lmwt=10 +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "usage: $0 " + echo "e.g.: $0 data/train data/lang exp/tri3" + echo "or: $0 data/train data/lang exp/tri3/decode_dev" + echo "This script writes files prons.*.gz in the directory provided, which must" + echo "contain alignments (ali.*.gz) or lattices (lat.*.gz). These files are as" + echo "output by nbest-to-prons (see its usage message)." + echo "Main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --lmwt # scale for LM, only applicable" + echo " # for lattice input (default: 10)" + exit 1; +fi + +data=$1 +lang=$2 +dir=$3 + +for f in $data/utt2spk $lang/words.txt $dir/num_jobs; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1; +done + +nj=$(cat $dir/num_jobs) || exit 1; +sdata=$data/split$nj +oov=`cat $lang/oov.int` || exit 1; +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + + +if [ -f $dir/final.mdl ]; then + mdl=$dir/final.mdl +else + if [ -f $dir/../final.mdl ]; then + mdl=$dir/../final.mdl # e.g. decoding directories. + else + echo "$0: expected $dir/final.mdl or $dir/../final.mdl to exist." + exit 1; + fi +fi + +if [ -f $lang/phones/word_boundary.int ]; then + align_words_cmd="lattice-align-words $lang/phones/word_boundary.int $mdl ark:- ark:-" +else + if [ ! -f $lang/phones/align_lexicon.int ]; then + echo "$0: expected either $lang/phones/word_boundary.int or $lang/phones/align_lexicon.int to exist." + exit 1; + fi + align_words_cmd="lattice-align-words-lexicon $lang/phones/align_lexicon.int $mdl ark:- ark:-" +fi + +if [ -f $dir/ali.1.gz ]; then + echo "$0: $dir/ali.1.gz exists, so starting from alignments." + + if [ $stage -le 1 ]; then + rm $dir/prons.*.gz 2>/dev/null + $cmd JOB=1:$nj $dir/log/nbest_to_prons.JOB.log \ + linear-to-nbest "ark:gunzip -c $dir/ali.JOB.gz|" \ + "ark:sym2int.pl --map-oov $oov -f 2- $lang/words.txt <$sdata/JOB/text |" \ + "" "" ark:- \| $align_words_cmd \| \ + nbest-to-prons $mdl ark:- "|gzip -c >$dir/prons.JOB.gz" || exit 1; + fi +else + if [ ! -f $dir/lat.1.gz ]; then + echo "$0: expected either $dir/ali.1.gz or $dir/lat.1.gz to exist." + exit 1; + fi + echo "$0: $dir/lat.1.gz exists, so starting from lattices." 
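+  # With lattice input we take the single best path at LM scale $lmwt, align it
+  # to words, and write the per-word pronunciations with nbest-to-prons,
+  # analogously to the alignment branch above.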
+ + if [ $stage -le 1 ]; then + rm $dir/prons.*.gz 2>/dev/null + $cmd JOB=1:$nj $dir/log/nbest_to_prons.JOB.log \ + lattice-1best --lm-scale=$lmwt "ark:gunzip -c $dir/lat.JOB.gz|" ark:- \| \ + $align_words_cmd \| \ + nbest-to-prons $mdl ark:- "|gzip -c >$dir/prons.JOB.gz" || exit 1; + fi +fi + + +if [ $stage -le 2 ]; then + gunzip -c $dir/prons.*.gz | \ + awk '{ $1=""; $2=""; $3=""; count[$0]++; } END{for (k in count) { print count[k], k; }}' > $dir/pron_counts.int || exit 1; +fi + +if [ $stage -le 3 ]; then + cat $dir/pron_counts.int | utils/int2sym.pl -f 2 $lang/words.txt | \ + utils/int2sym.pl -f 3- $lang/phones.txt | sort -nr > $dir/pron_counts.txt +fi + +if [ $stage -le 4 ]; then + if [ -f $lang/phones/word_boundary.int ]; then + # remove the _B, _I, _S, _E markers from phones; this is often convenient + # if we want to go back to a word-position-independent source lexicon. + cat $dir/pron_counts.txt | perl -ane '@A = split(" ", $_); + for ($n=2;$n<@A;$n++) { $A[$n] =~ s/_[BISE]$//; } print join(" ", @A) . "\n"; ' >$dir/pron_counts_nowb.txt + fi +fi + +if [ $stage -le 5 ]; then + # Here we figure the count of silence before and after words (actually prons) + # 1. Create a text like file, but instead of putting words, we write + # "word pron" pairs. We change the format of prons.*.gz from pron-per-line + # to utterance-per-line (with "word pron" pairs tab-separated), and add + # and at the begin and end of each sentence. The _B, _I, _S, _E + # markers are removed from phones. + gunzip -c $dir/prons.*.gz | utils/int2sym.pl -f 4 $lang/words.txt | \ + utils/int2sym.pl -f 5- $lang/phones.txt | cut -d ' ' -f 1,4- | awk ' + BEGIN { utter_id = ""; } + { + if (utter_id == "") { utter_id = $1; printf("%s\t", utter_id); } + else if (utter_id != $1) { + printf "\t\n"; utter_id = $1; printf("%s\t", utter_id); + } + printf("\t%s", $2); + for (n = 3; n <= NF; n++) { sub("_[BISE]$", "", $n); printf(" %s", $n); } + } + END { printf "\t\n"; }' > $dir/pron_perutt_nowb.txt + + # 2. Collect bigram counts for words. To be more specific, we are actually + # collecting counts for "v ? w", where "?" represents silence or + # non-silence. + cat $dir/pron_perutt_nowb.txt | sed 's/[^\t]*\t//g' | perl -e ' + while (<>) { + chomp; @col = split("\t"); + for($i = 1; $i < scalar(@col) - 1; $i += 1) { + $bigram{$col[$i] . "\t" . $col[$i + 1]} += 1; + } + } + foreach $key (keys %bigram) { + print "$bigram{$key}\t$key\n"; + }' > $dir/pron_bigram_counts_nowb.txt + + # 3. Collect bigram counts for silence and words. the count file has 4 fields + # for counts, followed by the "word pron" pair. All fields are separated by + # spaces: + # ... + cat $dir/pron_perutt_nowb.txt | cut -f 2- | perl -e ' + %sil_wpron = (); %nonsil_wpron = (); %wpron_sil = (); %wpron_nonsil = (); + %words = (); + while () { + chomp; + @col = split(/[\t]+/, $_); @col >= 2 || die "'$0': bad line \"$_\"\n"; + for ($n = 0; $n < @col - 1; $n++) { + # First word is not silence, collect the wpron_sil and wpron_nonsil + # stats. + if ($col[$n] !~ m/^ /) { + if ($col[$n + 1] =~ m/^ /) { $wpron_sil{$col[$n]} += 1; } + else { $wpron_nonsil{$col[$n]} += 1; } + $words{$col[$n]} = 1; + } + # Second word is not silence, collect the sil_wpron and nonsil_wpron + # stats. 
+ if ($col[$n + 1] !~ m/^ /) { + if ($col[$n] =~ m/^ /) { $sil_wpron{$col[$n + 1]} += 1; } + else { $nonsil_wpron{$col[$n + 1]} += 1; } + $words{$col[$n + 1]} = 1; + } + } + } + foreach $wpron (sort keys %words) { + $sil_wpron{$wpron} += 0; $nonsil_wpron{$wpron} += 0; + $wpron_sil{$wpron} += 0; $wpron_nonsil{$wpron} += 0;; + print "$sil_wpron{$wpron} $nonsil_wpron{$wpron} "; + print "$wpron_sil{$wpron} $wpron_nonsil{$wpron} $wpron\n"; + } + '> $dir/sil_counts_nowb.txt +fi + +echo "$0: done writing prons to $dir/prons.*.gz, silence counts in " +echo "$0: $dir/sil_counts_nowb.txt and pronunciation counts in " +echo "$0: $dir/pron_counts.{int,txt}" +if [ -f $lang/phones/word_boundary.int ]; then + echo "$0: ... and also in $dir/pron_counts_nowb.txt" +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/get_train_ctm.sh b/tools/ASR_2ch_track/steps/get_train_ctm.sh new file mode 100644 index 0000000000000000000000000000000000000000..10b29708d84706ef3c6747fb4925bbd3ceeb9df3 --- /dev/null +++ b/tools/ASR_2ch_track/steps/get_train_ctm.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# Copyright Johns Hopkins University (Author: Daniel Povey) 2012. Apache 2.0. + +# This script produces CTM files from a training directory that has alignments +# present. + + +# begin configuration section. +cmd=run.pl +frame_shift=0.01 +stage=0 +use_segments=true # if we have a segments file, use it to convert + # the segments to be relative to the original files. +print_silence=false # if true, will print (optional-silence) arcs. + +#end configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: $0 [options] " + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + echo " --stage (0|1|2) # start scoring script from part-way through." + echo " --use-segments (true|false) # use segments and reco2file_and_channel files " + echo " # to produce a ctm relative to the original audio" + echo " # files, with channel information (typically needed" + echo " # for NIST scoring)." + echo " --frame-shift (default=0.01) # specify this if your alignments have a frame-shift" + echo " # not equal to 0.01 seconds" + echo "e.g.:" + echo "$0 data/train data/lang exp/tri3a_ali" + echo "Produces ctm in: exp/tri3a_ali/ctm" + exit 1; +fi + +data=$1 +lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied. +dir=$3 + + +model=$dir/final.mdl # assume model one level up from decoding dir. + + +for f in $lang/words.txt $model $dir/ali.1.gz $lang/oov.int; do + [ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1; +done + +oov=`cat $lang/oov.int` || exit 1; +nj=`cat $dir/num_jobs` || exit 1; +split_data.sh $data $nj || exit 1; +sdata=$data/split$nj + +mkdir -p $dir/log + +if [ $stage -le 0 ]; then + if [ -f $lang/phones/word_boundary.int ]; then + $cmd JOB=1:$nj $dir/log/get_ctm.JOB.log \ + set -o pipefail '&&' linear-to-nbest "ark:gunzip -c $dir/ali.JOB.gz|" \ + "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $sdata/JOB/text |" \ + '' '' ark:- \| \ + lattice-align-words $lang/phones/word_boundary.int $model ark:- ark:- \| \ + nbest-to-ctm --frame-shift=$frame_shift --print-silence=$print_silence ark:- - \| \ + utils/int2sym.pl -f 5 $lang/words.txt \| \ + gzip -c '>' $dir/ctm.JOB.gz || exit 1 + else + if [ ! 
-f $lang/phones/align_lexicon.int ]; then + echo "$0: neither $lang/phones/word_boundary.int nor $lang/phones/align_lexicon.int exists: cannot align." + exit 1; + fi + $cmd JOB=1:$nj $dir/log/get_ctm.JOB.log \ + set -o pipefail '&&' linear-to-nbest "ark:gunzip -c $dir/ali.JOB.gz|" \ + "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $sdata/JOB/text |" \ + '' '' ark:- \| \ + lattice-align-words-lexicon $lang/phones/align_lexicon.int $model ark:- ark:- \| \ + nbest-to-ctm --frame-shift=$frame_shift --print-silence=$print_silence ark:- - \| \ + utils/int2sym.pl -f 5 $lang/words.txt \| \ + gzip -c '>' $dir/ctm.JOB.gz || exit 1 + fi +fi + +if [ $stage -le 1 ]; then + if [ -f $data/segments ] && $use_segments; then + f=$data/reco2file_and_channel + [ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1; + for n in `seq $nj`; do gunzip -c $dir/ctm.$n.gz; done | \ + utils/convert_ctm.pl $data/segments $data/reco2file_and_channel > $dir/ctm || exit 1; + else + for n in `seq $nj`; do gunzip -c $dir/ctm.$n.gz; done > $dir/ctm || exit 1; + fi + rm $dir/ctm.*.gz +fi + diff --git a/tools/ASR_2ch_track/steps/kl_hmm/build_tree.sh b/tools/ASR_2ch_track/steps/kl_hmm/build_tree.sh new file mode 100644 index 0000000000000000000000000000000000000000..cf3eb16a7e8998b2d685ad00d20f25edb10f9e4a --- /dev/null +++ b/tools/ASR_2ch_track/steps/kl_hmm/build_tree.sh @@ -0,0 +1,152 @@ +#!/bin/bash + +# Copyright 2012-2013 Johns Hopkins University (Author: Daniel Povey), +# Idiap Research Institute (Author: David Imseng) +# Apache 2.0 + +# Begin configuration. +stage=-4 # This allows restarting after partway, when something when wrong. +config= +cmd=run.pl +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +num_iters=35 # Number of iterations of training +max_iter_inc=25 # Last iter to increase #Gauss on. +beam=10 +retry_beam=40 +boost_silence=1.0 # Factor by which to boost silence likelihoods in alignment +power=0.25 # Exponent for number of gaussians according to occurrence counts +cluster_thresh=-1 # for build-tree control final bottom-up clustering of leaves +thresh=20 +use_gpu="no" +nnet_dir= +context_opts= # e.g. set this to "--context-width 5 --central-position 2" for quinphone. +tmpdir= +no_softmax=true +# End configuration. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh; +. parse_options.sh || exit 1; + +if [ $# != 5 ]; then + echo "Usage: steps/train_deltas.sh " + echo "e.g.: steps/train_deltas.sh 2000 data/train_si84_half data/lang exp/mono_ali exp/tri1" + echo "main options (for others, see top of script file)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --config # config containing options" + echo " --stage # stage to do partial re-run from." + echo " --thresh " + echo " --cluster_thresh " + echo " --nnet_dir " + echo " --context_opts " + echo " --tmpdir " + echo " --no-softmax " + exit 1; +fi + +numleaves=$1 +data=$2 +lang=$3 +alidir=$4 +dir=$5 + + +for f in $alidir/final.mdl $alidir/ali.1.gz $data/feats.scp $lang/phones.txt; do + [ ! 
-f $f ] && echo "train_deltas.sh: no such file $f" && exit 1; +done + +numgauss=$numleaves +incgauss=$[($totgauss-$numgauss)/$max_iter_inc] # per-iter increment for #Gauss +ciphonelist=`cat $lang/phones/context_indep.csl` || exit 1; +nj=`cat $alidir/num_jobs` || exit 1; +mkdir -p $dir/log +echo $nj > $dir/num_jobs + +sdata=$data/split$nj; +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +nnet=${nnet_dir}/final.nnet +feature_transform=${nnet_dir}/final.feature_transform + +featsdim="ark:copy-feats scp:$data/feats.scp ark:- |" +nnetfeats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# Optionally add cmvn +if [ -f ${nnet_dir}/norm_vars ]; then + norm_vars=$(cat ${nnet_dir}/norm_vars 2>/dev/null) + [ ! -f $sdata/1/cmvn.scp ] && echo "$0: cannot find cmvn stats $sdata/1/cmvn.scp" && exit 1 + nnetfeats="$nnetfeats apply-cmvn --norm-vars=$norm_vars --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" + featsdim="$featsdim apply-cmvn --norm-vars=$norm_vars --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp ark:- ark:- |" +fi +# Optionally add deltas +if [ -f ${nnet_dir}/delta_order ]; then + delta_order=$(cat ${nnet_dir}/delta_order) + nnetfeats="$nnetfeats add-deltas --delta-order=$delta_order ark:- ark:- |" + featsdim="$featsdim add-deltas --delta-order=$delta_order ark:- ark:- |" +fi + +feats="ark,s,cs:nnet-forward " +if [[ ! -z $feature_transform ]]; then + feats=${feats}" --feature-transform=$feature_transform " +fi +feats=${feats}"--no-softmax=$no_softmax --use-gpu=$use_gpu $nnet \"$nnetfeats\" ark:- |" + +feat_dim=$(feat-to-dim --print-args=false "$featsdim" -) +rm $dir/.error 2>/dev/null + +if [[ ! -z $tmpdir ]]; then + mkdir -p $tmpdir +else + tmpdir=$dir +fi + +if [ $stage -le -3 ]; then + echo "$0: accumulating tree stats" + $cmd JOB=1:$nj $dir/log/acc_tree.JOB.log \ + acc-tree-stats $context_opts --var-floor=1.0 --ci-phones=$ciphonelist $alidir/final.mdl "$feats" \ + "ark:gunzip -c $alidir/ali.JOB.gz|" $tmpdir/JOB.treeacc || exit 1; + sum-tree-stats $dir/treeacc $tmpdir/*.treeacc 2>$dir/log/sum_tree_acc.log || exit 1; + rm $tmpdir/*.treeacc +fi + +if [ $stage -le -2 ]; then + echo "$0: getting questions for tree-building, via clustering" + # preparing questions, roots file... + cluster-phones $context_opts $dir/treeacc $lang/phones/sets.int $dir/questions.int 2> $dir/log/questions.log || exit 1; + cat $lang/phones/extra_questions.int >> $dir/questions.int + compile-questions $context_opts $lang/topo $dir/questions.int $dir/questions.qst 2>$dir/log/compile_questions.log || exit 1; + + echo "$0: building the tree" + # $cmd $dir/log/build_tree.log \ + build-tree $context_opts --verbose=1 --max-leaves=$numleaves --cluster-thresh=$cluster_thresh --thresh=$thresh $dir/treeacc $lang/phones/roots.int \ + $dir/questions.qst $lang/topo $dir/tree &> $dir/log/build_tree.log || exit 1; + + gmm-init-model-flat --dim=$feat_dim $dir/tree $lang/topo $dir/1.mdl + + rm $dir/treeacc +fi + +if [ $stage -le -1 ]; then + # Convert the alignments. 
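+  # convert-ali maps the transition-ids in the old alignments (produced with
+  # $alidir/final.mdl and its tree) onto the newly built tree and model
+  # ($dir/tree, $dir/1.mdl), so the later KL-HMM stages can reuse the existing
+  # alignments without re-aligning the data.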
+ echo "$0: converting alignments from $alidir to use current tree" + $cmd JOB=1:$nj $dir/log/convert.JOB.log \ + convert-ali $alidir/final.mdl $dir/1.mdl $dir/tree \ + "ark:gunzip -c $alidir/ali.JOB.gz|" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +if [ $stage -le 0 ]; then + echo "$0: compiling graphs of transcripts" + $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ + compile-train-graphs $dir/tree $dir/1.mdl $lang/L.fst \ + "ark:utils/sym2int.pl -f 2- $lang/words.txt < $data/split$nj/JOB/text |" \ + "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; +fi + +rm $dir/final.mdl 2>/dev/null +ln -s 1.mdl $dir/final.mdl + +# Summarize warning messages... +utils/summarize_warnings.pl $dir/log + +echo "$0: Done building the tree in $dir" + diff --git a/tools/ASR_2ch_track/steps/kl_hmm/decode_kl_hmm.sh b/tools/ASR_2ch_track/steps/kl_hmm/decode_kl_hmm.sh new file mode 100644 index 0000000000000000000000000000000000000000..d085ac907465e09aa72e1d25536c6cb7a81d1a5e --- /dev/null +++ b/tools/ASR_2ch_track/steps/kl_hmm/decode_kl_hmm.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +# Copyright 2012-2013 Karel Vesely, +# Daniel Povey, +# Idiap Research Institute (Author: David Imseng) +# Apache 2.0 + +# Begin configuration section. +nnet= # Optionally pre-select network to use for getting state-likelihoods +feature_transform= # Optionally pre-select feature transform (in front of nnet) +model= # Optionally pre-select transition model + +stage=0 # stage=1 skips lattice generation +nj=4 +cmd=run.pl +max_active=7000 # maximum of active tokens +max_mem=50000000 # limit the fst-size to 50MB (larger fsts are minimized) +beam=13.0 # GMM:13.0 +lattice_beam=8.0 # GMM:6.0 +acwt=0.1 # GMM:0.0833, note: only really affects pruning (scoring is on lattices). +scoring_opts="--min-lmwt 1 --max-lmwt 12" +skip_scoring=false +use_gpu="no" # disable gpu +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: $0 [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the DNN + transition model is." + echo "e.g.: $0 exp/dnn1/graph_tgpr data/test exp/dnn1/decode_tgpr" + echo "" + echo "This script works on plain or modified features (CMN,delta+delta-delta)," + echo "which are then sent through feature-transform. It works out what type" + echo "of features you used from content of srcdir." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo "" + echo " --nnet # which nnet to use (opt.)" + echo " --feature-transform # select transform in front of nnet (opt.)" + echo " --model # which transition model to use (opt.)" + echo "" + echo " --acwt # select acoustic scale for decoding" + echo " --scoring-opts # options forwarded to local/score.sh" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$nnet" ]; then # if --nnet was not specified on the command line... + nnet=$srcdir/final.nnet; +fi +[ -z "$nnet" ] && echo "Error nnet '$nnet' does not exist!" 
&& exit 1; + +if [ -z "$model" ]; then # if --model was not specified on the command line... + model=$srcdir/final.mdl; +fi + +# find the feature_transform to use +if [ -z "$feature_transform" ]; then + feature_transform=$srcdir/final.feature_transform +fi +if [ ! -f $feature_transform ]; then + echo "Missing feature_transform '$feature_transform'" + exit 1 +fi + +# check that files exist +for f in $sdata/1/feats.scp $nnet_i $nnet $model $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +# Create the feature stream: +feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# Optionally add cmvn +if [ -f $srcdir/norm_vars ]; then + norm_vars=$(cat $srcdir/norm_vars 2>/dev/null) + [ ! -f $sdata/1/cmvn.scp ] && echo "$0: cannot find cmvn stats $sdata/1/cmvn.scp" && exit 1 + feats="$feats apply-cmvn --norm-vars=$norm_vars --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" +fi +# Optionally add deltas +if [ -f $srcdir/delta_order ]; then + delta_order=$(cat $srcdir/delta_order) + feats="$feats add-deltas --delta-order=$delta_order ark:- ark:- |" +fi + + +# Run the decoding in the queue +if [ $stage -le 0 ]; then + $cmd JOB=1:$nj $dir/log/decode.JOB.log \ + nnet-forward --feature-transform=$feature_transform --use-gpu=$use_gpu $nnet "$feats" ark:- \| \ + latgen-faster-mapped --max-active=$max_active --max-mem=$max_mem --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +# Run the scoring +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir || exit 1; +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/kl_hmm/train_kl_hmm.sh b/tools/ASR_2ch_track/steps/kl_hmm/train_kl_hmm.sh new file mode 100644 index 0000000000000000000000000000000000000000..2667bc67afc540fc29c3138aa345658e9d513287 --- /dev/null +++ b/tools/ASR_2ch_track/steps/kl_hmm/train_kl_hmm.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +# Copyright 2012-2013 Karel Vesely, +# Daniel Povey, +# Idiap Research Institute (Author: David Imseng) +# Apache 2.0 + +# Begin configuration section. +nnet= # Optionally pre-select network to use for getting state-likelihoods +feature_transform= # Optionally pre-select feature transform (in front of nnet) +model= # Optionally pre-select transition model +class_frame_counts= # Optionally pre-select class-counts used to compute PDF priors + +stage=0 # stage=1 skips lattice generation +nj=32 +cmd=$decode_cmd +max_active=7000 # maximum of active tokens +max_mem=50000000 # limit the fst-size to 50MB (larger fsts are minimized) +use_gpu="no" # disable gpu +parallel_opts="" +tmpdir= +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: $0 [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the DNN + transition model is." + echo "e.g.: $0 data/train exp/dnn1/kl-hmm-train" + echo "" + echo "This script works on plain or modified features (CMN,delta+delta-delta)," + echo "which are then sent through feature-transform. It works out what type" + echo "of features you used from content of srcdir." 
+ echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo "" + echo " --nnet # which nnet to use (opt.)" + echo " --feature-transform # select transform in front of nnet (opt.)" + echo " --model # which transition model to use (opt.)" + echo " --tmpdir >dir> # Temp directory to store the statistics, becuase they can get big (opt.)" + exit 1; +fi + + +data=$1 +alidir=$2 +dir=$3 +srcdir=`dirname $dir`; # The model directory is one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +if [ -z "$nnet" ]; then # if --nnet was not specified on the command line... + nnet=$srcdir/final.nnet; +fi +[ -z "$nnet" ] && echo "Error nnet '$nnet' does not exist!" && exit 1; + +if [ -z "$model" ]; then # if --model was not specified on the command line... + model=$srcdir/final.mdl; +fi + +# find the feature_transform to use +if [ -z "$feature_transform" ]; then + feature_transform=$srcdir/final.feature_transform +fi +if [ ! -f $feature_transform ]; then + echo "Missing feature_transform '$feature_transform'" + exit 1 +fi + +# check that files exist +for f in $sdata/1/feats.scp $nnet_i $nnet $model; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +# Create the feature stream: +feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# Optionally add cmvn +if [ -f $srcdir/norm_vars ]; then + norm_vars=$(cat $srcdir/norm_vars 2>/dev/null) + [ ! -f $sdata/1/cmvn.scp ] && echo "$0: cannot find cmvn stats $sdata/1/cmvn.scp" && exit 1 + feats="$feats apply-cmvn --norm-vars=$norm_vars --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" +fi +# Optionally add deltas +if [ -f $srcdir/delta_order ]; then + delta_order=$(cat $srcdir/delta_order) + feats="$feats add-deltas --delta-order=$delta_order ark:- ark:- |" +fi + +ali="ark:gunzip -c $alidir/ali.*.gz |" + +if [[ ! -z $tmpdir ]]; then + mkdir -p $tmpdir +else + tmpdir=$dir +fi + +nkl_states=$(hmm-info --print-args=false $alidir/final.mdl | grep pdfs | awk '{ print $NF }') +if [ $stage -le 0 ]; then + $cmd $parallel_opts JOB=1:$nj $dir/log/acc-stats.JOB.log \ + nnet-kl-hmm-acc --nkl-states=${nkl_states} "ark:nnet-forward --feature-transform=$feature_transform --use-gpu=$use_gpu $nnet \"$feats\" ark:- |" "ark:ali-to-pdf --print-args=false $alidir/final.mdl \"$ali\" ark:- |" $tmpdir/kl-hmm-stats.JOB +fi + +sum-matrices $dir/accumulated-kl-hmm-stats $tmpdir/kl-hmm-stats.* + +rm $tmpdir/kl-hmm-stats.* + +nnet-kl-hmm-mat-to-component $dir/kl-hmm.nnet $dir/accumulated-kl-hmm-stats + +nnet-concat $dir/../final.nnet $dir/kl-hmm.nnet $dir/final.nnet + +exit 0; diff --git a/tools/ASR_2ch_track/steps/lmrescore.sh b/tools/ASR_2ch_track/steps/lmrescore.sh new file mode 100644 index 0000000000000000000000000000000000000000..0652c6c13caf6cd1851073bb6cf879002656c6e5 --- /dev/null +++ b/tools/ASR_2ch_track/steps/lmrescore.sh @@ -0,0 +1,119 @@ +#!/bin/bash + +# Begin configuration section. +mode=4 +cmd=run.pl +skip_scoring=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +. 
./utils/parse_options.sh + +if [ $# != 5 ]; then + echo "Do language model rescoring of lattices (remove old LM, add new LM)" + echo "Usage: steps/lmrescore.sh [options] " + echo "options: [--cmd (run.pl|queue.pl [queue opts])] [--mode (1|2|3|4)]" + exit 1; +fi + +[ -f path.sh ] && . ./path.sh; + +oldlang=$1 +newlang=$2 +data=$3 +indir=$4 +outdir=$5 + +oldlm=$oldlang/G.fst +newlm=$newlang/G.fst +! cmp $oldlang/words.txt $newlang/words.txt && echo "Warning: vocabularies may be incompatible." +[ ! -f $oldlm ] && echo Missing file $oldlm && exit 1; +[ ! -f $newlm ] && echo Missing file $newlm && exit 1; +! ls $indir/lat.*.gz >/dev/null && echo "No lattices input directory $indir" && exit 1; + +oldlmcommand="fstproject --project_output=true $oldlm |" +newlmcommand="fstproject --project_output=true $newlm |" + +mkdir -p $outdir/log + +phi=`grep -w '#0' $newlang/words.txt | awk '{print $2}'` + +if [ "$mode" == 4 ]; then + # we have to prepare $outdir/Ldet.fst in this case: determinized + # lexicon (determinized on phones), with disambig syms removed. + # take L_disambig.fst; get rid of transition with "#0 #0" on it; determinize + # with epsilon removal; remove disambiguation symbols. + fstprint $newlang/L_disambig.fst | awk '{if($4 != '$phi'){print;}}' | fstcompile | \ + fstdeterminizestar | fstrmsymbols $newlang/phones/disambig.int >$outdir/Ldet.fst || exit 1; +fi + +nj=`cat $indir/num_jobs` || exit 1; +cp $indir/num_jobs $outdir + + +#for lat in $indir/lat.*.gz; do +# number=`basename $lat | cut -d. -f2`; +# newlat=$outdir/`basename $lat` + +case "$mode" in + 1) # 1 is inexact, it's the original way of doing it. + $cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \ + lattice-lmrescore --lm-scale=-1.0 "ark:gunzip -c $indir/lat.JOB.gz|" "$oldlmcommand" ark:- \| \ + lattice-lmrescore --lm-scale=1.0 ark:- "$newlmcommand" "ark,t:|gzip -c>$outdir/lat.JOB.gz" \ + || exit 1; + ;; + 2) # 2 is equivalent to 1, but using more basic operations, combined. + $cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \ + gunzip -c $indir/lat.JOB.gz \| \ + lattice-scale --acoustic-scale=-1 --lm-scale=-1 ark:- ark:- \| \ + lattice-compose ark:- "fstproject --project_output=true $oldlm |" ark:- \| \ + lattice-determinize ark:- ark:- \| \ + lattice-scale --acoustic-scale=-1 --lm-scale=-1 ark:- ark:- \| \ + lattice-compose ark:- "fstproject --project_output=true $newlm |" ark:- \| \ + lattice-determinize ark:- ark:- \| \ + gzip -c \>$outdir/lat.JOB.gz || exit 1; + ;; + 3) # 3 is "exact" in that we remove the old LM scores accepting any path + # through G.fst (which is what we want as that happened in lattice + # generation), but we add the new one with "phi matcher", only taking + # backoff arcs if an explicit arc did not exist. + $cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \ + gunzip -c $indir/lat.JOB.gz \| \ + lattice-scale --acoustic-scale=-1 --lm-scale=-1 ark:- ark:- \| \ + lattice-compose ark:- "fstproject --project_output=true $oldlm |" ark:- \| \ + lattice-determinize ark:- ark:- \| \ + lattice-scale --acoustic-scale=-1 --lm-scale=-1 ark:- ark:- \| \ + lattice-compose --phi-label=$phi ark:- $newlm ark:- \| \ + lattice-determinize ark:- ark:- \| \ + gzip -c \>$outdir/lat.JOB.gz || exit 1; + ;; + 4) # 4 is also exact (like 3), but instead of subtracting the old LM-scores, + # it removes the old graph scores entirely and adds in the lexicon, + # grammar and transition weights. + mdl=`dirname $indir`/final.mdl + [ ! 
-f $mdl ] && echo No such model $mdl && exit 1; + $cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \ + gunzip -c $indir/lat.JOB.gz \| \ + lattice-scale --lm-scale=0.0 ark:- ark:- \| \ + lattice-to-phone-lattice $mdl ark:- ark:- \| \ + lattice-compose ark:- $outdir/Ldet.fst ark:- \| \ + lattice-determinize ark:- ark:- \| \ + lattice-compose --phi-label=$phi ark:- $newlm ark:- \| \ + lattice-add-trans-probs --transition-scale=1.0 --self-loop-scale=0.1 \ + $mdl ark:- ark:- \| \ + gzip -c \>$outdir/lat.JOB.gz || exit 1; + ;; +esac + +rm $outdir/Ldet.fst 2>/dev/null + +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh --cmd "$cmd" $data $newlang $outdir +else + echo "Not scoring because requested so..." +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/lmrescore_const_arpa.sh b/tools/ASR_2ch_track/steps/lmrescore_const_arpa.sh new file mode 100644 index 0000000000000000000000000000000000000000..092bc53f5e83fb45120223f63a562dc9489242a8 --- /dev/null +++ b/tools/ASR_2ch_track/steps/lmrescore_const_arpa.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# Copyright 2014 Guoguo Chen +# Apache 2.0 + +# This script rescores lattices with the ConstArpaLm format language model. + +# Begin configuration section. +cmd=run.pl +skip_scoring=false +stage=1 +scoring_opts= +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +. ./utils/parse_options.sh + +if [ $# != 5 ]; then + echo "Does language model rescoring of lattices (remove old LM, add new LM)" + echo "Usage: steps/lmrescore.sh [options] \\" + echo " " + echo "options: [--cmd (run.pl|queue.pl [queue opts])]" + exit 1; +fi + +[ -f path.sh ] && . ./path.sh; + +oldlang=$1 +newlang=$2 +data=$3 +indir=$4 +outdir=$5 + +oldlm=$oldlang/G.fst +newlm=$newlang/G.carpa +! cmp $oldlang/words.txt $newlang/words.txt &&\ + echo "$0: Warning: vocabularies may be incompatible." +[ ! -f $oldlm ] && echo "$0: Missing file $oldlm" && exit 1; +[ ! -f $newlm ] && echo "$0: Missing file $newlm" && exit 1; +! ls $indir/lat.*.gz >/dev/null &&\ + echo "$0: No lattices input directory $indir" && exit 1; + +oldlmcommand="fstproject --project_output=true $oldlm |" + +mkdir -p $outdir/log +nj=`cat $indir/num_jobs` || exit 1; +cp $indir/num_jobs $outdir + +if [ $stage -le 1 ]; then + $cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \ + lattice-lmrescore --lm-scale=-1.0 \ + "ark:gunzip -c $indir/lat.JOB.gz|" "$oldlmcommand" ark:- \| \ + lattice-lmrescore-const-arpa --lm-scale=1.0 \ + ark:- "$newlm" "ark,t:|gzip -c>$outdir/lat.JOB.gz" || exit 1; +fi + +if ! $skip_scoring && [ $stage -le 2 ]; then + err_msg="Not scoring because local/score.sh does not exist or not executable." + [ ! -x local/score.sh ] && echo $err_msg && exit 1; + local/score.sh --cmd "$cmd" $scoring_opts $data $newlang $outdir +else + echo "Not scoring because requested so..." +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/lmrescore_rnnlm_lat.sh b/tools/ASR_2ch_track/steps/lmrescore_rnnlm_lat.sh new file mode 100644 index 0000000000000000000000000000000000000000..dc18847712ebad3165cdca1318638673e57c7e42 --- /dev/null +++ b/tools/ASR_2ch_track/steps/lmrescore_rnnlm_lat.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +# Copyright 2015 Guoguo Chen +# Apache 2.0 + +# This script rescores lattices with RNNLM. + +# Begin configuration section. +cmd=run.pl +skip_scoring=false +max_ngram_order=4 +N=10 +inv_acwt=12 +weight=1.0 # Interpolation weight for RNNLM. +# End configuration section. 
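+# Rescoring sketch: with interpolation weight w (the --weight option), the old
+# LM score is first removed with scale -w (lattice-lmrescore or
+# lattice-lmrescore-const-arpa below), and the RNNLM score is then added with
+# scale w, so the resulting lattices carry (1 - w) * old LM score + w * RNNLM
+# score.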
+ +echo "$0 $@" # Print the command line for logging + +. ./utils/parse_options.sh + +if [ $# != 5 ]; then + echo "Does language model rescoring of lattices (remove old LM, add new LM)" + echo "with RNNLM." + echo "" + echo "Usage: $0 [options] \\" + echo " " + echo " e.g.: $0 ./rnnlm data/lang_tg data/test \\" + echo " exp/tri3/test_tg exp/tri3/test_rnnlm" + echo "options: [--cmd (run.pl|queue.pl [queue opts])]" + exit 1; +fi + +[ -f path.sh ] && . ./path.sh; + +oldlang=$1 +rnnlm_dir=$2 +data=$3 +indir=$4 +outdir=$5 + +oldlm=$oldlang/G.fst +if [ -f $oldlang/G.carpa ]; then + oldlm=$oldlang/G.carpa +elif [ ! -f $oldlm ]; then + echo "$0: expecting either $oldlang/G.fst or $oldlang/G.carpa to exist" &&\ + exit 1; +fi + +[ ! -f $oldlm ] && echo "$0: Missing file $oldlm" && exit 1; +[ ! -f $rnnlm_dir/rnnlm ] && echo "$0: Missing file $rnnlm_dir/rnnlm" && exit 1; +[ ! -f $rnnlm_dir/unk.probs ] &&\ + echo "$0: Missing file $rnnlm_dir/unk.probs" && exit 1; +[ ! -f $oldlang/words.txt ] &&\ + echo "$0: Missing file $oldlang/words.txt" && exit 1; +! ls $indir/lat.*.gz >/dev/null &&\ + echo "$0: No lattices input directory $indir" && exit 1; +awk -v n=$0 -v w=$weight 'BEGIN {if (w < 0 || w > 1) { + print n": Interpolation weight should be in the range of [0, 1]"; exit 1;}}' \ + || exit 1; + +oldlm_command="fstproject --project_output=true $oldlm |" + +acwt=`perl -e "print (1.0/$inv_acwt);"` + +mkdir -p $outdir/log +nj=`cat $indir/num_jobs` || exit 1; +cp $indir/num_jobs $outdir + +oldlm_weight=`perl -e "print -1.0 * $weight;"` +if [ "$oldlm" == "$oldlang/G.fst" ]; then + $cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \ + lattice-lmrescore --lm-scale=$oldlm_weight \ + "ark:gunzip -c $indir/lat.JOB.gz|" "$oldlm_command" ark:- \| \ + lattice-lmrescore-rnnlm --lm-scale=$weight \ + --max-ngram-order=$max_ngram_order ark:$rnnlm_dir/unk.probs \ + $oldlang/words.txt ark:- "$rnnlm_dir/rnnlm" \ + "ark,t:|gzip -c>$outdir/lat.JOB.gz" || exit 1; +else + $cmd JOB=1:$nj $outdir/log/rescorelm.JOB.log \ + lattice-lmrescore-const-arpa --lm-scale=$oldlm_weight \ + "ark:gunzip -c $indir/lat.JOB.gz|" "$oldlm_command" ark:- \| \ + lattice-lmrescore-rnnlm --lm-scale=$weight \ + --max-ngram-order=$max_ngram_order ark:$rnnlm_dir/unk.probs \ + $oldlang/words.txt ark:- "$rnnlm_dir/rnnlm" \ + "ark,t:|gzip -c>$outdir/lat.JOB.gz" || exit 1; +fi + +if ! $skip_scoring ; then + err_msg="Not scoring because local/score.sh does not exist or not executable." + [ ! -x local/score.sh ] && echo $err_msg && exit 1; + local/score.sh --cmd "$cmd" $data $oldlang $outdir +else + echo "Not scoring because requested so..." +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/make_denlats.sh b/tools/ASR_2ch_track/steps/make_denlats.sh new file mode 100644 index 0000000000000000000000000000000000000000..6afecfe5246edc9ebf787594cc195aea89a93cef --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_denlats.sh @@ -0,0 +1,174 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# Create denominator lattices for MMI/MPE training. +# Creates its output in $dir/lat.*.gz + +# Begin configuration section. +nj=4 +cmd=run.pl +sub_split=1 +beam=13.0 +lattice_beam=7.0 +acwt=0.1 +max_active=5000 +transform_dir= +max_mem=20000000 # This will stop the processes getting too large. +# This is in bytes, but not "real" bytes-- you have to multiply +# by something like 5 or 10 to get real bytes (not sure why so large) +num_threads=1 +parallel_opts= # ignored now +# End configuration section. 
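+# Illustrative invocation (directory names are hypothetical; a SAT system
+# additionally needs --transform-dir pointing at a directory with fMLLR
+# transforms):
+#   steps/make_denlats.sh --nj 8 --sub-split 20 \
+#     data/train data/lang exp/tri3b exp/tri3b_denlats
+# The output is one gzipped lattice archive per job, $dir/lat.*.gz, plus a
+# unigram decoding graph under $dir/dengraph/.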
+ +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Usage: steps/make_denlats.sh [options] " + echo " e.g.: steps/make_denlats.sh data/train data/lang exp/tri1 exp/tri1_denlats" + echo "Works for (delta|lda) features, and (with --transform-dir option) such features" + echo " plus transforms." + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --sub-split # e.g. 40; use this for " + echo " # large databases so your jobs will be smaller and" + echo " # will (individually) finish reasonably soon." + echo " --transform-dir # directory to find fMLLR transforms." + echo " --num-threads # number of threads per decoding job" + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +sdata=$data/split$nj +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +delta_opts=`cat $srcdir/delta_opts 2>/dev/null` +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +oov=`cat $lang/oov.int` || exit 1; + +mkdir -p $dir + +cp -rH $lang $dir/ + +# Compute grammar FST which corresponds to unigram decoding graph. +new_lang="$dir/"$(basename "$lang") +echo "Making unigram grammar FST in $new_lang" +cat $data/text | utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt | \ + awk '{for(n=2;n<=NF;n++){ printf("%s ", $n); } printf("\n"); }' | \ + utils/make_unigram_grammar.pl | fstcompile | fstarcsort --sort_type=ilabel > $new_lang/G.fst \ + || exit 1; + +# mkgraph.sh expects a whole directory "lang", so put everything in one directory... +# it gets L_disambig.fst and G.fst (among other things) from $dir/lang, and +# final.mdl from $srcdir; the output HCLG.fst goes in $dir/graph. + +echo "Compiling decoding graph in $dir/dengraph" +if [ -s $dir/dengraph/HCLG.fst ] && [ $dir/dengraph/HCLG.fst -nt $srcdir/final.mdl ]; then + echo "Graph $dir/dengraph/HCLG.fst already exists: skipping graph creation." +else + utils/mkgraph.sh $new_lang $srcdir $dir/dengraph || exit 1; +fi + +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "$0: using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." && exit 1; + [ "`cat $transform_dir/num_jobs`" -ne "$nj" ] \ + && echo "$0: mismatch in number of jobs with $transform_dir" && exit 1; + [ -f $srcdir/final.mat ] && ! 
cmp $transform_dir/final.mat $srcdir/final.mat && \ + echo "$0: LDA transforms differ between $srcdir and $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +else + if [ -f $srcdir/final.alimdl ]; then + echo "$0: you seem to have a SAT system but you did not supply the --transform-dir option."; + exit 1; + fi +fi + + +# if this job is interrupted by the user, we want any background jobs to be +# killed too. +cleanup() { + local pids=$(jobs -pr) + [ -n "$pids" ] && kill $pids +} +trap "cleanup" INT QUIT TERM EXIT + + +if [ $sub_split -eq 1 ]; then + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode_den.JOB.log \ + gmm-latgen-faster$thread_string --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats" "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1; +else + # each job from 1 to $nj is split into multiple pieces (sub-split), and we aim + # to have at most two jobs running at each time. The idea is that if we have stragglers + # from one job, we can be processing another one at the same time. + rm $dir/.error 2>/dev/null + + prev_pid= + for n in `seq $[nj+1]`; do + if [ $n -gt $nj ]; then + this_pid= + elif [ -f $dir/.done.$n ] && [ $dir/.done.$n -nt $srcdir/final.mdl ]; then + echo "Not processing subset $n as already done (delete $dir/.done.$n if not)"; + this_pid= + else + sdata2=$data/split$nj/$n/split$sub_split; + if [ ! -d $sdata2 ] || [ $sdata2 -ot $sdata/$n/feats.scp ]; then + split_data.sh --per-utt $sdata/$n $sub_split || exit 1; + fi + mkdir -p $dir/log/$n + mkdir -p $dir/part + feats_subset=`echo $feats | sed "s/trans.JOB/trans.$n/g" | sed s:JOB/:$n/split$sub_split/JOB/:g` + + $cmd --num-threads $num_threads JOB=1:$sub_split $dir/log/$n/decode_den.JOB.log \ + gmm-latgen-faster$thread_string --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats_subset" "ark:|gzip -c >$dir/lat.$n.JOB.gz" || touch $dir/.error & + this_pid=$! + fi + if [ ! -z "$prev_pid" ]; then # Wait for the previous job; merge the previous set of lattices. + wait $prev_pid + [ -f $dir/.error ] && echo "$0: error generating denominator lattices" && exit 1; + rm $dir/.merge_error 2>/dev/null + echo Merging archives for data subset $prev_n + for k in `seq $sub_split`; do + gunzip -c $dir/lat.$prev_n.$k.gz || touch $dir/.merge_error; + done | gzip -c > $dir/lat.$prev_n.gz || touch $dir/.merge_error; + [ -f $dir/.merge_error ] && echo "$0: Merging lattices for subset $prev_n failed (or maybe some other error)" && exit 1; + rm $dir/lat.$prev_n.*.gz + touch $dir/.done.$prev_n + fi + prev_n=$n + prev_pid=$this_pid + done +fi + + +echo "$0: done generating denominator lattices." diff --git a/tools/ASR_2ch_track/steps/make_denlats_sgmm.sh b/tools/ASR_2ch_track/steps/make_denlats_sgmm.sh new file mode 100644 index 0000000000000000000000000000000000000000..43c5ec12e626c208ed1dcd6e3d47fae4c54e8303 --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_denlats_sgmm.sh @@ -0,0 +1,189 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. +# 2014 Guoguo Chen + +# Create denominator lattices for MMI/MPE training, with SGMM models. If the +# features have fMLLR transforms you have to supply the --transform-dir option. 
+# It gets any speaker vectors from the "alignment dir" ($alidir). Note: this is +# possibly a slight mismatch because the speaker vectors come from supervised +# adaptation. + +# Begin configuration section. +nj=4 +cmd=run.pl +sub_split=1 +beam=13.0 +lattice_beam=7.0 +acwt=0.1 +max_active=5000 +transform_dir= +max_mem=20000000 # This will stop the processes getting too large. +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Usage: steps/make_denlats_sgmm.sh [options] " + echo " e.g.: steps/make_denlats_sgmm.sh data/train data/lang exp/sgmm4a_ali exp/sgmm4a_denlats" + echo "Works for (delta|lda) features, and (with --transform-dir option) such features" + echo " plus transforms." + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --sub-split # e.g. 40; use this for " + echo " # large databases so your jobs will be smaller and" + echo " # will (individually) finish reasonably soon." + echo " --transform-dir # directory to find fMLLR transforms." + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 # could also be $srcdir, but only if no vectors supplied. +dir=$4 + +sdata=$data/split$nj +splice_opts=`cat $alidir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +oov=`cat $lang/oov.int` || exit 1; + +mkdir -p $dir + +cp -rH $lang $dir/ + +# Compute grammar FST which corresponds to unigram decoding graph. +new_lang="$dir/"$(basename "$lang") +echo "$0: Making unigram grammar FST in $new_lang" +cat $data/text | utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt | \ + awk '{for(n=2;n<=NF;n++){ printf("%s ", $n); } printf("\n"); }' | \ + utils/make_unigram_grammar.pl | fstcompile | fstarcsort --sort_type=ilabel > $new_lang/G.fst \ + || exit 1; + +# mkgraph.sh expects a whole directory "lang", so put everything in one directory... +# it gets L_disambig.fst and G.fst (among other things) from $dir/lang, and +# final.mdl from $alidir; the output HCLG.fst goes in $dir/graph. + +echo "$0: Compiling decoding graph in $dir/dengraph" +if [ -s $dir/dengraph/HCLG.fst ] && [ $dir/dengraph/HCLG.fst -nt $srcdir/final.mdl ]; then + echo "$0: Graph $dir/dengraph/HCLG.fst already exists: skipping graph creation." +else + utils/mkgraph.sh $new_lang $alidir $dir/dengraph || exit 1; +fi + +if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $alidir/final.mat ark:- ark:- |" + cp $alidir/final.mat $dir + ;; + *) echo "$0: Invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "$0: using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." 
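+  # trans.JOB below is indexed by job number, so $transform_dir must have been
+  # created with the same number of jobs as this run; this is checked next.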
+ [ "`cat $transform_dir/num_jobs`" -ne "$nj" ] \ + && echo "$0: mismatch in number of jobs with $transform_dir" && exit 1; + [ -f $alidir/final.mat ] && ! cmp $transform_dir/final.mat $alidir/final.mat && \ + echo "$0: LDA transforms differ between $alidir and $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +else + echo "$0: Assuming you don't have a SAT system, since no --transform-dir option supplied " +fi + +if [ -f $alidir/gselect.1.gz ]; then + gselect_opt="--gselect=ark,s,cs:gunzip -c $alidir/gselect.JOB.gz|" +else + echo "$0: no such file $alidir/gselect.1.gz" && exit 1; +fi + +if [ -f $alidir/vecs.1 ]; then + spkvecs_opt="--spk-vecs=ark:$alidir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk" +else + if [ -f $alidir/final.alimdl ]; then + echo "$0: You seem to have an SGMM system with speaker vectors," + echo "yet we can't find speaker vectors. Perhaps you supplied" + echo "the model director instead of the alignment directory?" + exit 1; + fi +fi + +# if this job is interrupted by the user, we want any background jobs to be +# killed too. +cleanup() { + local pids=$(jobs -pr) + [ -n "$pids" ] && kill $pids +} +trap "cleanup" INT QUIT TERM EXIT + +if [ $sub_split -eq 1 ]; then + $cmd JOB=1:$nj $dir/log/decode_den.JOB.log \ + sgmm-latgen-faster $spkvecs_opt "$gselect_opt" --beam=$beam \ + --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active \ + --word-symbol-table=$lang/words.txt $alidir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats" "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1; +else + # each job from 1 to $nj is split into multiple pieces (sub-split), and we aim + # to have at most two jobs running at each time. The idea is that if we have + # stragglers from one job, we can be processing another one at the same time. + rm $dir/.error 2>/dev/null + + prev_pid= + for n in `seq $[nj+1]`; do + if [ $n -gt $nj ]; then + this_pid= + elif [ -f $dir/.done.$n ] && [ $dir/.done.$n -nt $alidir/final.mdl ]; then + echo "$0: Not processing subset $n as already done (delete $dir/.done.$n if not)"; + this_pid= + else + sdata2=$data/split$nj/$n/split$sub_split; + if [ ! -d $sdata2 ] || [ $sdata2 -ot $sdata/$n/feats.scp ]; then + split_data.sh --per-utt $sdata/$n $sub_split || exit 1; + fi + mkdir -p $dir/log/$n + mkdir -p $dir/part + feats_subset=`echo $feats | sed "s/trans.JOB/trans.$n/g" | sed s:JOB/:$n/split$sub_split/JOB/:g` + spkvecs_opt_subset=`echo $spkvecs_opt | sed "s/JOB/$n/g"` + gselect_opt_subset=`echo $gselect_opt | sed "s/JOB/$n/g"` + $cmd JOB=1:$sub_split $dir/log/$n/decode_den.JOB.log \ + sgmm-latgen-faster $spkvecs_opt_subset "$gselect_opt_subset" \ + --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --max-mem=$max_mem --max-active=$max_active \ + --word-symbol-table=$lang/words.txt $alidir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats_subset" \ + "ark:|gzip -c >$dir/lat.$n.JOB.gz" || touch $dir/.error & + this_pid=$! + fi + if [ ! -z "$prev_pid" ]; then # Wait for the previous job to merge lattices. 
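+      # Subset $n (if any) is now decoding in the background; meanwhile the
+      # finished pieces of subset $prev_n are merged into a single archive.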
+ wait $prev_pid + [ -f $dir/.error ] && \ + echo "$0: error generating denominator lattices" && exit 1; + rm $dir/.merge_error 2>/dev/null + echo "$0: Merging archives for data subset $prev_n" + for k in `seq $sub_split`; do + gunzip -c $dir/lat.$prev_n.$k.gz || touch $dir/.merge_error; + done | gzip -c > $dir/lat.$prev_n.gz || touch $dir/.merge_error; + [ -f $dir/.merge_error ] && \ + echo "$0: Merging lattices for subset $prev_n failed" && exit 1; + rm $dir/lat.$prev_n.*.gz + touch $dir/.done.$prev_n + fi + prev_n=$n + prev_pid=$this_pid + done +fi + + +echo "$0: done generating denominator lattices with SGMMs." diff --git a/tools/ASR_2ch_track/steps/make_denlats_sgmm2.sh b/tools/ASR_2ch_track/steps/make_denlats_sgmm2.sh new file mode 100644 index 0000000000000000000000000000000000000000..0f250d555fec01b494c39268033e0734e7a38602 --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_denlats_sgmm2.sh @@ -0,0 +1,200 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. +# 2014 Guoguo Chen + +# Create denominator lattices for MMI/MPE training, with SGMM models. If the +# features have fMLLR transforms you have to supply the --transform-dir option. +# It gets any speaker vectors from the "alignment dir" ($alidir). Note: this is +# possibly a slight mismatch because the speaker vectors come from supervised +# adaptation. + +# Begin configuration section. +nj=4 +cmd=run.pl +sub_split=1 +beam=13.0 +lattice_beam=7.0 +acwt=0.1 +max_active=5000 +transform_dir= +max_mem=20000000 # This will stop the processes getting too large. +num_threads=1 +parallel_opts= # ignored now. +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Usage: steps/make_denlats_sgmm2.sh [options] " + echo " e.g.: steps/make_denlats_sgmm2.sh data/train data/lang exp/sgmm4a_ali exp/sgmm4a_denlats" + echo "Works for (delta|lda) features, and (with --transform-dir option) such features" + echo " plus transforms." + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --sub-split # e.g. 40; use this for " + echo " # large databases so your jobs will be smaller and" + echo " # will (individually) finish reasonably soon." + echo " --transform-dir # directory to find fMLLR transforms." + echo " --num-threads # number of threads per decoding job" + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 # could also be $srcdir, but only if no vectors supplied. +dir=$4 + +sdata=$data/split$nj +splice_opts=`cat $alidir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +if [ $num_threads -gt 1 ]; then + # the -parallel becomes part of the binary name we decode with. + thread_string="-parallel --num-threads=$num_threads" +fi + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +oov=`cat $lang/oov.int` || exit 1; + +mkdir -p $dir + +cp -rH $lang $dir/ + +# Compute grammar FST which corresponds to unigram decoding graph. 
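+# The denominator graph deliberately uses this weak unigram LM (estimated from
+# the training transcripts) rather than the full LM, so that the lattices keep
+# a broad set of competing word sequences for discriminative (MMI/MPE/sMBR)
+# training.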
+new_lang="$dir/"$(basename "$lang") +echo "$0: Making unigram grammar FST in $new_lang" +cat $data/text | utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt | \ + awk '{for(n=2;n<=NF;n++){ printf("%s ", $n); } printf("\n"); }' | \ + utils/make_unigram_grammar.pl | fstcompile | fstarcsort --sort_type=ilabel > $new_lang/G.fst \ + || exit 1; + +# mkgraph.sh expects a whole directory "lang", so put everything in one directory... +# it gets L_disambig.fst and G.fst (among other things) from $dir/lang, and +# final.mdl from $alidir; the output HCLG.fst goes in $dir/graph. + +echo "$0: Compiling decoding graph in $dir/dengraph" +if [ -s $dir/dengraph/HCLG.fst ] && [ $dir/dengraph/HCLG.fst -nt $srcdir/final.mdl ]; then + echo "$0: Graph $dir/dengraph/HCLG.fst already exists: skipping graph creation." +else + utils/mkgraph.sh $new_lang $alidir $dir/dengraph || exit 1; +fi + +if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $alidir/final.mat ark:- ark:- |" + cp $alidir/final.mat $dir + ;; + *) echo "$0: Invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "$0: using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ "`cat $transform_dir/num_jobs`" -ne "$nj" ] \ + && echo "$0: mismatch in number of jobs with $transform_dir" && exit 1; + [ -f $alidir/final.mat ] && ! cmp $transform_dir/final.mat $alidir/final.mat && \ + echo "$0: LDA transforms differ between $alidir and $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +else + echo "$0: Assuming you don't have a SAT system, since no --transform-dir option supplied " +fi + +if [ -f $alidir/gselect.1.gz ]; then + gselect_opt="--gselect=ark,s,cs:gunzip -c $alidir/gselect.JOB.gz|" +else + echo "$0: no such file $alidir/gselect.1.gz" && exit 1; +fi + +if [ -f $alidir/vecs.1 ]; then + spkvecs_opt="--spk-vecs=ark:$alidir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk" + [ "`cat $alidir/num_jobs`" -ne "$nj" ] \ + && echo "$0: mismatch in number of jobs with $alidir" && exit 1; +else + if [ -f $alidir/final.alimdl ]; then + echo "$0: You seem to have an SGMM system with speaker vectors," + echo "yet we can't find speaker vectors. Perhaps you supplied" + echo "the model director instead of the alignment directory?" + exit 1; + fi +fi + +# if this job is interrupted by the user, we want any background jobs to be +# killed too. 
+cleanup() { + local pids=$(jobs -pr) + [ -n "$pids" ] && kill $pids +} +trap "cleanup" INT QUIT TERM EXIT + +if [ $sub_split -eq 1 ]; then + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode_den.JOB.log \ + sgmm2-latgen-faster$thread_string $spkvecs_opt "$gselect_opt" --beam=$beam \ + --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active \ + --word-symbol-table=$lang/words.txt $alidir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats" "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1; +else + # each job from 1 to $nj is split into multiple pieces (sub-split), and we aim + # to have at most two jobs running at each time. The idea is that if we have + # stragglers from one job, we can be processing another one at the same time. + rm $dir/.error 2>/dev/null + + prev_pid= + for n in `seq $[nj+1]`; do + if [ $n -gt $nj ]; then + this_pid= + elif [ -f $dir/.done.$n ] && [ $dir/.done.$n -nt $alidir/final.mdl ]; then + echo "$0: Not processing subset $n as already done (delete $dir/.done.$n if not)"; + this_pid= + else + sdata2=$data/split$nj/$n/split$sub_split; + if [ ! -d $sdata2 ] || [ $sdata2 -ot $sdata/$n/feats.scp ]; then + split_data.sh --per-utt $sdata/$n $sub_split || exit 1; + fi + mkdir -p $dir/log/$n + mkdir -p $dir/part + feats_subset=`echo $feats | sed "s/trans.JOB/trans.$n/g" | sed s:JOB/:$n/split$sub_split/JOB/:g` + spkvecs_opt_subset=`echo $spkvecs_opt | sed "s/JOB/$n/g"` + gselect_opt_subset=`echo $gselect_opt | sed "s/JOB/$n/g"` + $cmd --num-threads $num_threads JOB=1:$sub_split $dir/log/$n/decode_den.JOB.log \ + sgmm2-latgen-faster$thread_string \ + $spkvecs_opt_subset "$gselect_opt_subset" \ + --beam=$beam --lattice-beam=$lattice_beam \ + --acoustic-scale=$acwt --max-mem=$max_mem --max-active=$max_active \ + --word-symbol-table=$lang/words.txt $alidir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats_subset" \ + "ark:|gzip -c >$dir/lat.$n.JOB.gz" || touch $dir/.error & + this_pid=$! + fi + if [ ! -z "$prev_pid" ]; then # Wait for the previous job to merge lattices. + wait $prev_pid + [ -f $dir/.error ] && \ + echo "$0: error generating denominator lattices" && exit 1; + rm $dir/.merge_error 2>/dev/null + echo "$0: Merging archives for data subset $prev_n" + for k in `seq $sub_split`; do + gunzip -c $dir/lat.$prev_n.$k.gz || touch $dir/.merge_error; + done | gzip -c > $dir/lat.$prev_n.gz || touch $dir/.merge_error; + [ -f $dir/.merge_error ] && \ + echo "$0: Merging lattices for subset $prev_n failed" && exit 1; + rm $dir/lat.$prev_n.*.gz + touch $dir/.done.$prev_n + fi + prev_n=$n + prev_pid=$this_pid + done +fi + + +echo "$0: done generating denominator lattices with SGMMs." diff --git a/tools/ASR_2ch_track/steps/make_fbank.sh b/tools/ASR_2ch_track/steps/make_fbank.sh new file mode 100644 index 0000000000000000000000000000000000000000..ea355126c5d53ad246ceba5c3dcaed03e2779f6c --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_fbank.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +# Copyright 2012 Karel Vesely Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 +# To be run from .. (one directory up from here) +# see ../run.sh for example + +# Begin configuration section. +nj=4 +cmd=run.pl +fbank_config=conf/fbank.conf +compress=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. 
parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "usage: make_fbank.sh [options] "; + echo "options: " + echo " --fbank-config # config passed to compute-fbank-feats " + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +logdir=$2 +fbankdir=$3 + + +# make $fbankdir an absolute pathname. +fbankdir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $fbankdir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $fbankdir || exit 1; +mkdir -p $logdir || exit 1; + +if [ -f $data/feats.scp ]; then + mkdir -p $data/.backup + echo "$0: moving $data/feats.scp to $data/.backup" + mv $data/feats.scp $data/.backup +fi + +scp=$data/wav.scp + +required="$scp $fbank_config" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_fbank.sh: no such file $f" + exit 1; + fi +done + +utils/validate_data_dir.sh --no-text --no-feats $data || exit 1; + +if [ -f $data/spk2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/spk2warp" + vtln_opts="--vtln-map=ark:$data/spk2warp --utt2spk=ark:$data/utt2spk" +elif [ -f $data/utt2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/utt2warp" + vtln_opts="--vtln-map=ark:$data/utt2warp" +fi + +for n in $(seq $nj); do + # the next command does nothing unless $fbankdir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $fbankdir/raw_fbank_$name.$n.ark +done + +if [ -f $data/segments ]; then + echo "$0 [info]: segments file exists: using that." + split_segments="" + for n in $(seq $nj); do + split_segments="$split_segments $logdir/segments.$n" + done + + utils/split_scp.pl $data/segments $split_segments || exit 1; + rm $logdir/.error 2>/dev/null + + $cmd JOB=1:$nj $logdir/make_fbank_${name}.JOB.log \ + extract-segments scp,p:$scp $logdir/segments.JOB ark:- \| \ + compute-fbank-feats $vtln_opts --verbose=2 --config=$fbank_config ark:- ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$fbankdir/raw_fbank_$name.JOB.ark,$fbankdir/raw_fbank_$name.JOB.scp \ + || exit 1; + +else + echo "$0: [info]: no segments file exists: assuming wav.scp indexed by utterance." + split_scps="" + for n in $(seq $nj); do + split_scps="$split_scps $logdir/wav.$n.scp" + done + + utils/split_scp.pl $scp $split_scps || exit 1; + + $cmd JOB=1:$nj $logdir/make_fbank_${name}.JOB.log \ + compute-fbank-feats $vtln_opts --verbose=2 --config=$fbank_config scp,p:$logdir/wav.JOB.scp ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$fbankdir/raw_fbank_$name.JOB.ark,$fbankdir/raw_fbank_$name.JOB.scp \ + || exit 1; + +fi + + +if [ -f $logdir/.error.$name ]; then + echo "Error producing fbank features for $name:" + tail $logdir/make_fbank_${name}.1.log + exit 1; +fi + +# concatenate the .scp files together. 
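+# Each per-job scp file has one line per utterance of the form
+#   <utterance-id> /abs/path/raw_fbank_<name>.<N>.ark:<byte-offset>
+# so concatenating the pieces yields a single feats.scp indexing all archives.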
+for n in $(seq $nj); do + cat $fbankdir/raw_fbank_$name.$n.scp || exit 1; +done > $data/feats.scp + +rm $logdir/wav.*.scp $logdir/segments.* 2>/dev/null + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi + +echo "Succeeded creating filterbank features for $name" diff --git a/tools/ASR_2ch_track/steps/make_fbank_pitch.sh b/tools/ASR_2ch_track/steps/make_fbank_pitch.sh new file mode 100644 index 0000000000000000000000000000000000000000..ec0c8794bc4766d47f08052967cc784e953d3cc6 --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_fbank_pitch.sh @@ -0,0 +1,149 @@ +#!/bin/bash + +# Copyright 2013 The Shenzhen Key Laboratory of Intelligent Media and Speech, +# PKU-HKUST Shenzhen Hong Kong Institution (Author: Wei Shi) +# Apache 2.0 +# Combine filterbank and pitch features together +# Note: This file is based on make_fbank.sh and make_pitch_kaldi.sh + +# Begin configuration section. +nj=4 +cmd=run.pl +fbank_config=conf/fbank.conf +pitch_config=conf/pitch.conf +pitch_postprocess_config= +paste_length_tolerance=2 +compress=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "usage: make_fbank_pitch.sh [options] "; + echo "options: " + echo " --fbank-config # config passed to compute-fbank-feats " + echo " --pitch-config # config passed to compute-kaldi-pitch-feats " + echo " --pitch-postprocess-config # config passed to process-kaldi-pitch-feats " + echo " --paste-length-tolerance # length tolerance passed to paste-feats" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +logdir=$2 +fbank_pitch_dir=$3 + + +# make $fbank_pitch_dir an absolute pathname. +fbank_pitch_dir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $fbank_pitch_dir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $fbank_pitch_dir || exit 1; +mkdir -p $logdir || exit 1; + +if [ -f $data/feats.scp ]; then + mkdir -p $data/.backup + echo "$0: moving $data/feats.scp to $data/.backup" + mv $data/feats.scp $data/.backup +fi + +scp=$data/wav.scp + +required="$scp $fbank_config $pitch_config" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_fbank_pitch.sh: no such file $f" + exit 1; + fi +done + +if [ ! -z "$pitch_postprocess_config" ]; then + postprocess_config_opt="--config=$pitch_postprocess_config"; +else + postprocess_config_opt= +fi + +utils/validate_data_dir.sh --no-text --no-feats $data || exit 1; + +if [ -f $data/spk2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/spk2warp" + vtln_opts="--vtln-map=ark:$data/spk2warp --utt2spk=ark:$data/utt2spk" +elif [ -f $data/utt2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/utt2warp" + vtln_opts="--vtln-map=ark:$data/utt2warp" +fi + +for n in $(seq $nj); do + # the next command does nothing unless $fbank_pitch_dir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $fbank_pitch_dir/raw_fbank_pitch_$name.$n.ark +done + +if [ -f $data/segments ]; then + echo "$0 [info]: segments file exists: using that." 
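+  # In the pipelines below, paste-feats appends the Kaldi pitch features to the
+  # filterbank features frame by frame; --length-tolerance=$paste_length_tolerance
+  # tolerates a small mismatch in frame counts between the two streams.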
+ split_segments="" + for n in $(seq $nj); do + split_segments="$split_segments $logdir/segments.$n" + done + + utils/split_scp.pl $data/segments $split_segments || exit 1; + rm $logdir/.error 2>/dev/null + + fbank_feats="ark:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-fbank-feats $vtln_opts --verbose=2 --config=$fbank_config ark:- ark:- |" + pitch_feats="ark,s,cs:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-kaldi-pitch-feats --verbose=2 --config=$pitch_config ark:- ark:- | process-kaldi-pitch-feats $postprocess_config_opt ark:- ark:- |" + + $cmd JOB=1:$nj $logdir/make_fbank_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$fbank_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$fbank_pitch_dir/raw_fbank_pitch_$name.JOB.ark,$fbank_pitch_dir/raw_fbank_pitch_$name.JOB.scp \ + || exit 1; + +else + echo "$0: [info]: no segments file exists: assuming wav.scp indexed by utterance." + split_scps="" + for n in $(seq $nj); do + split_scps="$split_scps $logdir/wav.$n.scp" + done + + utils/split_scp.pl $scp $split_scps || exit 1; + + fbank_feats="ark:compute-fbank-feats $vtln_opts --verbose=2 --config=$fbank_config scp,p:$logdir/wav.JOB.scp ark:- |" + pitch_feats="ark,s,cs:compute-kaldi-pitch-feats --verbose=2 --config=$pitch_config scp,p:$logdir/wav.JOB.scp ark:- | process-kaldi-pitch-feats $postprocess_config_opt ark:- ark:- |" + + $cmd JOB=1:$nj $logdir/make_fbank_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$fbank_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$fbank_pitch_dir/raw_fbank_pitch_$name.JOB.ark,$fbank_pitch_dir/raw_fbank_pitch_$name.JOB.scp \ + || exit 1; + +fi + + +if [ -f $logdir/.error.$name ]; then + echo "Error producing fbank & pitch features for $name:" + tail $logdir/make_fbank_pitch_${name}.1.log + exit 1; +fi + +# concatenate the .scp files together. +for n in $(seq $nj); do + cat $fbank_pitch_dir/raw_fbank_pitch_$name.$n.scp || exit 1; +done > $data/feats.scp + +rm $logdir/wav.*.scp $logdir/segments.* 2>/dev/null + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully processed ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi + +echo "Succeeded creating filterbank & pitch features for $name" diff --git a/tools/ASR_2ch_track/steps/make_index.sh b/tools/ASR_2ch_track/steps/make_index.sh new file mode 100644 index 0000000000000000000000000000000000000000..8c39396f338d8ba064b9e7cf4034ac3355562fc6 --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_index.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Guoguo Chen) +# Apache 2.0 + +# Begin configuration section. +model= # You can specify the model to use +cmd=run.pl +acwt=0.083333 +lmwt=1.0 +max_silence_frames=50 +max_states=1000000 +max_states_scale=4 +max_expand=180 # limit memory blowup in lattice-align-words +strict=true +word_ins_penalty=0 +silence_word= # Specify this only if you did so in kws_setup +skip_optimization=false # If you only search for few thousands of keywords, you probablly + # can skip the optimization; but if you're going to search for + # millions of keywords, you'd better do set this optimization to + # false and do the optimization on the final index. +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . 
./path.sh; # source the path.
+. parse_options.sh || exit 1;
+
+if [ $# != 4 ]; then
+  echo "Usage: steps/make_index.sh [options] <kws-data-dir> <lang-dir> <decode-dir> <kws-dir>"
+  echo "... where <decode-dir> is where you have the lattices, and is assumed to be"
+  echo " a sub-directory of the directory where the model is."
+  echo "e.g.: steps/make_index.sh data/kws data/lang exp/sgmm2_5a_mmi/decode/ exp/sgmm2_5a_mmi/decode/kws/"
+  echo ""
+  echo "main options (for others, see top of script file)"
+  echo " --acwt # acoustic scale used for lattice"
+  echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs."
+  echo " --lmwt # lm scale used for lattice"
+  echo " --model # which model to use"
+  echo " # speaker-adapted decoding"
+  echo " --max-silence-frames # maximum #frames for silence"
+  exit 1;
+fi
+
+
+kwsdatadir=$1;
+langdir=$2;
+decodedir=$3;
+kwsdir=$4;
+srcdir=`dirname $decodedir`; # The model directory is one level up from decoding directory.
+
+mkdir -p $kwsdir/log;
+nj=`cat $decodedir/num_jobs` || exit 1;
+echo $nj > $kwsdir/num_jobs;
+word_boundary=$langdir/phones/word_boundary.int
+utter_id=$kwsdatadir/utter_id
+
+if [ -z "$model" ]; then # if --model was not specified on the command line...
+  model=$srcdir/final.mdl;
+fi
+
+for f in $word_boundary $model $decodedir/lat.1.gz; do
+  [ ! -f $f ] && echo "make_index.sh: no such file $f" && exit 1;
+done
+
+echo "Using model: $model"
+
+if [ ! -z $silence_word ]; then
+  silence_int=`grep -w $silence_word $langdir/words.txt | awk '{print $2}'`
+  [ -z $silence_int ] && \
+    echo "Error: could not find integer representation of silence word $silence_word" && exit 1;
+  silence_opt="--silence-label=$silence_int"
+fi
+
+$cmd JOB=1:$nj $kwsdir/log/index.JOB.log \
+  lattice-add-penalty --word-ins-penalty=$word_ins_penalty "ark:gzip -cdf $decodedir/lat.JOB.gz|" ark:- \| \
+  lattice-align-words $silence_opt --max-expand=$max_expand $word_boundary $model ark:- ark:- \| \
+  lattice-scale --acoustic-scale=$acwt --lm-scale=$lmwt ark:- ark:- \| \
+  lattice-to-kws-index --max-states-scale=$max_states_scale --allow-partial=true \
+  --max-silence-frames=$max_silence_frames --strict=$strict ark:$utter_id ark:- ark:- \| \
+  kws-index-union --skip-optimization=$skip_optimization --strict=$strict --max-states=$max_states \
+  ark:- "ark:|gzip -c > $kwsdir/index.JOB.gz" || exit 1
+
+
+exit 0;
diff --git a/tools/ASR_2ch_track/steps/make_mfcc.sh b/tools/ASR_2ch_track/steps/make_mfcc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..09c34d40b240c459772ca175b458a41c491192fa
--- /dev/null
+++ b/tools/ASR_2ch_track/steps/make_mfcc.sh
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+# Copyright 2012  Johns Hopkins University (Author: Daniel Povey)
+# Apache 2.0
+# To be run from .. (one directory up from here)
+# see ../run.sh for example
+
+# Begin configuration section.
+nj=4
+cmd=run.pl
+mfcc_config=conf/mfcc.conf
+compress=true
+# End configuration section.
+
+echo "$0 $@"  # Print the command line for logging
+
+if [ -f path.sh ]; then . ./path.sh; fi
+. parse_options.sh || exit 1;
+
+if [ $# != 3 ]; then
+  echo "Usage: $0 [options] <data-dir> <log-dir> <mfcc-dir>";
+  echo "e.g.: $0 data/train exp/make_mfcc/train mfcc"
+  echo "options: "
+  echo " --mfcc-config # config passed to compute-mfcc-feats "
+  echo " --nj # number of parallel jobs"
+  echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs."
+  exit 1;
+fi
+
+data=$1
+logdir=$2
+mfccdir=$3
+
+
+# make $mfccdir an absolute pathname.
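+# (The one-liner below just prepends $PWD when the argument is a relative
+# path; e.g. "mfcc" run from /home/user/s5 would become /home/user/s5/mfcc --
+# the paths are only an illustration.  An absolute path is wanted so that the
+# ark/scp entries written later remain valid from any working directory.)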
+mfccdir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $mfccdir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $mfccdir || exit 1; +mkdir -p $logdir || exit 1; + +if [ -f $data/feats.scp ]; then + mkdir -p $data/.backup + echo "$0: moving $data/feats.scp to $data/.backup" + mv $data/feats.scp $data/.backup +fi + +scp=$data/wav.scp + +required="$scp $mfcc_config" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_mfcc.sh: no such file $f" + exit 1; + fi +done +utils/validate_data_dir.sh --no-text --no-feats $data || exit 1; + +if [ -f $data/spk2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/spk2warp" + vtln_opts="--vtln-map=ark:$data/spk2warp --utt2spk=ark:$data/utt2spk" +elif [ -f $data/utt2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/utt2warp" + vtln_opts="--vtln-map=ark:$data/utt2warp" +fi + +for n in $(seq $nj); do + # the next command does nothing unless $mfccdir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $mfccdir/raw_mfcc_$name.$n.ark +done + + +if [ -f $data/segments ]; then + echo "$0 [info]: segments file exists: using that." + + split_segments="" + for n in $(seq $nj); do + split_segments="$split_segments $logdir/segments.$n" + done + + utils/split_scp.pl $data/segments $split_segments || exit 1; + rm $logdir/.error 2>/dev/null + + $cmd JOB=1:$nj $logdir/make_mfcc_${name}.JOB.log \ + extract-segments scp,p:$scp $logdir/segments.JOB ark:- \| \ + compute-mfcc-feats $vtln_opts --verbose=2 --config=$mfcc_config ark:- ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$mfccdir/raw_mfcc_$name.JOB.ark,$mfccdir/raw_mfcc_$name.JOB.scp \ + || exit 1; + +else + echo "$0: [info]: no segments file exists: assuming wav.scp indexed by utterance." + split_scps="" + for n in $(seq $nj); do + split_scps="$split_scps $logdir/wav_${name}.$n.scp" + done + + utils/split_scp.pl $scp $split_scps || exit 1; + + + # add ,p to the input rspecifier so that we can just skip over + # utterances that have bad wave data. + + $cmd JOB=1:$nj $logdir/make_mfcc_${name}.JOB.log \ + compute-mfcc-feats $vtln_opts --verbose=2 --config=$mfcc_config \ + scp,p:$logdir/wav_${name}.JOB.scp ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$mfccdir/raw_mfcc_$name.JOB.ark,$mfccdir/raw_mfcc_$name.JOB.scp \ + || exit 1; +fi + + +if [ -f $logdir/.error.$name ]; then + echo "Error producing mfcc features for $name:" + tail $logdir/make_mfcc_${name}.1.log + exit 1; +fi + +# concatenate the .scp files together. +for n in $(seq $nj); do + cat $mfccdir/raw_mfcc_$name.$n.scp || exit 1; +done > $data/feats.scp + +rm $logdir/wav_${name}.*.scp $logdir/segments.* 2>/dev/null + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully processed ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi + +if [ $nf -lt $[$nu - ($nu/20)] ]; then + echo "Less than 95% the features were successfully generated. Probably a serious error." 
+ exit 1; +fi + +echo "Succeeded creating MFCC features for $name" diff --git a/tools/ASR_2ch_track/steps/make_mfcc_pitch.sh b/tools/ASR_2ch_track/steps/make_mfcc_pitch.sh new file mode 100644 index 0000000000000000000000000000000000000000..385d262fd5987c3ec1e5cf1097d51dbd0b0f56f3 --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_mfcc_pitch.sh @@ -0,0 +1,153 @@ +#!/bin/bash + +# Copyright 2013 The Shenzhen Key Laboratory of Intelligent Media and Speech, +# PKU-HKUST Shenzhen Hong Kong Institution (Author: Wei Shi) +# Apache 2.0 +# Combine MFCC and pitch features together +# Note: This file is based on make_mfcc.sh and make_pitch_kaldi.sh + +# Begin configuration section. +nj=4 +cmd=run.pl +mfcc_config=conf/mfcc.conf +pitch_config=conf/pitch.conf +pitch_postprocess_config= +paste_length_tolerance=2 +compress=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "usage: make_mfcc_pitch.sh [options] "; + echo "options: " + echo " --mfcc-config # config passed to compute-mfcc-feats " + echo " --pitch-config # config passed to compute-kaldi-pitch-feats " + echo " --pitch-postprocess-config # config passed to process-kaldi-pitch-feats " + echo " --paste-length-tolerance # length tolerance passed to paste-feats" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +logdir=$2 +mfcc_pitch_dir=$3 + + +# make $mfcc_pitch_dir an absolute pathname. +mfcc_pitch_dir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $mfcc_pitch_dir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $mfcc_pitch_dir || exit 1; +mkdir -p $logdir || exit 1; + +if [ -f $data/feats.scp ]; then + mkdir -p $data/.backup + echo "$0: moving $data/feats.scp to $data/.backup" + mv $data/feats.scp $data/.backup +fi + +scp=$data/wav.scp + +required="$scp $mfcc_config $pitch_config" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_mfcc_pitch.sh: no such file $f" + exit 1; + fi +done +utils/validate_data_dir.sh --no-text --no-feats $data || exit 1; + +if [ ! -z "$pitch_postprocess_config" ]; then + postprocess_config_opt="--config=$pitch_postprocess_config"; +else + postprocess_config_opt= +fi + +if [ -f $data/spk2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/spk2warp" + vtln_opts="--vtln-map=ark:$data/spk2warp --utt2spk=ark:$data/utt2spk" +elif [ -f $data/utt2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/utt2warp" + vtln_opts="--vtln-map=ark:$data/utt2warp" +fi + +for n in $(seq $nj); do + # the next command does nothing unless $mfcc_pitch_dir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $mfcc_pitch_dir/raw_mfcc_pitch_$name.$n.ark +done + +if [ -f $data/segments ]; then + echo "$0 [info]: segments file exists: using that." 
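+  # Note on the pipeline assembled below: MFCC and Kaldi pitch features are
+  # computed as two separate streams from the same segments and then glued
+  # together frame by frame with paste-feats; its --length-tolerance option
+  # (set from $paste_length_tolerance, default 2) lets the two streams differ
+  # by a couple of frames, which can happen at segment boundaries.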
+ split_segments="" + for n in $(seq $nj); do + split_segments="$split_segments $logdir/segments.$n" + done + + utils/split_scp.pl $data/segments $split_segments || exit 1; + rm $logdir/.error 2>/dev/null + + mfcc_feats="ark:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-mfcc-feats $vtln_opts --verbose=2 --config=$mfcc_config ark:- ark:- |" + pitch_feats="ark,s,cs:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-kaldi-pitch-feats --verbose=2 --config=$pitch_config ark:- ark:- | process-kaldi-pitch-feats $postprocess_config_opt ark:- ark:- |" + + $cmd JOB=1:$nj $logdir/make_mfcc_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$mfcc_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$mfcc_pitch_dir/raw_mfcc_pitch_$name.JOB.ark,$mfcc_pitch_dir/raw_mfcc_pitch_$name.JOB.scp \ + || exit 1; + +else + echo "$0: [info]: no segments file exists: assuming wav.scp indexed by utterance." + split_scps="" + for n in $(seq $nj); do + split_scps="$split_scps $logdir/wav_${name}.$n.scp" + done + + utils/split_scp.pl $scp $split_scps || exit 1; + + mfcc_feats="ark:compute-mfcc-feats $vtln_opts --verbose=2 --config=$mfcc_config scp,p:$logdir/wav_${name}.JOB.scp ark:- |" + pitch_feats="ark,s,cs:compute-kaldi-pitch-feats --verbose=2 --config=$pitch_config scp,p:$logdir/wav_${name}.JOB.scp ark:- | process-kaldi-pitch-feats $postprocess_config_opt ark:- ark:- |" + + $cmd JOB=1:$nj $logdir/make_mfcc_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$mfcc_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$mfcc_pitch_dir/raw_mfcc_pitch_$name.JOB.ark,$mfcc_pitch_dir/raw_mfcc_pitch_$name.JOB.scp \ + || exit 1; + +fi + + +if [ -f $logdir/.error.$name ]; then + echo "Error producing mfcc & pitch features for $name:" + tail $logdir/make_mfcc_pitch_${name}.1.log + exit 1; +fi + +# concatenate the .scp files together. +for n in $(seq $nj); do + cat $mfcc_pitch_dir/raw_mfcc_pitch_$name.$n.scp || exit 1; +done > $data/feats.scp + +rm $logdir/wav_${name}.*.scp $logdir/segments.* 2>/dev/null + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully processed ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi + +if [ $nf -lt $[$nu - ($nu/20)] ]; then + echo "Less than 95% the features were successfully generated. Probably a serious error." + exit 1; +fi + +echo "Succeeded creating MFCC & Pitch features for $name" diff --git a/tools/ASR_2ch_track/steps/make_mfcc_pitch_online.sh b/tools/ASR_2ch_track/steps/make_mfcc_pitch_online.sh new file mode 100644 index 0000000000000000000000000000000000000000..beb13615a0b52d7200d3385d942b25f9258b56da --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_mfcc_pitch_online.sh @@ -0,0 +1,147 @@ +#!/bin/bash + +# Copyright 2013 The Shenzhen Key Laboratory of Intelligent Media and Speech, +# PKU-HKUST Shenzhen Hong Kong Institution (Author: Wei Shi) +# 2014 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 +# Combine MFCC and online-pitch features together +# Note: This file is based on make_mfcc_pitch.sh + +# Begin configuration section. +nj=4 +cmd=run.pl +mfcc_config=conf/mfcc.conf +online_pitch_config=conf/online_pitch.conf +paste_length_tolerance=2 +compress=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. 
parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "usage: make_mfcc_pitch.sh [options] "; + echo "options: " + echo " --mfcc-config # config passed to compute-mfcc-feats, default " + echo " # is conf/mfcc.conf" + echo " --online-pitch-config # config passed to compute-and-process-kaldi-pitch-feats, " + echo " # default is conf/online_pitch.conf" + echo " --paste-length-tolerance # length tolerance passed to paste-feats" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +logdir=$2 +mfcc_pitch_dir=$3 + + +# make $mfcc_pitch_dir an absolute pathname. +mfcc_pitch_dir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $mfcc_pitch_dir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $mfcc_pitch_dir || exit 1; +mkdir -p $logdir || exit 1; + +if [ -f $data/feats.scp ]; then + mkdir -p $data/.backup + echo "$0: moving $data/feats.scp to $data/.backup" + mv $data/feats.scp $data/.backup +fi + +scp=$data/wav.scp + +required="$scp $mfcc_config $online_pitch_config" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_mfcc_pitch.sh: no such file $f" + exit 1; + fi +done +utils/validate_data_dir.sh --no-text --no-feats $data || exit 1; + +if [ -f $data/spk2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/spk2warp" + vtln_opts="--vtln-map=ark:$data/spk2warp --utt2spk=ark:$data/utt2spk" +elif [ -f $data/utt2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/utt2warp" + vtln_opts="--vtln-map=ark:$data/utt2warp" +fi + +for n in $(seq $nj); do + # the next command does nothing unless $mfcc_pitch_dir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $mfcc_pitch_dir/raw_mfcc_online_pitch_$name.$n.ark +done + +if [ -f $data/segments ]; then + echo "$0 [info]: segments file exists: using that." + split_segments="" + for n in $(seq $nj); do + split_segments="$split_segments $logdir/segments.$n" + done + + utils/split_scp.pl $data/segments $split_segments || exit 1; + rm $logdir/.error 2>/dev/null + + mfcc_feats="ark:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-mfcc-feats $vtln_opts --verbose=2 --config=$mfcc_config ark:- ark:- |" + pitch_feats="ark,s,cs:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-and-process-kaldi-pitch-feats --verbose=2 --config=$online_pitch_config ark:- ark:- |" + + $cmd JOB=1:$nj $logdir/make_mfcc_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$mfcc_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$mfcc_pitch_dir/raw_mfcc_online_pitch_$name.JOB.ark,$mfcc_pitch_dir/raw_mfcc_online_pitch_$name.JOB.scp \ + || exit 1; + +else + echo "$0: [info]: no segments file exists: assuming wav.scp indexed by utterance." 
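+  # In that case every wav.scp entry is one utterance: either a plain path or
+  # a command writing a wav to stdout, e.g. (made-up paths, for illustration):
+  #   utt0001 /export/corpora/foo/utt0001.wav
+  #   utt0002 sph2pipe -f wav /export/corpora/foo/utt0002.sph |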
+ split_scps="" + for n in $(seq $nj); do + split_scps="$split_scps $logdir/wav_${name}.$n.scp" + done + + utils/split_scp.pl $scp $split_scps || exit 1; + + mfcc_feats="ark:compute-mfcc-feats $vtln_opts --verbose=2 --config=$mfcc_config scp,p:$logdir/wav_${name}.JOB.scp ark:- |" + pitch_feats="ark,s,cs:compute-and-process-kaldi-pitch-feats --verbose=2 --config=$online_pitch_config scp,p:$logdir/wav_${name}.JOB.scp ark:- |" + + $cmd JOB=1:$nj $logdir/make_mfcc_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$mfcc_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$mfcc_pitch_dir/raw_mfcc_online_pitch_$name.JOB.ark,$mfcc_pitch_dir/raw_mfcc_online_pitch_$name.JOB.scp \ + || exit 1; +fi + + +if [ -f $logdir/.error.$name ]; then + echo "Error producing mfcc & pitch features for $name:" + tail $logdir/make_mfcc_pitch_${name}.1.log + exit 1; +fi + +# concatenate the .scp files together. +for n in $(seq $nj); do + cat $mfcc_pitch_dir/raw_mfcc_online_pitch_$name.$n.scp || exit 1; +done > $data/feats.scp + +rm $logdir/wav_${name}.*.scp $logdir/segments.* 2>/dev/null + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully processed ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi + +if [ $nf -lt $[$nu - ($nu/20)] ]; then + echo "Less than 95% the features were successfully generated. Probably a serious error." + exit 1; +fi + +echo "Succeeded creating MFCC & online-pitch features for $name" diff --git a/tools/ASR_2ch_track/steps/make_phone_graph.sh b/tools/ASR_2ch_track/steps/make_phone_graph.sh new file mode 100644 index 0000000000000000000000000000000000000000..4dbb5a8a206f9ee5a56aa2488a733b3169914b52 --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_phone_graph.sh @@ -0,0 +1,146 @@ +#!/bin/bash + +# steps/make_phone_graph.sh data/train_100k_nodup/ data/lang exp/tri2_ali_100k_nodup/ exp/tri2 + +# Copyright 2013 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script makes a phone-based LM, without smoothing to unigram, that +# is to be used for segmentation, and uses that together with a model to +# make a decoding graph. +# Uses SRILM. + +# Begin configuration section. +stage=0 +cmd=run.pl +N=3 # change N and P for non-trigram systems. +P=1 +tscale=1.0 # transition scale. +loopscale=0.1 # scale for self-loops. +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: $0 [options] " + echo " e.g.: $0 data/lang exp/tri3b_ali exp/tri4b_seg" + echo "Makes the graph in $dir/phone_graph, corresponding to the model in $dir" + echo "The alignments from $ali_dir are used to train the phone LM." + exit 1; +fi + +lang=$1 +alidir=$2 +dir=$3 + + +for f in $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $dir/final.mdl; do + if [ ! -f $f ]; then + echo "$0: expected $f to exist" + exit 1; + fi +done + +loc=`which ngram-count`; +if [ -z $loc ]; then + if uname -a | grep 64 >/dev/null; then # some kind of 64 bit... + sdir=`pwd`/../../../tools/srilm/bin/i686-m64 + else + sdir=`pwd`/../../../tools/srilm/bin/i686 + fi + if [ -f $sdir/ngram-count ]; then + echo Using SRILM tools from $sdir + export PATH=$PATH:$sdir + else + echo You appear to not have SRILM tools installed, either on your path, + echo or installed in $sdir. 
See tools/install_srilm.sh for installation + echo instructions. + exit 1 + fi +fi + +set -e # exit on error status + +mkdir -p $dir/phone_graph + +if [ $stage -le 0 ]; then + echo "$0: creating phone LM-training data" + gunzip -c $alidir/ali.*gz | ali-to-phones $alidir/final.mdl ark:- ark,t:- | \ + awk '{for (x=2; x <= NF; x++) printf("%s ", $x); printf("\n"); }' | \ + utils/int2sym.pl $lang/phones.txt > $dir/phone_graph/train_phones.txt +fi + +if [ $stage -le 1 ]; then + echo "$0: building ARPA LM" + ngram-count -text $dir/phone_graph/train_phones.txt -order 3 \ + -addsmooth1 1 -kndiscount2 -kndiscount3 -interpolate -lm $dir/phone_graph/arpa.gz +fi + +# Set the unigram and unigram-backoff log-probs to -99. we'll later remove the +# arcs from the FST. This is to avoid CLG blowup, and to increase speed. + +if [ $stage -le 2 ]; then + echo "$0: removing unigrams from ARPA LM" + + gunzip -c $dir/phone_graph/arpa.gz | \ + awk '/\\1-grams/{state=1;} /\\2-grams:/{ state=2; } + {if(state == 1 && NF == 3) { printf("-99\t%s\t-99\n", $2); } else {print;}}' | \ + gzip -c >$dir/phone_graph/arpa_noug.gz +fi + +if [ $stage -le 3 ]; then + echo "$0: creating G_phones.fst from ARPA" + gunzip -c $dir/phone_graph/arpa_noug.gz | arpa2fst - - | fstprint | \ + utils/eps2disambig.pl | utils/s2eps.pl | \ + awk '{if (NF < 5 || $5 < 100.0) { print; }}' | \ + fstcompile --isymbols=$lang/phones.txt --osymbols=$lang/phones.txt \ + --keep_isymbols=false --keep_osymbols=false | \ + fstconnect | \ + fstrmepsilon > $dir/phone_graph/G_phones.fst + fstisstochastic $dir/phone_graph/G_phones.fst || echo "[info]: G_phones not stochastic." +fi + + +if [ $stage -le 4 ]; then + echo "$0: creating CLG." + + fstcomposecontext --context-size=$N --central-position=$P \ + --read-disambig-syms=$lang/phones/disambig.int \ + --write-disambig-syms=$dir/phone_graph/disambig_ilabels_${N}_${P}.int \ + $dir/phone_graph/ilabels_${N}_${P} < $dir/phone_graph/G_phones.fst | \ + fstdeterminize >$dir/phone_graph/CLG.fst + fstisstochastic $dir/phone_graph/CLG.fst || echo "[info]: CLG not stochastic." +fi + +if [ $stage -le 5 ]; then + echo "$0: creating Ha.fst" + make-h-transducer --disambig-syms-out=$dir/phone_graph/disambig_tid.int \ + --transition-scale=$tscale $dir/phone_graph/ilabels_${N}_${P} $dir/tree $dir/final.mdl \ + > $dir/phone_graph/Ha.fst +fi + +if [ $stage -le 6 ]; then + echo "$0: creating HCLGa.fst" + fsttablecompose $dir/phone_graph/Ha.fst $dir/phone_graph/CLG.fst | \ + fstdeterminizestar --use-log=true | \ + fstrmsymbols $dir/phone_graph/disambig_tid.int | fstrmepslocal | \ + fstminimizeencoded > $dir/phone_graph/HCLGa.fst || exit 1; + fstisstochastic $dir/phone_graph/HCLGa.fst || echo "HCLGa is not stochastic" +fi + +if [ $stage -le 7 ]; then + add-self-loops --self-loop-scale=$loopscale --reorder=true \ + $dir/final.mdl < $dir/phone_graph/HCLGa.fst > $dir/phone_graph/HCLG.fst || exit 1; + + if [ $tscale == 1.0 -a $loopscale == 1.0 ]; then + # No point doing this test if transition-scale not 1, as it is bound to fail. + fstisstochastic $dir/phone_graph/HCLG.fst || echo "[info]: final HCLG is not stochastic." + fi + + # $lang/phones.txt is the symbol table that corresponds to the output + # symbols on the graph; decoding scripts expect it as words.txt. 
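+  # Once phones.txt is copied in as words.txt (next two lines), phone_graph
+  # looks like an ordinary graph directory (HCLG.fst plus words.txt), except
+  # that its "words" are phones, so the standard decoding scripts can be
+  # pointed at it unchanged.  As an optional, purely illustrative sanity
+  # check, the final graph can be inspected with:
+  #   fstinfo $dir/phone_graph/HCLG.fst | grep -E "of (states|arcs)"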
+ cp $lang/phones.txt $dir/phone_graph/words.txt + cp -r $lang/phones $dir/phone_graph/ +fi diff --git a/tools/ASR_2ch_track/steps/make_plp.sh b/tools/ASR_2ch_track/steps/make_plp.sh new file mode 100644 index 0000000000000000000000000000000000000000..477dbaa657fd64d8d9264388cde2a216fa4c11bd --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_plp.sh @@ -0,0 +1,135 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 +# To be run from .. (one directory up from here) +# see ../run.sh for example + +# Begin configuration section. +nj=4 +cmd=run.pl +plp_config=conf/plp.conf +compress=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: $0 [options] "; + echo "e.g.: $0 data/train exp/make_plp/train plp" + echo "options: " + echo " --plp-config # config passed to compute-plp-feats " + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +logdir=$2 +plpdir=$3 + + +# make $plpdir an absolute pathname. +plpdir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $plpdir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $plpdir || exit 1; +mkdir -p $logdir || exit 1; + +if [ -f $data/feats.scp ]; then + mkdir -p $data/.backup + echo "$0: moving $data/feats.scp to $data/.backup" + mv $data/feats.scp $data/.backup +fi + +scp=$data/wav.scp + +required="$scp $plp_config" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_plp.sh: no such file $f" + exit 1; + fi +done +utils/validate_data_dir.sh --no-text --no-feats $data || exit 1; + +if [ -f $data/spk2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/spk2warp" + vtln_opts="--vtln-map=ark:$data/spk2warp --utt2spk=ark:$data/utt2spk" +elif [ -f $data/utt2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/utt2warp" + vtln_opts="--vtln-map=ark:$data/utt2warp" +fi + +for n in $(seq $nj); do + # the next command does nothing unless $plpdir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $plpdir/raw_plp_$name.$n.ark +done + +if [ -f $data/segments ]; then + echo "$0 [info]: segments file exists: using that." + split_segments="" + for n in $(seq $nj); do + split_segments="$split_segments $logdir/segments.$n" + done + + utils/split_scp.pl $data/segments $split_segments || exit 1; + rm $logdir/.error 2>/dev/null + + $cmd JOB=1:$nj $logdir/make_plp_${name}.JOB.log \ + extract-segments scp,p:$scp $logdir/segments.JOB ark:- \| \ + compute-plp-feats $vtln_opts --verbose=2 --config=$plp_config ark:- ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$plpdir/raw_plp_$name.JOB.ark,$plpdir/raw_plp_$name.JOB.scp \ + || exit 1; + +else + echo "$0: [info]: no segments file exists: assuming wav.scp indexed by utterance." 
+ split_scps="" + for n in $(seq $nj); do + split_scps="$split_scps $logdir/wav_${name}.$n.scp" + done + + utils/split_scp.pl $scp $split_scps || exit 1; + + $cmd JOB=1:$nj $logdir/make_plp_${name}.JOB.log \ + compute-plp-feats $vtln_opts --verbose=2 --config=$plp_config scp,p:$logdir/wav_${name}.JOB.scp ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$plpdir/raw_plp_$name.JOB.ark,$plpdir/raw_plp_$name.JOB.scp \ + || exit 1; + +fi + + +if [ -f $logdir/.error.$name ]; then + echo "Error producing plp features for $name:" + tail $logdir/make_plp_${name}.1.log + exit 1; +fi + +# concatenate the .scp files together. +for n in $(seq $nj); do + cat $plpdir/raw_plp_$name.$n.scp || exit 1; +done > $data/feats.scp + +rm $logdir/wav_${name}.*.scp $logdir/segments.* 2>/dev/null + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi +if [ $nf -lt $[$nu - ($nu/20)] ]; then + echo "Less than 95% the features were successfully generated. Probably a serious error." + exit 1; +fi + +echo "Succeeded creating PLP features for $name" diff --git a/tools/ASR_2ch_track/steps/make_plp_pitch.sh b/tools/ASR_2ch_track/steps/make_plp_pitch.sh new file mode 100644 index 0000000000000000000000000000000000000000..c8de73cdb27911f5208084aa6d710b7d0045d90c --- /dev/null +++ b/tools/ASR_2ch_track/steps/make_plp_pitch.sh @@ -0,0 +1,155 @@ +#!/bin/bash + +# Copyright 2013 The Shenzhen Key Laboratory of Intelligent Media and Speech, +# PKU-HKUST Shenzhen Hong Kong Institution (Author: Wei Shi) +# Apache 2.0 +# Combine PLP and pitch features together +# Note: This file is based on make_plp.sh and make_pitch_kaldi.sh + +# Begin configuration section. +nj=4 +cmd=run.pl +plp_config=conf/plp.conf +pitch_config=conf/pitch.conf +pitch_postprocess_config= +paste_length_tolerance=2 +compress=true +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "usage: make_plp_pitch.sh [options] "; + echo "options: " + echo " --plp-config # config passed to compute-plp-feats " + echo " --pitch-config # config passed to compute-kaldi-pitch-feats " + echo " --pitch-postprocess-config # config passed to process-kaldi-pitch-feats " + echo " --paste-length-tolerance # length tolerance passed to paste-feats" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +logdir=$2 +plp_pitch_dir=$3 + + +# make $plp_pitch_dir an absolute pathname. +plp_pitch_dir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $plp_pitch_dir ${PWD}` + +# use "name" as part of name of the archive. +name=`basename $data` + +mkdir -p $plp_pitch_dir || exit 1; +mkdir -p $logdir || exit 1; + +if [ -f $data/feats.scp ]; then + mkdir -p $data/.backup + echo "$0: moving $data/feats.scp to $data/.backup" + mv $data/feats.scp $data/.backup +fi + +scp=$data/wav.scp + +required="$scp $plp_config $pitch_config" + +for f in $required; do + if [ ! -f $f ]; then + echo "make_plp_pitch.sh: no such file $f" + exit 1; + fi +done +utils/validate_data_dir.sh --no-text --no-feats $data || exit 1; + +if [ ! 
-z "$pitch_postprocess_config" ]; then + postprocess_config_opt="--config=$pitch_postprocess_config"; +else + postprocess_config_opt= +fi + +if [ -f $data/spk2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/spk2warp" + vtln_opts="--vtln-map=ark:$data/spk2warp --utt2spk=ark:$data/utt2spk" +elif [ -f $data/utt2warp ]; then + echo "$0 [info]: using VTLN warp factors from $data/utt2warp" + vtln_opts="--vtln-map=ark:$data/utt2warp" +fi + +for n in $(seq $nj); do + # the next command does nothing unless $plp_pitch_dir/storage/ exists, see + # utils/create_data_link.pl for more info. + utils/create_data_link.pl $plp_pitch_dir/raw_plp_pitch_$name.$n.ark +done + + +if [ -f $data/segments ]; then + echo "$0 [info]: segments file exists: using that." + split_segments="" + for n in $(seq $nj); do + split_segments="$split_segments $logdir/segments.$n" + done + + utils/split_scp.pl $data/segments $split_segments || exit 1; + rm $logdir/.error 2>/dev/null + + plp_feats="ark:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-plp-feats $vtln_opts --verbose=2 --config=$plp_config ark:- ark:- |" + pitch_feats="ark,s,cs:extract-segments scp,p:$scp $logdir/segments.JOB ark:- | compute-kaldi-pitch-feats --verbose=2 --config=$pitch_config ark:- ark:- | process-kaldi-pitch-feats $postprocess_config_opt ark:- ark:- |" + + $cmd JOB=1:$nj $logdir/make_plp_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$plp_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$plp_pitch_dir/raw_plp_pitch_$name.JOB.ark,$plp_pitch_dir/raw_plp_pitch_$name.JOB.scp \ + || exit 1; + +else + echo "$0: [info]: no segments file exists: assuming wav.scp indexed by utterance." + split_scps="" + for n in $(seq $nj); do + split_scps="$split_scps $logdir/wav_${name}.$n.scp" + done + + utils/split_scp.pl $scp $split_scps || exit 1; + + + plp_feats="ark:compute-plp-feats $vtln_opts --verbose=2 --config=$plp_config scp,p:$logdir/wav_${name}.JOB.scp ark:- |" + pitch_feats="ark,s,cs:compute-kaldi-pitch-feats --verbose=2 --config=$pitch_config scp,p:$logdir/wav_${name}.JOB.scp ark:- | process-kaldi-pitch-feats $postprocess_config_opt ark:- ark:- |" + + $cmd JOB=1:$nj $logdir/make_plp_pitch_${name}.JOB.log \ + paste-feats --length-tolerance=$paste_length_tolerance "$plp_feats" "$pitch_feats" ark:- \| \ + copy-feats --compress=$compress ark:- \ + ark,scp:$plp_pitch_dir/raw_plp_pitch_$name.JOB.ark,$plp_pitch_dir/raw_plp_pitch_$name.JOB.scp \ + || exit 1; + +fi + + +if [ -f $logdir/.error.$name ]; then + echo "Error producing plp & pitch features for $name:" + tail $logdir/make_plp_pitch_${name}.1.log + exit 1; +fi + +# concatenate the .scp files together. +for n in $(seq $nj); do + cat $plp_pitch_dir/raw_plp_pitch_$name.$n.scp || exit 1; +done > $data/feats.scp + +rm $logdir/wav_${name}.*.scp $logdir/segments.* 2>/dev/null + +nf=`cat $data/feats.scp | wc -l` +nu=`cat $data/utt2spk | wc -l` +if [ $nf -ne $nu ]; then + echo "It seems not all of the feature files were successfully processed ($nf != $nu);" + echo "consider using utils/fix_data_dir.sh $data" +fi + +if [ $nf -lt $[$nu - ($nu/20)] ]; then + echo "Less than 95% the features were successfully generated. Probably a serious error." 
+ exit 1; +fi + +echo "Succeeded creating PLP & Pitch features for $name" diff --git a/tools/ASR_2ch_track/steps/mixup.sh b/tools/ASR_2ch_track/steps/mixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..a0e7f70c2493832a92b14b421681218cc217a6f2 --- /dev/null +++ b/tools/ASR_2ch_track/steps/mixup.sh @@ -0,0 +1,150 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# mix up (or down); do 3 iters of model training; realign; then do two more +# iterations of model training. + +# Begin configuration section. +cmd=run.pl +beam=10 +retry_beam=40 +boost_silence=1.0 # Factor by which to boost silence likelihoods in alignment +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +num_iters=5 +realign_iters=3 # Space-separated list of iterations to realign on. +stage=0 +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh; +. parse_options.sh || exit 1; + +if [ $# != 5 ]; then + echo "Usage: steps/mixup.sh " + echo " e.g.: steps/mixup.sh 20000 data/train_si84 data/lang exp/tri3b exp/tri3b_20k" + echo "main options (for others, see top of script file)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --config # config containing options" + echo " --stage # stage to do partial re-run from." + exit 1; +fi + +numgauss=$1 +data=$2 +lang=$3 +srcdir=$4 +dir=$5 + +for f in $data/feats.scp $srcdir/final.mdl $srcdir/final.mat; do + [ ! -f $f ] && echo "mixup_lda_etc.sh: no such file $f" && exit 1; +done + +nj=`cat $srcdir/num_jobs` || exit 1; +sdata=$data/split$nj; + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` + +mkdir -p $dir/log +cp $srcdir/splice_opts $dir 2>/dev/null +cp $srcdir/cmvn_opts $dir 2>/dev/null +cp $srcdir/final.mat $dir +echo $nj > $dir/num_jobs +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/tree $dir + + +## Set up features. +if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac +if [ -f $srcdir/trans.1 ]; then + echo Using transforms from $srcdir; + rm $dir/trans.* 2>/dev/null + ln.pl $srcdir/trans.* $dir # Link those transforms to current directory. + feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" +else + feats="$sifeats" +fi +## Done setting up features. + +rm $dir/fsts.*.gz 2>/dev/null +ln.pl $srcdir/fsts.*.gz $dir # Link training-graph FSTs to current directory. + +## Mix up old model +if [ $stage -le 0 ]; then + echo Mixing up old model to $numgauss Gaussians +# Note: this script also works for mixing down. + $cmd $dir/log/mixup.log \ + gmm-mixup --mix-up=$numgauss --mix-down=$numgauss \ + $srcdir/final.mdl $srcdir/final.occs $dir/1.mdl || exit 1; +fi +## Done. + +cur_alidir=$srcdir # dir to find alignments. 
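+
+# The loop below runs $num_iters iterations of EM on the mixed-up model,
+# realigning on the iterations listed in $realign_iters.  As an illustration
+# only (directory names are placeholders), a typical invocation would be
+#   steps/mixup.sh --cmd run.pl 20000 data/train data/lang exp/tri3b exp/tri3b_20k
+# which grows (or shrinks) exp/tri3b/final.mdl to 20000 Gaussians and
+# retrains it in exp/tri3b_20k.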
+[ -z "$realign_iters" ] && ln.pl $srcdir/ali.*.gz $dir; # link alignments, if + # we won't be generating them. + +x=1 +while [ $x -le $num_iters ]; do + echo "$0: iteration $x" + if echo $realign_iters | grep -w $x >/dev/null; then + if [ $stage -le $x ]; then + echo "$0: realigning data" + mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/$x.mdl - |" + $cmd JOB=1:$nj $dir/log/align.$x.JOB.log \ + gmm-align-compiled $scale_opts --beam=10 --retry-beam=40 "$mdl" \ + "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \ + "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; + fi + cur_alidir=$dir + fi + if [ $stage -le $x ]; then + echo "$0: accumulating statistics" + $cmd JOB=1:$nj $dir/log/acc.$x.JOB.log \ + gmm-acc-stats-ali $dir/$x.mdl "$feats" \ + "ark,s,cs:gunzip -c $cur_alidir/ali.JOB.gz|" $dir/$x.JOB.acc || exit 1; + echo "$0: re-estimating model" + [ "`ls $dir/$x.*.acc | wc -w`" -ne $nj ] && echo "$0: wrong #accs" && exit 1; + $cmd $dir/log/update.$x.log \ + gmm-est --write-occs=$dir/$[$x+1].occs $dir/$x.mdl \ + "gmm-sum-accs - $dir/$x.*.acc |" $dir/$[$x+1].mdl || exit 1; + rm $dir/$x.mdl $dir/$x.*.acc + rm $dir/$x.occs 2>/dev/null + fi + x=$[$x+1] +done + +rm $dir/final.mdl $dir/final.occs 2>/dev/null +ln -s $x.mdl $dir/final.mdl +ln -s $x.occs $dir/final.occs + +if [ -f $dir/trans.1 ]; then + echo "$0: accumulating stats for alignment model." + $cmd JOB=1:$nj $dir/log/acc_alimdl.JOB.log \ + ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ + gmm-acc-stats-twofeats $dir/$x.mdl "$feats" "$sifeats" \ + ark,s,cs:- $dir/$x.JOB.acc || exit 1; + [ "`ls $dir/$x.*.acc | wc -w`" -ne $nj ] && echo "$0: wrong #accs" && exit 1; + echo "$0: Re-estimating alignment model." + $cmd $dir/log/est_alimdl.log \ + gmm-est --write-occs=$dir/final.occs --remove-low-count-gaussians=false $dir/$x.mdl \ + "gmm-sum-accs - $dir/$x.*.acc|" $dir/$x.alimdl || exit 1; + rm $dir/$x.*.acc + rm $dir/final.alimdl 2>/dev/null + ln -s $x.alimdl $dir/final.alimdl +fi + +utils/summarize_warnings.pl $dir/log + +echo Done diff --git a/tools/ASR_2ch_track/steps/nnet/align.sh b/tools/ASR_2ch_track/steps/nnet/align.sh new file mode 100644 index 0000000000000000000000000000000000000000..faf7383e600b0359d8b77f506447c559bc8e2ece --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/align.sh @@ -0,0 +1,125 @@ +#!/bin/bash +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 + +# Aligns 'data' to sequences of transition-ids using Neural Network based acoustic model. +# Optionally produces alignment in lattice format, this is handy to get word alignment. + +# Begin configuration section. +nj=4 +cmd=run.pl +stage=0 +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +nnet_forward_opts="--no-softmax=true --prior-scale=1.0" +ivector= # rx-specifier with i-vectors (ark-with-vectors), + +align_to_lats=false # optionally produce alignment in lattice format + lats_decode_opts="--acoustic-scale=0.1 --beam=20 --lattice_beam=10" + lats_graph_scales="--transition-scale=1.0 --self-loop-scale=0.1" + +use_gpu="no" # yes|no|optionaly +# End configuration options. + +[ $# -gt 0 ] && echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. 
parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 4 ]; then + echo "usage: $0 " + echo "e.g.: $0 data/train data/lang exp/tri1 exp/tri1_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +sdata=$data/split$nj +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +cp $srcdir/{tree,final.mdl} $dir || exit 1; + +# Select default locations to model files +nnet=$srcdir/final.nnet; +class_frame_counts=$srcdir/ali_train_pdf.counts +feature_transform=$srcdir/final.feature_transform +model=$dir/final.mdl + +# Check that files exist +for f in $sdata/1/feats.scp $sdata/1/text $lang/L.fst $nnet $model $feature_transform $class_frame_counts; do + [ ! -f $f ] && echo "$0: missing file $f" && exit 1; +done + + +# PREPARE FEATURE EXTRACTION PIPELINE +# import config, +cmvn_opts= +delta_opts= +D=$srcdir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# apply-cmvn (optional), +[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector --print-args=false "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +# nnet-forward, +feats="$feats nnet-forward $nnet_forward_opts --feature-transform=$feature_transform --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $nnet ark:- ark:- |" +# + +echo "$0: aligning data '$data' using nnet/model '$srcdir', putting alignments in '$dir'" + +# Map oovs in reference transcription, +oov=`cat $lang/oov.int` || exit 1; +tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; +# We could just use align-mapped in the next line, but it's less efficient as it compiles the +# training graphs one by one. 
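+# The pipeline below compiles the training graphs on the fly and aligns in a
+# single pass.  Once it finishes, the transition-id alignments in
+# $dir/ali.*.gz can be inspected as phone sequences, e.g. (illustrative
+# one-liner):
+#   ali-to-phones $dir/final.mdl "ark:gunzip -c $dir/ali.1.gz|" ark,t:- | utils/int2sym.pl -f 2- $lang/phones.txt | head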
+if [ $stage -le 0 ]; then + train_graphs="ark:compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst '$tra' ark:- |" + $cmd JOB=1:$nj $dir/log/align.JOB.log \ + compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" ark:- \| \ + align-compiled-mapped $scale_opts --beam=$beam --retry-beam=$retry_beam $dir/final.mdl ark:- \ + "$feats" "ark,t:|gzip -c >$dir/ali.JOB.gz" || exit 1; +fi + +# Optionally align to lattice format (handy to get word alignment) +if [ "$align_to_lats" == "true" ]; then + echo "$0: aligning also to lattices '$dir/lat.*.gz'" + $cmd JOB=1:$nj $dir/log/align_lat.JOB.log \ + compile-train-graphs $lat_graph_scale $dir/tree $dir/final.mdl $lang/L.fst "$tra" ark:- \| \ + latgen-faster-mapped $lat_decode_opts --word-symbol-table=$lang/words.txt $dir/final.mdl ark:- \ + "$feats" "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1; +fi + +echo "$0: done aligning data." diff --git a/tools/ASR_2ch_track/steps/nnet/decode.sh b/tools/ASR_2ch_track/steps/nnet/decode.sh new file mode 100644 index 0000000000000000000000000000000000000000..2ede0e074027f714fe04ed5a5547b82567b77a26 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/decode.sh @@ -0,0 +1,160 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely), Daniel Povey +# Apache 2.0 + +# Begin configuration section. +nnet= # non-default location of DNN (optional) +feature_transform= # non-default location of feature_transform (optional) +model= # non-default location of transition model (optional) +class_frame_counts= # non-default location of PDF counts (optional) +srcdir= # non-default location of DNN-dir (decouples model dir from decode dir) +ivector= # rx-specifier with i-vectors (ark-with-vectors), + +blocksoftmax_dims= # 'csl' with block-softmax dimensions: dim1,dim2,dim3,... +blocksoftmax_active= # '1' for the 1st block, + +stage=0 # stage=1 skips lattice generation +nj=4 +cmd=run.pl + +acwt=0.10 # note: only really affects pruning (scoring is on lattices). +beam=13.0 +lattice_beam=8.0 +min_active=200 +max_active=7000 # limit of active tokens +max_mem=50000000 # approx. limit to memory consumption during minimization in bytes +nnet_forward_opts="--no-softmax=true --prior-scale=1.0" + +skip_scoring=false +scoring_opts="--min-lmwt 4 --max-lmwt 15" + +num_threads=1 # if >1, will use latgen-faster-parallel +parallel_opts= # Ignored now. +use_gpu="no" # yes|no|optionaly +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 3 ]; then + echo "Usage: $0 [options] " + echo "... where is assumed to be a sub-directory of the directory" + echo " where the DNN and transition model is." + echo "e.g.: $0 exp/dnn1/graph_tgpr data/test exp/dnn1/decode_tgpr" + echo "" + echo "This script works on plain or modified features (CMN,delta+delta-delta)," + echo "which are then sent through feature-transform. It works out what type" + echo "of features you used from content of srcdir." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." 
+ echo "" + echo " --nnet # non-default location of DNN (opt.)" + echo " --srcdir # non-default dir with DNN/models, can be different" + echo " # from parent dir of ' (opt.)" + echo "" + echo " --acwt # select acoustic scale for decoding" + echo " --scoring-opts # options forwarded to local/score.sh" + echo " --num-threads # N>1: run multi-threaded decoder" + exit 1; +fi + + +graphdir=$1 +data=$2 +dir=$3 +[ -z $srcdir ] && srcdir=`dirname $dir`; # Default model directory one level up from decoding directory. +sdata=$data/split$nj; + +mkdir -p $dir/log + +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +# Select default locations to model files (if not already set externally) +[ -z "$nnet" ] && nnet=$srcdir/final.nnet +[ -z "$model" ] && model=$srcdir/final.mdl +[ -z "$feature_transform" -a -e $srcdir/final.feature_transform ] && feature_transform=$srcdir/final.feature_transform +# +[ -z "$class_frame_counts" -a -f $srcdir/prior_counts ] && class_frame_counts=$srcdir/prior_counts # priority, +[ -z "$class_frame_counts" ] && class_frame_counts=$srcdir/ali_train_pdf.counts + +# Check that files exist, +for f in $sdata/1/feats.scp $nnet $model $feature_transform $class_frame_counts $graphdir/HCLG.fst; do + [ ! -f $f ] && echo "$0: missing file $f" && exit 1; +done + +# Possibly use multi-threaded decoder +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + + +# PREPARE FEATURE EXTRACTION PIPELINE +# import config, +cmvn_opts= +delta_opts= +D=$srcdir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# apply-cmvn (optional), +[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector --print-args=false "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +# select a block from blocksoftmax, +if [ ! -z "$blocksoftmax_dims" ]; then + # blocksoftmax_active is a csl! dim1,dim2,dim3,... 
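+  # Worked example (numbers purely illustrative): with --blocksoftmax-dims
+  # 2000,1500 and --blocksoftmax-active 2, the lines below give
+  # dim_total=3500, dim_block=1500 and offset=2000, i.e. network outputs
+  # 2001..3500 are kept and re-normalised by the softmax in
+  # copy_and_softmax.nnet.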
+ [ -z "$blocksoftmax_active" ] && echo "$0 Missing option --blocksoftmax-active N" && exit 1 + # getting dims, + dim_total=$(awk -F'[:,]' '{ for(i=1;i<=NF;i++) { sum += $i }; print sum; }' <(echo $blocksoftmax_dims)) + dim_block=$(awk -F'[:,]' -v active=$blocksoftmax_active '{ print $active; }' <(echo $blocksoftmax_dims)) + offset=$(awk -F'[:,]' -v active=$blocksoftmax_active '{ sum=0; for(i=1;i $dim_total $dim_block $((1+offset)):$((offset+dim_block)) "; + echo " $dim_block $dim_block") $dir/copy_and_softmax.nnet + # nnet is assembled on-the fly, is removed, while + is added, + nnet="nnet-concat 'nnet-copy --remove-last-components=1 $nnet - |' $dir/copy_and_softmax.nnet - |" +fi + +# Run the decoding in the queue, +if [ $stage -le 0 ]; then + $cmd --num-threads $((num_threads+1)) JOB=1:$nj $dir/log/decode.JOB.log \ + nnet-forward $nnet_forward_opts --feature-transform=$feature_transform --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu "$nnet" "$feats" ark:- \| \ + latgen-faster-mapped$thread_string --min-active=$min_active --max-active=$max_active --max-mem=$max_mem --beam=$beam \ + --lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \ + $model $graphdir/HCLG.fst ark:- "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +# Run the scoring +if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + local/score.sh $scoring_opts --cmd "$cmd" $data $graphdir $dir || exit 1; +fi + +exit 0; diff --git a/tools/ASR_2ch_track/steps/nnet/make_bn_feats.sh b/tools/ASR_2ch_track/steps/nnet/make_bn_feats.sh new file mode 100644 index 0000000000000000000000000000000000000000..fb1412aace8423b71d92d32b5742611231c28238 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/make_bn_feats.sh @@ -0,0 +1,129 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 +# To be run from .. (one directory up from here) +# see ../run.sh for example + +# Begin configuration section. +nj=4 +cmd=run.pl +remove_last_components=4 # remove N last components from the nnet +nnet_forward_opts= +use_gpu=no +htk_save=false +ivector= # rx-specifier with i-vectors (ark-with-vectors), +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 5 ]; then + echo "usage: $0 [options] "; + echo "options: " + echo " --cmd 'queue.pl ' # how to run jobs." + echo " --nj # number of parallel jobs" + echo " --remove-last-components # number of NNet Components to remove from the end" + echo " --use-gpu (no|yes|optional) # forwarding on GPU" + exit 1; +fi + +if [ -f path.sh ]; then . path.sh; fi + +data=$1 +srcdata=$2 +nndir=$3 +logdir=$4 +bnfeadir=$5 + +######## CONFIGURATION + +# copy the dataset metadata from srcdata. +mkdir -p $data $logdir $bnfeadir || exit 1; +utils/copy_data_dir.sh $srcdata $data; rm $data/{feats,cmvn}.scp 2>/dev/null + +# make $bnfeadir an absolute pathname. +[ '/' != ${bnfeadir:0:1} ] && bnfeadir=$PWD/$bnfeadir + +required="$srcdata/feats.scp $nndir/final.nnet $nndir/final.feature_transform" +for f in $required; do + [ ! 
-f $f ] && echo "$0: Missing $f" && exit 1; +done + +name=$(basename $srcdata) +sdata=$srcdata/split$nj +[[ -d $sdata && $srcdata/feats.scp -ot $sdata ]] || split_data.sh $srcdata $nj || exit 1; + +# Concat feature transform with trimmed MLP: +nnet=$bnfeadir/feature_extractor.nnet +nnet-concat $nndir/final.feature_transform "nnet-copy --remove-last-components=$remove_last_components $nndir/final.nnet - |" $nnet 2>$logdir/feature_extractor.log || exit 1 +nnet-info $nnet >$data/feature_extractor.nnet-info + +echo "Creating bn-feats into $data" + +# PREPARE FEATURE EXTRACTION PIPELINE +# import config, +cmvn_opts= +delta_opts= +D=$nndir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# apply-cmvn (optional), +[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$srcdata/utt2spk scp:$srcdata/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector --print-args=false "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! 
(expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +if [ $htk_save == false ]; then + # Run the forward pass, + $cmd JOB=1:$nj $logdir/make_bnfeats.JOB.log \ + nnet-forward $nnet_forward_opts --use-gpu=$use_gpu $nnet "$feats" \ + ark,scp:$bnfeadir/raw_bnfea_$name.JOB.ark,$bnfeadir/raw_bnfea_$name.JOB.scp \ + || exit 1; + # concatenate the .scp files + for ((n=1; n<=nj; n++)); do + cat $bnfeadir/raw_bnfea_$name.$n.scp >> $data/feats.scp + done + + # check sentence counts, + N0=$(cat $srcdata/feats.scp | wc -l) + N1=$(cat $data/feats.scp | wc -l) + [[ "$N0" != "$N1" ]] && echo "$0: sentence-count mismatch, $srcdata $N0, $data $N1" && exit 1 + echo "Succeeded creating MLP-BN features '$data'" + +else # htk_save == true + # Run the forward pass saving HTK features, + $cmd JOB=1:$nj $logdir/make_bnfeats_htk.JOB.log \ + mkdir -p $data/htkfeats/JOB \; \ + nnet-forward $nnet_forward_opts --use-gpu=$use_gpu $nnet "$feats" ark:- \| \ + copy-feats-to-htk --output-dir=$data/htkfeats/JOB ark:- || exit 1 + # Make list of htk features, + find $data/htkfeats -name *.fea >$data/htkfeats.scp + + # Check sentence counts, + N0=$(cat $srcdata/feats.scp | wc -l) + N1=$(find $data/htkfeats.scp | wc -l) + [[ "$N0" != "$N1" ]] && echo "$0: sentence-count mismatch, $srcdata $N0, $data/htk* $N1" && exit 1 + echo "Succeeded creating MLP-BN features '$data/htkfeats.scp'" +fi diff --git a/tools/ASR_2ch_track/steps/nnet/make_denlats.sh b/tools/ASR_2ch_track/steps/nnet/make_denlats.sh new file mode 100644 index 0000000000000000000000000000000000000000..2e3588c6b047d154356c10225e4c332b11a9fc1e --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/make_denlats.sh @@ -0,0 +1,205 @@ +#!/bin/bash +# Copyright 2012-2013 Brno University of Technology (author: Karel Vesely), Daniel Povey +# Apache 2.0. + +# Create denominator lattices for MMI/MPE/sMBR training. +# Creates its output in $dir/lat.*.ark,$dir/lat.scp +# The lattices are uncompressed, we need random access for DNN training. + +# Begin configuration section. +nj=4 +cmd=run.pl +sub_split=1 +beam=13.0 +lattice_beam=7.0 +acwt=0.1 +max_active=5000 +nnet= +nnet_forward_opts="--no-softmax=true --prior-scale=1.0" +max_mem=20000000 # This will stop the processes getting too large. +# This is in bytes, but not "real" bytes-- you have to multiply +# by something like 5 or 10 to get real bytes (not sure why so large) +# End configuration section. +use_gpu=no # yes|no|optional +parallel_opts="--num-threads 2" +ivector= # rx-specifier with i-vectors (ark-with-vectors), + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 4 ]; then + echo "Usage: steps/$0 [options] " + echo " e.g.: steps/$0 data/train data/lang exp/tri1 exp/tri1_denlats" + echo "Works for plain features (or CMN, delta), forwarded through feature-transform." + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --sub-split # e.g. 40; use this for " + echo " # large databases so your jobs will be smaller and" + echo " # will (individually) finish reasonably soon." 
+ exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +sdata=$data/split$nj +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +oov=`cat $lang/oov.int` || exit 1; + +mkdir -p $dir + +cp -r $lang $dir/ + +# Compute grammar FST which corresponds to unigram decoding graph. +new_lang="$dir/"$(basename "$lang") +echo "Making unigram grammar FST in $new_lang" +cat $data/text | utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt | \ + awk '{for(n=2;n<=NF;n++){ printf("%s ", $n); } printf("\n"); }' | \ + utils/make_unigram_grammar.pl | fstcompile | fstarcsort --sort_type=ilabel > $new_lang/G.fst \ + || exit 1; + +# mkgraph.sh expects a whole directory "lang", so put everything in one directory... +# it gets L_disambig.fst and G.fst (among other things) from $dir/lang, and +# final.mdl from $srcdir; the output HCLG.fst goes in $dir/graph. + +echo "Compiling decoding graph in $dir/dengraph" +if [ -s $dir/dengraph/HCLG.fst ] && [ $dir/dengraph/HCLG.fst -nt $srcdir/final.mdl ]; then + echo "Graph $dir/dengraph/HCLG.fst already exists: skipping graph creation." +else + utils/mkgraph.sh $new_lang $srcdir $dir/dengraph || exit 1; +fi + + +cp $srcdir/{tree,final.mdl} $dir + +# Select default locations to model files +[ -z "$nnet" ] && nnet=$srcdir/final.nnet; +class_frame_counts=$srcdir/ali_train_pdf.counts +feature_transform=$srcdir/final.feature_transform +model=$dir/final.mdl + +# Check that files exist +for f in $sdata/1/feats.scp $nnet $model $feature_transform $class_frame_counts; do + [ ! -f $f ] && echo "$0: missing file $f" && exit 1; +done + + +# PREPARE FEATURE EXTRACTION PIPELINE +# import config, +cmvn_opts= +delta_opts= +D=$srcdir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |" +# apply-cmvn (optional), +[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector --print-args=false "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +# nnet-forward, +feats="$feats nnet-forward $nnet_forward_opts --feature-transform=$feature_transform --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $nnet ark:- ark:- |" + +# if this job is interrupted by the user, we want any background jobs to be +# killed too. 
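+# (Note, an illustration of the mechanism: 'jobs -pr' lists the PIDs of this shell's
+# still-running background jobs, so the handler registered below kills exactly the
+# sub-split decoders started with '&' later in this script; trapping INT, QUIT, TERM
+# and EXIT means they are reaped both on user interruption and on normal/error exit.)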
+cleanup() { + local pids=$(jobs -pr) + [ -n "$pids" ] && kill $pids || true +} +trap "cleanup" INT QUIT TERM EXIT + + +echo "$0: generating denlats from data '$data', putting lattices in '$dir'" +#1) Generate the denominator lattices +if [ $sub_split -eq 1 ]; then + # Prepare 'scp' for storing lattices separately and gzipped + for n in `seq $nj`; do + [ ! -d $dir/lat$n ] && mkdir $dir/lat$n; + cat $sdata/$n/feats.scp | awk '{ print $1" | gzip -c >'$dir'/lat'$n'/"$1".gz"; }' + done >$dir/lat.store_separately_as_gz.scp + # Generate the lattices + $cmd $parallel_opts JOB=1:$nj $dir/log/decode_den.JOB.log \ + latgen-faster-mapped --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats" "scp:$dir/lat.store_separately_as_gz.scp" || exit 1; +else + # each job from 1 to $nj is split into multiple pieces (sub-split), and we aim + # to have at most two jobs running at each time. The idea is that if we have stragglers + # from one job, we can be processing another one at the same time. + rm -f $dir/.error + + prev_pid= + for n in `seq $[nj+1]`; do + if [ $n -gt $nj ]; then + this_pid= + elif [ -f $dir/.done.$n ] && [ $dir/.done.$n -nt $srcdir/final.mdl ]; then + echo "Not processing subset $n as already done (delete $dir/.done.$n if not)"; + this_pid= + else + sdata2=$data/split$nj/$n/split$sub_split; + if [ ! -d $sdata2 ] || [ $sdata2 -ot $sdata/$n/feats.scp ]; then + split_data.sh --per-utt $sdata/$n $sub_split || exit 1; + fi + mkdir -p $dir/log/$n + mkdir -p $dir/part + feats_subset=$(echo $feats | sed s:JOB/:$n/split$sub_split/JOB/:g) + # Prepare 'scp' for storing lattices separately and gzipped + for k in `seq $sub_split`; do + [ ! -d $dir/lat$n/$k ] && mkdir -p $dir/lat$n/$k; + cat $sdata2/$k/feats.scp | awk '{ print $1" | gzip -c >'$dir'/lat'$n'/'$k'/"$1".gz"; }' + done >$dir/lat.$n.store_separately_as_gz.scp + # Generate lattices + $cmd $parallel_opts JOB=1:$sub_split $dir/log/$n/decode_den.JOB.log \ + latgen-faster-mapped --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats_subset" scp:$dir/lat.$n.store_separately_as_gz.scp || touch .error & + this_pid=$! + fi + if [ ! -z "$prev_pid" ]; then # Wait for the previous job; merge the previous set of lattices. + wait $prev_pid + [ -f $dir/.error ] && echo "$0: error generating denominator lattices" && exit 1; + touch $dir/.done.$prev_n + fi + prev_n=$n + prev_pid=$this_pid + done +fi + +#2) Generate 'scp' for reading the lattices +# make $dir an absolute pathname. +[ '/' != ${dir:0:1} ] && dir=$PWD/$dir +for n in `seq $nj`; do + find $dir/lat${n} -name "*.gz" | perl -ape 's:.*/([^/]+)\.gz$:$1 gunzip -c $& |:; ' +done | sort >$dir/lat.scp +[ -s $dir/lat.scp ] || exit 1 + +echo "$0: done generating denominator lattices." diff --git a/tools/ASR_2ch_track/steps/nnet/make_fmllr_feats.sh b/tools/ASR_2ch_track/steps/nnet/make_fmllr_feats.sh new file mode 100644 index 0000000000000000000000000000000000000000..c9d679004f107c9723617294a5d96731ef8a88dc --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/make_fmllr_feats.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely), +# +# Apache 2.0. 
+# +# This script dumps fMLLR features in a new data directory, +# which is later used for neural network training/testing. + +# Begin configuration section. +nj=4 +cmd=run.pl +transform_dir= +raw_transform_dir= +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 5 ]; then + echo "Usage: $0 [options] " + echo "e.g.: $0 data-fmllr/train data/train exp/tri5a exp/make_fmllr_feats/log plp/processed/" + echo "" + echo "This script dumps fMLLR features to disk, so it can be used for NN training." + echo "It automoatically figures out the 'feature-type' of the source GMM systems." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs" + echo " --nj # number of parallel jobs" + echo " --transform-dir # dir with fMLLR transforms" + echo " --raw-transform-dir # dir with raw-fMLLR transforms" + exit 1; +fi + +data=$1 +srcdata=$2 +gmmdir=$3 +logdir=$4 +feadir=$5 + +sdata=$srcdata/split$nj; + +# Get the config, +D=$gmmdir +[ -f $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) || cmvn_opts= +[ -f $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) || delta_opts= +[ -f $D/splice_opts ] && splice_opts=$(cat $D/splice_opts) || splice_opts= + +mkdir -p $data $logdir $feadir +[[ -d $sdata && $srcdata/feats.scp -ot $sdata ]] || split_data.sh $srcdata $nj || exit 1; + +# Check files exist, +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp; do + [ ! -f $f ] && echo "$0: Missing $f" && exit 1; +done +[ ! -z "$transform_dir" -a ! -f $transform_dir/trans.1 ] && \ + echo "$0: Missing $transform_dir/trans.1" && exit 1; +[ ! -z "$raw_transform_dir" -a ! -f $raw_transform_dir/raw_trans.1 ] && \ + echo "$0: Missing $raw_transform_dir/raw_trans.1" && exit 1; + +# Figure-out the feature-type, +feat_type="[UNKNOWN]" +[ -z "$raw_transform_dir" -a ! -f $gmmdir/final.mat -a ! -z "$transform_dir" ] && feat_type=delta_fmllr +[ -z "$raw_transform_dir" -a -f $gmmdir/final.mat -a ! -z "$transform_dir" ] && feat_type=lda_fmllr +[ ! 
-z "$raw_transform_dir" ] && feat_type=raw_fmllr +echo "$0: feature type is $feat_type"; + +# Hand-code the feature pipeline, +case $feat_type in + delta_fmllr) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk \"ark:cat $transform_dir/trans.* |\" ark:- ark:- |";; + lda_fmllr) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $gmmdir/final.mat ark:- ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk \"ark:cat $transform_dir/trans.* |\" ark:- ark:- |";; + raw_fmllr) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$raw_transform_dir/raw_trans.JOB ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +# Prepare the output dir, +utils/copy_data_dir.sh $srcdata $data; rm $data/{feats,cmvn}.scp 2>/dev/null +# Make $feadir an absolute pathname, +[ '/' != ${feadir:0:1} ] && feadir=$PWD/$feadir + +# Store the output-features, +name=`basename $data` +$cmd JOB=1:$nj $logdir/make_fmllr_feats.JOB.log \ + copy-feats "$feats" \ + ark,scp:$feadir/feats_fmllr_$name.JOB.ark,$feadir/feats_fmllr_$name.JOB.scp || exit 1; + +# Merge the scp, +for n in $(seq 1 $nj); do + cat $feadir/feats_fmllr_$name.$n.scp +done > $data/feats.scp + +echo "$0: Done!, type $feat_type, $srcdata --> $data, using : raw-trans ${raw_transform_dir:-None}, gmm $gmmdir, trans ${transform_dir:-None}" + +exit 0; diff --git a/tools/ASR_2ch_track/steps/nnet/make_fmmi_feats.sh b/tools/ASR_2ch_track/steps/nnet/make_fmmi_feats.sh new file mode 100644 index 0000000000000000000000000000000000000000..2874f00067be678d457c119eec158320b8e8182c --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/make_fmmi_feats.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely), +# +# Apache 2.0 +# +# This script dumps fMMI features in a new data directory, +# which is later used for neural network training/testing. + +# Begin configuration section. +iter=final +nj=4 +cmd=run.pl +ngselect=2; # Just use the 2 top Gaussians for fMMI/fMPE. Should match train. +transform_dir= +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 5 ]; then + echo "Usage: $0 [options] " + echo "e.g.: $0 data-fmmi/train data/train exp/tri5a_fmmi_b0.1 data-fmmi/train/_log data-fmmi/train/_data " + echo "" + echo "This script works on CMN + (delta+delta-delta | LDA+MLLT) features; it works out" + echo "what type of features you used (assuming it's one of these two)" + echo "You can also use fMLLR features-- you have to supply --transform-dir option." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --iter # Iteration of model to test." + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --transform-dir # where to find fMLLR transforms." 
+ exit 1; +fi + +data=$1 +srcdata=$2 +gmmdir=$3 +logdir=$4 +feadir=$5 + +sdata=$srcdata/split$nj; + +# Get the config, +D=$gmmdir +[ -f $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) || cmvn_opts= +[ -f $D/splice_opts ] && splice_opts=$(cat $D/splice_opts) || splice_opts= + +mkdir -p $data $logdir $feadir +[[ -d $sdata && $srcdata/feats.scp -ot $sdata ]] || split_data.sh $srcdata $nj || exit 1; + +for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $gmmdir/$iter.fmpe; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +if [ -f $gmmdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +echo "$0: feature type is $feat_type"; + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $gmmdir/final.mat ark:- ark:- |";; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then # add transforms to features... + echo "Using fMLLR transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist." + [ "`cat $transform_dir/num_jobs`" -ne $nj ] && \ + echo "Mismatch in number of jobs with $transform_dir"; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +fi + +# Get Gaussian selection info. +$cmd JOB=1:$nj $logdir/gselect.JOB.log \ + gmm-gselect --n=$ngselect $gmmdir/$iter.fmpe "$feats" \ + "ark:|gzip -c >$feadir/gselect.JOB.gz" || exit 1; + +# prepare the dir +cp $srcdata/* $data 2>/dev/null; rm $data/{feats,cmvn}.scp; + +# make $bnfeadir an absolute pathname. +feadir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $feadir ${PWD}` + +# forward the feats +$cmd JOB=1:$nj $logdir/make_fmmi_feats.JOB.log \ + fmpe-apply-transform $gmmdir/$iter.fmpe "$feats" "ark,s,cs:gunzip -c $feadir/gselect.JOB.gz|" \ + ark,scp:$feadir/feats_fmmi.JOB.ark,$feadir/feats_fmmi.JOB.scp || exit 1; + +# merge the feats to single SCP +for n in $(seq 1 $nj); do + cat $feadir/feats_fmmi.$n.scp +done > $data/feats.scp + +echo "$0 finished... $srcdata -> $data ($gmmdir)" + +exit 0; diff --git a/tools/ASR_2ch_track/steps/nnet/make_priors.sh b/tools/ASR_2ch_track/steps/nnet/make_priors.sh new file mode 100644 index 0000000000000000000000000000000000000000..09d196a8205612c32568113539ec00e0103d6823 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/make_priors.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 +# To be run from .. (one directory up from here) +# see ../run.sh for example + +# Begin configuration section. +nj=4 +cmd=run.pl +use_gpu=no +ivector= +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 2 ]; then + echo "usage: $0 [options] "; + echo "options: " + echo " --cmd 'queue.pl ' # how to run jobs." + echo " --nj # number of parallel jobs" + echo " --remove-last-components # number of NNet Components to remove from the end" + echo " --use-gpu (no|yes|optional) # forwarding on GPU" + exit 1; +fi + +if [ -f path.sh ]; then . 
path.sh; fi + +data=$1 +nndir=$2 + +######## CONFIGURATION + +required="$data/feats.scp $nndir/final.nnet $nndir/final.feature_transform" +for f in $required; do + [ ! -f $f ] && echo "$0: Missing $f" && exit 1; +done + +sdata=$data/split$nj +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +echo "Accumulating prior stats by forwarding '$data' with '$nndir'" + +# We estimate priors on 10k utterances, selected randomly from the splitted data, +N=$((10000/nj)) + +# PREPARE FEATURE EXTRACTION PIPELINE +# import config, +cmvn_opts= +delta_opts= +D=$nndir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark:cat $sdata/JOB/feats.scp | utils/shuffle_list.pl --srand 777 | head -n$N | copy-feats scp:- ark:- |" +# apply-cmvn (optional), +[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector --print-args=false "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +# Run the forward pass, +$cmd JOB=1:$nj $nndir/log/prior_stats.JOB.log \ + nnet-forward --use-gpu=$use_gpu --feature-transform=$nndir/final.feature_transform $nndir/final.nnet "$feats" ark:- \| \ + compute-cmvn-stats --binary=false ark:- $nndir/JOB.prior_cmvn_stats || exit 1 + +sum-matrices --binary=false $nndir/prior_cmvn_stats $nndir/*.prior_cmvn_stats 2>$nndir/log/prior_sum_matrices.log || exit 1 +rm $nndir/*.prior_cmvn_stats + +awk 'NR==2{ $NF=""; print "[",$0,"]"; }' $nndir/prior_cmvn_stats >$nndir/prior_counts || exit 1 + +echo "Succeeded creating prior counts '$nndir/prior_counts' from '$data'" + diff --git a/tools/ASR_2ch_track/steps/nnet/pretrain_dbn.sh b/tools/ASR_2ch_track/steps/nnet/pretrain_dbn.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac291c01efc52e25534e6b42e6b1260d7be189a9 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/pretrain_dbn.sh @@ -0,0 +1,336 @@ +#!/bin/bash +# Copyright 2013-2015 Brno University of Technology (author: Karel Vesely) + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + +# To be run from ../../ +# +# Restricted Boltzman Machine (RBM) pre-training by Contrastive Divergence +# algorithm (CD-1). A stack of RBMs forms a Deep Belief Neetwork (DBN). +# +# This script by default pre-trains on plain features (ie. saved fMLLR features), +# building a 'feature_transform' containing +/-5 frame splice and global CMVN. +# +# There is also a support for adding speaker-based CMVN, deltas, i-vectors, +# or passing custom 'feature_transform' or its prototype. +# + +# Begin configuration. + +# topology, initialization, +nn_depth=6 # number of hidden layers, +hid_dim=2048 # number of neurons per layer, +param_stddev_first=0.1 # init parameters in 1st RBM +param_stddev=0.1 # init parameters in other RBMs +input_vis_type=gauss # type of visible nodes on DBN input + +# number of iterations, +rbm_iter=1 # number of pre-training epochs (Gaussian-Bernoulli RBM has 2x more) + +# pre-training opts, +rbm_lrate=0.4 # RBM learning rate +rbm_lrate_low=0.01 # lower RBM learning rate (for Gaussian units) +rbm_l2penalty=0.0002 # L2 penalty (increases RBM-mixing rate) +rbm_extra_opts= + +# data processing, +copy_feats=true # resave the features to tmpdir, +copy_feats_tmproot=/tmp/kaldi.XXXX # sets tmproot for 'copy-feats', + +# feature processing, +splice=5 # (default) splice features both-ways along time axis, +cmvn_opts= # (optional) adds 'apply-cmvn' to input feature pipeline, see opts, +delta_opts= # (optional) adds 'add-deltas' to input feature pipeline, see opts, +ivector= # (optional) adds 'append-vector-to-feats', it's rx-filename, + +feature_transform_proto= # (optional) use this prototype for 'feature_transform', +feature_transform= # (optional) directly use this 'feature_transform', + +# misc. +verbose=1 # enable per-cache reports +skip_cuda_check=false + +# End configuration. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh; +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 2 ]; then + echo "Usage: $0 " + echo " e.g.: $0 data/train exp/rbm_pretrain" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo "" + echo " --nn-depth # number of RBM layers" + echo " --hid-dim # number of hidden units per layer" + echo " --rbm-iter # number of CD-1 iterations per layer" + echo " # can be used to subsample large datasets" + echo " --rbm-lrate # learning-rate for Bernoulli-Bernoulli RBMs" + echo " --rbm-lrate-low # learning-rate for Gaussian-Bernoulli RBM" + echo "" + echo " --cmvn-opts # add 'apply-cmvn' to input feature pipeline" + echo " --delta-opts # add 'add-deltas' to input feature pipeline" + echo " --splice # splice +/-N frames of input features" + echo " --copy-feats # copy features to /tmp, lowers storage stress" + echo "" + echo " --feature_transform_proto # use this prototype for 'feature_transform'" + echo " --feature-transform # directly use this 'feature_transform'" + exit 1; +fi + +data=$1 +dir=$2 + + +for f in $data/feats.scp; do + [ ! 
-f $f ] && echo "$0: no such file $f" && exit 1; +done + +echo "# INFO" +echo "$0 : Pre-training Deep Belief Network as a stack of RBMs" +printf "\t dir : $dir \n" +printf "\t Train-set : $data '$(cat $data/feats.scp | wc -l)'\n" +echo + +[ -e $dir/${nn_depth}.dbn ] && echo "$0 Skipping, already have $dir/${nn_depth}.dbn" && exit 0 + +# check if CUDA compiled in and GPU is available, +if ! $skip_cuda_check; then cuda-gpu-available || exit 1; fi + +mkdir -p $dir/log + +###### PREPARE FEATURES ###### +echo +echo "# PREPARING FEATURES" +if [ "$copy_feats" == "true" ]; then + # re-save the features to local disk into /tmp/, + tmpdir=$(mktemp -d $copy_feats_tmproot) + copy-feats scp:$data/feats.scp ark,scp:$tmpdir/train.ark,$dir/train_sorted.scp || exit 1 + trap "echo \"# Removing features tmpdir $tmpdir @ $(hostname)\"; ls $tmpdir; rm -r $tmpdir" EXIT +else + # or copy the list, + cp $data/feats.scp $dir/train_sorted.scp +fi +# shuffle the list, +utils/shuffle_list.pl --srand 777 <$dir/train_sorted.scp >$dir/train.scp + +# create a 10k utt subset for global cmvn estimates, +head -n 10000 $dir/train.scp > $dir/train.scp.10k + +# for debugging, add list with non-local features, +utils/shuffle_list.pl --srand 777 <$data/feats.scp >$dir/train.scp_non_local + +###### OPTIONALLY IMPORT FEATURE SETTINGS ###### +ivector_dim= # no ivectors, +if [ ! -z $feature_transform ]; then + D=$(dirname $feature_transform) + echo "# importing feature settings from dir '$D'" + [ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) + [ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) + [ -e $D/ivector_dim ] && ivector_dim=$(cat $D/ivector_dim) + echo "# cmvn_opts='$cmvn_opts' delta_opts='$delta_opts' ivector_dim='$ivector_dim'" +fi + +###### PREPARE FEATURE PIPELINE ###### +# read the features +feats_tr="ark:copy-feats scp:$dir/train.scp ark:- |" + +# optionally add per-speaker CMVN +if [ ! -z "$cmvn_opts" ]; then + echo "+ 'apply-cmvn' with '$cmvn_opts' using statistics : $data/cmvn.scp" + [ ! -r $data/cmvn.scp ] && echo "Missing $data/cmvn.scp" && exit 1; + [ ! -r $data/utt2spk ] && echo "Missing $data/utt2spk" && exit 1; + feats_tr="$feats_tr apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp ark:- ark:- |" +else + echo "# 'apply-cmvn' not used," +fi + +# optionally add deltas +if [ ! -z "$delta_opts" ]; then + feats_tr="$feats_tr add-deltas $delta_opts ark:- ark:- |" + echo "# + 'add-deltas' with '$delta_opts'" +fi + +# keep track of the config, +[ ! -z "$cmvn_opts" ] && echo "$cmvn_opts" >$dir/cmvn_opts +[ ! -z "$delta_opts" ] && echo "$delta_opts" >$dir/delta_opts +# + +# get feature dim, +feat_dim=$(feat-to-dim "$feats_tr" -) +echo "# feature dim : $feat_dim (input of 'feature_transform')" + +# Now we start building 'feature_transform' which goes right in front of a NN. +# The forwarding is computed on a GPU before the frame shuffling is applied. +# +# Same GPU is used both for 'feature_transform' and the NN training. +# So it has to be done by a single process (we are using exclusive mode). +# This also reduces the CPU-GPU uploads/downloads to minimum. + +if [ ! -z "$feature_transform" ]; then + echo "# importing 'feature_transform' from '$feature_transform'" + tmp=$dir/imported_$(basename $feature_transform) + cp $feature_transform $tmp; feature_transform=$tmp +else + # Make default proto with splice, + if [ ! 
-z $feature_transform_proto ]; then + echo "# importing custom 'feature_transform_proto' from : $feature_transform_proto" + else + echo "+ default 'feature_transform_proto' with splice +/-$splice frames" + feature_transform_proto=$dir/splice${splice}.proto + echo " $feat_dim $(((2*splice+1)*feat_dim)) -$splice:$splice " >$feature_transform_proto + fi + + # Initialize 'feature-transform' from a prototype, + feature_transform=$dir/tr_$(basename $feature_transform_proto .proto).nnet + nnet-initialize --binary=false $feature_transform_proto $feature_transform + + # Renormalize the MLP input to zero mean and unit variance, + feature_transform_old=$feature_transform + feature_transform=${feature_transform%.nnet}_cmvn-g.nnet + echo "# compute normalization stats from 10k sentences" + nnet-forward --print-args=true --use-gpu=yes $feature_transform_old \ + "$(echo $feats_tr | sed 's|train.scp|train.scp.10k|')" ark:- |\ + compute-cmvn-stats ark:- $dir/cmvn-g.stats + echo "# + normalization of NN-input at '$feature_transform'" + nnet-concat --print-args=false --binary=false $feature_transform_old \ + "cmvn-to-nnet $dir/cmvn-g.stats -|" $feature_transform +fi + +if [ ! -z $ivector ]; then + echo + echo "# ADDING IVECTOR FEATURES" + # The iVectors are concatenated 'as they are' directly to the input of the neural network, + # To do this, we paste the features, and use where the 1st component + # contains the transform and 2nd network contains component. + + echo "# getting dims," + dim_raw=$(feat-to-dim "$feats_tr" -) + dim_ivec=$(copy-vector "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + echo "# dims, feats-raw $dim_raw, ivectors $dim_ivec," + + # Should we do something with 'feature_transform'? + if [ ! -z $ivector_dim ]; then + # No, the 'ivector_dim' comes from dir with 'feature_transform' with iVec forwarding, + echo "# assuming we got '$feature_transform' with ivector forwarding," + [ $ivector_dim != $dim_ivec ] && \ + echo -n "Error, i-vector dimensionality mismatch!" && \ + echo " (expected $ivector_dim, got $dim_ivec in $ivector)" && exit 1 + else + # Yes, adjust the transform to do ``iVec forwarding'', + feature_transform_old=$feature_transform + feature_transform=${feature_transform%.nnet}_ivec_copy.nnet + echo "# setting up ivector forwarding into '$feature_transform'," + dim_transformed=$(feat-to-dim "$feats_tr nnet-forward $feature_transform_old ark:- ark:- |" -) + nnet-initialize --print-args=false <(echo " $dim_ivec $dim_ivec 1:$dim_ivec ") $dir/tr_ivec_copy.nnet + nnet-initialize --print-args=false <(echo " $((dim_raw+dim_ivec)) $((dim_transformed+dim_ivec)) $feature_transform_old $dir/tr_ivec_copy.nnet ") $feature_transform + fi + echo $dim_ivec >$dir/ivector_dim # mark down the iVec dim! 
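+# ($dir/ivector_dim is read back by the other nnet scripts in this recipe, e.g.
+# steps/nnet/train.sh and steps/nnet/make_denlats.sh, which compare it against the
+# dimensionality of the --ivector supplied at run time and abort on a mismatch.)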
+ + # pasting the iVecs to the feaures, + echo "# + ivector input '$ivector'" + feats_tr="$feats_tr append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +###### Show the final 'feature_transform' in the log, +echo +echo "### Showing the final 'feature_transform':" +nnet-info $feature_transform +echo "###" + +###### MAKE LINK TO THE FINAL feature_transform, so the other scripts will find it ###### +[ -f $dir/final.feature_transform ] && unlink $dir/final.feature_transform +(cd $dir; ln -s $(basename $feature_transform) final.feature_transform ) +feature_transform=$dir/final.feature_transform + + +###### GET THE DIMENSIONS ###### +num_fea=$(feat-to-dim --print-args=false "$feats_tr nnet-forward --use-gpu=no $feature_transform ark:- ark:- |" - 2>/dev/null) +num_hid=$hid_dim + + +###### PERFORM THE PRE-TRAINING ###### +for depth in $(seq 1 $nn_depth); do + echo + echo "# PRE-TRAINING RBM LAYER $depth" + RBM=$dir/$depth.rbm + [ -f $RBM ] && echo "RBM '$RBM' already trained, skipping." && continue + + # The first RBM needs special treatment, because of Gussian input nodes, + if [ "$depth" == "1" ]; then + # This is usually Gaussian-Bernoulli RBM (not if CNN layers are part of input transform) + # initialize, + echo "# initializing '$RBM.init'" + echo " $num_fea $num_hid $input_vis_type bern $param_stddev_first" > $RBM.proto + nnet-initialize $RBM.proto $RBM.init 2>$dir/log/nnet-initialize.$depth.log || exit 1 + # pre-train, + num_iter=$rbm_iter; [ $input_vis_type == "gauss" ] && num_iter=$((2*rbm_iter)) # 2x more epochs for Gaussian input + [ $input_vis_type == "bern" ] && rbm_lrate_low=$rbm_lrate # original lrate for Bernoulli input + echo "# pretraining '$RBM' (input $input_vis_type, lrate $rbm_lrate_low, iters $num_iter)" + rbm-train-cd1-frmshuff --learn-rate=$rbm_lrate_low --l2-penalty=$rbm_l2penalty \ + --num-iters=$num_iter --verbose=$verbose \ + --feature-transform=$feature_transform \ + $rbm_extra_opts \ + $RBM.init "$feats_tr" $RBM 2>$dir/log/rbm.$depth.log || exit 1 + else + # This is Bernoulli-Bernoulli RBM, + # cmvn stats for init, + echo "# computing cmvn stats '$dir/$depth.cmvn' for RBM initialization" + if [ ! -f $dir/$depth.cmvn ]; then + nnet-forward --print-args=false --use-gpu=yes \ + "nnet-concat $feature_transform $dir/$((depth-1)).dbn - |" \ + "$(echo $feats_tr | sed 's|train.scp|train.scp.10k|')" ark:- | \ + compute-cmvn-stats --print-args=false ark:- - | \ + cmvn-to-nnet --print-args=false - $dir/$depth.cmvn || exit 1 + else + echo "# compute-cmvn-stats already done, skipping." 
+ fi + # initialize, + echo "initializing '$RBM.init'" + echo " $num_hid $num_hid bern bern $param_stddev $dir/$depth.cmvn" > $RBM.proto + nnet-initialize $RBM.proto $RBM.init 2>$dir/log/nnet-initialize.$depth.log || exit 1 + # pre-train, + echo "pretraining '$RBM' (lrate $rbm_lrate, iters $rbm_iter)" + rbm-train-cd1-frmshuff --learn-rate=$rbm_lrate --l2-penalty=$rbm_l2penalty \ + --num-iters=$rbm_iter --verbose=$verbose \ + --feature-transform="nnet-concat $feature_transform $dir/$((depth-1)).dbn - |" \ + $rbm_extra_opts \ + $RBM.init "$feats_tr" $RBM 2>$dir/log/rbm.$depth.log || exit 1 + fi + + # Create DBN stack, + if [ "$depth" == "1" ]; then + echo "# converting RBM to $dir/$depth.dbn" + rbm-convert-to-nnet $RBM $dir/$depth.dbn + else + echo "# appending RBM to $dir/$depth.dbn" + nnet-concat $dir/$((depth-1)).dbn "rbm-convert-to-nnet $RBM - |" $dir/$depth.dbn + fi + +done + +echo +echo "# REPORT" +echo "# RBM pre-training progress (line per-layer)" +grep progress $dir/log/rbm.*.log +echo + +echo "Pre-training finished." + +sleep 3 +exit 0 diff --git a/tools/ASR_2ch_track/steps/nnet/train.sh b/tools/ASR_2ch_track/steps/nnet/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..656acf2a8159c4d3490e523d6b139668bb259880 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/train.sh @@ -0,0 +1,460 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 + +# Begin configuration. + +config= # config, also forwarded to 'train_scheduler.sh', + +# topology, initialization, +network_type=dnn # select type of neural network (dnn,cnn1d,cnn2d,lstm), +hid_layers=4 # nr. of hidden layers (before sotfmax or bottleneck), +hid_dim=1024 # number of neurons per layer, +bn_dim= # (optional) adds bottleneck and one more hidden layer to the NN, +dbn= # (optional) prepend layers to the initialized NN, + +proto_opts= # adds options to 'make_nnet_proto.py', +cnn_proto_opts= # adds options to 'make_cnn_proto.py', + +nnet_init= # (optional) use this pre-initialized NN, +nnet_proto= # (optional) use this NN prototype for initialization, + +# feature processing, +splice=5 # (default) splice features both-ways along time axis, +cmvn_opts= # (optional) adds 'apply-cmvn' to input feature pipeline, see opts, +delta_opts= # (optional) adds 'add-deltas' to input feature pipeline, see opts, +ivector= # (optional) adds 'append-vector-to-feats', it's rx-filename, + +feat_type=plain +traps_dct_basis=11 # (feat_type=traps) nr. of DCT basis, 11 is good with splice=10, +transf= # (feat_type=transf) import this linear tranform, +splice_after_transf=5 # (feat_type=transf) splice after the linear transform, + +feature_transform_proto= # (optional) use this prototype for 'feature_transform', +feature_transform= # (optional) directly use this 'feature_transform', +pytel_transform= # (BUT) use external python transform, + +# labels, +labels= # (optional) specify non-default training targets, + # (targets need to be in posterior format, see 'ali-to-post', 'feat-to-post'), +num_tgt= # (optional) specifiy number of NN outputs, to be used with 'labels=', + +# training scheduler, +learn_rate=0.008 # initial learning rate, +scheduler_opts= # options, passed to the training scheduler, +train_tool= # optionally change the training tool, +train_tool_opts= # options for the training tool, +frame_weights= # per-frame weights for gradient weighting, +utt_weights= # per-utterance weights (scalar for --frame-weights), + +# data processing, misc. 
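+# (The copy_feats option below re-saves the train/cv features into a node-local
+# tmpdir created with mktemp from copy_feats_tmproot, so the many SGD passes do not
+# re-read them over the network; a trap removes the tmpdir when the script exits.)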
+copy_feats=true # resave the train/cv features into /tmp (disabled by default), +copy_feats_tmproot=/tmp/kaldi.XXXX # sets tmproot for 'copy-feats', +seed=777 # seed value used for data-shuffling, nn-initialization, and training, +skip_cuda_check=false + +# End configuration. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh; +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 6 ]; then + echo "Usage: $0 " + echo " e.g.: $0 data/train data/cv data/lang exp/mono_ali_train exp/mono_ali_cv exp/mono_nnet" + echo "" + echo " Training data : , (for optimizing cross-entropy)" + echo " Held-out data : , (for learn-rate scheduling, model selection)" + echo " note.: , can point to same directory, or 2 separate directories." + echo "" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo "" + echo " --network-type (dnn,cnn1d,cnn2d,lstm) # type of neural network" + echo " --nnet-proto # use this NN prototype" + echo " --feature-transform # re-use this input feature transform" + echo "" + echo " --feat-type (plain|traps|transf) # type of input features" + echo " --cmvn-opts # add 'apply-cmvn' to input feature pipeline" + echo " --delta-opts # add 'add-deltas' to input feature pipeline" + echo " --splice # splice +/-N frames of input features" + echo + echo " --learn-rate # initial leaning-rate" + echo " --copy-feats # copy features to /tmp, lowers storage stress" + echo "" + exit 1; +fi + +data=$1 +data_cv=$2 +lang=$3 +alidir=$4 +alidir_cv=$5 +dir=$6 + +# Using alidir for supervision (default) +if [ -z "$labels" ]; then + silphonelist=`cat $lang/phones/silence.csl` || exit 1; + for f in $alidir/final.mdl $alidir/ali.1.gz $alidir_cv/ali.1.gz; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; + done +fi + +for f in $data/feats.scp $data_cv/feats.scp; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +echo +echo "# INFO" +echo "$0 : Training Neural Network" +printf "\t dir : $dir \n" +printf "\t Train-set : $data $(cat $data/feats.scp | wc -l), $alidir \n" +printf "\t CV-set : $data_cv $(cat $data_cv/feats.scp | wc -l) $alidir_cv \n" +echo + +mkdir -p $dir/{log,nnet} + +# skip when already trained, +if [ -e $dir/final.nnet ]; then + echo "SKIPPING TRAINING... ($0)" + echo "nnet already trained : $dir/final.nnet ($(readlink $dir/final.nnet))" + exit 0 +fi + +# check if CUDA compiled in and GPU is available, +if ! $skip_cuda_check; then cuda-gpu-available || exit 1; fi + +###### PREPARE ALIGNMENTS ###### +echo +echo "# PREPARING ALIGNMENTS" +if [ ! 
-z "$labels" ]; then + echo "Using targets '$labels' (by force)" + labels_tr="$labels" + labels_cv="$labels" +else + echo "Using PDF targets from dirs '$alidir' '$alidir_cv'" + # training targets in posterior format, + labels_tr="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark:- | ali-to-post ark:- ark:- |" + labels_cv="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir_cv/ali.*.gz |\" ark:- | ali-to-post ark:- ark:- |" + # training targets for analyze-counts, + labels_tr_pdf="ark:ali-to-pdf $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark:- |" + labels_tr_phn="ark:ali-to-phones --per-frame=true $alidir/final.mdl \"ark:gunzip -c $alidir/ali.*.gz |\" ark:- |" + + # get pdf-counts, used later for decoding/aligning, + analyze-counts --verbose=1 --binary=false \ + ${frame_weights:+ "--frame-weights=$frame_weights"} \ + ${utt_weights:+ "--utt-weights=$utt_weights"} \ + "$labels_tr_pdf" $dir/ali_train_pdf.counts 2>$dir/log/analyze_counts_pdf.log || exit 1 + # copy the old transition model, will be needed by decoder, + copy-transition-model --binary=false $alidir/final.mdl $dir/final.mdl || exit 1 + # copy the tree + cp $alidir/tree $dir/tree || exit 1 + + # make phone counts for analysis, + [ -e $lang/phones.txt ] && analyze-counts --verbose=1 --symbol-table=$lang/phones.txt \ + ${frame_weights:+ "--frame-weights=$frame_weights"} \ + ${utt_weights:+ "--utt-weights=$utt_weights"} \ + "$labels_tr_phn" /dev/null 2>$dir/log/analyze_counts_phones.log || exit 1 +fi + +###### PREPARE FEATURES ###### +echo +echo "# PREPARING FEATURES" +if [ "$copy_feats" == "true" ]; then + echo "# re-saving features to local disk," + tmpdir=$(mktemp -d $copy_feats_tmproot) + copy-feats scp:$data/feats.scp ark,scp:$tmpdir/train.ark,$dir/train_sorted.scp || exit 1 + copy-feats scp:$data_cv/feats.scp ark,scp:$tmpdir/cv.ark,$dir/cv.scp || exit 1 + trap "echo \"# Removing features tmpdir $tmpdir @ $(hostname)\"; ls $tmpdir; rm -r $tmpdir" EXIT +else + # or copy the list, + cp $data/feats.scp $dir/train_sorted.scp + cp $data_cv/feats.scp $dir/cv.scp +fi +# shuffle the list, +utils/shuffle_list.pl --srand ${seed:-777} <$dir/train_sorted.scp >$dir/train.scp + +# create a 10k utt subset for global cmvn estimates, +head -n 10000 $dir/train.scp > $dir/train.scp.10k + +# for debugging, add lists with non-local features, +utils/shuffle_list.pl --srand ${seed:-777} <$data/feats.scp >$dir/train.scp_non_local +cp $data_cv/feats.scp $dir/cv.scp_non_local + +###### OPTIONALLY IMPORT FEATURE SETTINGS (from pre-training) ###### +ivector_dim= # no ivectors, +if [ ! -z $feature_transform ]; then + D=$(dirname $feature_transform) + echo "# importing feature settings from dir '$D'" + [ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, + [ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) + [ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, + [ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) + [ -e $D/ivector_dim ] && ivector_dim=$(cat $D/ivector_dim) + echo "# cmvn_opts='$cmvn_opts' delta_opts='$delta_opts' ivector_dim='$ivector_dim'" +fi + +###### PREPARE FEATURE PIPELINE ###### +# read the features, +feats_tr="ark:copy-feats scp:$dir/train.scp ark:- |" +feats_cv="ark:copy-feats scp:$dir/cv.scp ark:- |" + +# optionally add per-speaker CMVN, +if [ ! -z "$cmvn_opts" ]; then + echo "# + 'apply-cmvn' with '$cmvn_opts' using statistics : $data/cmvn.scp, $data_cv/cmvn.scp" + [ ! 
-r $data/cmvn.scp ] && echo "Missing $data/cmvn.scp" && exit 1; + [ ! -r $data_cv/cmvn.scp ] && echo "Missing $data_cv/cmvn.scp" && exit 1; + feats_tr="$feats_tr apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp ark:- ark:- |" + feats_cv="$feats_cv apply-cmvn $cmvn_opts --utt2spk=ark:$data_cv/utt2spk scp:$data_cv/cmvn.scp ark:- ark:- |" +else + echo "# 'apply-cmvn' is not used," +fi + +# optionally add deltas, +if [ ! -z "$delta_opts" ]; then + feats_tr="$feats_tr add-deltas $delta_opts ark:- ark:- |" + feats_cv="$feats_cv add-deltas $delta_opts ark:- ark:- |" + echo "# + 'add-deltas' with '$delta_opts'" +fi + +# keep track of the config, +[ ! -z "$cmvn_opts" ] && echo "$cmvn_opts" >$dir/cmvn_opts +[ ! -z "$delta_opts" ] && echo "$delta_opts" >$dir/delta_opts +# + +# optionally append python feature transform, +if [ ! -z "$pytel_transform" ]; then + cp $pytel_transform $dir/pytel_transform.py + { echo; echo "### Comes from here: '$pytel_transform' ###"; } >> $dir/pytel_transform.py + pytel_transform=$dir/pytel_transform.py + feats_tr="$feats_tr /bin/env python $pytel_transform |" + feats_cv="$feats_cv /bin/env python $pytel_transform |" + echo "# + 'pytel-transform' from '$pytel_transform'" +fi + +# get feature dim, +feat_dim=$(feat-to-dim "$feats_tr" -) +echo "# feature dim : $feat_dim (input of 'feature_transform')" + +# Now we start building 'feature_transform' which goes right in front of a NN. +# The forwarding is computed on a GPU before the frame shuffling is applied. +# +# Same GPU is used both for 'feature_transform' and the NN training. +# So it has to be done by a single process (we are using exclusive mode). +# This also reduces the CPU-GPU uploads/downloads to minimum. + +if [ ! -z "$feature_transform" ]; then + echo "# importing 'feature_transform' from '$feature_transform'" + tmp=$dir/imported_$(basename $feature_transform) + cp $feature_transform $tmp; feature_transform=$tmp +else + # Make default proto with splice, + if [ ! 
-z $feature_transform_proto ]; then + echo "# importing custom 'feature_transform_proto' from '$feature_transform_proto'" + else + echo "# + default 'feature_transform_proto' with splice +/-$splice frames," + feature_transform_proto=$dir/splice${splice}.proto + echo " $feat_dim $(((2*splice+1)*feat_dim)) -$splice:$splice " >$feature_transform_proto + fi + + # Initialize 'feature-transform' from a prototype, + feature_transform=$dir/tr_$(basename $feature_transform_proto .proto).nnet + nnet-initialize --binary=false $feature_transform_proto $feature_transform + + # Choose further processing of spliced features + echo "# feature type : $feat_type" + case $feat_type in + plain) + ;; + traps) + #generate hamming+dct transform + feature_transform_old=$feature_transform + feature_transform=${feature_transform%.nnet}_hamm_dct${traps_dct_basis}.nnet + echo "# + Hamming DCT transform (t$((splice*2+1)),dct${traps_dct_basis}) into '$feature_transform'" + #prepare matrices with time-transposed hamming and dct + utils/nnet/gen_hamm_mat.py --fea-dim=$feat_dim --splice=$splice > $dir/hamm.mat + utils/nnet/gen_dct_mat.py --fea-dim=$feat_dim --splice=$splice --dct-basis=$traps_dct_basis > $dir/dct.mat + #put everything together + compose-transforms --binary=false $dir/dct.mat $dir/hamm.mat - | \ + transf-to-nnet - - | \ + nnet-concat --binary=false $feature_transform_old - $feature_transform || exit 1 + ;; + transf) + feature_transform_old=$feature_transform + feature_transform=${feature_transform%.nnet}_transf_splice${splice_after_transf}.nnet + [ -z $transf ] && transf=$alidir/final.mat + [ ! -f $transf ] && echo "Missing transf $transf" && exit 1 + feat_dim=$(feat-to-dim "$feats_tr nnet-forward 'nnet-concat $feature_transform_old \"transf-to-nnet $transf - |\" - |' ark:- ark:- |" -) + nnet-concat --binary=false $feature_transform_old \ + "transf-to-nnet $transf - |" \ + "utils/nnet/gen_splice.py --fea-dim=$feat_dim --splice=$splice_after_transf |" \ + $feature_transform || exit 1 + ;; + *) + echo "Unknown feature type $feat_type" + exit 1; + ;; + esac + + # keep track of feat_type, + echo $feat_type > $dir/feat_type + + # Renormalize the MLP input to zero mean and unit variance, + feature_transform_old=$feature_transform + feature_transform=${feature_transform%.nnet}_cmvn-g.nnet + echo "# compute normalization stats from 10k sentences" + nnet-forward --print-args=true --use-gpu=yes $feature_transform_old \ + "$(echo $feats_tr | sed 's|train.scp|train.scp.10k|')" ark:- |\ + compute-cmvn-stats ark:- $dir/cmvn-g.stats + echo "# + normalization of NN-input at '$feature_transform'" + nnet-concat --binary=false $feature_transform_old \ + "cmvn-to-nnet $dir/cmvn-g.stats -|" $feature_transform +fi + +if [ ! -z $ivector ]; then + echo + echo "# ADDING IVECTOR FEATURES" + # The iVectors are concatenated 'as they are' directly to the input of the neural network, + # To do this, we paste the features, and use where the 1st component + # contains the transform and 2nd network contains component. + + echo "# getting dims," + dim_raw=$(feat-to-dim "$feats_tr" -) + dim_ivec=$(copy-vector "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || echo true + echo "# dims, feats-raw $dim_raw, ivectors $dim_ivec," + + # Should we do something with 'feature_transform'? + if [ ! 
-z $ivector_dim ]; then + # No, the 'ivector_dim' comes from dir with 'feature_transform' with iVec forwarding, + echo "# assuming we got '$feature_transform' with ivector forwarding," + [ $ivector_dim != $dim_ivec ] && \ + echo -n "Error, i-vector dimensionality mismatch!" && \ + echo " (expected $ivector_dim, got $dim_ivec in $ivector)" && exit 1 + else + # Yes, adjust the transform to do ``iVec forwarding'', + feature_transform_old=$feature_transform + feature_transform=${feature_transform%.nnet}_ivec_copy.nnet + echo "# setting up ivector forwarding into '$feature_transform'," + dim_transformed=$(feat-to-dim "$feats_tr nnet-forward $feature_transform_old ark:- ark:- |" -) + nnet-initialize --print-args=false <(echo " $dim_ivec $dim_ivec 1:$dim_ivec ") $dir/tr_ivec_copy.nnet + nnet-initialize --print-args=false <(echo " $((dim_raw+dim_ivec)) $((dim_transformed+dim_ivec)) $feature_transform_old $dir/tr_ivec_copy.nnet ") $feature_transform + fi + echo $dim_ivec >$dir/ivector_dim # mark down the iVec dim! + + # pasting the iVecs to the feaures, + echo "# + ivector input '$ivector'" + feats_tr="$feats_tr append-vector-to-feats ark:- '$ivector' ark:- |" + feats_cv="$feats_cv append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +###### Show the final 'feature_transform' in the log, +echo +echo "### Showing the final 'feature_transform':" +nnet-info $feature_transform +echo "###" + +###### MAKE LINK TO THE FINAL feature_transform, so the other scripts will find it ###### +[ -f $dir/final.feature_transform ] && unlink $dir/final.feature_transform +(cd $dir; ln -s $(basename $feature_transform) final.feature_transform ) +feature_transform=$dir/final.feature_transform + + +###### INITIALIZE THE NNET ###### +echo +echo "# NN-INITIALIZATION" +if [ ! -z $nnet_init ]; then + echo "# using pre-initialized network '$nnet_init'" +elif [ ! -z $nnet_proto ]; then + echo "# initializing NN from prototype '$nnet_proto'"; + nnet_init=$dir/nnet.init; log=$dir/log/nnet_initialize.log + nnet-initialize --seed=$seed $nnet_proto $nnet_init +else + echo "# getting input/output dims :" + # input-dim, + get_dim_from=$feature_transform + [ ! 
-z "$dbn" ] && get_dim_from="nnet-concat $feature_transform '$dbn' -|" + num_fea=$(feat-to-dim "$feats_tr nnet-forward \"$get_dim_from\" ark:- ark:- |" -) + + # output-dim, + [ -z $num_tgt ] && \ + num_tgt=$(hmm-info --print-args=false $alidir/final.mdl | grep pdfs | awk '{ print $NF }') + + # make network prototype, + nnet_proto=$dir/nnet.proto + echo "# genrating network prototype $nnet_proto" + case "$network_type" in + dnn) + utils/nnet/make_nnet_proto.py $proto_opts \ + ${bn_dim:+ --bottleneck-dim=$bn_dim} \ + $num_fea $num_tgt $hid_layers $hid_dim >$nnet_proto || exit 1 + ;; + cnn1d) + delta_order=$([ -z $delta_opts ] && echo "0" || { echo $delta_opts | tr ' ' '\n' | grep "delta[-_]order" | sed 's:^.*=::'; }) + echo "Debug : $delta_opts, delta_order $delta_order" + utils/nnet/make_cnn_proto.py $cnn_proto_opts \ + --splice=$splice --delta-order=$delta_order --dir=$dir \ + $num_fea >$nnet_proto || exit 1 + cnn_fea=$(cat $nnet_proto | grep -v '^$' | tail -n1 | awk '{ print $5; }') + utils/nnet/make_nnet_proto.py $proto_opts \ + --no-proto-head --no-smaller-input-weights \ + ${bn_dim:+ --bottleneck-dim=$bn_dim} \ + "$cnn_fea" $num_tgt $hid_layers $hid_dim >>$nnet_proto || exit 1 + ;; + cnn2d) + delta_order=$([ -z $delta_opts ] && echo "0" || { echo $delta_opts | tr ' ' '\n' | grep "delta[-_]order" | sed 's:^.*=::'; }) + echo "Debug : $delta_opts, delta_order $delta_order" + utils/nnet/make_cnn2d_proto.py $cnn_proto_opts \ + --splice=$splice --delta-order=$delta_order --dir=$dir \ + $num_fea >$nnet_proto || exit 1 + cnn_fea=$(cat $nnet_proto | grep -v '^$' | tail -n1 | awk '{ print $5; }') + utils/nnet/make_nnet_proto.py $proto_opts \ + --no-proto-head --no-smaller-input-weights \ + ${bn_dim:+ --bottleneck-dim=$bn_dim} \ + "$cnn_fea" $num_tgt $hid_layers $hid_dim >>$nnet_proto || exit 1 + ;; + lstm) + utils/nnet/make_lstm_proto.py $proto_opts \ + $num_fea $num_tgt >$nnet_proto || exit 1 + ;; + blstm) + utils/nnet/make_blstm_proto.py $proto_opts \ + $num_fea $num_tgt >$nnet_proto || exit 1 + ;; + *) echo "Unknown : --network-type $network_type" && exit 1; + esac + + # initialize, + nnet_init=$dir/nnet.init + echo "# initializing the NN '$nnet_proto' -> '$nnet_init'" + nnet-initialize --seed=$seed $nnet_proto $nnet_init + + # optionally prepend dbn to the initialization, + if [ ! -z "$dbn" ]; then + nnet_init_old=$nnet_init; nnet_init=$dir/nnet_dbn_dnn.init + nnet-concat "$dbn" $nnet_init_old $nnet_init || exit 1 + fi +fi + + +###### TRAIN ###### +echo +echo "# RUNNING THE NN-TRAINING SCHEDULER" +steps/nnet/train_scheduler.sh \ + ${scheduler_opts} \ + ${train_tool:+ --train-tool "$train_tool"} \ + ${train_tool_opts:+ --train-tool-opts "$train_tool_opts"} \ + ${feature_transform:+ --feature-transform $feature_transform} \ + --learn-rate $learn_rate \ + ${frame_weights:+ --frame-weights "$frame_weights"} \ + ${utt_weights:+ --utt-weights "$utt_weights"} \ + ${config:+ --config $config} \ + $nnet_init "$feats_tr" "$feats_cv" "$labels_tr" "$labels_cv" $dir || exit 1 + +echo "$0 successfuly finished.. $dir" + +sleep 3 +exit 0 diff --git a/tools/ASR_2ch_track/steps/nnet/train_mmi.sh b/tools/ASR_2ch_track/steps/nnet/train_mmi.sh new file mode 100644 index 0000000000000000000000000000000000000000..d72ffd2cebfee87a95e086ca3ac1590b4999e483 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/train_mmi.sh @@ -0,0 +1,216 @@ +#!/bin/bash +# Copyright 2013-2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0. + +# Sequence-discriminative MMI/BMMI training of DNN. 
+# 4 iterations (by default) of Stochastic Gradient Descent with per-utterance updates. +# Boosting of paths with more errors (BMMI) gets activated by '--boost ' option. + +# For the numerator we have a fixed alignment rather than a lattice-- +# this actually follows from the way lattices are defined in Kaldi, which +# is to have a single path for each word (output-symbol) sequence. + + +# Begin configuration section. +cmd=run.pl +num_iters=4 +boost=0.0 #ie. disable boosting +acwt=0.1 +lmwt=1.0 +learn_rate=0.00001 +halving_factor=1.0 #ie. disable halving +drop_frames=true +verbose=1 +ivector= + +seed=777 # seed value used for training data shuffling +skip_cuda_check=false +# End configuration section + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# -ne 6 ]; then + echo "Usage: $0 " + echo " e.g.: $0 data/train_all data/lang exp/tri3b_dnn exp/tri3b_dnn_ali exp/tri3b_dnn_denlats exp/tri3b_dnn_mmi" + echo "Main options (for others, see top of script file)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --config # config containing options" + echo " --num-iters # number of iterations to run" + echo " --acwt # acoustic score scaling" + echo " --lmwt # linguistic score scaling" + echo " --learn-rate # learning rate for NN training" + echo " --drop-frames # drop frames num/den completely disagree" + echo " --boost # (e.g. 0.1), for boosted MMI. (default 0)" + + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +alidir=$4 +denlatdir=$5 +dir=$6 + +for f in $data/feats.scp $alidir/{tree,final.mdl,ali.1.gz} $denlatdir/lat.scp $srcdir/{final.nnet,final.feature_transform}; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +# check if CUDA compiled in, +if ! $skip_cuda_check; then cuda-compiled || { echo "Error, CUDA not compiled-in!"; exit 1; } fi + +mkdir -p $dir/log + +cp $alidir/{final.mdl,tree} $dir + +silphonelist=`cat $lang/phones/silence.csl` || exit 1; + + +#Get the files we will need +nnet=$srcdir/$(readlink $srcdir/final.nnet || echo final.nnet); +[ -z "$nnet" ] && echo "Error nnet '$nnet' does not exist!" && exit 1; +cp $nnet $dir/0.nnet; nnet=$dir/0.nnet + +class_frame_counts=$srcdir/ali_train_pdf.counts +[ -z "$class_frame_counts" ] && echo "Error class_frame_counts '$class_frame_counts' does not exist!" && exit 1; +cp $srcdir/ali_train_pdf.counts $dir + +feature_transform=$srcdir/final.feature_transform +if [ ! -f $feature_transform ]; then + echo "Missing feature_transform '$feature_transform'" + exit 1 +fi +cp $feature_transform $dir/final.feature_transform + +model=$dir/final.mdl +[ -z "$model" ] && echo "Error transition model '$model' does not exist!" && exit 1; + + + +# Shuffle the feature list to make the GD stochastic! +# By shuffling features, we have to use lattices with random access (indexed by .scp file). +cat $data/feats.scp | utils/shuffle_list.pl --srand $seed > $dir/train.scp + +### +### PREPARE FEATURE EXTRACTION PIPELINE +### +# import config, +cmvn_opts= +delta_opts= +D=$srcdir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark,o:copy-feats scp:$dir/train.scp ark:- |" +# apply-cmvn (optional), +[ ! 
-z "$cmvn_opts" -a ! -f $data/cmvn.scp ] && echo "$0: Missing $data/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +### Record the setup, +[ ! -z "$cmvn_opts" ] && echo $cmvn_opts >$dir/cmvn_opts +[ ! -z "$delta_opts" ] && echo $delta_opts >$dir/delta_opts +[ -e $D/pytel_transform.py ] && cp $D/pytel_transform.py $dir/pytel_transform.py +[ -e $D/ivector_dim ] && cp $D/ivector_dim $dir/ivector_dim +### + +### +### Prepare the alignments +### +# Assuming all alignments will fit into memory +ali="ark:gunzip -c $alidir/ali.*.gz |" + + +### +### Prepare the lattices +### +# The lattices are indexed by SCP (they are not gziped because of the random access in SGD) +lats="scp:$denlatdir/lat.scp" + +# Optionally apply boosting +if [[ "$boost" != "0.0" && "$boost" != 0 ]]; then + # make lattice scp with same order as the shuffled feature scp, + awk '{ if(r==0) { utt_id=$1; latH[$1]=$0; } # lat.scp + if(r==1) { if(latH[$1] != "") { print latH[$1]; } } # train.scp + }' r=0 $denlatdir/lat.scp r=1 $dir/train.scp > $dir/lat.scp + # get the list of alignments, + ali-to-phones $alidir/final.mdl "$ali" ark,t:- | awk '{print $1;}' > $dir/ali.lst + # remove from features sentences which have no lattice or no alignment, + # (so that the mmi training tool does not blow-up due to lattice caching), + mv $dir/train.scp $dir/train.scp_unfilt + awk '{ if(r==0) { latH[$1]="1"; } # lat.scp + if(r==1) { aliH[$1]="1"; } # ali.lst + if(r==2) { if((latH[$1] != "") && (aliH[$1] != "")) { print $0; } } # train.scp_ + }' r=0 $dir/lat.scp r=1 $dir/ali.lst r=2 $dir/train.scp_unfilt > $dir/train.scp + # create the lat pipeline, + lats="ark,o:lattice-boost-ali --b=$boost --silence-phones=$silphonelist $alidir/final.mdl scp:$dir/lat.scp '$ali' ark:- |" +fi +### +### +### + +# Run several iterations of the MMI/BMMI training +cur_mdl=$nnet +x=1 +while [ $x -le $num_iters ]; do + echo "Pass $x (learnrate $learn_rate)" + if [ -f $dir/$x.nnet ]; then + echo "Skipped, file $dir/$x.nnet exists" + else + $cmd $dir/log/mmi.$x.log \ + nnet-train-mmi-sequential \ + --feature-transform=$feature_transform \ + --class-frame-counts=$class_frame_counts \ + --acoustic-scale=$acwt \ + --lm-scale=$lmwt \ + --learn-rate=$learn_rate \ + --drop-frames=$drop_frames \ + --verbose=$verbose \ + $cur_mdl $alidir/final.mdl "$feats" "$lats" "$ali" $dir/$x.nnet || exit 1 + fi + cur_mdl=$dir/$x.nnet + + #report the progress + grep -B 2 MMI-objective $dir/log/mmi.$x.log | sed -e 's|^[^)]*)[^)]*)||' + + x=$((x+1)) + learn_rate=$(awk "BEGIN{print($learn_rate*$halving_factor)}") + +done + +(cd $dir; [ -e final.nnet ] && unlink final.nnet; ln -s $((x-1)).nnet final.nnet) + +echo "MMI/BMMI training finished" + +if [ -e 
$dir/prior_counts ]; then + echo "Priors are already re-estimated, skipping... ($dir/prior_counts)" +else + echo "Re-estimating priors by forwarding 10k utterances from training set." + . cmd.sh + nj=$(cat $alidir/num_jobs) + steps/nnet/make_priors.sh --cmd "$train_cmd" ${ivector:+--ivector "$ivector"} --nj $nj \ + $data $dir || exit 1 +fi + +exit 0 diff --git a/tools/ASR_2ch_track/steps/nnet/train_mpe.sh b/tools/ASR_2ch_track/steps/nnet/train_mpe.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c780da27c166fe4ebeeff923440695726342e48 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/train_mpe.sh @@ -0,0 +1,204 @@ +#!/bin/bash +# Copyright 2013-2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0. + +# Sequence-discriminative MPE/sMBR training of DNN. +# 4 iterations (by default) of Stochastic Gradient Descent with per-utterance updates. +# We select between MPE/sMBR optimization by '--do-smbr ' option. + +# For the numerator we have a fixed alignment rather than a lattice-- +# this actually follows from the way lattices are defined in Kaldi, which +# is to have a single path for each word (output-symbol) sequence. + + +# Begin configuration section. +cmd=run.pl +num_iters=4 +acwt=0.1 +lmwt=1.0 +learn_rate=0.00001 +momentum=0.0 +halving_factor=1.0 #ie. disable halving +do_smbr=true +exclude_silphones=true # exclude silphones from approximate accuracy computation +unkphonelist= # exclude unkphones from approximate accuracy computation (overrides exclude_silphones) +one_silence_class=true # true : reduce insertions in sMBR/MPE FW/BW, more stable training, +verbose=1 +ivector= + +seed=777 # seed value used for training data shuffling +skip_cuda_check=false +# End configuration section + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# -ne 6 ]; then + echo "Usage: $0 " + echo " e.g.: $0 data/train_all data/lang exp/tri3b_dnn exp/tri3b_dnn_ali exp/tri3b_dnn_denlats exp/tri3b_dnn_smbr" + echo "Main options (for others, see top of script file)" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --config # config containing options" + echo " --num-iters # number of iterations to run" + echo " --acwt # acoustic score scaling" + echo " --lmwt # linguistic score scaling" + echo " --learn-rate # learning rate for NN training" + echo " --do-smbr # do sMBR training, otherwise MPE" + + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +alidir=$4 +denlatdir=$5 +dir=$6 + +for f in $data/feats.scp $alidir/{tree,final.mdl,ali.1.gz} $denlatdir/lat.scp $srcdir/{final.nnet,final.feature_transform}; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +# check if CUDA compiled in, +if ! $skip_cuda_check; then cuda-compiled || { echo "Error, CUDA not compiled-in!"; exit 1; } fi + +mkdir -p $dir/log + +cp $alidir/{final.mdl,tree} $dir + +silphonelist=`cat $lang/phones/silence.csl` || exit 1; + +#Get the files we will need +nnet=$srcdir/$(readlink $srcdir/final.nnet || echo final.nnet); +[ -z "$nnet" ] && echo "Error nnet '$nnet' does not exist!" && exit 1; +cp $nnet $dir/0.nnet; nnet=$dir/0.nnet + +class_frame_counts=$srcdir/ali_train_pdf.counts +[ -z "$class_frame_counts" ] && echo "Error class_frame_counts '$class_frame_counts' does not exist!" && exit 1; +cp $srcdir/ali_train_pdf.counts $dir + +feature_transform=$srcdir/final.feature_transform +if [ ! 
-f $feature_transform ]; then + echo "Missing feature_transform '$feature_transform'" + exit 1 +fi +cp $feature_transform $dir/final.feature_transform + +model=$dir/final.mdl +[ -z "$model" ] && echo "Error transition model '$model' does not exist!" && exit 1; + +#enable/disable silphones from MPE training +mpe_silphones_arg= #empty +$exclude_silphones && mpe_silphones_arg="--silence-phones=$silphonelist" # all silphones +[ ! -z $unkphonelist ] && mpe_silphones_arg="--silence-phones=$unkphonelist" # unk only + + +# Shuffle the feature list to make the GD stochastic! +# By shuffling features, we have to use lattices with random access (indexed by .scp file). +cat $data/feats.scp | utils/shuffle_list.pl --srand $seed > $dir/train.scp + +### +### PREPARE FEATURE EXTRACTION PIPELINE +### +# import config, +cmvn_opts= +delta_opts= +D=$srcdir +[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility, +[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts) +[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility, +[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts) +# +# Create the feature stream, +feats="ark,o:copy-feats scp:$dir/train.scp ark:- |" +# apply-cmvn (optional), +[ ! -z "$cmvn_opts" -a ! -f $data/cmvn.scp ] && echo "$0: Missing $data/cmvn.scp" && exit 1 +[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp ark:- ark:- |" +# add-deltas (optional), +[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |" +# add-pytel transform (optional), +[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |" + +# add-ivector (optional), +if [ -e $D/ivector_dim ]; then + ivector_dim=$(cat $D/ivector_dim) + [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1 + ivector_dim2=$(copy-vector "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true + [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1 + # Append to feats + feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |" +fi + +### Record the setup, +[ ! -z "$cmvn_opts" ] && echo $cmvn_opts >$dir/cmvn_opts +[ ! 
-z "$delta_opts" ] && echo $delta_opts >$dir/delta_opts +[ -e $D/pytel_transform.py ] && cp {$D,$dir}/pytel_transform.py +[ -e $D/ivector_dim ] && cp {$D,$dir}/ivector_dim +### + +### +### Prepare the alignments +### +# Assuming all alignments will fit into memory +ali="ark:gunzip -c $alidir/ali.*.gz |" + + +### +### Prepare the lattices +### +# The lattices are indexed by SCP (they are not gziped because of the random access in SGD) +lats="scp:$denlatdir/lat.scp" + + +# Run several iterations of the MPE/sMBR training +cur_mdl=$nnet +x=1 +while [ $x -le $num_iters ]; do + echo "Pass $x (learnrate $learn_rate)" + if [ -f $dir/$x.nnet ]; then + echo "Skipped, file $dir/$x.nnet exists" + else + #train + $cmd $dir/log/mpe.$x.log \ + nnet-train-mpe-sequential \ + --feature-transform=$feature_transform \ + --class-frame-counts=$class_frame_counts \ + --acoustic-scale=$acwt \ + --lm-scale=$lmwt \ + --learn-rate=$learn_rate \ + --momentum=$momentum \ + --do-smbr=$do_smbr \ + --verbose=$verbose \ + --one-silence-class=$one_silence_class \ + $mpe_silphones_arg \ + $cur_mdl $alidir/final.mdl "$feats" "$lats" "$ali" $dir/$x.nnet || exit 1 + fi + cur_mdl=$dir/$x.nnet + + #report the progress + grep -B 2 "Overall average frame-accuracy" $dir/log/mpe.$x.log | sed -e 's|.*)||' + + x=$((x+1)) + learn_rate=$(awk "BEGIN{print($learn_rate*$halving_factor)}") + +done + +(cd $dir; [ -e final.nnet ] && unlink final.nnet; ln -s $((x-1)).nnet final.nnet) + + +echo "MPE/sMBR training finished" + +if [ -e $dir/prior_counts ]; then + echo "Priors are already re-estimated, skipping... ($dir/prior_counts)" +else + echo "Re-estimating priors by forwarding 10k utterances from training set." + . cmd.sh + nj=$(cat $alidir/num_jobs) + steps/nnet/make_priors.sh --cmd "$train_cmd" --nj $nj $data $dir || exit 1 +fi + +exit 0 diff --git a/tools/ASR_2ch_track/steps/nnet/train_scheduler.sh b/tools/ASR_2ch_track/steps/nnet/train_scheduler.sh new file mode 100644 index 0000000000000000000000000000000000000000..6a6649cfa016f7b72493e43bb7dc5222d75fd4ce --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet/train_scheduler.sh @@ -0,0 +1,185 @@ +#!/bin/bash + +# Copyright 2012-2015 Brno University of Technology (author: Karel Vesely) +# Apache 2.0 + +# Schedules epochs and controls learning rate during the neural network training + +# Begin configuration. + +# training options, +learn_rate=0.008 +momentum=0 +l1_penalty=0 +l2_penalty=0 + +# data processing, +train_tool="nnet-train-frmshuff" +train_tool_opts="--minibatch-size=256 --randomizer-size=32768 --randomizer-seed=777" +feature_transform= + +# learn rate scheduling, +max_iters=20 +min_iters=0 # keep training, disable weight rejection, start learn-rate halving as usual, +keep_lr_iters=0 # fix learning rate for N initial epochs, disable weight rejection, +start_halving_impr=0.01 +end_halving_impr=0.001 +halving_factor=0.5 + +# misc, +verbose=1 +frame_weights= +utt_weights= + +# End configuration. + +echo "$0 $@" # Print the command line for logging +[ -f path.sh ] && . ./path.sh; + +. parse_options.sh || exit 1; + +set -euo pipefail + +if [ $# != 6 ]; then + echo "Usage: $0 " + echo " e.g.: $0 0.nnet scp:train.scp scp:cv.scp ark:labels_tr.ark ark:labels_cv.ark exp/dnn1" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + exit 1; +fi + +mlp_init=$1 +feats_tr=$2 +feats_cv=$3 +labels_tr=$4 +labels_cv=$5 +dir=$6 + +[ ! -d $dir ] && mkdir $dir +[ ! -d $dir/log ] && mkdir $dir/log +[ ! 
-d $dir/nnet ] && mkdir $dir/nnet + +# Skip training +[ -e $dir/final.nnet ] && echo "'$dir/final.nnet' exists, skipping training" && exit 0 + +############################## +# start training + +# choose mlp to start with, +mlp_best=$mlp_init +mlp_base=${mlp_init##*/}; mlp_base=${mlp_base%.*} + +# optionally resume training from the best epoch, using saved learning-rate, +[ -e $dir/.mlp_best ] && mlp_best=$(cat $dir/.mlp_best) +[ -e $dir/.learn_rate ] && learn_rate=$(cat $dir/.learn_rate) + +# cross-validation on original network, +log=$dir/log/iter00.initial.log; hostname>$log +$train_tool --cross-validate=true --randomize=false --verbose=$verbose $train_tool_opts \ + ${feature_transform:+ --feature-transform=$feature_transform} \ + ${frame_weights:+ "--frame-weights=$frame_weights"} \ + ${utt_weights:+ "--utt-weights=$utt_weights"} \ + "$feats_cv" "$labels_cv" $mlp_best \ + 2>> $log + +loss=$(cat $dir/log/iter00.initial.log | grep "AvgLoss:" | tail -n 1 | awk '{ print $4; }') +loss_type=$(cat $dir/log/iter00.initial.log | grep "AvgLoss:" | tail -n 1 | awk '{ print $5; }') +echo "CROSSVAL PRERUN AVG.LOSS $(printf "%.4f" $loss) $loss_type" + +# resume lr-halving, +halving=0 +[ -e $dir/.halving ] && halving=$(cat $dir/.halving) + +# training, +for iter in $(seq -w $max_iters); do + echo -n "ITERATION $iter: " + mlp_next=$dir/nnet/${mlp_base}_iter${iter} + + # skip iteration (epoch) if already done, + [ -e $dir/.done_iter$iter ] && echo -n "skipping... " && ls $mlp_next* && continue + + # training, + log=$dir/log/iter${iter}.tr.log; hostname>$log + $train_tool --cross-validate=false --randomize=true --verbose=$verbose $train_tool_opts \ + --learn-rate=$learn_rate --momentum=$momentum \ + --l1-penalty=$l1_penalty --l2-penalty=$l2_penalty \ + ${feature_transform:+ --feature-transform=$feature_transform} \ + ${frame_weights:+ "--frame-weights=$frame_weights"} \ + ${utt_weights:+ "--utt-weights=$utt_weights"} \ + "$feats_tr" "$labels_tr" $mlp_best $mlp_next \ + 2>> $log || exit 1; + + tr_loss=$(cat $dir/log/iter${iter}.tr.log | grep "AvgLoss:" | tail -n 1 | awk '{ print $4; }') + echo -n "TRAIN AVG.LOSS $(printf "%.4f" $tr_loss), (lrate$(printf "%.6g" $learn_rate)), " + + # cross-validation, + log=$dir/log/iter${iter}.cv.log; hostname>$log + $train_tool --cross-validate=true --randomize=false --verbose=$verbose $train_tool_opts \ + ${feature_transform:+ --feature-transform=$feature_transform} \ + ${frame_weights:+ "--frame-weights=$frame_weights"} \ + ${utt_weights:+ "--utt-weights=$utt_weights"} \ + "$feats_cv" "$labels_cv" $mlp_next \ + 2>>$log || exit 1; + + loss_new=$(cat $dir/log/iter${iter}.cv.log | grep "AvgLoss:" | tail -n 1 | awk '{ print $4; }') + echo -n "CROSSVAL AVG.LOSS $(printf "%.4f" $loss_new), " + + # accept or reject? 
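+  # Worked example with the default thresholds (losses are made-up numbers): a CV loss
+  # going from 2.10 to 2.05 is about a 2.4% relative improvement, so the nnet is accepted
+  # and, being above start_halving_impr=0.01, learning-rate halving stays off; a drop from
+  # 2.10 to 2.095 (about 0.24% relative) is still accepted but switches halving on, and
+  # training only stops once the relative improvement falls below end_halving_impr=0.001.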
+ loss_prev=$loss + if [ 1 == $(bc <<< "$loss_new < $loss") -o $iter -le $keep_lr_iters -o $iter -le $min_iters ]; then + # accepting: the loss was better, or we had fixed learn-rate, or we had fixed epoch-number, + loss=$loss_new + mlp_best=$dir/nnet/${mlp_base}_iter${iter}_learnrate${learn_rate}_tr$(printf "%.4f" $tr_loss)_cv$(printf "%.4f" $loss_new) + [ $iter -le $min_iters ] && mlp_best=${mlp_best}_min-iters-$min_iters + [ $iter -le $keep_lr_iters ] && mlp_best=${mlp_best}_keep-lr-iters-$keep_lr_iters + mv $mlp_next $mlp_best + echo "nnet accepted ($(basename $mlp_best))" + echo $mlp_best > $dir/.mlp_best + else + # rejecting, + mlp_reject=$dir/nnet/${mlp_base}_iter${iter}_learnrate${learn_rate}_tr$(printf "%.4f" $tr_loss)_cv$(printf "%.4f" $loss_new)_rejected + mv $mlp_next $mlp_reject + echo "nnet rejected ($(basename $mlp_reject))" + fi + + # create .done file, the iteration (epoch) is completed, + touch $dir/.done_iter$iter + + # continue with original learn-rate, + [ $iter -le $keep_lr_iters ] && continue + + # stopping criterion, + rel_impr=$(bc <<< "scale=10; ($loss_prev-$loss)/$loss_prev") + if [ 1 == $halving -a 1 == $(bc <<< "$rel_impr < $end_halving_impr") ]; then + if [ $iter -le $min_iters ]; then + echo we were supposed to finish, but we continue as min_iters : $min_iters + continue + fi + echo finished, too small rel. improvement $rel_impr + break + fi + + # start learning-rate fade-out when improvement is low, + if [ 1 == $(bc <<< "$rel_impr < $start_halving_impr") ]; then + halving=1 + echo $halving >$dir/.halving + fi + + # reduce the learning-rate, + if [ 1 == $halving ]; then + learn_rate=$(awk "BEGIN{print($learn_rate*$halving_factor)}") + echo $learn_rate >$dir/.learn_rate + fi +done + +# select the best network, +if [ $mlp_best != $mlp_init ]; then + mlp_final=${mlp_best}_final_ + ( cd $dir/nnet; ln -s $(basename $mlp_best) $(basename $mlp_final); ) + ( cd $dir; ln -s nnet/$(basename $mlp_final) final.nnet; ) + echo "Succeeded training the Neural Network : $dir/final.nnet" +else + "Error training neural network..." + exit 1 +fi + diff --git a/tools/ASR_2ch_track/steps/nnet2/adjust_priors.sh b/tools/ASR_2ch_track/steps/nnet2/adjust_priors.sh new file mode 100644 index 0000000000000000000000000000000000000000..3cdcfb4ae73179fa5353a567e27901b8f0f1b817 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/adjust_priors.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. +# Copyright (c) 2015, Johns Hopkins University (Yenda Trmal ) +# License: Apache 2.0 + +# Begin configuration section. +cmd=run.pl +iter=final +# End configuration section + + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 2 ]; then + echo "Usage: $0 [opts] " + echo " e.g.: $0 exp/tri4_mpe_degs exp/tri4_mpe" + echo "" + echo "Performs priors adjustment either on the final iteration" + echo "or iteration of choice of the training. The adjusted model" + echo "filename will be suffixed by \"adj\", i.e. for the final" + echo "iteration final.mdl will become final.adj.mdl" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --iter # which iteration to be adjusted" + exit 1; +fi + +degs_dir=$1 +dir=$2 + +src_model=$dir/${iter}.mdl + +if [ ! -f $src_model ]; then + echo "$0: Expecting $src_model to exist." 
+ exit 1 +fi + +if [ ! -f $degs_dir/priors_egs.1.ark ]; then + echo "$0: Expecting $degs_dir/priors_egs.1.ark to exist." + exit 1 +fi + +num_archives_priors=`cat $degs_dir/info/num_archives_priors` || { + echo "Could not find $degs_dir/info/num_archives_priors."; + exit 1; +} + +$cmd JOB=1:$num_archives_priors $dir/log/get_post.${iter}.JOB.log \ + nnet-compute-from-egs "nnet-to-raw-nnet $src_model -|" \ + ark:$degs_dir/priors_egs.JOB.ark ark:- \| \ + matrix-sum-rows ark:- ark:- \| \ + vector-sum ark:- $dir/post.${iter}.JOB.vec || { + echo "Error in getting posteriors for adjusting priors." + echo "See $dir/log/get_post.${iter}.*.log"; + exit 1; + } + + +$cmd $dir/log/sum_post.${iter}.log \ + vector-sum $dir/post.${iter}.*.vec $dir/post.${iter}.vec || { + echo "Error in summing posteriors. See $dir/log/sum_post.${iter}.log"; + exit 1; + } + +rm -f $dir/post.${iter}.*.vec + +echo "Re-adjusting priors based on computed posteriors for iter $iter" +$cmd $dir/log/adjust_priors.${iter}.log \ + nnet-adjust-priors $src_model $dir/post.${iter}.vec $dir/${iter}.adj.mdl || { + echo "Error in adjusting priors. See $dir/log/adjust_priors.${iter}.log"; + exit 1; + } + +echo "Done adjusting priors (on $src_model)" diff --git a/tools/ASR_2ch_track/steps/nnet2/align.sh b/tools/ASR_2ch_track/steps/nnet2/align.sh new file mode 100644 index 0000000000000000000000000000000000000000..73b9f047f15f0e02774a63025d8de49fa58e2dad --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/align.sh @@ -0,0 +1,125 @@ +#!/bin/bash +# Copyright 2012 Brno University of Technology (Author: Karel Vesely) +# 2013 Johns Hopkins University (Author: Daniel Povey) +# Apache 2.0 + +# Computes training alignments using DNN + +# Begin configuration section. +nj=4 +cmd=run.pl +# Begin configuration. +scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" +beam=10 +retry_beam=40 +transform_dir= +iter=final +use_gpu=no +online_ivector_dir= +feat_type= # you can set this to force it to use delta features. +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Usage: $0 [--transform-dir ] " + echo "e.g.: $0 data/train data/lang exp/nnet4 exp/nnet4_ali" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + +oov=`cat $lang/oov.int` || exit 1; +mkdir -p $dir/log +echo $nj > $dir/num_jobs +sdata=$data/split$nj +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + + +extra_files= +[ ! -z "$online_ivector_dir" ] && \ + extra_files="$online_ivector_dir/ivector_online.scp $online_ivector_dir/ivector_period" +for f in $srcdir/tree $srcdir/${iter}.mdl $data/feats.scp $lang/L.fst $extra_files; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +cp $srcdir/{tree,${iter}.mdl} $dir || exit 1; + + +## Set up features. Note: these are different from the normal features +## because we have one rspecifier that has the features for the entire +## training set, not separate ones for each batch. 
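+## For example, if $srcdir comes from a typical LDA+MLLT (or LDA+MLLT+SAT) build,
+## $srcdir/final.mat exists, so feat_type is detected as "lda" below and the per-job
+## pipeline becomes apply-cmvn | splice-feats | transform-feats $dir/final.mat,
+## with fMLLR transforms appended only when --transform-dir is supplied.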
+if [ -z "$feat_type" ]; then + if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=raw; fi +fi +echo "$0: feature type is $feat_type" + +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null + +case $feat_type in + raw) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |" + ;; + lda) + splice_opts=`cat $srcdir/splice_opts 2>/dev/null` + cp $srcdir/splice_opts $dir 2>/dev/null + cp $srcdir/final.mat $dir || exit 1; + feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -s $transform_dir/num_jobs ] && \ + echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1; + nj_orig=$(cat $transform_dir/num_jobs) + + if [ $feat_type == "raw" ]; then trans=raw_trans; + else trans=trans; fi + if [ $feat_type == "lda" ] && ! cmp $transform_dir/final.mat $srcdir/final.mat; then + echo "$0: LDA transforms differ between $srcdir and $transform_dir" + exit 1; + fi + if [ ! -f $transform_dir/$trans.1 ]; then + echo "$0: expected $transform_dir/$trans.1 to exist (--transform-dir option)" + exit 1; + fi + if [ $nj -ne $nj_orig ]; then + # Copy the transforms into an archive with an index. + for n in $(seq $nj_orig); do cat $transform_dir/$trans.$n; done | \ + copy-feats ark:- ark,scp:$dir/$trans.ark,$dir/$trans.scp || exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/$trans.scp ark:- ark:- |" + else + # number of jobs matches with alignment dir. + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/$trans.JOB ark:- ark:- |" + fi +fi + +if [ ! -z "$online_ivector_dir" ]; then + ivector_period=$(cat $online_ivector_dir/ivector_period) || exit 1; + # note: subsample-feats, with negative n, will repeat each feature -n times. + feats="$feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |" +fi + +echo "$0: aligning data in $data using model from $srcdir, putting alignments in $dir" + +tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|"; + +$cmd JOB=1:$nj $dir/log/align.JOB.log \ + compile-train-graphs $dir/tree $srcdir/${iter}.mdl $lang/L.fst "$tra" ark:- \| \ + nnet-align-compiled $scale_opts --use-gpu=$use_gpu --beam=$beam --retry-beam=$retry_beam \ + $srcdir/${iter}.mdl ark:- "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; + +echo "$0: done aligning data." + diff --git a/tools/ASR_2ch_track/steps/nnet2/convert_lda_to_raw.sh b/tools/ASR_2ch_track/steps/nnet2/convert_lda_to_raw.sh new file mode 100644 index 0000000000000000000000000000000000000000..c5eead8e6d252650baaa287f28f97c40b2af6339 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/convert_lda_to_raw.sh @@ -0,0 +1,160 @@ +#!/bin/bash + +# Copyright 2014 Johns Hopkins University (Author: Daniel Povey). +# Apache 2.0. + +# This script converts nnet2 models which expect splice+LDA as the input, into +# models which expect raw features (e.g. MFCC) as the input. 
If you include +# the option --global-cmvn-stats , it will also remove CMVN from the model +# by including it as part of the neural net. + + +# Begin configuration section +cleanup=true +global_cmvn_stats= +cmd=run.pl +# learning_rate and max_change will only make a difference if we train this model, which is unlikely. +learning_rate=0.00001 # give it a tiny learning rate by default; the user + # should probably tune this or set it if they want to train. +max_change=5.0 +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + + +if [ $# -ne 2 ]; then + echo "Usage: $0 [options] " + echo "e.g.: $0 --global-cmvn-stats global_cmvn.mat exp/dnn4b_nnet2 exp/dnn4b_nnet2_raw" + echo "Options include" + echo " --global-cmvn-stats # Filename of globally summed CMVN stats, if" + echo " # you want to push the CMVN inside the nnet" + echo " # (it won't any longer be speaker specific)" + exit 1; +fi + +src=$1 +dir=$2 + +mkdir -p $dir/log || exit 1; + +for f in $src/final.mdl $src/final.mat $src/splice_opts $src/cmvn_opts; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1 +done + + +mkdir -p $dir/log + +# nnet.config will be a config for a few trivial neural-network layers +# that come before the main network, and which do things like +echo -n >$dir/nnet.config + +if [ ! -z "$global_cmvn_stats" ]; then + [ ! -f $src/cmvn_opts ] && \ + echo "$0: expected $src/cmvn_opts to exist" && exit 1; + norm_vars=false + if grep 'norm-means=false' $src/cmvn_opts; then + echo "$0: if --norm-means=false, don't supply the --global-cmvn-stats option to this script" + exit 1; + elif grep 'norm-vars=true' $src/cmvn_opts; then + echo "$0: warning: this script has not been tested with --norm-vars=true in CMVN options" + norm_vars=true + fi + + + # First add to the config, layers that will do the same transform as cepstral + # mean and variance normalization using these global stats. We do this as + # first an added offset (FixedBiasComonent), then, only if norm-vars=true + # in the CMVN options, a scaling (FixedScaleComponent). + + $cmd $dir/log/copy_feats.log \ + copy-feats --binary=false "$global_cmvn_stats" $dir/global_cmvn_stats.txt || exit 1; + cat $dir/global_cmvn_stats.txt | \ + perl -e ' $line0 = ; $line0 == "[\n" || die "expected first line to be [, got $line0"; + $line1 = ; $line2 = ; @L1 = split(" ",$line1); @L2 = split(" ",$line2); + ($bias_out, $scale_out) = @ARGV; + open(B, ">$bias_out") || die "opening bias-out file $bias_out"; + open(S, ">$scale_out") || die "opening scale-out file $scale_out"; + pop @L2; pop @L2; # remove the " 0 ]" + $count = pop @L1; # last element of line 1 is total count. 
+ ($count > 0.0) || die "Bad count $count"; + $dim = @L1; + $dim == scalar @L2 || die "Bad dimension of second line of CMVN stats @L2"; + print B "[ "; print S "[ "; + for ($x = 0; $x < $dim; $x++) { + $mean = $L1[$x] / $count; $var = ($L2[$x] / $count) - ($mean * $mean); + $bias = -$mean; print B "$bias "; + $scale = 1.0 / sqrt($var); $scale > 0 || die "Bad scale $scale"; print S "$scale "; + } + print B "]\n"; print S "]\n"; ' $dir/bias.txt $dir/scales.txt || exit 1; + echo "FixedBiasComponent bias=$dir/bias.txt" >> $dir/nnet.config + if $norm_vars; then + echo "FixedScaleComponent scales=$dir/scales.txt" >> $dir/nnet.config + fi + echo "--norm-means=false --norm-vars=false" >$dir/cmvn_opts || exit 1; +else + cp $src/cmvn_opts $dir/ || exit 1; +fi + +# We need the dimension of the raw features. We work it out from the LDA matrix dimension. +# get a word-count of the second row of the LDA matrix... this will be either the +# spliced dim or the spliced dim plus one. +spliced_dim=$(copy-matrix --binary=false $src/final.mat - | head -n 2 | tail -n 1 | wc -w) || exit 1; + + +splice_opts=$(cat $src/splice_opts) || exit 1; +# Work out how many frames are spliced together by splicing a matrix with one element +# and testing the resulting number of columns. +num_splice=$(echo "foo [ 1.0 ]" | splice-feats $splice_opts ark:- ark:- | feat-to-dim ark:- -) + +# We'll separately need the left-context and right-context. +# defaults in the splice-feats code are 4 and 4. +left_context=4 +right_context=4 +for opt in $(cat $src/splice_opts); do + if echo $opt | grep left-context >/dev/null; then + left_context=$(echo $opt | cut -d= -f2) || exit 1; + fi + if echo $opt | grep right-context >/dev/null; then + right_context=$(echo $opt | cut -d= -f2) || exit 1; + fi +done +if ! [ $num_splice -eq $[$left_context+1+$right_context] ]; then + echo "$0: num-splice worked out from the binaries differs from our interpreation of the options:" + echo "$num_splice != $left_context + 1 + $right_context" + exit 1; +fi + +modulo=$[$spliced_dim%$num_splice] +if [ $modulo -eq 1 ]; then + # matrix includes offset term. + spliced_dim=$[$spliced_dim-1]; + cp $src/final.mat $dir/ +elif [ $modulo -eq 0 ]; then + # We need to add a zero bias term to the matrix, because the AffineComponent + # expects that. + copy-matrix --binary=false $src/final.mat - | \ + awk '{if ($NF == "]") { $NF = "0"; print $0, "]"; } else { if (NF > 1) { print $0, "0"; } else {print;}}}' >$dir/final.mat +else + echo "$0: Cannot make sense of spliced dimension $spliced_dim and num-splice=$num_splice" + exit 1; +fi +feat_dim=$[$spliced_dim/$num_splice]; +echo "SpliceComponent input-dim=$feat_dim left-context=$left_context right-context=$right_context" >>$dir/nnet.config + +# use AffineComponentPreconditioned as it's easier to configure than AffineComponentPreconditionedOnline. 
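+# For reference, with 13-dim MFCCs and the default +-4 frames of splicing (illustrative
+# numbers, not values read from any model), the finished nnet.config ends up looking like:
+#   FixedBiasComponent bias=$dir/bias.txt          (only if --global-cmvn-stats was given)
+#   FixedScaleComponent scales=$dir/scales.txt     (only if norm-vars=true in cmvn_opts)
+#   SpliceComponent input-dim=13 left-context=4 right-context=4
+#   AffineComponentPreconditioned alpha=4.0 learning-rate=0.00001 max-change=5.0 matrix=$dir/final.mat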
+echo "AffineComponentPreconditioned alpha=4.0 learning-rate=$learning_rate max-change=$max_change matrix=$dir/final.mat" >>$dir/nnet.config + + +$cmd $dir/log/nnet_init.log \ + nnet-init $dir/nnet.config $dir/lda.nnet || exit 1; + +$cmd $dir/log/nnet_insert.log \ + nnet-insert --insert-at=0 --randomize-next-component=false \ + $src/final.mdl $dir/lda.nnet $dir/final.mdl || exit 1; + +if $cleanup; then + rm $dir/final.mat $dir/lda.nnet +fi diff --git a/tools/ASR_2ch_track/steps/nnet2/convert_nnet1_to_nnet2.sh b/tools/ASR_2ch_track/steps/nnet2/convert_nnet1_to_nnet2.sh new file mode 100644 index 0000000000000000000000000000000000000000..abafb22ad44d200da4c34867c46c77dfd163f8c8 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/convert_nnet1_to_nnet2.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +# Copyright 2014 Johns Hopkins University (Author: Daniel Povey). +# Apache 2.0. + +# This script converts nnet1 into nnet2 models. +# Note, it doesn't support all possible types of nnet1 models. + +# Begin configuration section +cleanup=true +cmd=run.pl +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + + +if [ $# -ne 2 ]; then + echo "Usage: $0 [options] " + echo "e.g.: $0 exp/dnn4b_pretrain-dbn_dnn exp/dnn4b_nnet2" + exit 1; +fi + +src=$1 +dir=$2 + +mkdir -p $dir/log || exit 1; + +for f in $src/final.mdl $src/final.feature_transform $src/ali_train_pdf.counts; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1 +done + + +$cmd $dir/log/convert_feature_transform.log \ + nnet1-to-raw-nnet $src/final.feature_transform $dir/0.raw || exit 1; + + +if [ -f $src/final.nnet ]; then + echo "$0: $src/final.nnet exists, using it as input." + $cmd $dir/log/convert_model.log \ + nnet1-to-raw-nnet $src/final.nnet $dir/1.raw || exit 1; +elif [ -f $src/final.dbn ]; then + echo "$0: $src/final.dbn exists, using it as input." + num_leaves=$(am-info $src/final.mdl | grep -w pdfs | awk '{print $NF}') || exit 1; + dbn_output_dim=$(nnet-info exp/dnn4b_pretrain-dbn/6.dbn | grep component | tail -n 1 | sed s:,::g | awk '{print $NF}') || exit 1; + [ -z "$dbn_output_dim" ] && exit 1; + + cat > $dir/final_layer.conf < " + echo "where will typically be a normal neural net from another corpus," + echo "and will usually be a single-layer neural net trained on top of it by" + echo "dumping the activations (e.g. using steps/online/nnet2/dump_nnet_activations.sh, I" + echo "think no such script exists for non-online), and then training using" + echo "steps/nnet2/retrain_fast.sh." + echo "e.g.: $0 ../../swbd/s5b/exp/nnet2_online/nnet_gpu_online exp/nnet2_swbd_online/nnet_gpu_online exp/nnet2_swbd_online/nnet_gpu_online_combined" +fi + + +src1=$1 +src2=$2 +dir=$3 + +for f in $src1/final.mdl $src2/tree $src2/final.mdl; do + [ ! 
-f $f ] && echo "$0: expected file $f to exist" && exit 1; +done + + +mkdir -p $dir/log + +info=$dir/nnet_info +nnet-am-info $src1/final.mdl >$info +nc=$(grep num-components $info | awk '{print $2}'); +if grep SumGroupComponent $info >/dev/null; then + nc_truncate=$[$nc-3] # we did mix-up: remove AffineComponent, + # SumGroupComponent, SoftmaxComponent +else + # we didn't mix-up: + nc_truncate=$[$nc-2] # remove AffineComponent, SoftmaxComponent +fi + +$cmd $dir/log/get_raw_nnet.log \ + nnet-to-raw-nnet --truncate=$nc_truncate $src1/final.mdl $dir/first_nnet.raw || exit 1; + +$cmd $dir/log/append_nnet.log \ + nnet-insert --randomize-next-component=false --insert-at=0 \ + $src2/final.mdl $dir/first_nnet.raw $dir/final.mdl || exit 1; + +$cleanup && rm $dir/first_nnet.raw + +# Copy the tree etc., + +cp $src2/tree $dir || exit 1; + +# Copy feature-related things from src1 where we built the initial model. +# Note: if you've done anything like mess with the feature-extraction configs, +# or changed the feature type, you have to keep track of that yourself. +for f in final.mat cmvn_opts splice_opts; do + if [ -f $src1/$f ]; then + cp $src1/$f $dir || exit 1; + fi +done + +echo "$0: created appended model in $dir" diff --git a/tools/ASR_2ch_track/steps/nnet2/decode.sh b/tools/ASR_2ch_track/steps/nnet2/decode.sh new file mode 100644 index 0000000000000000000000000000000000000000..e4e726522f0c264281e634d6c7585d0dbf568857 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/decode.sh @@ -0,0 +1,159 @@ +#!/bin/bash + +# Copyright 2012-2013 Johns Hopkins University (Author: Daniel Povey). +# Apache 2.0. + +# This script does decoding with a neural-net. If the neural net was built on +# top of fMLLR transforms from a conventional system, you should provide the +# --transform-dir option. + +# Begin configuration section. +stage=1 +transform_dir= # dir to find fMLLR transforms. +nj=4 # number of decoding jobs. If --transform-dir set, must match that number! +acwt=0.1 # Just a default value, used for adaptation and beam-pruning.. +cmd=run.pl +beam=15.0 +max_active=7000 +min_active=200 +ivector_scale=1.0 +lattice_beam=8.0 # Beam we use in lattice generation. +iter=final +num_threads=1 # if >1, will use gmm-latgen-faster-parallel +parallel_opts= # ignored now. +scoring_opts= +skip_scoring=false +feat_type= +online_ivector_dir= +minimize=false +# End configuration section. + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# -ne 3 ]; then + echo "Usage: $0 [options] " + echo " e.g.: $0 --transform-dir exp/tri3b/decode_dev93_tgpr \\" + echo " exp/tri3b/graph_tgpr data/test_dev93 exp/tri4a_nnet/decode_dev93_tgpr" + echo "main options (for others, see top of script file)" + echo " --transform-dir # directory of previous decoding" + echo " # where we can find transforms for SAT systems." + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd # Command to run in parallel with" + echo " --beam # Decoding beam; default 15.0" + echo " --iter # Iteration of model to decode; default is final." + echo " --scoring-opts # options to local/score.sh" + echo " --num-threads # number of threads to use, default 1." + echo " --parallel-opts # e.g. '--num-threads 4' if you supply --num-threads 4" + exit 1; +fi + +graphdir=$1 +data=$2 +dir=$3 +srcdir=`dirname $dir`; # Assume model directory one level up from decoding directory. +model=$srcdir/$iter.mdl + + +[ ! 
-z "$online_ivector_dir" ] && \ + extra_files="$online_ivector_dir/ivector_online.scp $online_ivector_dir/ivector_period" + +for f in $graphdir/HCLG.fst $data/feats.scp $model $extra_files; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +sdata=$data/split$nj; +cmvn_opts=`cat $srcdir/cmvn_opts` || exit 1; +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +mkdir -p $dir/log +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + + +## Set up features. +if [ -z "$feat_type" ]; then + if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=raw; fi + echo "$0: feature type is $feat_type" +fi + +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` + +case $feat_type in + raw) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |" + if [ -f $srcdir/delta_order ]; then + delta_order=`cat $srcdir/delta_order 2>/dev/null` + feats="$feats add-deltas --delta-order=$delta_order ark:- ark:- |" + fi + ;; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -s $transform_dir/num_jobs ] && \ + echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1; + nj_orig=$(cat $transform_dir/num_jobs) + + if [ $feat_type == "raw" ]; then trans=raw_trans; + else trans=trans; fi + if [ $feat_type == "lda" ] && \ + ! cmp $transform_dir/../final.mat $srcdir/final.mat && \ + ! cmp $transform_dir/final.mat $srcdir/final.mat; then + echo "$0: LDA transforms differ between $srcdir and $transform_dir" + exit 1; + fi + if [ ! -f $transform_dir/$trans.1 ]; then + echo "$0: expected $transform_dir/$trans.1 to exist (--transform-dir option)" + exit 1; + fi + if [ $nj -ne $nj_orig ]; then + # Copy the transforms into an archive with an index. + for n in $(seq $nj_orig); do cat $transform_dir/$trans.$n; done | \ + copy-feats ark:- ark,scp:$dir/$trans.ark,$dir/$trans.scp || exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/$trans.scp ark:- ark:- |" + else + # number of jobs matches with alignment dir. + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/$trans.JOB ark:- ark:- |" + fi +elif grep 'transform-feats --utt2spk' $srcdir/log/train.1.log >&/dev/null; then + echo "$0: **WARNING**: you seem to be using a neural net system trained with transforms," + echo " but you are not providing the --transform-dir option in test time." +fi +## + +if [ ! -z "$online_ivector_dir" ]; then + ivector_period=$(cat $online_ivector_dir/ivector_period) || exit 1; + # note: subsample-feats, with negative n, will repeat each feature -n times. 
+ feats="$feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- | copy-matrix --scale=$ivector_scale ark:- ark:-|' ark:- |" +fi + +if [ $stage -le 1 ]; then + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \ + nnet-latgen-faster$thread_string \ + --minimize=$minimize --max-active=$max_active --min-active=$min_active --beam=$beam \ + --lattice-beam=$lattice_beam --acoustic-scale=$acwt --allow-partial=true \ + --word-symbol-table=$graphdir/words.txt "$model" \ + $graphdir/HCLG.fst "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1; +fi + +# The output of this script is the files "lat.*.gz"-- we'll rescore this at +# different acoustic scales to get the final output. + + +if [ $stage -le 2 ]; then + if ! $skip_scoring ; then + [ ! -x local/score.sh ] && \ + echo "Not scoring because local/score.sh does not exist or not executable." && exit 1; + echo "score best paths" + local/score.sh --iter $iter $scoring_opts --cmd "$cmd" $data $graphdir $dir + echo "score confidence and timing with sclite" + fi +fi +echo "Decoding done." +exit 0; diff --git a/tools/ASR_2ch_track/steps/nnet2/dump_bottleneck_features.sh b/tools/ASR_2ch_track/steps/nnet2/dump_bottleneck_features.sh new file mode 100644 index 0000000000000000000000000000000000000000..0746a3188a16d2f5eb6ffcb778bc63c1ade55153 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/dump_bottleneck_features.sh @@ -0,0 +1,116 @@ +#!/bin/bash + +# 2014 Pegah Ghahremani +# Apache 2.0 + + +# Begin configuration section. +feat_type= +stage=1 +nj=4 +cmd=run.pl + +# Begin configuration. +transform_dir= + +# End configuration options. + +echo "$0 $@" # Print the command line for logging + +[ -f path.sh ] && . ./path.sh # source the path. +. parse_options.sh || exit 1; + +if [ $# != 5 ]; then + echo "usage: steps/nnet2/dump_bottleneck_features.sh " + echo "e.g.: steps/nnet2/dump_bottleneck_features.sh data/train data/train_bnf exp_bnf/bnf_net exp/tri5_ali mfcc exp_bnf/dump_bnf" + echo "main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + exit 1; +fi + +data=$1 +bnf_data=$2 +nnetdir=$3 +archivedir=$4 +dir=$5 + +# Assume that final.nnet is in nnetdir +bnf_nnet=$nnetdir/final.raw +if [ ! -f $bnf_nnet ] ; then + echo "No such file $bnf_nnet"; + exit 1; +fi + +## Set up input features of nnet +if [ -z "$feat_type" ]; then + if [ -f $nnetdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi +fi +echo "$0: feature type is $feat_type" + +if [ "$feat_type" == "lda" ] && [ ! 
-f $nnetdir/final.mat ]; then + echo "$0: no such file $nnetdir/final.mat" + exit 1 +fi + +name=`basename $data` +sdata=$data/split$nj + +mkdir -p $dir/log +mkdir -p $bnf_data +echo $nj > $nnetdir/num_jobs +splice_opts=`cat $nnetdir/splice_opts 2>/dev/null` +delta_opts=`cat $nnetdir/delta_opts 2>/dev/null` +[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; + +case $feat_type in + raw) feats="ark,s,cs:apply-cmvn --norm-vars=false --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |";; + delta) feats="ark,s,cs:apply-cmvn --norm-vars=false --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; + lda) feats="ark,s,cs:apply-cmvn --norm-vars=false --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $nnetdir/final.mat ark:- ark:- |" + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then + echo "Using transforms from $transform_dir" + [ ! -f $transform_dir/trans.1 ] && echo "No such file $transform_dir/trans.1" && exit 1; + transform_nj=`cat $transform_dir/num_jobs` || exit 1; + if [ "$nj" != "$transform_nj" ]; then + for n in $(seq $transform_nj); do cat $transform_dir/trans.$n; done >$dir/trans.ark + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.ark ark:- ark:- |" + else + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |" + fi +fi + + +if [ $stage -le 1 ]; then + echo "Making BNF scp and ark." + $cmd JOB=1:$nj $dir/log/make_bnf_$name.JOB.log \ + nnet-compute $bnf_nnet "$feats" ark:- \| \ + copy-feats --compress=true ark:- ark,scp:$archivedir/raw_bnfeat_$name.JOB.ark,$archivedir/raw_bnfeat_$name.JOB.scp || exit 1; +fi + +rm $dir/trans.ark 2>/dev/null + +N0=$(cat $data/feats.scp | wc -l) +N1=$(cat $archivedir/raw_bnfeat_$name.*.scp | wc -l) +if [[ "$N0" != "$N1" ]]; then + echo "Error happens when generating BNF for $name (Original:$N0 BNF:$N1)" + exit 1; +fi + +# Concatenate feats.scp into bnf_data +for n in $(seq $nj); do cat $archivedir/raw_bnfeat_$name.$n.scp; done > $bnf_data/feats.scp + +for f in segments spk2utt text utt2spk wav.scp char.stm glm kws reco2file_and_channel stm; do + [ -e $data/$f ] && cp -r $data/$f $bnf_data/$f +done + +echo "$0: computing CMVN stats." +steps/compute_cmvn_stats.sh $bnf_data $dir $archivedir + +echo "$0: done making BNF feats.scp." + +exit 0; diff --git a/tools/ASR_2ch_track/steps/nnet2/get_egs.sh b/tools/ASR_2ch_track/steps/nnet2/get_egs.sh new file mode 100644 index 0000000000000000000000000000000000000000..bb9ce09822e91a568f06ac43d95afb0b1aa55617 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/get_egs.sh @@ -0,0 +1,298 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. +# This script, which will generally be called from other neural-net training +# scripts, extracts the training examples used to train the neural net (and also +# the validation examples used for diagnostics), and puts them in separate archives. + +# Begin configuration section. +cmd=run.pl +feat_type= +num_utts_subset=300 # number of utterances in validation and training + # subsets used for shrinkage and diagnostics +num_valid_frames_combine=0 # #valid frames for combination weights at the very end. 
+num_train_frames_combine=10000 # # train frames for the above. +num_frames_diagnostic=4000 # number of frames for "compute_prob" jobs +samples_per_iter=200000 # each iteration of training, see this many samples + # per job. This is just a guideline; it will pick a number + # that divides the number of samples in the entire data. +transform_dir= # If supplied, overrides alidir +num_jobs_nnet=16 # Number of neural net jobs to run in parallel +stage=0 +io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time. +splice_width=4 # meaning +- 4 frames on each side for second LDA +left_context= +right_context= +random_copy=false +online_ivector_dir= +ivector_randomize_prob=0.0 # if >0.0, randomizes iVectors during training with + # this prob per iVector. +cmvn_opts= # can be used for specifying CMVN options, if feature type is not lda. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + + +if [ $# != 4 ]; then + echo "Usage: steps/nnet2/get_egs.sh [opts] " + echo " e.g.: steps/nnet2/get_egs.sh data/train data/lang exp/tri3_ali exp/tri4_nnet" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl;utils/queue.pl ) # how to run jobs." + echo " --num-jobs-nnet # Number of parallel jobs to use for main neural net" + echo " # training (will affect results as well as speed; try 8, 16)" + echo " # Note: if you increase this, you may want to also increase" + echo " # the learning rate." + echo " --samples-per-iter <#samples;400000> # Number of samples of data to process per iteration, per" + echo " # process." + echo " --feat-type # (by default it tries to guess). The feature type you want" + echo " # to use as input to the neural net." + echo " --splice-width # Number of frames on each side to append for feature input" + echo " --left-context # Number of frames on left side to append for feature input, overrides splice-width" + echo " --right-context # Number of frames on right side to append for feature input, overrides splice-width" + echo " --num-frames-diagnostic <#frames;4000> # Number of frames used in computing (train,valid) diagnostics" + echo " --num-valid-frames-combine <#frames;10000> # Number of frames used in getting combination weights at the" + echo " # very end." + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + + exit 1; +fi + +data=$1 +lang=$2 # kept for historical reasons, but never used. +alidir=$3 +dir=$4 + +[ -z "$left_context" ] && left_context=$splice_width +[ -z "$right_context" ] && right_context=$splice_width + + +# Check some files. +[ ! -z "$online_ivector_dir" ] && \ + extra_files="$online_ivector_dir/ivector_online.scp $online_ivector_dir/ivector_period" + +for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree $extra_files; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir... + +sdata=$data/split$nj +utils/split_data.sh $data $nj + +mkdir -p $dir/log +cp $alidir/tree $dir + + +# Get list of validation utterances. +awk '{print $1}' $data/utt2spk | utils/shuffle_list.pl | head -$num_utts_subset \ + > $dir/valid_uttlist || exit 1; + +if [ -f $data/utt2uniq ]; then + echo "File $data/utt2uniq exists, so augmenting valid_uttlist to" + echo "include all perturbed versions of the same 'real' utterances." 
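+  # e.g. if utt2uniq maps sp0.9-spk1_utt01 and sp1.1-spk1_utt01 to spk1_utt01
+  # (hypothetical speed-perturbed names), putting spk1_utt01 on the validation list
+  # also pulls in both perturbed copies, so no version of a held-out utterance
+  # leaks into the training examples.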
+ mv $dir/valid_uttlist $dir/valid_uttlist.tmp + utils/utt2spk_to_spk2utt.pl $data/utt2uniq > $dir/uniq2utt + cat $dir/valid_uttlist.tmp | utils/apply_map.pl $data/utt2uniq | \ + sort | uniq | utils/apply_map.pl $dir/uniq2utt | \ + awk '{for(n=1;n<=NF;n++) print $n;}' | sort > $dir/valid_uttlist + rm $dir/uniq2utt $dir/valid_uttlist.tmp +fi + +awk '{print $1}' $data/utt2spk | utils/filter_scp.pl --exclude $dir/valid_uttlist | \ + utils/shuffle_list.pl | head -$num_utts_subset > $dir/train_subset_uttlist || exit 1; + +[ -z "$transform_dir" ] && transform_dir=$alidir + +## Set up features. +if [ -z $feat_type ]; then + if [ -f $alidir/final.mat ] && [ ! -f $transform_dir/raw_trans.1 ]; then feat_type=lda; else feat_type=raw; fi +fi +echo "$0: feature type is $feat_type" + +case $feat_type in + raw) feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- |" + valid_feats="ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |" + train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |" + echo $cmvn_opts >$dir/cmvn_opts + ;; + lda) + splice_opts=`cat $alidir/splice_opts 2>/dev/null` + cp $alidir/{splice_opts,cmvn_opts,final.mat} $dir || exit 1; + [ ! -z "$cmvn_opts" ] && \ + echo "You cannot supply --cmvn-opts option if feature type is LDA." && exit 1; + cmvn_opts=$(cat $dir/cmvn_opts) + feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + valid_feats="ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac + +if [ -f $transform_dir/trans.1 ] && [ $feat_type != "raw" ]; then + echo "$0: using transforms from $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" + valid_feats="$valid_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/trans.*|' ark:- ark:- |" + train_subset_feats="$train_subset_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/trans.*|' ark:- ark:- |" +fi +if [ -f $transform_dir/raw_trans.1 ] && [ $feat_type == "raw" ]; then + echo "$0: using raw-fMLLR transforms from $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/raw_trans.JOB ark:- ark:- |" + valid_feats="$valid_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/raw_trans.*|' ark:- ark:- |" + train_subset_feats="$train_subset_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/raw_trans.*|' ark:- ark:- |" +fi +if [ ! 
-z "$online_ivector_dir" ]; then + feats_one="$(echo "$feats" | sed s:JOB:1:g)" + ivector_dim=$(feat-to-dim scp:$online_ivector_dir/ivector_online.scp -) || exit 1; + ivectors_opt="--const-feat-dim=$ivector_dim" + ivector_period=$(cat $online_ivector_dir/ivector_period) || exit 1; + feats="$feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- | ivector-randomize --randomize-prob=$ivector_randomize_prob ark:- ark:- |' ark:- |" + valid_feats="$valid_feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- | ivector-randomize --randomize-prob=$ivector_randomize_prob ark:- ark:- |' ark:- |" + train_subset_feats="$train_subset_feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- | ivector-randomize --randomize-prob=$ivector_randomize_prob ark:- ark:- |' ark:- |" +fi + +if [ $stage -le 0 ]; then + echo "$0: working out number of frames of training data" + num_frames=$(steps/nnet2/get_num_frames.sh $data) + echo $num_frames > $dir/num_frames +else + num_frames=`cat $dir/num_frames` || exit 1; +fi + +# Working out number of iterations per epoch. +iters_per_epoch=`perl -e "print int($num_frames/($samples_per_iter * $num_jobs_nnet) + 0.5);"` || exit 1; +[ $iters_per_epoch -eq 0 ] && iters_per_epoch=1 +samples_per_iter_real=$[$num_frames/($num_jobs_nnet*$iters_per_epoch)] +echo "$0: Every epoch, splitting the data up into $iters_per_epoch iterations," +echo "$0: giving samples-per-iteration of $samples_per_iter_real (you requested $samples_per_iter)." + +# Making soft links to storage directories. This is a no-up unless +# the subdirectory $dir/egs/storage/ exists. See utils/create_split_dir.pl +for x in `seq 1 $num_jobs_nnet`; do + for y in `seq 0 $[$iters_per_epoch-1]`; do + utils/create_data_link.pl $dir/egs/egs.$x.$y.ark + utils/create_data_link.pl $dir/egs/egs_tmp.$x.$y.ark + done + for y in `seq 1 $nj`; do + utils/create_data_link.pl $dir/egs/egs_orig.$x.$y.ark + done +done + +remove () { for x in $*; do [ -L $x ] && rm $(readlink -f $x); rm $x; done } + +nnet_context_opts="--left-context=$left_context --right-context=$right_context" +mkdir -p $dir/egs + +if [ $stage -le 2 ]; then + echo "Getting validation and training subset examples." + rm $dir/.error 2>/dev/null + echo "$0: extracting validation and training-subset alignments." + set -o pipefail; + for id in $(seq $nj); do gunzip -c $alidir/ali.$id.gz; done | \ + copy-int-vector ark:- ark,t:- | \ + utils/filter_scp.pl <(cat $dir/valid_uttlist $dir/train_subset_uttlist) | \ + gzip -c >$dir/ali_special.gz || exit 1; + set +o pipefail; # unset the pipefail option. + + all_ids=$(seq -s, $nj) # e.g. 
1,2,...39,40 + $cmd $dir/log/create_valid_subset.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts "$valid_feats" \ + "ark,s,cs:gunzip -c $dir/ali_special.gz | ali-to-pdf $alidir/final.mdl ark:- ark:- | ali-to-post ark:- ark:- |" \ + "ark:$dir/egs/valid_all.egs" || touch $dir/.error & + $cmd $dir/log/create_train_subset.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts "$train_subset_feats" \ + "ark,s,cs:gunzip -c $dir/ali_special.gz | ali-to-pdf $alidir/final.mdl ark:- ark:- | ali-to-post ark:- ark:- |" \ + "ark:$dir/egs/train_subset_all.egs" || touch $dir/.error & + wait; + [ -f $dir/.error ] && echo "Error detected while creating train/valid egs" && exit 1 + echo "Getting subsets of validation examples for diagnostics and combination." + $cmd $dir/log/create_valid_subset_combine.log \ + nnet-subset-egs --n=$num_valid_frames_combine ark:$dir/egs/valid_all.egs \ + ark:$dir/egs/valid_combine.egs || touch $dir/.error & + $cmd $dir/log/create_valid_subset_diagnostic.log \ + nnet-subset-egs --n=$num_frames_diagnostic ark:$dir/egs/valid_all.egs \ + ark:$dir/egs/valid_diagnostic.egs || touch $dir/.error & + + $cmd $dir/log/create_train_subset_combine.log \ + nnet-subset-egs --n=$num_train_frames_combine ark:$dir/egs/train_subset_all.egs \ + ark:$dir/egs/train_combine.egs || touch $dir/.error & + $cmd $dir/log/create_train_subset_diagnostic.log \ + nnet-subset-egs --n=$num_frames_diagnostic ark:$dir/egs/train_subset_all.egs \ + ark:$dir/egs/train_diagnostic.egs || touch $dir/.error & + wait + cat $dir/egs/valid_combine.egs $dir/egs/train_combine.egs > $dir/egs/combine.egs + + for f in $dir/egs/{combine,train_diagnostic,valid_diagnostic}.egs; do + [ ! -s $f ] && echo "No examples in file $f" && exit 1; + done + rm $dir/egs/valid_all.egs $dir/egs/train_subset_all.egs $dir/egs/{train,valid}_combine.egs $dir/ali_special.gz +fi + +if [ $stage -le 3 ]; then + # Other scripts might need to know the following info: + echo $num_jobs_nnet >$dir/egs/num_jobs_nnet + echo $iters_per_epoch >$dir/egs/iters_per_epoch + echo $samples_per_iter_real >$dir/egs/samples_per_iter + + echo "Creating training examples"; + # in $dir/egs, create $num_jobs_nnet separate files with training examples. + # The order is not randomized at this point. + + egs_list= + for n in `seq 1 $num_jobs_nnet`; do + egs_list="$egs_list ark:$dir/egs/egs_orig.$n.JOB.ark" + done + echo "Generating training examples on disk" + # The examples will go round-robin to egs_list. + $cmd $io_opts JOB=1:$nj $dir/log/get_egs.JOB.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts "$feats" \ + "ark,s,cs:gunzip -c $alidir/ali.JOB.gz | ali-to-pdf $alidir/final.mdl ark:- ark:- | ali-to-post ark:- ark:- |" ark:- \| \ + nnet-copy-egs ark:- $egs_list || exit 1; +fi + +if [ $stage -le 4 ]; then + echo "$0: rearranging examples into parts for different parallel jobs" + # combine all the "egs_orig.JOB.*.scp" (over the $nj splits of the data) and + # then split into multiple parts egs.JOB.*.scp for different parts of the + # data, 0 .. $iters_per_epoch-1. + + if [ $iters_per_epoch -eq 1 ]; then + echo "$0: Since iters-per-epoch == 1, just concatenating the data." + for n in `seq 1 $num_jobs_nnet`; do + cat $dir/egs/egs_orig.$n.*.ark > $dir/egs/egs_tmp.$n.0.ark || exit 1; + remove $dir/egs/egs_orig.$n.*.ark + done + else # We'll have to split it up using nnet-copy-egs. 
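+    # e.g. with num_jobs_nnet=16 and iters_per_epoch=3 (illustrative values), the examples
+    # in egs_orig.5.*.ark get dealt out across egs_tmp.5.0.ark, egs_tmp.5.1.ark and
+    # egs_tmp.5.2.ark, one output archive per iteration (round-robin unless --random=true).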
+ egs_list= + for n in `seq 0 $[$iters_per_epoch-1]`; do + egs_list="$egs_list ark:$dir/egs/egs_tmp.JOB.$n.ark" + done + # note, the "|| true" below is a workaround for NFS bugs + # we encountered running this script with Debian-7, NFS-v4. + $cmd $io_opts JOB=1:$num_jobs_nnet $dir/log/split_egs.JOB.log \ + nnet-copy-egs --random=$random_copy --srand=JOB \ + "ark:cat $dir/egs/egs_orig.JOB.*.ark|" $egs_list || exit 1; + remove $dir/egs/egs_orig.*.*.ark 2>/dev/null + fi +fi + +if [ $stage -le 5 ]; then + # Next, shuffle the order of the examples in each of those files. + # Each one should not be too large, so we can do this in memory. + echo "Shuffling the order of training examples" + echo "(in order to avoid stressing the disk, these won't all run at once)." + + for n in `seq 0 $[$iters_per_epoch-1]`; do + $cmd $io_opts JOB=1:$num_jobs_nnet $dir/log/shuffle.$n.JOB.log \ + nnet-shuffle-egs "--srand=\$[JOB+($num_jobs_nnet*$n)]" \ + ark:$dir/egs/egs_tmp.JOB.$n.ark ark:$dir/egs/egs.JOB.$n.ark + remove $dir/egs/egs_tmp.*.$n.ark + done +fi + +echo "$0: Finished preparing training examples" diff --git a/tools/ASR_2ch_track/steps/nnet2/get_egs2.sh b/tools/ASR_2ch_track/steps/nnet2/get_egs2.sh new file mode 100644 index 0000000000000000000000000000000000000000..864fe4e7b46aef064e1939d30c3e4f017dbce3e4 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/get_egs2.sh @@ -0,0 +1,330 @@ +#!/bin/bash + +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. +# +# This script, which will generally be called from other neural-net training +# scripts, extracts the training examples used to train the neural net (and also +# the validation examples used for diagnostics), and puts them in separate archives. +# +# This script differs from get_egs.sh in that it dumps egs with several frames +# of labels, controlled by the frames_per_eg config variable (default: 8). This +# takes many times less disk space because typically we have 4 to 7 frames of +# context on the left and right, and this ends up getting shared. This is at +# the expense of slightly higher disk I/O during training time. +# +# We also have a simpler way of dividing the egs up into pieces, with one level +# of index, so we have $dir/egs.{0,1,2,...}.ark instead of having two levels of +# indexes. The extra files we write to $dir that explain the structure are +# $dir/info/num_archives, which contains the number of files egs.*.ark, and +# $dir/info/frames_per_eg, which contains the number of frames of labels per eg +# (e.g. 7), and $dir/samples_per_archive. These replace the files +# iters_per_epoch and num_jobs_nnet and egs_per_iter that the previous script +# wrote to. This script takes the directory where the "egs" are located as the +# argument, not the directory one level up. + +# Begin configuration section. +cmd=run.pl +feat_type= # e.g. set it to "raw" to use raw MFCC +frames_per_eg=8 # number of frames of labels per example. more->less disk space and + # less time preparing egs, but more I/O during training. + # note: the script may reduce this if reduce_frames_per_eg is true. +left_context=4 # amount of left-context per eg +right_context=4 # amount of right-context per eg +delta_order= # delta feature order + +reduce_frames_per_eg=true # If true, this script may reduce the frames_per_eg + # if there is only one archive and even with the + # reduced frames_pe_eg, the number of + # samples_per_iter that would result is less than or + # equal to the user-specified value. 
+num_utts_subset=300 # number of utterances in validation and training + # subsets used for shrinkage and diagnostics. +num_valid_frames_combine=0 # #valid frames for combination weights at the very end. +num_train_frames_combine=10000 # # train frames for the above. +num_frames_diagnostic=4000 # number of frames for "compute_prob" jobs +samples_per_iter=400000 # each iteration of training, see this many samples + # per job. This is just a guideline; it will pick a number + # that divides the number of samples in the entire data. + +transform_dir= # If supplied, overrides alidir as the place to find fMLLR transforms +postdir= # If supplied, we will use posteriors in it as soft training targets. + +stage=0 +io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time. +random_copy=false +online_ivector_dir= # can be used if we are including speaker information as iVectors. +cmvn_opts= # can be used for specifying CMVN options, if feature type is not lda (if lda, + # it doesn't make sense to use different options than were used as input to the + # LDA transform). This is used to turn off CMVN in the online-nnet experiments. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + + +if [ $# != 3 ]; then + echo "Usage: $0 [opts] " + echo " e.g.: $0 data/train exp/tri3_ali exp/tri4_nnet/egs" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl;utils/queue.pl ) # how to run jobs." + echo " --samples-per-iter <#samples;400000> # Number of samples of data to process per iteration, per" + echo " # process." + echo " --feat-type # (by default it tries to guess). The feature type you want" + echo " # to use as input to the neural net." + echo " --frames-per-eg # number of frames per eg on disk" + echo " --left-context # Number of frames on left side to append for feature input" + echo " --right-context # Number of frames on right side to append for feature input" + echo " --num-frames-diagnostic <#frames;4000> # Number of frames used in computing (train,valid) diagnostics" + echo " --num-valid-frames-combine <#frames;10000> # Number of frames used in getting combination weights at the" + echo " # very end." + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + + exit 1; +fi + +data=$1 +alidir=$2 +dir=$3 + + +# Check some files. +[ ! -z "$online_ivector_dir" ] && \ + extra_files="$online_ivector_dir/ivector_online.scp $online_ivector_dir/ivector_period" + +for f in $data/feats.scp $alidir/ali.1.gz $alidir/final.mdl $alidir/tree $extra_files; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir... + +sdata=$data/split$nj +utils/split_data.sh $data $nj + +mkdir -p $dir/log $dir/info +cp $alidir/tree $dir + +# Get list of validation utterances. +awk '{print $1}' $data/utt2spk | utils/shuffle_list.pl | head -$num_utts_subset \ + > $dir/valid_uttlist || exit 1; + +if [ -f $data/utt2uniq ]; then + echo "File $data/utt2uniq exists, so augmenting valid_uttlist to" + echo "include all perturbed versions of the same 'real' utterances." 
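+ # (illustrative example, generic utterance-id) with perturbed copies of the
+ # data, utt2uniq contains lines such as "1.1-1.0-uttA uttA"; mapping the
+ # validation list through utt2uniq and then back through uniq2utt (built
+ # below) expands each held-out utterance to all of its perturbed copies, so
+ # no copy of a validation utterance leaks into training.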
+ mv $dir/valid_uttlist $dir/valid_uttlist.tmp + utils/utt2spk_to_spk2utt.pl $data/utt2uniq > $dir/uniq2utt + cat $dir/valid_uttlist.tmp | utils/apply_map.pl $data/utt2uniq | \ + sort | uniq | utils/apply_map.pl $dir/uniq2utt | \ + awk '{for(n=1;n<=NF;n++) print $n;}' | sort > $dir/valid_uttlist + rm $dir/uniq2utt $dir/valid_uttlist.tmp +fi + +awk '{print $1}' $data/utt2spk | utils/filter_scp.pl --exclude $dir/valid_uttlist | \ + utils/shuffle_list.pl | head -$num_utts_subset > $dir/train_subset_uttlist || exit 1; + +[ -z "$transform_dir" ] && transform_dir=$alidir + +## Set up features. +if [ -z $feat_type ]; then + if [ -f $alidir/final.mat ] && [ ! -f $transform_dir/raw_trans.1 ]; then feat_type=lda; else feat_type=raw; fi +fi +echo "$0: feature type is $feat_type" + +case $feat_type in + raw) feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- |" + valid_feats="ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |" + train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |" + echo $cmvn_opts >$dir/cmvn_opts # caution: the top-level nnet training script should copy this to its own dir now. + if [ ! -z "$delta_order" ]; then + feats="$feats add-deltas --delta-order=$delta_order ark:- ark:- |" + valid_feats="$valid_feats add-deltas --delta-order=$delta_order ark:- ark:- |" + train_subset_feats="$train_subset_feats add-deltas --delta-order=$delta_order ark:- ark:- |" + echo $delta_order >$dir/delta_order + fi + ;; + lda) + splice_opts=`cat $alidir/splice_opts 2>/dev/null` + # caution: the top-level nnet training script should copy these to its own dir now. + cp $alidir/{splice_opts,cmvn_opts,final.mat} $dir || exit 1; + [ ! -z "$cmvn_opts" ] && \ + echo "You cannot supply --cmvn-opts option if feature type is LDA." 
&& exit 1; + cmvn_opts=$(cat $dir/cmvn_opts) + feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + valid_feats="ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac + +if [ -f $transform_dir/trans.1 ] && [ $feat_type != "raw" ]; then + echo "$0: using transforms from $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" + valid_feats="$valid_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/trans.*|' ark:- ark:- |" + train_subset_feats="$train_subset_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/trans.*|' ark:- ark:- |" +fi +if [ -f $transform_dir/raw_trans.1 ] && [ $feat_type == "raw" ]; then + echo "$0: using raw-fMLLR transforms from $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/raw_trans.JOB ark:- ark:- |" + valid_feats="$valid_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/raw_trans.*|' ark:- ark:- |" + train_subset_feats="$train_subset_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/raw_trans.*|' ark:- ark:- |" +fi +if [ ! -z "$online_ivector_dir" ]; then + feats_one="$(echo "$feats" | sed s:JOB:1:g)" + ivector_dim=$(feat-to-dim scp:$online_ivector_dir/ivector_online.scp -) || exit 1; + echo $ivector_dim > $dir/info/ivector_dim + ivectors_opt="--const-feat-dim=$ivector_dim" + ivector_period=$(cat $online_ivector_dir/ivector_period) || exit 1; + feats="$feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |" + valid_feats="$valid_feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |" + train_subset_feats="$train_subset_feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |" +else + echo 0 >$dir/info/ivector_dim +fi + +if [ $stage -le 0 ]; then + echo "$0: working out number of frames of training data" + num_frames=$(steps/nnet2/get_num_frames.sh $data) + echo $num_frames > $dir/info/num_frames +else + num_frames=`cat $dir/info/num_frames` || exit 1; +fi + +# the + 1 is to round up, not down... we assume it doesn't divide exactly. +num_archives=$[$num_frames/($frames_per_eg*$samples_per_iter)+1] +# (for small data)- while reduce_frames_per_eg == true and the number of +# archives is 1 and would still be 1 if we reduced frames_per_eg by 1, reduce it +# by 1. 
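+# Worked example with illustrative numbers: num_frames=10000000, frames_per_eg=8,
+# samples_per_iter=400000 gives num_archives = 10000000/(8*400000) + 1 = 4, and
+# below egs_per_archive = 10000000/(8*4) = 312500, which satisfies the check
+# that egs_per_archive <= samples_per_iter.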
+reduced=false +while $reduce_frames_per_eg && [ $frames_per_eg -gt 1 ] && \ + [ $[$num_frames/(($frames_per_eg-1)*$samples_per_iter)] -eq 0 ]; do + frames_per_eg=$[$frames_per_eg-1] + num_archives=1 + reduced=true +done +$reduced && echo "$0: reduced frames_per_eg to $frames_per_eg because amount of data is small." + +echo $num_archives >$dir/info/num_archives +echo $frames_per_eg >$dir/info/frames_per_eg + +# Working out number of egs per archive +egs_per_archive=$[$num_frames/($frames_per_eg*$num_archives)] +! [ $egs_per_archive -le $samples_per_iter ] && \ + echo "$0: script error: egs_per_archive=$egs_per_archive not <= samples_per_iter=$samples_per_iter" \ + && exit 1; + +echo $egs_per_archive > $dir/info/egs_per_archive + +echo "$0: creating $num_archives archives, each with $egs_per_archive egs, with" +echo "$0: $frames_per_eg labels per example, and (left,right) context = ($left_context,$right_context)" + +# Making soft links to storage directories. This is a no-up unless +# the subdirectory $dir/storage/ exists. See utils/create_split_dir.pl +for x in `seq $num_archives`; do + utils/create_data_link.pl $dir/egs.$x.ark + for y in `seq $nj`; do + utils/create_data_link.pl $dir/egs_orig.$x.$y.ark + done +done + +nnet_context_opts="--left-context=$left_context --right-context=$right_context" + +echo $left_context > $dir/info/left_context +echo $right_context > $dir/info/right_context +if [ $stage -le 2 ]; then + echo "$0: Getting validation and training subset examples." + rm $dir/.error 2>/dev/null + echo "$0: ... extracting validation and training-subset alignments." + set -o pipefail; + for id in $(seq $nj); do gunzip -c $alidir/ali.$id.gz; done | \ + copy-int-vector ark:- ark,t:- | \ + utils/filter_scp.pl <(cat $dir/valid_uttlist $dir/train_subset_uttlist) | \ + gzip -c >$dir/ali_special.gz || exit 1; + set +o pipefail; # unset the pipefail option. + + $cmd $dir/log/create_valid_subset.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts "$valid_feats" \ + "ark,s,cs:gunzip -c $dir/ali_special.gz | ali-to-pdf $alidir/final.mdl ark:- ark:- | ali-to-post ark:- ark:- |" \ + "ark:$dir/valid_all.egs" || touch $dir/.error & + $cmd $dir/log/create_train_subset.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts "$train_subset_feats" \ + "ark,s,cs:gunzip -c $dir/ali_special.gz | ali-to-pdf $alidir/final.mdl ark:- ark:- | ali-to-post ark:- ark:- |" \ + "ark:$dir/train_subset_all.egs" || touch $dir/.error & + wait; + [ -f $dir/.error ] && echo "Error detected while creating train/valid egs" && exit 1 + echo "... Getting subsets of validation examples for diagnostics and combination." + $cmd $dir/log/create_valid_subset_combine.log \ + nnet-subset-egs --n=$num_valid_frames_combine ark:$dir/valid_all.egs \ + ark:$dir/valid_combine.egs || touch $dir/.error & + $cmd $dir/log/create_valid_subset_diagnostic.log \ + nnet-subset-egs --n=$num_frames_diagnostic ark:$dir/valid_all.egs \ + ark:$dir/valid_diagnostic.egs || touch $dir/.error & + + $cmd $dir/log/create_train_subset_combine.log \ + nnet-subset-egs --n=$num_train_frames_combine ark:$dir/train_subset_all.egs \ + ark:$dir/train_combine.egs || touch $dir/.error & + $cmd $dir/log/create_train_subset_diagnostic.log \ + nnet-subset-egs --n=$num_frames_diagnostic ark:$dir/train_subset_all.egs \ + ark:$dir/train_diagnostic.egs || touch $dir/.error & + wait + sleep 5 # wait for file system to sync. + cat $dir/valid_combine.egs $dir/train_combine.egs > $dir/combine.egs + + for f in $dir/{combine,train_diagnostic,valid_diagnostic}.egs; do + [ ! 
-s $f ] && echo "No examples in file $f" && exit 1; + done + rm $dir/valid_all.egs $dir/train_subset_all.egs $dir/{train,valid}_combine.egs $dir/ali_special.gz +fi + +if [ $stage -le 3 ]; then + # create egs_orig.*.*.ark; the first index goes to $num_archives, + # the second to $nj (which is the number of jobs in the original alignment + # dir) + + egs_list= + for n in $(seq $num_archives); do + egs_list="$egs_list ark:$dir/egs_orig.$n.JOB.ark" + done + echo "$0: Generating training examples on disk" + # The examples will go round-robin to egs_list. + if [ ! -z $postdir ]; then + $cmd $io_opts JOB=1:$nj $dir/log/get_egs.JOB.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts --num-frames=$frames_per_eg "$feats" \ + scp:$postdir/post.JOB.scp ark:- \| \ + nnet-copy-egs ark:- $egs_list || exit 1; + else + $cmd $io_opts JOB=1:$nj $dir/log/get_egs.JOB.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts --num-frames=$frames_per_eg "$feats" \ + "ark,s,cs:gunzip -c $alidir/ali.JOB.gz | ali-to-pdf $alidir/final.mdl ark:- ark:- | ali-to-post ark:- ark:- |" ark:- \| \ + nnet-copy-egs ark:- $egs_list || exit 1; + fi +fi +if [ $stage -le 4 ]; then + echo "$0: recombining and shuffling order of archives on disk" + # combine all the "egs_orig.JOB.*.scp" (over the $nj splits of the data) and + # shuffle the order, writing to the egs.JOB.ark + + egs_list= + for n in $(seq $nj); do + egs_list="$egs_list $dir/egs_orig.JOB.$n.ark" + done + + $cmd $io_opts $extra_opts JOB=1:$num_archives $dir/log/shuffle.JOB.log \ + nnet-shuffle-egs --srand=JOB "ark:cat $egs_list|" ark:$dir/egs.JOB.ark || exit 1; +fi + +if [ $stage -le 5 ]; then + echo "$0: removing temporary archives" + for x in `seq $num_archives`; do + for y in `seq $nj`; do + file=$dir/egs_orig.$x.$y.ark + [ -L $file ] && rm $(readlink -f $file) + rm $file + done + done +fi + +echo "$0: Finished preparing training examples" diff --git a/tools/ASR_2ch_track/steps/nnet2/get_egs_discriminative2.sh b/tools/ASR_2ch_track/steps/nnet2/get_egs_discriminative2.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c08a08b8240d7158b9361062dd53a4b4805622e --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/get_egs_discriminative2.sh @@ -0,0 +1,352 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script dumps examples MPE or MMI or state-level minimum bayes risk (sMBR) +# training of neural nets. Note: for "criterion", smbr > mpe > mmi in terms of +# compatibility of the dumped egs, meaning you can use the egs dumped with +# --criterion smbr for MPE or MMI, and egs dumped with --criterion mpe for MMI +# training. The discriminative training program itself doesn't enforce this and +# it would let you mix and match them arbitrarily; we area speaking in terms of +# the correctness of the algorithm that splits the lattices into pieces. + +# Begin configuration section. +cmd=run.pl +criterion=smbr +drop_frames=false # option relevant for MMI, affects how we dump examples. +samples_per_iter=400000 # measured in frames, not in "examples" +max_temp_archives=128 # maximum number of temp archives per input job, only + # affects the process of generating archives, not the + # final result. + +stage=0 + +cleanup=true +transform_dir= # If this is a SAT system, directory for transforms +online_ivector_dir= + +num_utts_subset=3000 +num_archives_priors=10 + +# End configuration section. + + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. 
parse_options.sh || exit 1; + + +if [ $# != 6 ]; then + echo "Usage: $0 [opts] " + echo " e.g.: $0 data/train data/lang exp/tri3_ali exp/tri4_nnet_denlats exp/tri4/final.mdl exp/tri4_mpe/degs" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs (probably would be good to add -tc 5 or so if using" + echo " # GridEngine (to avoid excessive NFS traffic)." + echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per" + echo " # process." + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + echo " --criterion # Training criterion: may be smbr, mmi or mpfe" + echo " --online-ivector-dir # Directory for online-estimated iVectors, used in the" + echo " # online-neural-net setup. (but you may want to use" + echo " # steps/online/nnet2/get_egs_discriminative2.sh instead)" + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 +denlatdir=$4 +src_model=$5 +dir=$6 + + +extra_files= +[ ! -z $online_ivector_dir ] && \ + extra_files="$online_ivector_dir/ivector_period $online_ivector_dir/ivector_online.scp" + +# Check some files. +for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/num_jobs $alidir/tree \ + $denlatdir/lat.1.gz $denlatdir/num_jobs $src_model $extra_files; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +mkdir -p $dir/log $dir/info || exit 1; + + +nj=$(cat $denlatdir/num_jobs) || exit 1; # $nj is the number of + # splits of the denlats and alignments. + + +[ "$(readlink /bin/sh)" == dash ] && \ + echo "This script won't work if /bin/sh points to dash. make it point to bash." && exit 1 + +nj_ali=$(cat $alidir/num_jobs) || exit 1; + +sdata=$data/split$nj +utils/split_data.sh $data $nj + +if [ $nj_ali -eq $nj ]; then + ali_rspecifier="ark,s,cs:gunzip -c $alidir/ali.JOB.gz |" + all_ids=$(seq -s, $nj) + prior_ali_rspecifier="ark,s,cs:gunzip -c $alidir/ali.{$all_ids}.gz | copy-int-vector ark:- ark,t:- | utils/filter_scp.pl $dir/priors_uttlist | ali-to-pdf $alidir/final.mdl ark,t:- ark:- |" +else + ali_rspecifier="scp:$dir/ali.scp" + prior_ali_rspecifier="ark,s,cs:utils/filter_scp.pl $dir/priors_uttlist $dir/ali.scp | ali-to-pdf $alidir/final.mdl scp:- ark:- |" + if [ $stage -le 1 ]; then + echo "$0: number of jobs in den-lats versus alignments differ: dumping them as single archive and index." + all_ids=$(seq -s, $nj_ali) + $cmd $dir/log/copy_alignments.log \ + copy-int-vector "ark:gunzip -c $alidir/ali.{$all_ids}.gz|" \ + ark,scp:$dir/ali.ark,$dir/ali.scp || exit 1; + fi +fi + +splice_opts=`cat $alidir/splice_opts 2>/dev/null` +silphonelist=`cat $lang/phones/silence.csl` || exit 1; +cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null` +cp $alidir/splice_opts $dir 2>/dev/null +cp $alidir/cmvn_opts $dir 2>/dev/null +cp $alidir/tree $dir +cp $lang/phones/silence.csl $dir/info/ +cp $src_model $dir/final.mdl || exit 1 + +if [ ! -z "$online_ivector_dir" ]; then + ivector_period=$(cat $online_ivector_dir/ivector_period) + ivector_dim=$(feat-to-dim scp:$online_ivector_dir/ivector_online.scp -) || exit 1; + echo $ivector_dim >$dir/info/ivector_dim + # the 'const_dim_opt' allows it to write only one iVector per example, + # rather than one per time-index... it has to average over + const_dim_opt="--const-feat-dim=$ivector_dim" +else + echo 0 > $dir/info/ivector_dim +fi + +# Get list of validation utterances. 
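+# (note) despite the comment above, in this script the subset written to
+# $dir/priors_uttlist is not a held-out validation set; it is only used further
+# down to dump a small set of examples for adjusting the softmax priors.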
+awk '{print $1}' $data/utt2spk | utils/shuffle_list.pl | head -$num_utts_subset \ + > $dir/priors_uttlist || exit 1; + +## We don't support deltas here, only LDA or raw (mainly because deltas are less +## frequently used). +if [ -z $feat_type ]; then + if [ -f $alidir/final.mat ] && [ ! -f $transform_dir/raw_trans.1 ]; then feat_type=lda; else feat_type=raw; fi +fi +echo "$0: feature type is $feat_type" + +case $feat_type in + raw) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |" + priors_feats="ark,s,cs:utils/filter_scp.pl $dir/priors_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |" + ;; + lda) + splice_opts=`cat $alidir/splice_opts 2>/dev/null` + cp $alidir/final.mat $dir + feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + priors_feats="ark,s,cs:utils/filter_scp.pl $dir/priors_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac + +if [ -z "$transform_dir" ]; then + if [ -f $transform_dir/trans.1 ] || [ -f $transform_dir/raw_trans.1 ]; then + transform_dir=$alidir + fi +fi + +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -s $transform_dir/num_jobs ] && \ + echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1; + nj_orig=$(cat $transform_dir/num_jobs) + + if [ $feat_type == "raw" ]; then trans=raw_trans; + else trans=trans; fi + if [ $feat_type == "lda" ] && ! cmp $transform_dir/final.mat $alidir/final.mat; then + echo "$0: LDA transforms differ between $alidir and $transform_dir" + exit 1; + fi + if [ ! -f $transform_dir/$trans.1 ]; then + echo "$0: expected $transform_dir/$trans.1 to exist (--transform-dir option)" + exit 1; + fi + if [ $nj -ne $nj_orig ]; then + # Copy the transforms into an archive with an index. + for n in $(seq $nj_orig); do cat $transform_dir/$trans.$n; done | \ + copy-feats ark:- ark,scp:$dir/$trans.ark,$dir/$trans.scp || exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/$trans.scp ark:- ark:- |" + priors_feats="$priors_feats transform-feats --utt2spk=ark:$data/utt2spk scp:$dir/$trans.scp ark:- ark:- |" + else + # number of jobs matches with alignment dir. + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/$trans.JOB ark:- ark:- |" + all_ids=`seq -s, $nj` + priors_feats="$priors_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $transform_dir/$trans.{$all_ids} |' ark:- ark:- |" + fi +fi +if [ ! -z $online_ivector_dir ]; then + # add iVectors to the features. 
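+ # (explanatory note) ivector_online.scp holds one iVector every
+ # $ivector_period frames; subsample-feats with a negative --n repeats each
+ # iVector that many times so it lines up frame-by-frame with the acoustic
+ # features, and paste-feats then appends it to every frame
+ # (--length-tolerance absorbs the small length mismatch at utterance ends).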
+ feats="$feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |" + priors_feats="$priors_feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $dir/priors_uttlist $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |" +fi + + +if [ $stage -le 2 ]; then + echo "$0: working out number of frames of training data" + num_frames=$(steps/nnet2/get_num_frames.sh $data) + + echo $num_frames > $dir/info/num_frames + + # Working out total number of archives. Add one on the assumption the + # num-frames won't divide exactly, and we want to round up. + num_archives=$[$num_frames/$samples_per_iter + 1] + + # the next few lines relate to how we may temporarily split each input job + # into fewer than $num_archives pieces, to avoid using an excessive + # number of filehandles. + archive_ratio=$[$num_archives/$max_temp_archives+1] + num_archives_temp=$[$num_archives/$archive_ratio] + # change $num_archives slightly to make it an exact multiple + # of $archive_ratio. + num_archives=$[$num_archives_temp*$archive_ratio] + + echo $num_archives >$dir/info/num_archives || exit 1 + echo $num_archives_temp >$dir/info/num_archives_temp || exit 1 + + frames_per_archive=$[$num_frames/$num_archives] + + # note, this is the number of frames per archive prior to discarding frames. + echo $frames_per_archive > $dir/info/frames_per_archive +else + num_archives=$(cat $dir/info/num_archives) || exit 1; + num_archives_temp=$(cat $dir/info/num_archives_temp) || exit 1; + frames_per_archive=$(cat $dir/info/frames_per_archive) || exit 1; +fi + +echo "$0: Splitting the data up into $num_archives archives (using $num_archives_temp temporary pieces per input job)" +echo "$0: giving samples-per-iteration of $frames_per_archive (you requested $samples_per_iter)." + +# we create these data links regardless of the stage, as there are situations +# where we would want to recreate a data link that had previously been deleted. + +if [ -d $dir/storage ]; then + echo "$0: creating data links for distributed storage of degs" + # See utils/create_split_dir.pl for how this 'storage' directory is created. + for x in $(seq $nj); do + for y in $(seq $num_archives_temp); do + utils/create_data_link.pl $dir/degs_orig.$x.$y.ark + done + done + for z in $(seq $num_archives); do + utils/create_data_link.pl $dir/degs.$z.ark + done + if [ $num_archives_temp -ne $num_archives ]; then + for z in $(seq $num_archives); do + utils/create_data_link.pl $dir/degs_temp.$z.ark + done + fi +fi + +rm $dir/.error 2>/dev/null +left_context=$(nnet-am-info $dir/final.mdl | grep '^left-context' | awk '{print $2}') || exit 1 +right_context=$(nnet-am-info $dir/final.mdl | grep '^right-context' | awk '{print $2}') || exit 1 + +( + +if [ $stage -le 10 ]; then + +priors_egs_list= +for y in `seq $num_archives_priors`; do + utils/create_data_link.pl $dir/priors_egs.$y.ark + priors_egs_list="$priors_egs_list ark:$dir/priors_egs.$y.ark" +done + +nnet_context_opts="--left-context=$left_context --right-context=$right_context" + +echo "$0: dumping egs for prior adjustment in the background." 
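+# (note) the priors_egs archives dumped here are typically used later by the
+# training scripts (e.g. via nnet-adjust-priors) to re-estimate the softmax
+# priors that are divided out of the posteriors at decoding time.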
+ +$cmd $dir/log/create_priors_subset.log \ + nnet-get-egs $ivectors_opt $nnet_context_opts "$priors_feats" \ + "$prior_ali_rspecifier ali-to-post ark:- ark:- |" \ + ark:- \| nnet-copy-egs ark:- $priors_egs_list || \ + { touch $dir/.error; echo "Error in creating priors subset. See $dir/log/create_priors_subset.log"; exit 1; } + +sleep 3; + +echo $num_archives_priors >$dir/info/num_archives_priors + +fi + +) & + +if [ $stage -le 3 ]; then + echo "$0: getting initial training examples by splitting lattices" + + degs_list=$(for n in $(seq $num_archives_temp); do echo ark:$dir/degs_orig.JOB.$n.ark; done) + + $cmd JOB=1:$nj $dir/log/get_egs.JOB.log \ + nnet-get-egs-discriminative --criterion=$criterion --drop-frames=$drop_frames \ + "$src_model" "$feats" "$ali_rspecifier" "ark,s,cs:gunzip -c $denlatdir/lat.JOB.gz|" ark:- \| \ + nnet-copy-egs-discriminative $const_dim_opt ark:- $degs_list || exit 1; + sleep 5; # wait a bit so NFS has time to write files. +fi + +if [ $stage -le 4 ]; then + + degs_list=$(for n in $(seq $nj); do echo $dir/degs_orig.$n.JOB.ark; done) + + if [ $num_archives -eq $num_archives_temp ]; then + echo "$0: combining data into final archives and shuffling it" + + $cmd JOB=1:$num_archives $dir/log/shuffle.JOB.log \ + cat $degs_list \| nnet-shuffle-egs-discriminative --srand=JOB ark:- \ + ark:$dir/degs.JOB.ark || exit 1; + else + echo "$0: combining and re-splitting data into un-shuffled versions of final archives." + + archive_ratio=$[$num_archives/$num_archives_temp] + ! [ $archive_ratio -gt 1 ] && echo "$0: Bad archive_ratio $archive_ratio" && exit 1; + + # note: the \$[ .. ] won't be evaluated until the job gets executed. The + # aim is to write to the archives with the final numbering, 1 + # ... num_archives, which is more than num_archives_temp. The list with + # \$[... ] expressions in it computes the set of final indexes for each + # temporary index. + degs_list_out=$(for n in $(seq $archive_ratio); do echo "ark:$dir/degs_temp.\$[((JOB-1)*$archive_ratio)+$n].ark"; done) + # e.g. if dir=foo and archive_ratio=2, we'd have + # degs_list_out='foo/degs_temp.$[((JOB-1)*2)+1].ark foo/degs_temp.$[((JOB-1)*2)+2].ark' + + $cmd JOB=1:$num_archives_temp $dir/log/resplit.JOB.log \ + cat $degs_list \| nnet-copy-egs-discriminative --srand=JOB ark:- \ + $degs_list_out || exit 1; + fi +fi + +if [ $stage -le 5 ] && [ $num_archives -ne $num_archives_temp ]; then + echo "$0: shuffling final archives." + + $cmd JOB=1:$num_archives $dir/log/shuffle.JOB.log \ + nnet-shuffle-egs-discriminative --srand=JOB ark:$dir/degs_temp.JOB.ark \ + ark:$dir/degs.JOB.ark || exit 1 +fi + +wait; +[ -f $dir/.error ] && echo "Error detected while creating priors adjustment egs" && exit 1 + +if $cleanup; then + echo "$0: removing temporary archives." + for x in $(seq $nj); do + for y in $(seq $num_archives_temp); do + file=$dir/degs_orig.$x.$y.ark + [ -L $file ] && rm $(readlink -f $file); rm $file + done + done + if [ $num_archives_temp -ne $num_archives ]; then + for z in $(seq $num_archives); do + file=$dir/degs_temp.$z.ark + [ -L $file ] && rm $(readlink -f $file); rm $file + done + fi +fi + +echo "$0: Done." diff --git a/tools/ASR_2ch_track/steps/nnet2/get_lda.sh b/tools/ASR_2ch_track/steps/nnet2/get_lda.sh new file mode 100644 index 0000000000000000000000000000000000000000..efb69bede7e8e5d864d1b870f12a97b748393f02 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/get_lda.sh @@ -0,0 +1,184 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. 
+# This script, which will generally be called from other neural-net training +# scripts, extracts the training examples used to train the neural net (and also +# the validation examples used for diagnostics), and puts them in separate archives. + +# Begin configuration section. +cmd=run.pl + +feat_type= +stage=0 +splice_width=4 # meaning +- 4 frames on each side for second LDA +left_context= # left context for second LDA +right_context= # right context for second LDA +rand_prune=4.0 # Relates to a speedup we do for LDA. +within_class_factor=0.0001 # This affects the scaling of the transform rows... + # sorry for no explanation, you'll have to see the code. +transform_dir= # If supplied, overrides alidir +num_feats=10000 # maximum number of feature files to use. Beyond a certain point it just + # gets silly to use more data. +lda_dim= # This defaults to no dimension reduction. +online_ivector_dir= +ivector_randomize_prob=0.0 # if >0.0, randomizes iVectors during training with + # this prob per iVector. +ivector_dir= +cmvn_opts= # allows you to specify options for CMVN, if feature type is not lda. + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + + +if [ $# != 4 ]; then + echo "Usage: steps/nnet2/get_lda.sh [opts] " + echo " e.g.: steps/nnet2/get_lda.sh data/train data/lang exp/tri3_ali exp/tri4_nnet" + echo " As well as extracting the examples, this script will also do the LDA computation," + echo " if --est-lda=true (default:true)" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --splice-width # Number of frames on each side to append for feature input" + echo " # (note: we splice processed, typically 40-dimensional frames" + echo " --left-context # Number of frames on left side to append for feature input, overrides splice-width" + echo " --right-context # Number of frames on right side to append for feature input, overrides splice-width" + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + echo " --online-vector-dir # Directory produced by" + echo " # steps/online/nnet2/extract_ivectors_online.sh" + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 +dir=$4 + +[ -z "$left_context" ] && left_context=$splice_width +[ -z "$right_context" ] && right_context=$splice_width + +[ ! -z "$online_ivector_dir" ] && \ + extra_files="$online_ivector_dir/ivector_online.scp $online_ivector_dir/ivector_period" + +# Check some files. +for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree $extra_files; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +# Set some variables. +oov=`cat $lang/oov.int` +num_leaves=`gmm-info $alidir/final.mdl 2>/dev/null | awk '/number of pdfs/{print $NF}'` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; + +nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir... +# in this dir we'll have just one job. +sdata=$data/split$nj +utils/split_data.sh $data $nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +cp $alidir/tree $dir + +[ -z "$transform_dir" ] && transform_dir=$alidir +if [ -z "$cmvn_opts" ]; then + cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null` +fi +echo $cmvn_opts >$dir/cmvn_opts 2>/dev/null + +## Set up features. 
Note: these are different from the normal features +## because we have one rspecifier that has the features for the entire +## training set, not separate ones for each batch. +if [ -z $feat_type ]; then + if [ -f $alidir/final.mat ] && ! [ -f $alidir/raw_trans.1 ]; then feat_type=lda; else feat_type=raw; fi +fi +echo "$0: feature type is $feat_type" + + +# If we have more than $num_feats feature files (default: 10k), +# we use a random subset. This won't affect the transform much, and will +# spare us an unnecessary pass over the data. Probably 10k is +# way too much, but for small datasets this phase is quite fast. +N=$[$num_feats/$nj] + +case $feat_type in + raw) feats="ark,s,cs:utils/subset_scp.pl --quiet $N $sdata/JOB/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- |" + echo $cmvn_opts >$dir/cmvn_opts + ;; + lda) + splice_opts=`cat $alidir/splice_opts 2>/dev/null` + cp $alidir/{splice_opts,cmvn_opts,final.mat} $dir || exit 1; + [ ! -z "$cmvn_opts" ] && \ + echo "You cannot supply --cmvn-opts option of feature type is LDA." && exit 1; + cmvn_opts=$(cat $dir/cmvn_opts) + feats="ark,s,cs:utils/subset_scp.pl --quiet $N $sdata/JOB/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |" + ;; + *) echo "$0: invalid feature type $feat_type" && exit 1; +esac + +if [ -f $transform_dir/trans.1 ] && [ $feat_type != "raw" ]; then + echo "$0: using transforms from $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |" +fi +if [ -f $transform_dir/raw_trans.1 ] && [ $feat_type == "raw" ]; then + echo "$0: using raw-fMLLR transforms from $transform_dir" + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/raw_trans.JOB ark:- ark:- |" +fi + + +feats_one="$(echo "$feats" | sed s:JOB:1:g)" +# note: feat_dim is the raw, un-spliced feature dim without the iVectors. +feat_dim=$(feat-to-dim "$feats_one" -) || exit 1; +# by default: no dim reduction. + +spliced_feats="$feats splice-feats --left-context=$left_context --right-context=$right_context ark:- ark:- |" + +if [ ! -z "$online_ivector_dir" ]; then + ivector_period=$(cat $online_ivector_dir/ivector_period) || exit 1; + # note: subsample-feats, with negative value of n, repeats each feature n times. + spliced_feats="$spliced_feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- | ivector-randomize --randomize-prob=$ivector_randomize_prob ark:- ark:- |' ark:- |" + ivector_dim=$(feat-to-dim scp:$online_ivector_dir/ivector_online.scp -) || exit 1; +else + ivector_dim=0 +fi +echo $ivector_dim >$dir/ivector_dim + +if [ -z "$lda_dim" ]; then + spliced_feats_one="$(echo "$spliced_feats" | sed s:JOB:1:g)" + lda_dim=$(feat-to-dim "$spliced_feats_one" -) || exit 1; +fi + +if [ $stage -le 0 ]; then + echo "$0: Accumulating LDA statistics." + rm $dir/lda.*.acc 2>/dev/null # in case any left over from before. 
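+ # (explanatory note) in the pipeline below, ali-to-post turns the alignments
+ # into posterior form, weight-silence-post with weight 0.0 removes the
+ # contribution of silence frames (phones in $silphonelist), and acc-lda
+ # accumulates class statistics for the spliced features using the pdf-ids as
+ # classes; --rand-prune randomly discards small posteriors purely for speed.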
+ $cmd JOB=1:$nj $dir/log/lda_acc.JOB.log \ + ali-to-post "ark:gunzip -c $alidir/ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alidir/final.mdl ark:- ark:- \| \ + acc-lda --rand-prune=$rand_prune $alidir/final.mdl "$spliced_feats" ark,s,cs:- \ + $dir/lda.JOB.acc || exit 1; +fi + +echo $feat_dim > $dir/feat_dim +echo $lda_dim > $dir/lda_dim +echo $ivector_dim > $dir/ivector_dim + +if [ $stage -le 1 ]; then + sum-lda-accs $dir/lda.acc $dir/lda.*.acc 2>$dir/log/lda_sum.log || exit 1; + rm $dir/lda.*.acc +fi + +if [ $stage -le 2 ]; then + # There are various things that we sometimes (but not always) need + # the within-class covariance and its Cholesky factor for, and we + # write these to disk just in case. + nnet-get-feature-transform --write-cholesky=$dir/cholesky.tpmat \ + --write-within-covar=$dir/within_covar.spmat \ + --within-class-factor=$within_class_factor --dim=$lda_dim \ + $dir/lda.mat $dir/lda.acc \ + 2>$dir/log/lda_est.log || exit 1; +fi + +echo "$0: Finished estimating LDA" diff --git a/tools/ASR_2ch_track/steps/nnet2/get_lda_block.sh b/tools/ASR_2ch_track/steps/nnet2/get_lda_block.sh new file mode 100644 index 0000000000000000000000000000000000000000..7bd4ecf564707b2cf0f64ce0888aab12a5bb227f --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/get_lda_block.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. +# This script, which will generally be called from other neural-net training +# scripts, extracts the training examples used to train the neural net (and also +# the validation examples used for diagnostics), and puts them in separate archives. + +# Begin configuration section. +cmd=run.pl + +stage=0 +splice_width=4 # meaning +- 4 frames on each side for second LDA +rand_prune=4.0 # Relates to a speedup we do for LDA. +within_class_factor=0.0001 # This affects the scaling of the transform rows... + # sorry for no explanation, you'll have to see the code. +block_size=10 +block_shift=5 + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + + +if [ $# != 4 ]; then + echo "Usage: steps/nnet2/get_lda_block.sh [opts] " + echo " e.g.: steps/nnet2/get_lda.sh data/train data/lang exp/tri3_ali exp/tri4_nnet" + echo " As well as extracting the examples, this script will also do the LDA computation," + echo " if --est-lda=true (default:true)" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --splice-width # Number of frames on each side to append for feature input" + echo " # (note: we splice processed, typically 40-dimensional frames" + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 +dir=$4 + +# Check some files. +for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +# Set some variables. +oov=`cat $lang/oov.int` +num_leaves=`gmm-info $alidir/final.mdl 2>/dev/null | awk '/number of pdfs/{print $NF}'` || exit 1; +silphonelist=`cat $lang/phones/silence.csl` || exit 1; + +nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir... +# in this dir we'll have just one job. 
+sdata=$data/split$nj +utils/split_data.sh $data $nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +cp $alidir/tree $dir +cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null` + +## Set up features. Note: these are different from the normal features +## because we have one rspecifier that has the features for the entire +## training set, not separate ones for each batch. + + +feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- |" +train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn $cmvn_opts --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |" + +feat_dim=`feat-to-dim "$train_subset_feats" -` || exit 1; + +if [ $stage -le 0 ]; then + echo "$0: Accumulating LDA statistics." + $cmd JOB=1:$nj $dir/log/lda_acc.JOB.log \ + set -o pipefail '&&' \ + ali-to-post "ark:gunzip -c $alidir/ali.JOB.gz|" ark:- \| \ + weight-silence-post 0.0 $silphonelist $alidir/final.mdl ark:- ark:- \| \ + acc-lda --rand-prune=$rand_prune $alidir/final.mdl "$feats splice-feats --left-context=$splice_width --right-context=$splice_width ark:- ark:- |" ark,s,cs:- \ + $dir/lda.JOB.acc || exit 1; +fi + +echo $feat_dim > $dir/feat_dim + +echo -n > $dir/indexes +# Get list of indexes, e.g. a file like: +# 0 1 2 3 4 5 6 7 8 9 +# 5 6 7 8 9 10 11 12 13 14 +# 10 ... + +cur_index=0 +num_blocks=0 +context_length=$[1+2*($splice_width)] + +while [ $[$cur_index+$block_size] -le $feat_dim ]; do + for n in `seq $cur_index $[cur_index+$block_size-1]`; do + echo -n `seq $n $feat_dim $[$n+($feat_dim*($context_length-1))]` '' >> $dir/indexes + done + echo >> $dir/indexes + num_blocks=$[$num_blocks+1] + cur_index=$[$cur_index+$block_shift] + if [ $[$cur_index+$block_size] -gt $feat_dim ]; then + cur_index=$[$feat_dim-$block_size]; + fi +done +echo $num_blocks >$dir/num_blocks + +lda_dim=`cat $dir/indexes | wc -w` +echo $lda_dim > $dir/lda_dim + +if [ $stage -le 1 ]; then + nnet-get-feature-transform-multi --within-class-factor=$within_class_factor $dir/indexes $dir/lda.*.acc $dir/lda.mat \ + 2>$dir/log/lda_est.log || exit 1; + rm $dir/lda.*.acc +fi + +echo "$0: Finished estimating LDA" diff --git a/tools/ASR_2ch_track/steps/nnet2/get_num_frames.sh b/tools/ASR_2ch_track/steps/nnet2/get_num_frames.sh new file mode 100644 index 0000000000000000000000000000000000000000..9c4aae5e6939fa21f010c9753807a8851dcf8550 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/get_num_frames.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# This script works out the approximate number of frames in a training directory. +# This is sometimes needed by higher-level scripts + + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# -ne 1 ]; then + ( + echo "Usage: $0 " + echo "Prints the number of frames of data in the data-dir" + ) 1>&2 +fi + +data=$1 + +if [ ! 
-f $data/utt2dur ]; then + utils/data/get_utt2dur.sh $data 1>&2 || exit 1 +fi + +frame_shift=$(utils/data/get_frame_shift.sh $data) || exit 1 + +awk -v s=$frame_shift '{n += $2} END{print int(n / s)}' <$data/utt2dur diff --git a/tools/ASR_2ch_track/steps/nnet2/get_perturbed_feats.sh b/tools/ASR_2ch_track/steps/nnet2/get_perturbed_feats.sh new file mode 100644 index 0000000000000000000000000000000000000000..9fd92e6d05648dab5292d6678d9d9b754c0d07bf --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/get_perturbed_feats.sh @@ -0,0 +1,104 @@ +#!/bin/bash + + +# begin configuration section + +cmd="run.pl" +num_copies=5 # support 3, 4 or 5 perturbed copies of the data. +stage=0 +nj=8 +cleanup=true +feature_type=fbank +# end configuration section + +set -e +. utils/parse_options.sh + +if [ $# -ne 5 ]; then + echo "Usage: $0 [options] " + echo "e.g.: $0 conf/fbank_40.conf mfcc exp/perturbed_fbank_train data/train data/train_perturbed_fbank" + echo "Supported options: " + echo "--feature-type (fbank|mfcc|plp) # Type of features we are making, default fbank" + echo "--cmd 'command-program' # Mechanism to run jobs, e.g. run.pl" + echo "--num-copies # Number of perturbed copies of the data (support 3, 4 or 5), default 5" + echo "--stage # Use for partial re-run" + echo "--cleanup (true|false) # If false, do not clean up temp files (default: true)" + echo "--nj # How many jobs to use for feature extraction (default: 8)" + exit 1; +fi + +base_config=$1 +featdir=$2 +dir=$3 # dir/log* will contain log-files +inputdata=$4 +data=$5 + +# Set pairs of (VTLN warp factor, time-warp factor) +# Aim to put these roughly in a circle centered at 1.0-1.0; the +# dynamic range of the VTLN warp factor will be 0.9 to 1.1 and +# of the time-warping factor will be 0.8 to 1.2. +if [ $num_copies -eq 5 ]; then + pairs="1.1-1.0 1.05-1.2 1.0-0.8 0.95-1.1 0.9-0.9" +elif [ $num_copies -eq 4 ]; then + pairs="1.1-1.0 1.0-0.8 1.0-1.2 0.9-1.0" +elif [ $num_copies -eq 3 ]; then + pairs="1.1-1.1 1.0-0.8 0.9-1.1" +else + echo "$0: unsupported --num-copies value: $num_copies (support 3, 4 or 5)" +fi + +for f in $base_config $inputdata/wav.scp; do + if [ ! -f $f ]; then + echo "Expected file $f to exist" + exit 1; + fi +done + +if [ "$feature_type" != "fbank" ] && [ "$feature_type" != "mfcc" ] && \ + [ "$feature_type" != "plp" ]; then + echo "$0: Invalid option --feature-type=$feature_type" + exit 1; +fi + +mkdir -p $featdir +mkdir -p $dir/conf $dir/log + +all_feature_dirs="" + +for pair in $pairs; do + vtln_warp=`echo $pair | cut -d- -f1` + time_warp=`echo $pair | cut -d- -f2` + fs=`perl -e "print ($time_warp*10);"` + conf=$dir/conf/$pair.conf + this_dir=$dir/$pair + + ( cat $base_config; echo; echo "--frame-shift=$fs"; echo "--vtln-warp=$vtln_warp" ) > $conf + + echo "Making ${feature_type} features for VTLN-warp $vtln_warp and time-warp $time_warp" + + feature_data=${data}-$pair + all_feature_dirs="$all_feature_dirs $feature_data" + + utils/copy_data_dir.sh --spk-prefix ${pair}- --utt-prefix ${pair}- $inputdata $feature_data + steps/make_${feature_type}.sh --${feature_type}-config $conf --nj "$nj" --cmd "$cmd" $feature_data $this_dir $featdir + + steps/compute_cmvn_stats.sh $feature_data $this_dir $featdir +done + +utils/combine_data.sh $data $all_feature_dirs + + +# In the combined feature directory, create a file utt2uniq which maps +# our extended utterance-ids to "unique utterances". This enables the +# script steps/nnet2/get_egs.sh to hold out data in a more proper way. 
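+# (illustrative example, generic utterance-id) for a copy created with the pair
+# "1.1-1.0", the perl one-liner below strips that prefix from the utterance-id,
+# producing a line like "1.1-1.0-uttA uttA", i.e. every perturbed copy maps
+# back to the original utterance it came from.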
+cat $data/utt2spk | \ + perl -e ' while(){ @A=split; $x=shift @A; $y=$x; + foreach $pair (@ARGV) { $y =~ s/^${pair}-// && last; } print "$x $y\n"; } ' $pairs \ + > $data/utt2uniq + +if $cleanup; then + echo "$0: Cleaning up temporary directories for ${feature_type} features." + # Note, this just removes the .scp files and so on, not the data which is located in + # $featdir and which is still needed. + rm -r $all_feature_dirs +fi diff --git a/tools/ASR_2ch_track/steps/nnet2/make_denlats.sh b/tools/ASR_2ch_track/steps/nnet2/make_denlats.sh new file mode 100644 index 0000000000000000000000000000000000000000..eea853eb7c06c4f99df903c71b547c277f8afa07 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/make_denlats.sh @@ -0,0 +1,208 @@ +#!/bin/bash +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# Create denominator lattices for MMI/MPE training. +# This version uses the neural-net models (version 2, i.e. the nnet2 code). +# Creates its output in $dir/lat.*.gz + +# Begin configuration section. +nj=4 +cmd=run.pl +sub_split=1 +beam=13.0 +lattice_beam=7.0 +acwt=0.1 +max_active=5000 +transform_dir= +max_mem=20000000 # This will stop the processes getting too large. +# This is in bytes, but not "real" bytes-- you have to multiply +# by something like 5 or 10 to get real bytes (not sure why so large) +num_threads=1 +online_ivector_dir= +parallel_opts= # ignored now +feat_type= # you can set this in order to run on top of delta features, although we don't + # normally want to do this. +# End configuration section. + + +echo "$0 $@" # Print the command line for logging + +[ -f ./path.sh ] && . ./path.sh; # source the path. +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Usage: steps/make_denlats.sh [options] " + echo " e.g.: steps/make_denlats.sh data/train data/lang exp/nnet4 exp/nnet4_denlats" + echo "Works for (delta|lda) features, and (with --transform-dir option) such features" + echo " plus transforms." + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config containing options" + echo " --nj # number of parallel jobs" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --sub-split # e.g. 40; use this for " + echo " # large databases so your jobs will be smaller and" + echo " # will (individually) finish reasonably soon." + echo " --transform-dir # directory to find fMLLR transforms." + echo " --num-threads # number of threads per decoding job" + exit 1; +fi + +data=$1 +lang=$2 +srcdir=$3 +dir=$4 + + +extra_files= +[ ! -z "$online_ivector_dir" ] && \ + extra_files="$online_ivector_dir/ivector_online.scp $online_ivector_dir/ivector_period" +for f in $data/feats.scp $lang/L.fst $srcdir/final.mdl $extra_files; do + [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1; +done + +sdata=$data/split$nj +splice_opts=`cat $srcdir/splice_opts 2>/dev/null` +thread_string= +[ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads" + +mkdir -p $dir/log +split_data.sh $data $nj || exit 1; +echo $nj > $dir/num_jobs + +oov=`cat $lang/oov.int` || exit 1; + +cp -rH $lang $dir/ + +# Compute grammar FST which corresponds to unigram decoding graph. +new_lang="$dir/"$(basename "$lang") + +# mkgraph.sh expects a whole directory "lang", so put everything in one directory... +# it gets L_disambig.fst and G.fst (among other things) from $dir/lang, and +# final.mdl from $srcdir; the output HCLG.fst goes in $dir/graph. 
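+# (note) using a unigram G.fst here is deliberate: denominator lattices for
+# MMI/MPE/sMBR training should retain plenty of competing hypotheses, and a
+# weak language model prunes far fewer of them away than a stronger one would.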
+ +echo "Compiling decoding graph in $dir/dengraph" +if [ -s $dir/dengraph/HCLG.fst ] && [ $dir/dengraph/HCLG.fst -nt $srcdir/final.mdl ]; then + echo "Graph $dir/dengraph/HCLG.fst already exists: skipping graph creation." +else + echo "Making unigram grammar FST in $new_lang" + cat $data/text | utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt | \ + awk '{for(n=2;n<=NF;n++){ printf("%s ", $n); } printf("\n"); }' | \ + utils/make_unigram_grammar.pl | fstcompile | fstarcsort --sort_type=ilabel > $new_lang/G.fst \ + || exit 1; + utils/mkgraph.sh $new_lang $srcdir $dir/dengraph || exit 1; +fi +cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null` +cp $srcdir/cmvn_opts $dir 2>/dev/null + +if [ -z "$feat_type" ]; then + if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=raw; fi +fi +echo "$0: feature type is $feat_type" + +case $feat_type in + delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";; + raw) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |" + ;; + lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |" + cp $srcdir/final.mat $dir + ;; + *) echo "Invalid feature type $feat_type" && exit 1; +esac + +if [ ! -z "$transform_dir" ]; then + echo "$0: using transforms from $transform_dir" + [ ! -s $transform_dir/num_jobs ] && \ + echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1; + nj_orig=$(cat $transform_dir/num_jobs) + + if [ $feat_type == "raw" ]; then trans=raw_trans; + else trans=trans; fi + if [ $feat_type == "lda" ] && ! cmp $transform_dir/final.mat $srcdir/final.mat; then + echo "$0: LDA transforms differ between $srcdir and $transform_dir" + exit 1; + fi + if [ ! -f $transform_dir/$trans.1 ]; then + echo "$0: expected $transform_dir/$trans.1 to exist (--transform-dir option)" + exit 1; + fi + if [ $nj -ne $nj_orig ]; then + # Copy the transforms into an archive with an index. + for n in $(seq $nj_orig); do cat $transform_dir/$trans.$n; done | \ + copy-feats ark:- ark,scp:$dir/$trans.ark,$dir/$trans.scp || exit 1; + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/$trans.scp ark:- ark:- |" + else + # number of jobs matches with alignment dir. + feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/$trans.JOB ark:- ark:- |" + fi +fi + + +if [ ! -z "$online_ivector_dir" ]; then + ivector_period=$(cat $online_ivector_dir/ivector_period) || exit 1; + # note: subsample-feats, with negative n, will repeat each feature -n times. + feats="$feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |" +fi + + +# if this job is interrupted by the user, we want any background jobs to be +# killed too. 
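+# (note) `jobs -pr` lists the PIDs of this shell's still-running background
+# jobs; trapping INT/QUIT/TERM as well as EXIT means they are also killed when
+# the script exits through one of the many 'exit 1' paths above and below.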
+cleanup() { + local pids=$(jobs -pr) + [ -n "$pids" ] && kill $pids +} +trap "cleanup" INT QUIT TERM EXIT + + +if [ $sub_split -eq 1 ]; then + $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode_den.JOB.log \ + nnet-latgen-faster$thread_string --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats" "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1; +else + # each job from 1 to $nj is split into multiple pieces (sub-split), and we aim + # to have at most two jobs running at each time. The idea is that if we have stragglers + # from one job, we can be processing another one at the same time. + rm $dir/.error 2>/dev/null + + prev_pid= + for n in `seq $[nj+1]`; do + if [ $n -gt $nj ]; then + this_pid= + elif [ -f $dir/.done.$n ] && [ $dir/.done.$n -nt $srcdir/final.mdl ]; then + echo "Not processing subset $n as already done (delete $dir/.done.$n if not)"; + this_pid= + else + sdata2=$data/split$nj/$n/split$sub_split; + if [ ! -d $sdata2 ] || [ $sdata2 -ot $sdata/$n/feats.scp ]; then + split_data.sh --per-utt $sdata/$n $sub_split || exit 1; + fi + mkdir -p $dir/log/$n + mkdir -p $dir/part + feats_subset=`echo $feats | sed "s/trans.JOB/trans.$n/g" | sed s:JOB/:$n/split$sub_split/JOB/:g` + + $cmd --num-threads $num_threads JOB=1:$sub_split $dir/log/$n/decode_den.JOB.log \ + nnet-latgen-faster$thread_string --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \ + --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \ + $dir/dengraph/HCLG.fst "$feats_subset" "ark:|gzip -c >$dir/lat.$n.JOB.gz" || touch $dir/.error & + this_pid=$! + fi + if [ ! -z "$prev_pid" ]; then # Wait for the previous job; merge the previous set of lattices. + wait $prev_pid + [ -f $dir/.error ] && echo "$0: error generating denominator lattices" && exit 1; + rm $dir/.merge_error 2>/dev/null + echo Merging archives for data subset $prev_n + for k in `seq $sub_split`; do + gunzip -c $dir/lat.$prev_n.$k.gz || touch $dir/.merge_error; + done | gzip -c > $dir/lat.$prev_n.gz || touch $dir/.merge_error; + [ -f $dir/.merge_error ] && echo "$0: Merging lattices for subset $prev_n failed (or maybe some other error)" && exit 1; + rm $dir/lat.$prev_n.*.gz + touch $dir/.done.$prev_n + fi + prev_n=$n + prev_pid=$this_pid + done +fi + + +echo "$0: done generating denominator lattices." diff --git a/tools/ASR_2ch_track/steps/nnet2/make_multisplice_configs.py b/tools/ASR_2ch_track/steps/nnet2/make_multisplice_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..cff85b7f60d62e7e9fdfc11d573eec8bc30c649e --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/make_multisplice_configs.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python +# Copyright 2014 Johns Hopkins University (Authors: Daniel Povey and Vijayaditya Peddinti). Apache 2.0. 
+ +# Creates the nnet.config and hidde_*.config scripts used in train_pnorm_multisplice.sh +# Parses the splice string to generate relevant variables for get_egs.sh, get_lda.sh and nnet/hidden.config files + +import re, argparse, sys, math, warnings + +# returns the set of frame indices required to perform the convolution +# between sequences with frame indices in x and y +def get_convolution_index_set(x, y): + z = [] + for i in xrange(len(x)): + for j in xrange(len(y)): + z.append(x[i]+y[j]) + z = list(set(z)) + z.sort() + return z + +def parse_splice_string(splice_string): + layerwise_splice_indexes = splice_string.split('layer')[1:] + print splice_string.split('layer') + contexts={} + first_right_context = 0 # default value + first_left_context = 0 # default value + nnet_frame_indexes = [0] # frame indexes required by the network + # at the initial layer (will be used in + # determining the context for get_egs.sh) + try: + for cur_splice_indexes in layerwise_splice_indexes: + layer_index, frame_indexes = cur_splice_indexes.split("/") + frame_indexes = map(lambda x: int(x), frame_indexes.split(':')) + layer_index = int(layer_index) + assert(layer_index >= 0) + if layer_index == 0: + first_left_context = min(frame_indexes) + first_right_context = max(frame_indexes) + try: + assert(frame_indexes == range(first_left_context, first_right_context+1)) + except AssertionError: + raise Exception('Currently the first splice component just accepts contiguous context.') + try: + assert((first_left_context <=0) and (first_right_context >=0)) + except AssertionError: + raise Exception("""get_lda.sh script does not support postive left-context or negative right context. + left context provided is %d and right context provided is %d.""" % (first_left_context, first_right_context)) + # convolve the current splice indices with the splice indices until last layer + nnet_frame_indexes = get_convolution_index_set(frame_indexes, nnet_frame_indexes) + cur_context = ":".join(map(lambda x: str(x), frame_indexes)) + contexts[layer_index] = cur_context + except ValueError: + raise Exception('Unknown format in splice_indexes variable: {0}'.format(params.splice_indexes)) + print nnet_frame_indexes + max_left_context = min(nnet_frame_indexes) + max_right_context = max(nnet_frame_indexes) + return [contexts, ' nnet_left_context={0};\n nnet_right_context={1}\n first_left_context={2};\n first_right_context={3}\n'.format(abs(max_left_context), abs(max_right_context), abs(first_left_context), abs(first_right_context) )] + +def create_config_files(output_dir, params): + pnorm_p = 2 + pnorm_input_dim = params.pnorm_input_dim + pnorm_output_dim = params.pnorm_output_dim + contexts, context_variables = parse_splice_string(params.splice_indexes) + var_file = open("{0}/vars".format(output_dir), "w") + var_file.write(context_variables) + var_file.close() + + try: + assert(max(contexts.keys()) < params.num_hidden_layers) + except AssertionError: + raise Exception("""Splice string provided is {2}. + Number of hidden layers {0}, is less than the number of context specifications provided. 
+ Splicing is supported only until layer {1}.""".format(params.num_hidden_layers, params.num_hidden_layers - 1, params.splice_indexes)) + + stddev=1.0/math.sqrt(pnorm_input_dim) + try : + nnet_config = ["SpliceComponent input-dim={0} context={1} const-component-dim={2}".format(params.total_input_dim, contexts[0], params.ivector_dim), + "FixedAffineComponent matrix={0}".format(params.lda_mat), + "AffineComponentPreconditionedOnline input-dim={0} output-dim={1} {2} learning-rate={3} param-stddev={4} bias-stddev={5}".format(params.lda_dim, pnorm_input_dim, params.online_preconditioning_opts, params.initial_learning_rate, stddev, params.bias_stddev), + ("PnormComponent input-dim={0} output-dim={1} p={2}".format(pnorm_input_dim, pnorm_output_dim, pnorm_p) if pnorm_input_dim != pnorm_output_dim else "RectifiedLinearComponent dim={0}".format(pnorm_input_dim)), + "NormalizeComponent dim={0}".format(pnorm_output_dim), + "AffineComponentPreconditionedOnline input-dim={0} output-dim={1} {2} learning-rate={3} param-stddev=0 bias-stddev=0".format(pnorm_output_dim, params.num_targets, params.online_preconditioning_opts, params.initial_learning_rate), + "SoftmaxComponent dim={0}".format(params.num_targets)] + + nnet_config_file = open(("{0}/nnet.config").format(output_dir), "w") + nnet_config_file.write("\n".join(nnet_config)) + nnet_config_file.close() + except KeyError: + raise Exception('A splice layer is expected to be the first layer. Provide a context for the first layer.') + + for i in xrange(1, params.num_hidden_layers): #just run till num_hidden_layers-1 since we do not add splice before the final affine transform + lines=[] + context_len = 1 + if contexts.has_key(i): + # Adding the splice component as a context is provided + lines.append("SpliceComponent input-dim=%d context=%s " % (pnorm_output_dim, contexts[i])) + context_len = len(contexts[i].split(":")) + # Add the hidden layer, which is a composition of an affine component, pnorm component and normalization component + lines.append("AffineComponentPreconditionedOnline input-dim=%d output-dim=%d %s learning-rate=%f param-stddev=%f bias-stddev=%f" + % ( pnorm_output_dim*context_len, pnorm_input_dim, params.online_preconditioning_opts, params.initial_learning_rate, stddev, params.bias_stddev)) + if pnorm_input_dim != pnorm_output_dim: + lines.append("PnormComponent input-dim=%d output-dim=%d p=%d" % (pnorm_input_dim, pnorm_output_dim, pnorm_p)) + else: + lines.append("RectifiedLinearComponent dim=%d" % (pnorm_input_dim)) + warnings.warn("Using the RectifiedLinearComponent, in place of the PnormComponent as pnorm_input_dim == pnorm_output_dim") + lines.append("NormalizeComponent dim={0}".format(pnorm_output_dim)) + out_file = open("{0}/hidden_{1}.config".format(output_dir, i), 'w') + out_file.write("\n".join(lines)) + out_file.close() + + +if __name__ == "__main__": + print " ".join(sys.argv) + parser = argparse.ArgumentParser() + parser.add_argument('--splice-indexes', type=str, help='string specifying the indexes for the splice layers throughout the network') + parser.add_argument('--total-input-dim', type=int, help='dimension of the input to the network') + parser.add_argument('--ivector-dim', type=int, help='dimension of the ivector portion of the neural network input') + parser.add_argument('--lda-mat', type=str, help='lda-matrix used after the first splice component') + parser.add_argument('--lda-dim', type=str, help='dimension of the lda output') + parser.add_argument('--pnorm-input-dim', type=int, help='dimension of input to pnorm 
layer') + parser.add_argument('--pnorm-output-dim', type=int, help='dimension of output of pnorm layer') + parser.add_argument('--online-preconditioning-opts', type=str, help='extra options for the AffineComponentPreconditionedOnline component') + parser.add_argument('--initial-learning-rate', type=float, help='') + parser.add_argument('--num-targets', type=int, help='#targets for the neural network ') + parser.add_argument('--num-hidden-layers', type=int, help='#hidden layers in the neural network ') + parser.add_argument('--bias-stddev', type=float, help='standard deviation of r.v. used for bias component initialization') + parser.add_argument("mode", type=str, help="contexts|configs") + parser.add_argument("output_dir", type=str, help="output directory to store the files") + params = parser.parse_args() + + print params + if params.mode == "contexts": + [context, context_variables] = parse_splice_string(params.splice_indexes) + var_file = open("{0}/vars".format(params.output_dir), "w") + var_file.write(context_variables) + var_file.close() + elif params.mode == "configs": + create_config_files(params.output_dir, params) + else: + raise Exception("mode has to be in the set {contexts, configs}") diff --git a/tools/ASR_2ch_track/steps/nnet2/relabel_egs.sh b/tools/ASR_2ch_track/steps/nnet2/relabel_egs.sh new file mode 100644 index 0000000000000000000000000000000000000000..9c8ca18f04bf9a7cdf2f2944b03c96d791577b12 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/relabel_egs.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +# Copyright 2014 Vimal Manohar. Apache 2.0. +# This script, which will generally be called during the neural-net training +# relabels existing examples with better labels obtained by realigning the data +# with the current nnet model + +# Begin configuration section +cmd=run.pl +stage=0 +extra_egs= # Names of additional egs files that need to relabelled + # other than egs.*.*.ark, combine.egs, train_diagnostic.egs, + # valid_diagnostic.egs +iter=final +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/nnet2/relabel_egs.sh [opts] " + echo " e.g: steps/nnet2/relabel_egs.sh exp/tri6_nnet/ali_1.5 exp/tri6_nnet/egs exp/tri6_nnet/egs_1.5" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl;utils/queue.pl ) # how to run jobs." + + exit 1; +fi + +alidir=$1 +egs_in_dir=$2 +dir=$3 + +model=$alidir/$iter.mdl + +# Check some files. + +for f in $alidir/ali.1.gz $model $egs_in_dir/egs.1.0.ark $egs_in_dir/combine.egs \ + $egs_in_dir/valid_diagnostic.egs $egs_in_dir/train_diagnostic.egs \ + $egs_in_dir/num_jobs_nnet $egs_in_dir/iters_per_epoch $egs_in_dir/samples_per_iter; do + [ ! 
-f $f ] && echo "$0: no such file $f" && exit 1; +done + +num_jobs_nnet=`cat $egs_in_dir/num_jobs_nnet` +iters_per_epoch=`cat $egs_in_dir/iters_per_epoch` +samples_per_iter_real=`cat $egs_in_dir/samples_per_iter` +num_jobs_align=`cat $alidir/num_jobs` + +mkdir -p $dir/log + +echo $num_jobs_nnet > $dir/num_jobs_nnet +echo $iters_per_epoch > $dir/iters_per_epoch +echo $samples_per_iter_real > $dir/samples_per_iter + +alignments=`eval echo $alidir/ali.{$(seq -s ',' $num_jobs_align)}.gz` + +if [ $stage -le 0 ]; then + egs_in= + egs_out= + for x in `seq 1 $num_jobs_nnet`; do + for y in `seq 0 $[$iters_per_epoch-1]`; do + utils/create_data_link.pl $dir/egs.$x.$y.ark + if [ $x -eq 1 ]; then + egs_in="$egs_in ark:$egs_in_dir/egs.JOB.$y.ark " + egs_out="$egs_out ark:$dir/egs.JOB.$y.ark " + fi + done + done + + $cmd JOB=1:$num_jobs_nnet $dir/log/relabel_egs.JOB.log \ + nnet-relabel-egs "ark:gunzip -c $alignments | ali-to-pdf $model ark:- ark:- |" \ + $egs_in $egs_out || exit 1 +fi + +if [ $stage -le 1 ]; then + egs_in= + egs_out= + for x in combine.egs valid_diagnostic.egs train_diagnostic.egs $extra_egs; do + utils/create_data_link.pl $dir/$x + egs_in="$egs_in ark:$egs_in_dir/$x" + egs_out="$egs_out ark:$dir/$x" + done + + $cmd $dir/log/relabel_egs_extra.log \ + nnet-relabel-egs "ark:gunzip -c $alignments | ali-to-pdf $model ark:- ark:- |" \ + $egs_in $egs_out || exit 1 +fi + +echo "$0: Finished relabeling training examples" diff --git a/tools/ASR_2ch_track/steps/nnet2/relabel_egs2.sh b/tools/ASR_2ch_track/steps/nnet2/relabel_egs2.sh new file mode 100644 index 0000000000000000000000000000000000000000..25ee7de6cc9f7da44cd085419f66f181b1afc0d4 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/relabel_egs2.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +# Copyright 2014 Vimal Manohar. +# 2014 Johns Hopkins University (author: Daniel Povey) +# Apache 2.0. +# +# This script, which will generally be called during the neural-net training +# relabels existing examples with better labels obtained by realigning the data +# with the current nnet model. +# This script is as relabel_egs.sh, but is adapted to work with the newer +# egs format that is written by get_egs2.sh + +# Begin configuration section +cmd=run.pl +stage=0 +extra_egs= # Names of additional egs files that need to relabelled + # other than egs.*.*.ark, combine.egs, train_diagnostic.egs, + # valid_diagnostic.egs +iter=final +parallel_opts= +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: steps/nnet2/relabel_egs.sh [opts] " + echo " e.g: steps/nnet2/relabel_egs.sh exp/tri6_nnet/ali_1.5 exp/tri6_nnet/egs exp/tri6_nnet/egs_1.5" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl;utils/queue.pl ) # how to run jobs." + + exit 1; +fi + +alidir=$1 +egs_in_dir=$2 +dir=$3 + +model=$alidir/$iter.mdl + +# Check some files. + +[ -f $egs_in_dir/iters_per_epoch ] && \ + echo "$0: this script does not work with the old egs directory format" && exit 1; + +for f in $alidir/ali.1.gz $model $egs_in_dir/egs.1.ark $egs_in_dir/combine.egs \ + $egs_in_dir/valid_diagnostic.egs $egs_in_dir/train_diagnostic.egs \ + $egs_in_dir/info/num_archives; do + [ ! 
-f $f ] && echo "$0: no such file $f" && exit 1; +done + +num_archives=$(cat $egs_in_dir/info/num_archives) || exit 1; +num_jobs_align=$(cat $alidir/num_jobs) || exit 1; + +mkdir -p $dir/log + +mkdir -p $dir/info +cp -r $egs_in_dir/info/* $dir/info + +alignments=`eval echo $alidir/ali.{$(seq -s ',' $num_jobs_align)}.gz` + +if [ $stage -le 0 ]; then + for x in $(seq $num_archives); do + # if $dir/storage exists, make the soft links that we'll + # use to distribute the data across machines + utils/create_data_link.pl $dir/egs.$x.ark + done + + $cmd $parallel_opts JOB=1:$num_archives $dir/log/relabel_egs.JOB.log \ + nnet-relabel-egs "ark:gunzip -c $alignments | ali-to-pdf $model ark:- ark:- |" \ + ark:$egs_in_dir/egs.JOB.ark ark:$dir/egs.JOB.ark || exit 1 +fi + +if [ $stage -le 1 ]; then + egs_in= + egs_out= + for x in combine.egs valid_diagnostic.egs train_diagnostic.egs $extra_egs; do + utils/create_data_link.pl $dir/$x + egs_in="$egs_in ark:$egs_in_dir/$x" + egs_out="$egs_out ark:$dir/$x" + done + + $cmd $dir/log/relabel_egs_extra.log \ + nnet-relabel-egs "ark:gunzip -c $alignments | ali-to-pdf $model ark:- ark:- |" \ + $egs_in $egs_out || exit 1 +fi + +echo "$0: Finished relabeling training examples" diff --git a/tools/ASR_2ch_track/steps/nnet2/remove_egs.sh b/tools/ASR_2ch_track/steps/nnet2/remove_egs.sh new file mode 100644 index 0000000000000000000000000000000000000000..143a5d0d86a8a3a59cbff8b69231d5a7a04bb685 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/remove_egs.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Copyright 2014 Johns Hopkins University (Author: Daniel Povey). +# Apache 2.0. + +# This script removes the examples in an egs/ directory, e.g. +# steps/nnet2/remove_egs.sh exp/nnet4b/egs/ +# We give it its own script because we need to be careful about +# things that are soft links to something in storage/ (i.e. remove the +# data that's linked to as well as the soft link), and we want to not +# delete the examples if someone has done "touch $dir/egs/.nodelete". + + +if [ $# != 1 ]; then + echo "Usage: $0 " + echo "e.g.: $0 data/nnet4b/egs/" + echo "e.g.: $0 data/nnet4b_mpe/degs/" + echo "This script is usually equivalent to 'rm /egs.* /degs.*' but it follows" + echo "soft links to /storage/; and it avoids deleting anything in the directory if" + echo "someone did 'touch /.nodelete" + exit 1; +fi + +egs=$1 + +if [ ! -d $egs ]; then + echo "$0: expected directory $egs to exist" + exit 1; +fi + +if [ -f $egs/.nodelete ]; then + echo "$0: not deleting egs in $egs since $egs/.nodelete exists" + exit 0; +fi + + + +for f in $egs/egs.*.ark $egs/degs.*.ark $egs/cegs.*.ark; do + if [ -L $f ]; then + rm $(dirname $f)/$(readlink $f) # this will print a warning if it fails. + fi + rm $f 2>/dev/null +done + + +echo "$0: Finished deleting examples in $egs" diff --git a/tools/ASR_2ch_track/steps/nnet2/retrain_fast.sh b/tools/ASR_2ch_track/steps/nnet2/retrain_fast.sh new file mode 100644 index 0000000000000000000000000000000000000000..2e7131b82d755b71a7e96395ceba9123571ee336 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/retrain_fast.sh @@ -0,0 +1,396 @@ +#!/bin/bash + +# Copyright 2014 Johns Hopkins University (Author: Daniel Povey). +# Apache 2.0. + +# retrain_fast.sh is a neural net training script that's intended to train +# a system on top of an already-trained neural network, whose activations have +# been dumped to disk. All it really is is training a neural network with +# no hidden layers, so it's a simplified version of some of the other scripts. 
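+# Rough outline of the stages below (a sketch, assuming the default
+# in-directory egs): examples are dumped with steps/nnet2/get_egs.sh
+# (--feat-type raw, zero left/right context), a net with no hidden layers is
+# initialized from $dir/nnet.config, it is trained for num_epochs plus
+# num_epochs_extra worth of iterations with a geometrically decaying learning
+# rate, the last num_iters_final models are interpolated with
+# nnet-combine-fast, and the priors are finally re-adjusted from a subset of
+# posteriors.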
+# There is no get_lda stage, as we don't support any pre-scaling of the inputs. +# It uses the AffineComponentPreconditionedOnline components, which is why +# we name it _fast. + +# Begin configuration section. +cmd=run.pl +num_epochs=4 # Number of epochs during which we reduce + # the learning rate; number of iterations is worked out from this. +num_epochs_extra=1 # Number of epochs after we stop reducing + # the learning rate. +num_iters_final=10 # Maximum number of final iterations to give to the + # optimization over the validation set (maximum) +initial_learning_rate=0.04 +final_learning_rate=0.004 + +minibatch_size=128 # by default use a smallish minibatch size for neural net + # training; this controls instability which would otherwise + # be a problem with multi-threaded update. +samples_per_iter=200000 # each iteration of training, see this many samples + # per job. This option is passed to get_egs.sh +num_jobs_nnet=16 # Number of neural net jobs to run in parallel. This option + # is passed to get_egs.sh. +get_egs_stage=0 + +shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples + # on each iter. You could set it to 0 or to a large value for complete + # randomization, but this would both consume memory and cause spikes in + # disk I/O. Smaller is easier on disk and memory but less random. It's + # not a huge deal though, as samples are anyway randomized right at the start. + # (the point of this is to get data in different minibatches on different iterations, + # since in the preconditioning method, 2 samples in the same minibatch can + # affect each others' gradients. + +stage=-5 + +io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time. These don't + +alpha=4.0 # relates to preconditioning. +update_period=4 # relates to online preconditioning: says how often we update the subspace. +num_samples_history=2000 # relates to online preconditioning +max_change_per_sample=0.075 +precondition_rank_in=20 # relates to online preconditioning +precondition_rank_out=80 # relates to online preconditioning + +# this relates to perturbed training. +min_target_objf_change=0.1 +target_multiplier=0 # Set this to e.g. 1.0 to enable perturbed training. + +mix_up=0 # Number of components to mix up to (should be > #tree leaves, if + # specified.) +num_threads=16 +parallel_opts="--num-threads 16 --mem 1G" # by default we use 16 threads; this lets the queue know. + # note: parallel_opts doesn't automatically get adjusted if you adjust num-threads. +combine_num_threads=8 +combine_parallel_opts="--num-threads 8" # queue options for the "combine" stage. +cleanup=true +egs_dir= +egs_opts= +prior_subset_size=10000 # 10k samples per job, for computing priors. Should be + # more than enough. +# End configuration section. + + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Usage: $0 [opts] " + echo " e.g.: $0 data/train data/lang exp/tri3_ali exp/tri4_nnet" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." 
+ echo " --num-epochs <#epochs|4 > # Number of epochs of main training" + echo " # while reducing learning rate (determines #iterations, together" + echo " # with --samples-per-iter and --num-jobs-nnet)" + echo " --num-epochs-extra <#epochs-extra|1> # Number of extra epochs of training" + echo " # after learning rate fully reduced" + echo " --initial-learning-rate # Learning rate at start of training, e.g. 0.02 for small" + echo " # data, 0.01 for large data" + echo " --final-learning-rate # Learning rate at end of training, e.g. 0.004 for small" + echo " # data, 0.001 for large data" + echo " --mix-up <#pseudo-gaussians|0> # Can be used to have multiple targets in final output layer," + echo " # per context-dependent state. Try a number several times #states." + echo " --num-jobs-nnet # Number of parallel jobs to use for main neural net" + echo " # training (will affect results as well as speed; try 8, 16)" + echo " # Note: if you increase this, you may want to also increase" + echo " # the learning rate." + echo " --num-threads # Number of parallel threads per job (will affect results" + echo " # as well as speed; may interact with batch size; if you increase" + echo " # this, you may want to decrease the batch size." + echo " --parallel-opts # extra options to pass to e.g. queue.pl for processes that" + echo " # use multiple threads... " + echo " --io-opts # Options given to e.g. queue.pl for jobs that do a lot of I/O." + echo " --minibatch-size # Size of minibatch to process (note: product with --num-threads" + echo " # should not get too large, e.g. >2k)." + echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per" + echo " # process." + echo " --num-iters-final <#iters|10> # Number of final iterations to give to nnet-combine-fast to " + echo " # interpolate parameters (the weights are learned with a validation set)" + echo " --num-utts-subset <#utts|300> # Number of utterances in subsets used for validation and diagnostics" + echo " # (the validation subset is held out from training)" + echo " --num-frames-diagnostic <#frames|4000> # Number of frames used in computing (train,valid) diagnostics" + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + + + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 +dir=$4 + +# Check some files. +for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +# Set some variables. +num_leaves=`tree-info $alidir/tree 2>/dev/null | grep num-pdfs | awk '{print $2}'` || exit 1 +[ -z $num_leaves ] && echo "\$num_leaves is unset" && exit 1 +[ "$num_leaves" -eq "0" ] && echo "\$num_leaves is 0" && exit 1 + +nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir... +# in this dir we'll have just one job. +sdata=$data/split$nj +utils/split_data.sh $data $nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +cp $alidir/tree $dir + + +if [ $stage -le -3 ] && [ -z "$egs_dir" ]; then + echo "$0: calling get_egs.sh" + steps/nnet2/get_egs.sh --feat-type raw --cmvn-opts "--norm-means=false --norm-vars=false" \ + --samples-per-iter $samples_per_iter --left-context 0 --right-context 0 \ + --num-jobs-nnet $num_jobs_nnet --stage $get_egs_stage \ + --cmd "$cmd" $egs_opts --io-opts "$io_opts" \ + $data $lang $alidir $dir || exit 1; +fi + +[ -z $egs_dir ] && egs_dir=$dir/egs + +iters_per_epoch=`cat $egs_dir/iters_per_epoch` || exit 1; +! 
[ $num_jobs_nnet -eq `cat $egs_dir/num_jobs_nnet` ] && \ + echo "$0: Warning: using --num-jobs-nnet=`cat $egs_dir/num_jobs_nnet` from $egs_dir" +num_jobs_nnet=`cat $egs_dir/num_jobs_nnet` || exit 1; + + +if [ $stage -le -2 ]; then + echo "$0: initializing neural net"; + + feat_dim=$(feat-to-dim scp:$data/feats.scp -) || exit 1; + + online_preconditioning_opts="alpha=$alpha num-samples-history=$num_samples_history update-period=$update_period rank-in=$precondition_rank_in rank-out=$precondition_rank_out max-change-per-sample=$max_change_per_sample" + + cat >$dir/nnet.config <= $n ? $f : $i*exp($x*log($f/$i)/$n));' $[$x+1] $num_iters_reduce $initial_learning_rate $final_learning_rate`; + + if $do_average; then + $cmd $dir/log/average.$x.log \ + nnet-am-average $nnets_list - \| \ + nnet-am-copy --learning-rate=$learning_rate - $dir/$[$x+1].mdl || exit 1; + else + n=$(perl -e '($nj,$pat)=@ARGV; $best_n=1; $best_logprob=-1.0e+10; for ($n=1;$n<=$nj;$n++) { + $fn = sprintf($pat,$n); open(F, "<$fn") || die "Error opening log file $fn"; + undef $logprob; while () { if (m/log-prob-per-frame=(\S+)/) { $logprob=$1; } } + close(F); if (defined $logprob && $logprob > $best_logprob) { $best_logprob=$logprob; + $best_n=$n; } } print "$best_n\n"; ' $num_jobs_nnet $dir/log/train.$x.%d.log) || exit 1; + [ -z "$n" ] && echo "Error getting best model" && exit 1; + $cmd $dir/log/select.$x.log \ + nnet-am-copy --learning-rate=$learning_rate $dir/$[$x+1].$n.mdl $dir/$[$x+1].mdl || exit 1; + fi + + if [ "$mix_up" -gt 0 ] && [ $x -eq $mix_up_iter ]; then + # mix up. + echo Mixing up from $num_leaves to $mix_up components + $cmd $dir/log/mix_up.$x.log \ + nnet-am-mixup --min-count=10 --num-mixtures=$mix_up \ + $dir/$[$x+1].mdl $dir/$[$x+1].mdl || exit 1; + fi + rm $nnets_list + [ ! -f $dir/$[$x+1].mdl ] && exit 1; + if [ -f $dir/$[$x-1].mdl ] && $cleanup && \ + [ $[($x-1)%100] -ne 0 ] && [ $[$x-1] -le $[$num_iters-$num_iters_final] ]; then + rm $dir/$[$x-1].mdl + fi + fi + x=$[$x+1] +done + +# Now do combination. +# At the end, final.mdl will be a combination of the last e.g. 10 models. +nnets_list=() +if [ $num_iters_final -gt $num_iters_extra ]; then + echo "Setting num_iters_final=$num_iters_extra" +fi +start=$[$num_iters-$num_iters_final+1] +for x in `seq $start $num_iters`; do + idx=$[$x-$start] + if [ $x -gt $mix_up_iter ]; then + nnets_list[$idx]=$dir/$x.mdl # "nnet-am-copy --remove-dropout=true $dir/$x.mdl - |" + fi +done + +if [ $stage -le $num_iters ]; then + echo "Doing final combination to produce final.mdl" + # Below, use --use-gpu=no to disable nnet-combine-fast from using a GPU, as + # if there are many models it can give out-of-memory error; set num-threads to 8 + # to speed it up (this isn't ideal...) + num_egs=`nnet-copy-egs ark:$egs_dir/combine.egs ark:/dev/null 2>&1 | tail -n 1 | awk '{print $NF}'` + mb=$[($num_egs+$combine_num_threads-1)/$combine_num_threads] + [ $mb -gt 512 ] && mb=512 + # Setting --initial-model to a large value makes it initialize the combination + # with the average of all the models. It's important not to start with a + # single model, or, due to the invariance to scaling that these nonlinearities + # give us, we get zero diagonal entries in the fisher matrix that + # nnet-combine-fast uses for scaling, which after flooring and inversion, has + # the effect that the initial model chosen gets much higher learning rates + # than the others. This prevents the optimization from working well. 
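+  # As a hypothetical worked example of the minibatch size computed above:
+  # with 4000 examples in combine.egs and combine_num_threads=8, mb comes out
+  # as (4000+8-1)/8 = 500 in integer arithmetic, under the 512 cap.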
+ $cmd $combine_parallel_opts $dir/log/combine.log \ + nnet-combine-fast --initial-model=100000 --num-lbfgs-iters=40 --use-gpu=no \ + --num-threads=$combine_num_threads \ + --verbose=3 --minibatch-size=$mb "${nnets_list[@]}" ark:$egs_dir/combine.egs \ + $dir/final.mdl || exit 1; + + # Compute the probability of the final, combined model with + # the same subset we used for the previous compute_probs, as the + # different subsets will lead to different probs. + $cmd $dir/log/compute_prob_valid.final.log \ + nnet-compute-prob $dir/final.mdl ark:$egs_dir/valid_diagnostic.egs & + $cmd $dir/log/compute_prob_train.final.log \ + nnet-compute-prob $dir/final.mdl ark:$egs_dir/train_diagnostic.egs & +fi + +if [ $stage -le $[$num_iters+1] ]; then + echo "Getting average posterior for purposes of adjusting the priors." + # Note: this just uses CPUs, using a smallish subset of data. + rm $dir/post.*.vec 2>/dev/null + $cmd JOB=1:$num_jobs_nnet $dir/log/get_post.JOB.log \ + nnet-subset-egs --n=$prior_subset_size ark:$egs_dir/egs.JOB.0.ark ark:- \| \ + nnet-compute-from-egs "nnet-to-raw-nnet $dir/final.mdl -|" ark:- ark:- \| \ + matrix-sum-rows ark:- ark:- \| vector-sum ark:- $dir/post.JOB.vec || exit 1; + + sleep 3; # make sure there is time for $dir/post.*.vec to appear. + + $cmd $dir/log/vector_sum.log \ + vector-sum $dir/post.*.vec $dir/post.vec || exit 1; + + rm $dir/post.*.vec; + + echo "Re-adjusting priors based on computed posteriors" + $cmd $dir/log/adjust_priors.log \ + nnet-adjust-priors $dir/final.mdl $dir/post.vec $dir/final.mdl || exit 1; +fi + + +sleep 2 + +echo Done + +if $cleanup; then + echo Cleaning up data + if [ $egs_dir == "$dir/egs" ]; then + steps/nnet2/remove_egs.sh $dir/egs + fi +fi diff --git a/tools/ASR_2ch_track/steps/nnet2/retrain_simple2.sh b/tools/ASR_2ch_track/steps/nnet2/retrain_simple2.sh new file mode 100644 index 0000000000000000000000000000000000000000..9e018015075ec4ef3e1f66d73112fda4b4a9b17a --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/retrain_simple2.sh @@ -0,0 +1,516 @@ +#!/bin/bash + +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey). +# 2013 Xiaohui Zhang +# 2013 Guoguo Chen +# 2014 Vimal Manohar +# Apache 2.0. + + +# retrain_simple2.sh is a script for training a single-layer (softmax-only) +# neural network on top of activations dumped from an existing network; we'll +# later combine the networks to a single network. + +# It differs from train_pnorm_simple2.sh in the same way that retrain_fast.sh +# differs from train_pnorm_fast.sh. + + +# Begin configuration section. +cmd=run.pl +num_epochs=5 # Number of epochs of training; +initial_learning_rate=0.04 +final_learning_rate=0.004 +minibatch_size=128 # by default use a smallish minibatch size for neural net + # training; this controls instability which would otherwise + # be a problem with multi-threaded update. +samples_per_iter=400000 # each iteration of training, see this many samples + # per job. This option is passed to get_egs.sh +num_jobs_nnet=4 # Number of neural net jobs to run in parallel. This option + # is passed to get_egs.sh. +prior_subset_size=10000 # 10k samples per job, for computing priors. Should be + # more than enough. +num_jobs_compute_prior=10 # these are single-threaded, run on CPU. +get_egs_stage=0 + + +max_models_combine=20 # The "max_models_combine" is the maximum number of models we give + # to the final 'combine' stage, but these models will themselves be averages of + # iteration-number ranges. 
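+                       # For example (hypothetical numbers): if 30 models fall in the
+                       # combination range, nnet-combine-fast is given 20 arguments,
+                       # each an nnet-am-average over 1 or 2 consecutive models,
+                       # rather than all 30 models individually.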
+ +shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples + # on each iter. You could set it to 0 or to a large value for complete + # randomization, but this would both consume memory and cause spikes in + # disk I/O. Smaller is easier on disk and memory but less random. It's + # not a huge deal though, as samples are anyway randomized right at the start. + # (the point of this is to get data in different minibatches on different iterations, + # since in the preconditioning method, 2 samples in the same minibatch can + # affect each others' gradients. + +stage=-4 + + +alpha=4.0 # relates to online preconditioning. +update_period=4 # relates to online preconditioning: says how often we update the subspace. +num_samples_history=2000 # relates to online preconditioning +precondition_rank_in=20 # relates to online preconditioning +precondition_rank_out=80 # relates to online preconditioning +max_change_per_sample=0.075 + +mix_up=0 # Number of components to mix up to (should be > #tree leaves, if + # specified.) +num_threads=16 +parallel_opts="--num-threads 16 --mem 1G" + # by default we use 16 threads; this lets the queue know. + # note: parallel_opts doesn't automatically get adjusted if you adjust num-threads. +combine_num_threads=8 +combine_parallel_opts="--num-threads 8" # queue options for the "combine" stage. +cleanup=true +egs_dir= +egs_opts= +io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time. +align_cmd= # The cmd that is passed to steps/nnet2/align.sh +align_use_gpu= # Passed to use_gpu in steps/nnet2/align.sh [yes/no] +realign_epochs= # List of epochs, the beginning of which realignment is done +num_jobs_align=30 # Number of jobs for realignment +# End configuration section. + + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 4 ]; then + echo "Usage: $0 [opts] " + echo " e.g.: $0 data/train data/lang exp/tri3_ali exp/tri4_nnet" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." + echo " --num-epochs <#epochs|5> # Number of epochs of training" + echo " --initial-learning-rate # Learning rate at start of training, e.g. 0.02 for small" + echo " # data, 0.01 for large data" + echo " --final-learning-rate # Learning rate at end of training, e.g. 0.004 for small" + echo " # data, 0.001 for large data" + echo " --mix-up <#pseudo-gaussians|0> # Can be used to have multiple targets in final output layer," + echo " # per context-dependent state. Try a number several times #states." + echo " --num-jobs-nnet # Number of parallel jobs to use for main neural net" + echo " # training (will affect results as well as speed; try 8, 16)" + echo " # Note: if you increase this, you may want to also increase" + echo " # the learning rate." + echo " --num-threads # Number of parallel threads per job (will affect results" + echo " # as well as speed; may interact with batch size; if you increase" + echo " # this, you may want to decrease the batch size." + echo " --parallel-opts # extra options to pass to e.g. queue.pl for processes that" + echo " # use multiple threads... " + echo " --io-opts # Options given to e.g. queue.pl for jobs that do a lot of I/O." + echo " --minibatch-size # Size of minibatch to process (note: product with --num-threads" + echo " # should not get too large, e.g. >2k)." 
+ echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per" + echo " # process." + echo " --realign-epochs # A list of space-separated epoch indices the beginning of which" + echo " # realignment is to be done" + echo " --align-cmd (utils/run.pl|utils/queue.pl ) # passed to align.sh" + echo " --align-use-gpu (yes/no) # specify is gpu is to be used for realignment" + echo " --num-jobs-align <#njobs|30> # Number of jobs to perform realignment" + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + + + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 +dir=$4 + +if [ ! -z "$realign_epochs" ]; then + [ -z "$align_cmd" ] && echo "$0: realign_epochs specified but align_cmd not specified" && exit 1 + [ -z "$align_use_gpu" ] && echo "$0: realign_epochs specified but align_use_gpu not specified" && exit 1 +fi + +# Check some files. +for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +# Set some variables. +num_leaves=`tree-info $alidir/tree 2>/dev/null | grep num-pdfs | awk '{print $2}'` || exit 1 +[ -z $num_leaves ] && echo "\$num_leaves is unset" && exit 1 +[ "$num_leaves" -eq "0" ] && echo "\$num_leaves is 0" && exit 1 + +nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir... +# in this dir we'll have just one job. +sdata=$data/split$nj +utils/split_data.sh $data $nj + +mkdir -p $dir/log +echo $nj > $dir/num_jobs +cp $alidir/tree $dir + + + +if [ $stage -le -3 ] && [ -z "$egs_dir" ]; then + echo "$0: calling get_egs2.sh" + # set --frames-per-eg to 1 because there is no context, so there + # is no advantage in having multiple frames per eg. + steps/nnet2/get_egs2.sh --feat-type raw \ + --frames-per-eg 1 --left-context 0 --right-context 0 \ + --cmvn-opts "--norm-means=false --norm-vars=false" \ + --io-opts "$io_opts" \ + --samples-per-iter $samples_per_iter --stage $get_egs_stage \ + --cmd "$cmd" $egs_opts $data $alidir $dir/egs || exit 1; +fi + +if [ -z $egs_dir ]; then + egs_dir=$dir/egs +fi + +frames_per_eg=$(cat $egs_dir/info/frames_per_eg) || { echo "error: no such file $egs_dir/info/frames_per_eg"; exit 1; } +num_archives=$(cat $egs_dir/info/num_archives) || { echo "error: no such file $egs_dir/info/frames_per_eg"; exit 1; } + +# num_archives_expanded considers each separate label-position from +# 0..frames_per_eg-1 to be a separate archive. 
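+# With --frames-per-eg 1, as requested from get_egs2.sh above, this is just
+# num_archives; in general (hypothetical numbers) 10 archives with 8 frames
+# per eg would give 80 expanded archives, and hence allow up to 80 parallel
+# training jobs.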
+num_archives_expanded=$[$num_archives*$frames_per_eg] + +if [ $num_jobs_nnet -gt $num_archives_expanded ]; then + echo "$0: --num-jobs-nnet cannot exceed num-archives*frames-per-eg which is $num_archives_expanded" + echo "$0: setting --num-jobs-nnet to $num_archives_expanded" + num_jobs_nnet=$num_archives_expanded +fi + +if [ $stage -le -2 ]; then + echo "$0: initializing neural net"; + feat_dim=$(feat-to-dim scp:$data/feats.scp -) || exit 1; + + online_preconditioning_opts="alpha=$alpha num-samples-history=$num_samples_history update-period=$update_period rank-in=$precondition_rank_in rank-out=$precondition_rank_out max-change-per-sample=$max_change_per_sample" + + stddev=`perl -e "print 1.0/sqrt($pnorm_input_dim);"` + cat >$dir/nnet.config </dev/null + $cmd JOB=1:$num_jobs_compute_prior $dir/log/get_post.$x.JOB.log \ + nnet-copy-egs --srand=JOB --frame=random ark:$prev_egs_dir/egs.1.ark ark:- \| \ + nnet-subset-egs --srand=JOB --n=$prior_subset_size ark:- ark:- \| \ + nnet-compute-from-egs "nnet-to-raw-nnet $dir/$x.mdl -|" ark:- ark:- \| \ + matrix-sum-rows ark:- ark:- \| vector-sum ark:- $dir/post.$x.JOB.vec || exit 1; + + sleep 3; # make sure there is time for $dir/post.$x.*.vec to appear. + + $cmd $dir/log/vector_sum.$x.log \ + vector-sum $dir/post.$x.*.vec $dir/post.$x.vec || exit 1; + rm $dir/post.$x.*.vec; + + echo "Re-adjusting priors based on computed posteriors" + $cmd $dir/log/adjust_priors.$x.log \ + nnet-adjust-priors $dir/$x.mdl $dir/post.$x.vec $dir/$x.mdl || exit 1; + + sleep 2 + + steps/nnet2/align.sh --nj $num_jobs_align --cmd "$align_cmd" --use-gpu $align_use_gpu \ + --transform-dir "$transform_dir" --online-ivector-dir "$online_ivector_dir" \ + --iter $x $data $lang $dir $dir/ali_$epoch || exit 1 + + steps/nnet2/relabel_egs2.sh --cmd "$cmd" --iter $x $dir/ali_$epoch \ + $prev_egs_dir $cur_egs_dir || exit 1 + + if $cleanup && [[ $prev_egs_dir =~ $dir/egs* ]]; then + steps/nnet2/remove_egs.sh $prev_egs_dir + fi + fi + + # Set off jobs doing some diagnostics, in the background. + # Use the egs dir from the previous iteration for the diagnostics + $cmd $dir/log/compute_prob_valid.$x.log \ + nnet-compute-prob $dir/$x.mdl ark:$cur_egs_dir/valid_diagnostic.egs & + $cmd $dir/log/compute_prob_train.$x.log \ + nnet-compute-prob $dir/$x.mdl ark:$cur_egs_dir/train_diagnostic.egs & + if [ $x -gt 0 ] && [ ! -f $dir/log/mix_up.$[$x-1].log ]; then + $cmd $dir/log/progress.$x.log \ + nnet-show-progress --use-gpu=no $dir/$[$x-1].mdl $dir/$x.mdl \ + ark:$cur_egs_dir/train_diagnostic.egs '&&' \ + nnet-am-info $dir/$x.mdl & + fi + + echo "Training neural net (pass $x)" + + mdl=$dir/$x.mdl + + if [ $x -eq 0 ]; then + # on iteration zero, use a smaller minibatch + # size and just one job: the model-averaging doesn't seem to be helpful + # when the model is changing too fast (i.e. it worsens the objective + # function), and the smaller minibatch size will help to keep + # the update stable. + this_minibatch_size=$[$minibatch_size/2]; + do_average=false + else + this_minibatch_size=$minibatch_size + do_average=true + fi + + rm $dir/.error 2>/dev/null + + + ( # this sub-shell is so that when we "wait" below, + # we only wait for the training jobs that we just spawned, + # not the diagnostic jobs that we spawned above. + + # We can't easily use a single parallel SGE job to do the main training, + # because the computation of which archive and which --frame option + # to use for each job is a little complex, so we spawn each one separately. 
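+    # A hypothetical worked example of the index computation in the loop
+    # below: with num_jobs_nnet=4, num_archives=10 and frames_per_eg=1, on
+    # iteration x=3 job n=2 gets k = 3*4 + 2 - 1 = 13, so archive =
+    # (13 % 10) + 1 = 4 and frame = (13 / 10) % 1 = 0.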
+ for n in $(seq $num_jobs_nnet); do + k=$[$x*$num_jobs_nnet + $n - 1]; # k is a zero-based index that we'll derive + # the other indexes from. + archive=$[($k%$num_archives)+1]; # work out the 1-based archive index. + frame=$[(($k/$num_archives)%$frames_per_eg)]; # work out the 0-based frame + # index; this increases more slowly than the archive index because the + # same archive with different frame indexes will give similar gradients, + # so we want to separate them in time. + + $cmd $parallel_opts $dir/log/train.$x.$n.log \ + nnet-train$parallel_suffix $parallel_train_opts \ + --minibatch-size=$this_minibatch_size --srand=$x $dir/$x.mdl \ + "ark,bg:nnet-copy-egs --frame=$frame ark:$cur_egs_dir/egs.$archive.ark ark:-|nnet-shuffle-egs --buffer-size=$shuffle_buffer_size --srand=$x ark:- ark:-|" \ + $dir/$[$x+1].$n.mdl || touch $dir/.error & + done + wait + ) + # the error message below is not that informative, but $cmd will + # have printed a more specific one. + [ -f $dir/.error ] && echo "$0: error on iteration $x of training" && exit 1; + + nnets_list= + for n in `seq 1 $num_jobs_nnet`; do + nnets_list="$nnets_list $dir/$[$x+1].$n.mdl" + done + + learning_rate=`perl -e '($x,$n,$i,$f)=@ARGV; print ($x >= $n ? $f : $i*exp($x*log($f/$i)/$n));' $[$x+1] $num_iters $initial_learning_rate $final_learning_rate`; + + if $do_average; then + # average the output of the different jobs. + $cmd $dir/log/average.$x.log \ + nnet-am-average $nnets_list - \| \ + nnet-am-copy --learning-rate=$learning_rate - $dir/$[$x+1].mdl || exit 1; + else + # choose the best from the different jobs. + n=$(perl -e '($nj,$pat)=@ARGV; $best_n=1; $best_logprob=-1.0e+10; for ($n=1;$n<=$nj;$n++) { + $fn = sprintf($pat,$n); open(F, "<$fn") || die "Error opening log file $fn"; + undef $logprob; while () { if (m/log-prob-per-frame=(\S+)/) { $logprob=$1; } } + close(F); if (defined $logprob && $logprob > $best_logprob) { $best_logprob=$logprob; + $best_n=$n; } } print "$best_n\n"; ' $num_jobs_nnet $dir/log/train.$x.%d.log) || exit 1; + [ -z "$n" ] && echo "Error getting best model" && exit 1; + $cmd $dir/log/select.$x.log \ + nnet-am-copy --learning-rate=$learning_rate $dir/$[$x+1].$n.mdl $dir/$[$x+1].mdl || exit 1; + fi + + if [ "$mix_up" -gt 0 ] && [ $x -eq $mix_up_iter ]; then + # mix up. + echo Mixing up from $num_leaves to $mix_up components + $cmd $dir/log/mix_up.$x.log \ + nnet-am-mixup --min-count=10 --num-mixtures=$mix_up \ + $dir/$[$x+1].mdl $dir/$[$x+1].mdl || exit 1; + fi + rm $nnets_list + [ ! -f $dir/$[$x+1].mdl ] && exit 1; + if [ -f $dir/$[$x-1].mdl ] && $cleanup && \ + [ $[($x-1)%100] -ne 0 ] && [ $[$x-1] -lt $first_model_combine ]; then + rm $dir/$[$x-1].mdl + fi + fi + x=$[$x+1] +done + + +if [ $stage -le $num_iters ]; then + echo "Doing final combination to produce final.mdl" + + # Now do combination. + nnets_list=() + # the if..else..fi statement below sets 'nnets_list'. + if [ $max_models_combine -lt $num_models_combine ]; then + # The number of models to combine is too large, e.g. > 20. In this case, + # each argument to nnet-combine-fast will be an average of multiple models. + cur_offset=0 # current offset from first_model_combine. + for n in $(seq $max_models_combine); do + next_offset=$[($n*$num_models_combine)/$max_models_combine] + sub_list="" + for o in $(seq $cur_offset $[$next_offset-1]); do + iter=$[$first_model_combine+$o] + mdl=$dir/$iter.mdl + [ ! 
-f $mdl ] && echo "Expected $mdl to exist" && exit 1; + sub_list="$sub_list $mdl" + done + nnets_list[$[$n-1]]="nnet-am-average $sub_list - |" + cur_offset=$next_offset + done + else + nnets_list= + for n in $(seq 0 $[num_models_combine-1]); do + iter=$[$first_model_combine+$n] + mdl=$dir/$iter.mdl + [ ! -f $mdl ] && echo "Expected $mdl to exist" && exit 1; + nnets_list[$n]=$mdl + done + fi + + + # Below, use --use-gpu=no to disable nnet-combine-fast from using a GPU, as + # if there are many models it can give out-of-memory error; set num-threads to 8 + # to speed it up (this isn't ideal...) + num_egs=`nnet-copy-egs ark:$cur_egs_dir/combine.egs ark:/dev/null 2>&1 | tail -n 1 | awk '{print $NF}'` + mb=$[($num_egs+$combine_num_threads-1)/$combine_num_threads] + [ $mb -gt 512 ] && mb=512 + # Setting --initial-model to a large value makes it initialize the combination + # with the average of all the models. It's important not to start with a + # single model, or, due to the invariance to scaling that these nonlinearities + # give us, we get zero diagonal entries in the fisher matrix that + # nnet-combine-fast uses for scaling, which after flooring and inversion, has + # the effect that the initial model chosen gets much higher learning rates + # than the others. This prevents the optimization from working well. + $cmd $combine_parallel_opts $dir/log/combine.log \ + nnet-combine-fast --initial-model=100000 --num-lbfgs-iters=40 --use-gpu=no \ + --num-threads=$combine_num_threads \ + --verbose=3 --minibatch-size=$mb "${nnets_list[@]}" ark:$cur_egs_dir/combine.egs \ + $dir/final.mdl || exit 1; + + # Compute the probability of the final, combined model with + # the same subset we used for the previous compute_probs, as the + # different subsets will lead to different probs. + $cmd $dir/log/compute_prob_valid.final.log \ + nnet-compute-prob $dir/final.mdl ark:$cur_egs_dir/valid_diagnostic.egs & + $cmd $dir/log/compute_prob_train.final.log \ + nnet-compute-prob $dir/final.mdl ark:$cur_egs_dir/train_diagnostic.egs & +fi + +if [ $stage -le $[$num_iters+1] ]; then + echo "Getting average posterior for purposes of adjusting the priors." + # Note: this just uses CPUs, using a smallish subset of data. + rm $dir/post.$x.*.vec 2>/dev/null + $cmd JOB=1:$num_jobs_compute_prior $dir/log/get_post.$x.JOB.log \ + nnet-copy-egs --frame=random --srand=JOB ark:$cur_egs_dir/egs.1.ark ark:- \| \ + nnet-subset-egs --srand=JOB --n=$prior_subset_size ark:- ark:- \| \ + nnet-compute-from-egs "nnet-to-raw-nnet $dir/final.mdl -|" ark:- ark:- \| \ + matrix-sum-rows ark:- ark:- \| vector-sum ark:- $dir/post.$x.JOB.vec || exit 1; + + sleep 3; # make sure there is time for $dir/post.$x.*.vec to appear. + + $cmd $dir/log/vector_sum.$x.log \ + vector-sum $dir/post.$x.*.vec $dir/post.$x.vec || exit 1; + + rm $dir/post.$x.*.vec; + + echo "Re-adjusting priors based on computed posteriors" + $cmd $dir/log/adjust_priors.final.log \ + nnet-adjust-priors $dir/final.mdl $dir/post.$x.vec $dir/final.mdl || exit 1; +fi + + +if [ ! -f $dir/final.mdl ]; then + echo "$0: $dir/final.mdl does not exist." + # we don't want to clean up if the training didn't succeed. 
+ exit 1; +fi + +sleep 2 + +echo Done + +if $cleanup; then + echo Cleaning up data + if [[ $cur_egs_dir =~ $dir/egs* ]]; then + steps/nnet2/remove_egs.sh $cur_egs_dir + fi + + echo Removing most of the models + for x in `seq 0 $num_iters`; do + if [ $[$x%100] -ne 0 ] && [ $x -ne $num_iters ] && [ -f $dir/$x.mdl ]; then + # delete all but every 100th model; don't delete the ones which combine to form the final model. + rm $dir/$x.mdl + fi + done +fi diff --git a/tools/ASR_2ch_track/steps/nnet2/retrain_tanh.sh b/tools/ASR_2ch_track/steps/nnet2/retrain_tanh.sh new file mode 100644 index 0000000000000000000000000000000000000000..f67cb1359a36cf13162befd860c34e042db89e8c --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/retrain_tanh.sh @@ -0,0 +1,216 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. + +# This script is for training networks with tanh nonlinearities; it starts with +# a given model and supports increasing the hidden-layer dimension. It is +# otherwise similar to train_tanh.sh + +# Begin configuration section. +cmd=run.pl +num_epochs=15 # Number of epochs during which we reduce + # the learning rate; number of iteration is worked out from this. +num_epochs_extra=5 # Number of epochs after we stop reducing + # the learning rate. +num_iters_final=20 # Maximum number of final iterations to give to the + # optimization over the validation set. +initial_learning_rate=0.04 +final_learning_rate=0.004 +softmax_learning_rate_factor=0.5 # Train this layer half as fast as the other layers. + + +minibatch_size=128 # by default use a smallish minibatch size for neural net + # training; this controls instability which would otherwise + # be a problem with multi-threaded update. Note: it also + # interacts with the "preconditioned" update which generally + # works better with larger minibatch size, so it's not + # completely cost free. + +shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples + # on each iter. You could set it to 0 or to a large value for complete + # randomization, but this would both consume memory and cause spikes in + # disk I/O. Smaller is easier on disk and memory but less random. It's + # not a huge deal though, as samples are anyway randomized right at the start. + + +stage=-5 + + +mix_up=0 # Number of components to mix up to (should be > #tree leaves, if + # specified.) Will do this at the start. +widen=0 # If specified, it will increase the hidden-layer dimension + # to this value. Will do this at the start. +bias_stddev=0.5 # will be used for widen + +num_threads=16 +parallel_opts="--num-threads $num_threads" # using a smallish #threads by default, out of stability concerns. + # note: parallel_opts doesn't automatically get adjusted if you adjust num-threads. +cleanup=true +# End configuration section. + + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Usage: $0 [opts] " + echo " e.g.: $0 --widen 1024 exp/tri4_nnet/egs exp/tri4_nnet exp/tri5_nnet" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." 
+ echo " --num-epochs <#epochs|15> # Number of epochs of main training" + echo " # while reducing learning rate (determines #iterations, together" + echo " # with --samples-per-iter and --num-jobs-nnet)" + echo " --num-epochs-extra <#epochs-extra|5> # Number of extra epochs of training" + echo " # after learning rate fully reduced" + echo " --initial-learning-rate # Learning rate at start of training, e.g. 0.02 for small" + echo " # data, 0.01 for large data" + echo " --final-learning-rate # Learning rate at end of training, e.g. 0.004 for small" + echo " # data, 0.001 for large data" + echo " --mix-up <#pseudo-gaussians|0> # Can be used to have multiple targets in final output layer," + echo " # per context-dependent state. Try a number several times #states." + echo " --num-threads # Number of parallel threads per job (will affect results" + echo " # as well as speed; may interact with batch size; if you increase" + echo " # this, you may want to decrease the batch size." + echo " --parallel-opts # extra options to pass to e.g. queue.pl for processes that" + echo " # use multiple threads." + echo " --minibatch-size # Size of minibatch to process (note: product with --num-threads" + echo " # should not get too large, e.g. >2k)." + echo " --num-iters-final <#iters|10> # Number of final iterations to give to nnet-combine-fast to " + echo " # interpolate parameters (the weights are learned with a validation set)" + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + exit 1; +fi + +egs_dir=$1 +nnet_dir=$2 +dir=$3 + +# Check some files. +for f in $egs_dir/egs.1.0.ark $nnet_dir/final.mdl; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + +num_jobs_nnet=`cat $egs_dir/num_jobs_nnet` || exit 1; +iters_per_epoch=`cat $egs_dir/iters_per_epoch` || exit 1; + +mkdir -p $dir/log + +cp $nnet_dir/splice_opts $dir 2>/dev/null +cp $nnet_dir/final.mat $dir 2>/dev/null # any LDA matrix... +cp $nnet_dir/tree $dir + + +if [ $stage -le -2 ] && [ $mix_up -gt 0 ]; then + echo Mixing up to $mix_up components + $cmd $dir/log/mix_up.$x.log \ + nnet-am-mixup --min-count=10 --num-mixtures=$mix_up \ + $nnet_dir/final.mdl $dir/0.mdl || exit 1; +else + cp $nnet_dir/final.mdl $dir/0.mdl || exit 1; +fi + +if [ $stage -le -1 ] && [ $widen -gt 0 ]; then + echo "$0: Widening nnet to hidden-layer-dim=$widen" + $cmd $dir/log/widen.log \ + nnet-am-widen --hidden-layer-dim=$widen $dir/0.mdl $dir/0.mdl || exit 1; +fi + +num_iters_reduce=$[$num_epochs * $iters_per_epoch]; +num_iters_extra=$[$num_epochs_extra * $iters_per_epoch]; +num_iters=$[$num_iters_reduce+$num_iters_extra] + +echo "$0: Will train for $num_epochs + $num_epochs_extra epochs, equalling " +echo "$0: $num_iters_reduce + $num_iters_extra = $num_iters iterations, " +echo "$0: (while reducing learning rate) + (with constant learning rate)." + +x=0 +while [ $x -lt $num_iters ]; do + if [ $x -ge 0 ] && [ $stage -le $x ]; then + # Set off jobs doing some diagnostics, in the background. 
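+    # (In practice the two nnet-compute-prob jobs below are backgrounded with
+    # '&', so they run concurrently with the training pass; their output ends
+    # up in compute_prob_valid.$x.log and compute_prob_train.$x.log.)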
+ $cmd $dir/log/compute_prob_valid.$x.log \ + nnet-compute-prob $dir/$x.mdl ark:$egs_dir/valid_diagnostic.egs & + $cmd $dir/log/compute_prob_train.$x.log \ + nnet-compute-prob $dir/$x.mdl ark:$egs_dir/train_diagnostic.egs & + + echo "Training neural net (pass $x)" + + $cmd $parallel_opts JOB=1:$num_jobs_nnet $dir/log/train.$x.JOB.log \ + nnet-shuffle-egs --buffer-size=$shuffle_buffer_size --srand=$x \ + ark:$egs_dir/egs.JOB.$[$x%$iters_per_epoch].ark ark:- \| \ + nnet-train-parallel --num-threads=$num_threads \ + --minibatch-size=$minibatch_size --srand=$x $dir/$x.mdl \ + ark:- $dir/$[$x+1].JOB.mdl \ + || exit 1; + + nnets_list= + for n in `seq 1 $num_jobs_nnet`; do + nnets_list="$nnets_list $dir/$[$x+1].$n.mdl" + done + + learning_rate=`perl -e '($x,$n,$i,$f)=@ARGV; print ($x >= $n ? $f : $i*exp($x*log($f/$i)/$n));' $[$x+1] $num_iters_reduce $initial_learning_rate $final_learning_rate`; + softmax_learning_rate=`perl -e "print $learning_rate * $softmax_learning_rate_factor;"`; + nnet-am-info $dir/$[$x+1].1.mdl > $dir/foo 2>/dev/null || exit 1 + nu=`cat $dir/foo | grep num-updatable-components | awk '{print $2}'` + na=`cat $dir/foo | grep AffineComponent | wc -l` # number of last AffineComopnent layer [one-based] + lr_string="$learning_rate" + for n in `seq 2 $nu`; do + if [ $n -eq $na ]; then lr=$softmax_learning_rate; + else lr=$learning_rate; fi + lr_string="$lr_string:$lr" + done + + $cmd $dir/log/average.$x.log \ + nnet-am-average $nnets_list - \| \ + nnet-am-copy --learning-rates=$lr_string - $dir/$[$x+1].mdl || exit 1; + + rm $nnets_list + fi + x=$[$x+1] +done + +# Now do combination. +# At the end, final.mdl will be a combination of the last e.g. 10 models. +if [ $num_iters_final -gt $num_iters_extra ]; then + echo "Setting num_iters_final=$num_iters_extra" + num_iters_final=$num_iters_extra +fi +start=$[$num_iters-$num_iters_final+1] +nnets_list= +for x in `seq $start $num_iters`; do + nnets_list="$nnets_list $dir/$x.mdl" +done + +if [ $stage -le $num_iters ]; then + num_egs=`nnet-copy-egs ark:$egs_dir/combine.egs ark:/dev/null 2>&1 | tail -n 1 | awk '{print $NF}'` + mb=$[($num_egs+$num_threads-1)/$num_threads] + $cmd $parallel_opts $dir/log/combine.log \ + nnet-combine-fast --use-gpu=no --num-threads=$num_threads --verbose=3 --minibatch-size=$mb \ + $nnets_list ark:$egs_dir/combine.egs $dir/final.mdl || exit 1; +fi + +sleep 2; # make sure final.mdl exists. + +# Compute the probability of the final, combined model with +# the same subset we used for the previous compute_probs, as the +# different subsets will lead to different probs. +$cmd $dir/log/compute_prob_valid.final.log \ + nnet-compute-prob $dir/final.mdl ark:$egs_dir/valid_diagnostic.egs & +$cmd $dir/log/compute_prob_train.final.log \ + nnet-compute-prob $dir/final.mdl ark:$egs_dir/train_diagnostic.egs & + +echo Done + +if $cleanup; then + echo Removing most of the models + for x in `seq 0 $num_iters`; do + if [ $[$x%10] -ne 0 ] && [ $x -lt $[$num_iters-$num_iters_final+1] ]; then + # delete all but every 10th model; don't delete the ones which combine to form the final model. + rm $dir/$x.mdl + fi + done +fi diff --git a/tools/ASR_2ch_track/steps/nnet2/train_block.sh b/tools/ASR_2ch_track/steps/nnet2/train_block.sh new file mode 100644 index 0000000000000000000000000000000000000000..d65fdaa086f4c7c5d52ec265763878b68b55a272 --- /dev/null +++ b/tools/ASR_2ch_track/steps/nnet2/train_block.sh @@ -0,0 +1,386 @@ +#!/bin/bash + +# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. 
+# this is as train_tanh.sh but for on top of fbank feats-- we have block-diagonal +# transforms for the first few layers, on separate frequency bands. +# Otherwise it's tanh. + +# Begin configuration section. +cmd=run.pl +num_epochs=15 # Number of epochs during which we reduce + # the learning rate; number of iteration is worked out from this. +num_epochs_extra=5 # Number of epochs after we stop reducing + # the learning rate. +num_iters_final=20 # Maximum number of final iterations to give to the + # optimization over the validation set. +initial_learning_rate=0.04 +final_learning_rate=0.004 +bias_stddev=0.0 +shrink_interval=5 # shrink every $shrink_interval iters except while we are + # still adding layers, when we do it every iter. +shrink=true +num_frames_shrink=2000 # note: must be <= --num-frames-diagnostic option to get_egs.sh, if + # given. +softmax_learning_rate_factor=0.5 # Train this layer half as fast as the other layers. + +hidden_layer_dim=300 # You may want this larger, e.g. 1024 or 2048. + +minibatch_size=128 # by default use a smallish minibatch size for neural net + # training; this controls instability which would otherwise + # be a problem with multi-threaded update. Note: it also + # interacts with the "preconditioned" update which generally + # works better with larger minibatch size, so it's not + # completely cost free. + +samples_per_iter=200000 # each iteration of training, see this many samples + # per job. This option is passed to get_egs.sh +num_jobs_nnet=16 # Number of neural net jobs to run in parallel. This option + # is passed to get_egs.sh. +get_egs_stage=0 + +shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples + # on each iter. You could set it to 0 or to a large value for complete + # randomization, but this would both consume memory and cause spikes in + # disk I/O. Smaller is easier on disk and memory but less random. It's + # not a huge deal though, as samples are anyway randomized right at the start. + +add_layers_period=2 # by default, add new layers every 2 iterations. + +num_block_layers=2 +num_normal_layers=2 +block_size=10 +block_shift=5 + +stage=-5 + +io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time. +splice_width=7 # meaning +- 7 frames on each side for second LDA +randprune=4.0 # speeds up LDA. +alpha=4.0 +max_change=10.0 +mix_up=0 # Number of components to mix up to (should be > #tree leaves, if + # specified.) +num_threads=16 +parallel_opts="--num-threads 16 --mem 1G" # by default we use 16 threads; this lets the queue know. + # note: parallel_opts doesn't automatically get adjusted if you adjust num-threads. +cleanup=true +egs_dir= +lda_opts= +egs_opts= +# End configuration section. + + +echo "$0 $@" # Print the command line for logging + +if [ -f path.sh ]; then . ./path.sh; fi +. parse_options.sh || exit 1; + + +if [ $# != 4 ]; then + echo "Usage: $0 [opts] " + echo " e.g.: $0 data/train data/lang exp/tri3_ali exp/tri4_nnet" + echo "" + echo "Main options (for others, see top of script file)" + echo " --config # config file containing options" + echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." 
+ echo " --num-epochs <#epochs|15> # Number of epochs of main training" + echo " # while reducing learning rate (determines #iterations, together" + echo " # with --samples-per-iter and --num-jobs-nnet)" + echo " --num-epochs-extra <#epochs-extra|5> # Number of extra epochs of training" + echo " # after learning rate fully reduced" + echo " --initial-learning-rate # Learning rate at start of training, e.g. 0.02 for small" + echo " # data, 0.01 for large data" + echo " --final-learning-rate # Learning rate at end of training, e.g. 0.004 for small" + echo " # data, 0.001 for large data" + echo " --num-hidden-layers <#hidden-layers|2> # Number of hidden layers, e.g. 2 for 3 hours of data, 4 for 100hrs" + echo " --initial-num-hidden-layers <#hidden-layers|1> # Number of hidden layers to start with." + echo " --add-layers-period <#iters|2> # Number of iterations between adding hidden layers" + echo " --mix-up <#pseudo-gaussians|0> # Can be used to have multiple targets in final output layer," + echo " # per context-dependent state. Try a number several times #states." + echo " --num-jobs-nnet # Number of parallel jobs to use for main neural net" + echo " # training (will affect results as well as speed; try 8, 16)" + echo " # Note: if you increase this, you may want to also increase" + echo " # the learning rate." + echo " --num-threads # Number of parallel threads per job (will affect results" + echo " # as well as speed; may interact with batch size; if you increase" + echo " # this, you may want to decrease the batch size." + echo " --parallel-opts # extra options to pass to e.g. queue.pl for processes that" + echo " # use multiple threads... " + echo " --io-opts # Options given to e.g. queue.pl for jobs that do a lot of I/O." + echo " --minibatch-size # Size of minibatch to process (note: product with --num-threads" + echo " # should not get too large, e.g. >2k)." + echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per" + echo " # process." + echo " --splice-width # Number of frames on each side to append for feature input" + echo " # (note: we splice processed, typically 40-dimensional frames" + echo " --lda-dim # Dimension to reduce spliced features to with LDA" + echo " --num-iters-final <#iters|10> # Number of final iterations to give to nnet-combine-fast to " + echo " # interpolate parameters (the weights are learned with a validation set)" + echo " --num-utts-subset <#utts|300> # Number of utterances in subsets used for validation and diagnostics" + echo " # (the validation subset is held out from training)" + echo " --num-frames-diagnostic <#frames|4000> # Number of frames used in computing (train,valid) diagnostics" + echo " --num-valid-frames-combine <#frames|10000> # Number of frames used in getting combination weights at the" + echo " # very end." + echo " --stage # Used to run a partially-completed training process from somewhere in" + echo " # the middle." + + exit 1; +fi + +data=$1 +lang=$2 +alidir=$3 +dir=$4 + +# Check some files. +for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree; do + [ ! -f $f ] && echo "$0: no such file $f" && exit 1; +done + + +# Set some variables. +num_leaves=`gmm-info $alidir/final.mdl 2>/dev/null | awk '/number of pdfs/{print $NF}'` || exit 1; + +nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir... +# in this dir we'll have just one job. 
+sdata=$data/split$nj
+utils/split_data.sh $data $nj
+
+mkdir -p $dir/log
+echo $nj > $dir/num_jobs
+cp $alidir/tree $dir
+
+
+# Get list of validation utterances.
+awk '{print $1}' $data/utt2spk | utils/shuffle_list.pl | head -$num_utts_subset \
+    > $dir/valid_uttlist || exit 1;
+awk '{print $1}' $data/utt2spk | utils/filter_scp.pl --exclude $dir/valid_uttlist | \
+    head -$num_utts_subset > $dir/train_subset_uttlist || exit 1;
+
+
+if [ $stage -le -4 ]; then
+  echo "$0: calling get_lda_block.sh"
+  steps/nnet2/get_lda_block.sh --block-size $block_size --block-shift $block_shift \
+    $lda_opts --splice-width $splice_width --cmd "$cmd" $data $lang $alidir $dir || exit 1;
+fi
+
+# these files will have been written by get_lda_block.sh
+feat_dim=`cat $dir/feat_dim` || exit 1;
+lda_dim=`cat $dir/lda_dim` || exit 1;
+num_blocks=`cat $dir/num_blocks` || exit 1;
+
+if [ $stage -le -3 ] && [ -z "$egs_dir" ]; then
+  echo "$0: calling get_egs.sh"
+  steps/nnet2/get_egs.sh --io-opts "$io_opts" --samples-per-iter $samples_per_iter \
+      --num-jobs-nnet $num_jobs_nnet \
+      --splice-width $splice_width --stage $get_egs_stage --cmd "$cmd" $egs_opts --feat-type raw \
+      $data $lang $alidir $dir || exit 1;
+fi
+
+if [ -z "$egs_dir" ]; then
+  egs_dir=$dir/egs
+fi
+
+iters_per_epoch=`cat $egs_dir/iters_per_epoch` || exit 1;
+! [ $num_jobs_nnet -eq `cat $egs_dir/num_jobs_nnet` ] && \
+  echo "$0: Warning: using --num-jobs-nnet=`cat $egs_dir/num_jobs_nnet` from $egs_dir"
+num_jobs_nnet=`cat $egs_dir/num_jobs_nnet`
+
+
+if [ $stage -le -2 ]; then
+  echo "$0: initializing neural net";
+
+  hidden_block_size=`perl -e "print int(sqrt(($hidden_layer_dim*$hidden_layer_dim)/$num_blocks));"`
+  echo "Hidden block size is $hidden_block_size"
+  hidden_block_dim=$[$hidden_block_size*$num_blocks]
+  block_stddev=`perl -e "print 1.0/sqrt($block_size);"`
+  hidden_block_stddev=`perl -e "print 1.0/sqrt($hidden_block_size);"`
+  first_hidden_layer_stddev=`perl -e "print 1.0/sqrt($hidden_block_dim);"`
+  stddev=`perl -e "print 1.0/sqrt($hidden_layer_dim);"`
+
+
+  cat >$dir/nnet.config <>$dir/nnet.config <>$dir/nnet.config <>$dir/nnet.config <>$dir/nnet.config <