ksbai123 committed
Commit 6698d1a · 1 Parent(s): 055166f

Upload 380 files

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the rest.
Files changed (50)
  1. tools/ASR_2ch_track/RESULTS +49 -0
  2. tools/ASR_2ch_track/cmd.sh +21 -0
  3. tools/ASR_2ch_track/conf/chime4.cfg +50 -0
  4. tools/ASR_2ch_track/conf/decode_dnn.config +2 -0
  5. tools/ASR_2ch_track/conf/fbank.conf +11 -0
  6. tools/ASR_2ch_track/conf/mfcc.conf +1 -0
  7. tools/ASR_2ch_track/local/chime4_calc_wers.sh +85 -0
  8. tools/ASR_2ch_track/local/chime4_calc_wers_smbr.sh +83 -0
  9. tools/ASR_2ch_track/local/chime4_train_lms.sh +163 -0
  10. tools/ASR_2ch_track/local/chime4_train_rnnlms.sh +111 -0
  11. tools/ASR_2ch_track/local/clean_chime4_format_data.sh +85 -0
  12. tools/ASR_2ch_track/local/clean_wsj0_data_prep.sh +152 -0
  13. tools/ASR_2ch_track/local/cstr_ndx2flist.pl +54 -0
  14. tools/ASR_2ch_track/local/find_noisy_transcripts.pl +65 -0
  15. tools/ASR_2ch_track/local/find_transcripts.pl +64 -0
  16. tools/ASR_2ch_track/local/flist2scp.pl +31 -0
  17. tools/ASR_2ch_track/local/normalize_transcript.pl +59 -0
  18. tools/ASR_2ch_track/local/real_enhan_chime4_data_prep.sh +103 -0
  19. tools/ASR_2ch_track/local/real_noisy_chime4_data_prep.sh +114 -0
  20. tools/ASR_2ch_track/local/run_beamform_2ch_track.sh +83 -0
  21. tools/ASR_2ch_track/local/run_beamform_6ch_track.sh +100 -0
  22. tools/ASR_2ch_track/local/run_dnn.sh +230 -0
  23. tools/ASR_2ch_track/local/run_dnn_recog.sh +143 -0
  24. tools/ASR_2ch_track/local/run_gmm.sh +186 -0
  25. tools/ASR_2ch_track/local/run_gmm_recog.sh +127 -0
  26. tools/ASR_2ch_track/local/run_init.sh +52 -0
  27. tools/ASR_2ch_track/local/run_lmrescore.sh +136 -0
  28. tools/ASR_2ch_track/local/run_lmrescore_recog.sh +121 -0
  29. tools/ASR_2ch_track/local/score.sh +67 -0
  30. tools/ASR_2ch_track/local/simu_enhan_chime4_data_prep.sh +112 -0
  31. tools/ASR_2ch_track/local/simu_noisy_chime4_data_prep.sh +122 -0
  32. tools/ASR_2ch_track/local/wsj_prepare_dict.sh +86 -0
  33. tools/ASR_2ch_track/path.sh +10 -0
  34. tools/ASR_2ch_track/run.sh +93 -0
  35. tools/ASR_2ch_track/steps/align_basis_fmllr.sh +153 -0
  36. tools/ASR_2ch_track/steps/align_fmllr.sh +153 -0
  37. tools/ASR_2ch_track/steps/align_fmllr_lats.sh +160 -0
  38. tools/ASR_2ch_track/steps/align_lvtln.sh +196 -0
  39. tools/ASR_2ch_track/steps/align_raw_fmllr.sh +146 -0
  40. tools/ASR_2ch_track/steps/align_sgmm.sh +195 -0
  41. tools/ASR_2ch_track/steps/align_sgmm2.sh +195 -0
  42. tools/ASR_2ch_track/steps/align_si.sh +101 -0
  43. tools/ASR_2ch_track/steps/append_feats.sh +85 -0
  44. tools/ASR_2ch_track/steps/cleanup/combine_short_segments.py +312 -0
  45. tools/ASR_2ch_track/steps/cleanup/create_segments_from_ctm.pl +471 -0
  46. tools/ASR_2ch_track/steps/cleanup/debug_lexicon.sh +200 -0
  47. tools/ASR_2ch_track/steps/cleanup/decode_segmentation.sh +133 -0
  48. tools/ASR_2ch_track/steps/cleanup/find_bad_utts.sh +193 -0
  49. tools/ASR_2ch_track/steps/cleanup/find_bad_utts_nnet.sh +162 -0
  50. tools/ASR_2ch_track/steps/cleanup/make_segmentation_data_dir.sh +206 -0
tools/ASR_2ch_track/RESULTS ADDED
@@ -0,0 +1,49 @@
# CHiME-4 2ch track results
# These results are based on Hori et al., "The MERL/SRI system for the 3rd CHiME challenge
# using beamforming, robust feature extraction, and advanced speech recognition,"
# in Proc. ASRU'15; please refer to the paper if you find the baseline useful.
# Note that the following results differ from those in the paper, since we do not
# include SRI's robust features or system combination.

GMM noisy multi-condition with beamformit
exp/tri3b_tr05_multi_noisy/best_wer_beamformit_2mics.result
-------------------
best overall dt05 WER 17.69% (language model weight = 11)
-------------------
dt05_simu WER: 19.15% (Average), 16.14% (BUS), 23.55% (CAFE), 15.49% (PEDESTRIAN), 21.42% (STREET)
-------------------
dt05_real WER: 16.22% (Average), 20.12% (BUS), 16.25% (CAFE), 12.35% (PEDESTRIAN), 16.18% (STREET)
-------------------

DNN sMBR
exp/tri4a_dnn_tr05_multi_noisy_smbr_i1lats/best_wer_beamformit_2mics.result
-------------------
best overall dt05 WER 11.63% (language model weight = 11)
(Number of iterations = 4)
-------------------
dt05_simu WER: 12.36% (Average), 10.66% (BUS), 15.55% (CAFE), 9.87% (PEDESTRIAN), 13.36% (STREET)
-------------------
dt05_real WER: 10.90% (Average), 13.62% (BUS), 10.63% (CAFE), 7.69% (PEDESTRIAN), 11.65% (STREET)
-------------------

5-gram rescoring
exp/tri4a_dnn_tr05_multi_noisy_smbr_lmrescore/best_wer_beamformit_2mics_5gkn_5k.result
-------------------
best overall dt05 WER 10.17% (language model weight = 11)
-------------------
dt05_simu WER: 10.72% (Average), 9.37% (BUS), 13.70% (CAFE), 8.07% (PEDESTRIAN), 11.73% (STREET)
-------------------
dt05_real WER: 9.63% (Average), 11.93% (BUS), 9.75% (CAFE), 6.46% (PEDESTRIAN), 10.37% (STREET)
-------------------

RNNLM
exp/tri4a_dnn_tr05_multi_noisy_smbr_lmrescore/best_wer_beamformit_2mics_rnnlm_5k_h300_w0.5_n100.result
-------------------
best overall dt05 WER 8.86% (language model weight = 12)
-------------------
dt05_simu WER: 9.50% (Average), 8.19% (BUS), 12.15% (CAFE), 7.12% (PEDESTRIAN), 10.55% (STREET)
-------------------
dt05_real WER: 8.23% (Average), 10.90% (BUS), 7.96% (CAFE), 5.22% (PEDESTRIAN), 8.82% (STREET)
-------------------
tools/ASR_2ch_track/cmd.sh ADDED
@@ -0,0 +1,21 @@
# You can change cmd.sh depending on what type of queue you are using.
# If you have no queueing system and want to run on a local machine, you
# can change all instances of 'queue.pl' to 'run.pl' (but be careful and run
# commands one by one: most recipes will exhaust the memory on your
# machine). queue.pl works with GridEngine (qsub); slurm.pl works
# with Slurm. Different queues are configured differently, with different
# queue names and different ways of specifying things like memory;
# to account for these differences you can create and edit the file
# conf/queue.conf to match your queue's configuration. Search for
# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information,
# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl.

#export train_cmd="queue.pl --mem 2G"
#export decode_cmd="queue.pl --mem 4G"
#export mkgraph_cmd="queue.pl --mem 8G"

# run everything locally...
export train_cmd=run.pl
export decode_cmd=run.pl
export cuda_cmd=run.pl
export mkgraph_cmd=run.pl
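For orientation, a minimal sketch of how a recipe step consumes these variables (the log path, job count, and command are illustrative, not taken from this recipe). run.pl and queue.pl share the same command-line interface, so swapping them only changes where the jobs execute:

. ./cmd.sh
# Dispatch 4 parallel jobs through whatever $train_cmd points at; both run.pl
# and queue.pl accept: [options] JOB=1:N <log-pattern> <command and args...>
$train_cmd JOB=1:4 exp/example/log/step.JOB.log \
  echo "running parallel job number JOB"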
tools/ASR_2ch_track/conf/chime4.cfg ADDED
@@ -0,0 +1,50 @@
# BeamformIt sample configuration file for AMI data (http://groups.inf.ed.ac.uk/ami/download/)

# scrolling size to compute the delays
scroll_size = 250

# cross-correlation computation window size
window_size = 500

# maximum number of n-best points of the cross-correlation taken into account
nbest_amount = 4

# flag whether to apply an automatic noise thresholding
do_noise_threshold = 1

# percentage of frames with lower xcorr taken as noisy
noise_percent = 10

######## acoustic modelling parameters

# transition probabilities weight for multichannel decoding
trans_weight_multi = 25
trans_weight_nbest = 25

###

# flag whether to print the features after setting them, or not
print_features = 1

# flag whether to use the bad frames in the sum process
do_avoid_bad_frames = 1

# flag to use the best channel (by SNR) as a reference
# defined from the command line
do_compute_reference = 1

# flag whether to use a UEM file or not (otherwise process the whole file)
do_use_uem_file = 0

# flag whether to use an adaptive weights scheme or fixed weights
do_adapt_weights = 1

# flag whether to output the sph files or just run the system to create the auxiliary files
do_write_sph_files = 1

#### directories where to store/retrieve info ####
# channels_file = ./cfg-files/channels

# show needs to be passed as an argument normally; here a default one is given just in case
# show_id = Ttmp
tools/ASR_2ch_track/conf/decode_dnn.config ADDED
@@ -0,0 +1,2 @@
beam=18.0 # beam for decoding. Was 13.0 in the scripts.
lattice_beam=10.0 # this has the most effect on the size of the lattices.
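These values are read through Kaldi's generic --config mechanism at decode time; a hedged sketch of how such a config is typically passed (the graph, data, and output directories are illustrative, and the actual decode invocation presumably lives in local/run_dnn.sh, which is not shown in this view):

# Illustrative only: key=value pairs in the --config file override script defaults,
# so this decode runs with beam=18.0 and lattice_beam=10.0.
steps/nnet/decode.sh --nj 4 --config conf/decode_dnn.config \
  exp/tri4a_dnn/graph_tgpr_5k data/dt05_real_beamformit_2mics exp/tri4a_dnn/decode_example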
tools/ASR_2ch_track/conf/fbank.conf ADDED
@@ -0,0 +1,11 @@
--window-type=hamming # disable Dan's window, use the standard Hamming window
--use-energy=false # only fbank outputs
--sample-frequency=16000 # CHiME-4 audio is sampled at 16 kHz

--low-freq=64 # typical setup from Frantisek Grezl
--high-freq=8000
--dither=1

--num-mel-bins=40 # 40 mel bins for 16 kHz audio
--htk-compat=true # try to make it compatible with HTK
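A hedged sketch of how this config would typically be applied (the data directory and job count are illustrative): Kaldi's feature-extraction wrapper picks it up via --fbank-config.

# Illustrative only: extract 40-dim fbank features and CMVN stats for one data dir.
steps/make_fbank.sh --nj 4 --fbank-config conf/fbank.conf \
  data/tr05_real_noisy exp/make_fbank/tr05_real_noisy fbank
steps/compute_cmvn_stats.sh data/tr05_real_noisy exp/make_fbank/tr05_real_noisy fbank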
tools/ASR_2ch_track/conf/mfcc.conf ADDED
@@ -0,0 +1 @@
--use-energy=false # only non-default option.
tools/ASR_2ch_track/local/chime4_calc_wers.sh ADDED
@@ -0,0 +1,85 @@
#!/bin/bash

# Copyright 2015 Mitsubishi Electric Research Laboratories (Author: Shinji Watanabe)
# Apache 2.0.

set -e

# Config:
eval_flag=true # make it true when the evaluation data are released

. utils/parse_options.sh || exit 1;

if [ $# -ne 3 ]; then
  printf "\nUSAGE: %s <training experiment directory> <enhancement method> <graph_dir>\n\n" `basename $0`
  printf "%s exp/tri3b_tr05_sr_noisy noisy exp/tri4a_dnn_tr05_sr_noisy/graph_tgpr_5k\n\n" `basename $0`
  exit 1;
fi

echo "$0 $@" # Print the command line for logging

. path.sh

dir=$1
enhan=$2
graph_dir=$3

echo "compute dt05 WER for each location"
echo ""
mkdir -p $dir/log
for a in `find $dir/decode_tgpr_5k_dt05_real_$enhan/ | grep "\/wer_" | awk -F'[/]' '{print $NF}' | sort`; do
  echo -n "$a "
  if [ -e $dir/decode_tgpr_5k_dt05_simu_$enhan ]; then
    cat $dir/decode_tgpr_5k_dt05_{real,simu}_$enhan/$a | grep WER | awk '{err+=$4} {wrd+=$6} END{printf("%.2f\n",err/wrd*100)}'
  else
    cat $dir/decode_tgpr_5k_dt05_real_$enhan/$a | grep WER | awk '{err+=$4} {wrd+=$6} END{printf("%.2f\n",err/wrd*100)}'
  fi
done | sort -n -k 2 | head -n 1 > $dir/log/best_wer_$enhan

lmw=`cut -f 1 -d" " $dir/log/best_wer_$enhan | cut -f 2 -d"_"`
echo "-------------------"
printf "best overall dt05 WER %s" `cut -f 2 -d" " $dir/log/best_wer_$enhan`
echo -n "%"
printf " (language model weight = %s)\n" $lmw
echo "-------------------"
if $eval_flag; then
  tasks="dt05 et05"
else
  tasks="dt05"
fi
for e_d in $tasks; do
  for task in simu real; do
    rdir=$dir/decode_tgpr_5k_${e_d}_${task}_$enhan
    if [ -e $rdir ]; then
      for a in _BUS _CAF _PED _STR; do
        grep $a $rdir/scoring/test_filt.txt \
          > $rdir/scoring/test_filt_$a.txt
        cat $rdir/scoring/$lmw.tra \
          | utils/int2sym.pl -f 2- $graph_dir/words.txt \
          | sed s:\<UNK\>::g \
          | compute-wer --text --mode=present ark:$rdir/scoring/test_filt_$a.txt ark,p:- \
          1> $rdir/${a}_wer_$lmw 2> /dev/null
      done
      echo -n "${e_d}_${task} WER: `grep WER $rdir/wer_$lmw | cut -f 2 -d" "`% (Average), "
      echo -n "`grep WER $rdir/_BUS_wer_$lmw | cut -f 2 -d" "`% (BUS), "
      echo -n "`grep WER $rdir/_CAF_wer_$lmw | cut -f 2 -d" "`% (CAFE), "
      echo -n "`grep WER $rdir/_PED_wer_$lmw | cut -f 2 -d" "`% (PEDESTRIAN), "
      echo -n "`grep WER $rdir/_STR_wer_$lmw | cut -f 2 -d" "`% (STREET)"
      echo ""
      echo "-------------------"
    fi
  done
done
echo ""

for e_d in $tasks; do
  echo "-----------------------------"
  echo "1-best transcription for $e_d"
  echo "-----------------------------"
  for task in simu real; do
    rdir=$dir/decode_tgpr_5k_${e_d}_${task}_$enhan
    cat $rdir/scoring/$lmw.tra \
      | utils/int2sym.pl -f 2- $graph_dir/words.txt \
      | sed s:\<UNK\>::g
  done
done
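The awk one-liner above pools error and word counts across the real and simu decodes before recomputing a single percentage, which weights every utterance equally instead of averaging two WERs. A hedged sketch with made-up counts (the field positions match Kaldi's compute-wer output, where $4 is the total error count and $6 the reference word count):

printf '%s\n' \
  '%WER 19.15 [ 2100 / 10967, 150 ins, 450 del, 1500 sub ]' \
  '%WER 16.22 [ 1980 / 12206, 160 ins, 420 del, 1400 sub ]' \
  | awk '{err+=$4} {wrd+=$6} END{printf("%.2f\n", err/wrd*100)}'
# -> 17.61  (the pooled WER, not the mean of 19.15 and 16.22)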
tools/ASR_2ch_track/local/chime4_calc_wers_smbr.sh ADDED
@@ -0,0 +1,83 @@
#!/bin/bash

# Copyright 2015 Mitsubishi Electric Research Laboratories (Author: Shinji Watanabe)
# Apache 2.0.

set -e

# Config:
eval_flag=true # make it true when the evaluation data are released

. utils/parse_options.sh || exit 1;

if [ $# -ne 3 ]; then
  printf "\nUSAGE: %s <training experiment directory> <enhancement method> <graph_dir>\n\n" `basename $0`
  printf "%s exp/tri3b_tr05_sr_noisy noisy exp/tri4a_dnn_tr05_sr_noisy/graph_tgpr_5k\n\n" `basename $0`
  exit 1;
fi

echo "$0 $@" # Print the command line for logging

. path.sh

dir=$1
enhan=$2
graph_dir=$3

echo "compute WER for each location"
echo ""
mkdir -p $dir/log
# collect scores
for x in `find $dir/ -type d -name "*_it*" | awk -F "_it" '{print $NF}' | sort | uniq`; do
  for y in `find $dir/*_${enhan}_it*/ | grep "\/wer_" | awk -F'[/]' '{print $NF}' | sort | uniq`; do
    echo -n "${x}_$y "
    cat $dir/decode_tgpr_5k_dt05_{real,simu}_${enhan}_it$x/$y | grep WER | awk '{err+=$4} {wrd+=$6} END{printf("%.2f\n",err/wrd*100)}'
  done
done | sort -n -k 2 | head -n 1 > $dir/log/best_wer_$enhan

lmw=`cut -f 1 -d" " $dir/log/best_wer_$enhan | awk -F'[_]' '{print $NF}'`
it=`cut -f 1 -d" " $dir/log/best_wer_$enhan | awk -F'[_]' '{print $1}'`
echo "-------------------"
printf "best overall dt05 WER %s" `cut -f 2 -d" " $dir/log/best_wer_$enhan`
echo -n "%"
printf " (language model weight = %s)\n" $lmw
printf " (Number of iterations = %s)\n" $it
echo "-------------------"
if $eval_flag; then
  tasks="dt05 et05"
else
  tasks="dt05"
fi
for e_d in $tasks; do
  for task in simu real; do
    rdir=$dir/decode_tgpr_5k_${e_d}_${task}_${enhan}_it$it
    for a in _BUS _CAF _PED _STR; do
      grep $a $rdir/scoring/test_filt.txt \
        > $rdir/scoring/test_filt_$a.txt
      cat $rdir/scoring/$lmw.tra \
        | utils/int2sym.pl -f 2- $graph_dir/words.txt \
        | sed s:\<UNK\>::g \
        | compute-wer --text --mode=present ark:$rdir/scoring/test_filt_$a.txt ark,p:- \
        1> $rdir/${a}_wer_$lmw 2> /dev/null
    done
    echo -n "${e_d}_${task} WER: `grep WER $rdir/wer_$lmw | cut -f 2 -d" "`% (Average), "
    echo -n "`grep WER $rdir/_BUS_wer_$lmw | cut -f 2 -d" "`% (BUS), "
    echo -n "`grep WER $rdir/_CAF_wer_$lmw | cut -f 2 -d" "`% (CAFE), "
    echo -n "`grep WER $rdir/_PED_wer_$lmw | cut -f 2 -d" "`% (PEDESTRIAN), "
    echo -n "`grep WER $rdir/_STR_wer_$lmw | cut -f 2 -d" "`% (STREET)"
    echo ""
    echo "-------------------"
  done
done

for e_d in $tasks; do
  echo "-----------------------------"
  echo "1-best transcription for $e_d"
  echo "-----------------------------"
  for task in simu real; do
    rdir=$dir/decode_tgpr_5k_${e_d}_${task}_${enhan}_it$it
    cat $rdir/scoring/$lmw.tra \
      | utils/int2sym.pl -f 2- $graph_dir/words.txt \
      | sed s:\<UNK\>::g
  done
done
tools/ASR_2ch_track/local/chime4_train_lms.sh ADDED
@@ -0,0 +1,163 @@
#!/bin/bash

# Modified from the script for the CHiME-3 baseline
# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori)

# Config:
order=5 # n-gram order

. utils/parse_options.sh || exit 1;

. ./path.sh

if [ $# -ne 1 ]; then
  printf "\nUSAGE: %s <Chime4 root directory>\n\n" `basename $0`
  echo "Please specify a Chime4 root directory"
  echo "If you use the kaldi scripts distributed with the Chime4 data,"
  echo "it would be `pwd`/../.."
  exit 1;
fi

# check data directories
chime4_data=$1
wsj0_data=$chime4_data/data/WSJ0 # directory of WSJ0 in Chime4. You can also specify your own WSJ0 corpus directory
if [ ! -d $chime4_data ]; then
  echo "$chime4_data does not exist. Please specify the chime4 data root correctly" && exit 1
fi
if [ ! -d $wsj0_data ]; then
  echo "$wsj0_data does not exist. Please specify the WSJ0 corpus directory" && exit 1
fi
lm_train=$wsj0_data/wsj0/doc/lng_modl/lm_train/np_data

# check whether run_init has been executed
if [ ! -d data/lang ]; then
  echo "error: execute local/run_init.sh first"
  exit 1;
fi

# lm directories
dir=data/local/local_lm
srcdir=data/local/nist_lm
mkdir -p $dir

# check for the SRILM ngram tools
! which ngram-count \
  && echo "SRILM tools are not installed, but are required for LM training" && exit 1;

# extract the 5k vocabulary from a baseline language model
srclm=$srcdir/lm_tgpr_5k.arpa.gz
if [ -f $srclm ]; then
  echo "Getting vocabulary from a baseline language model";
  gunzip -c $srclm | awk 'BEGIN{unig=0}{
    if(unig==0){
      if($1=="\\1-grams:"){unig=1}}
    else {
      if ($1 != "") {
        if ($1=="\\2-grams:" || $1=="\\end\\") {exit}
        else {print $2}}
    }}' > $dir/vocab_5k.txt
else
  echo "Language model $srclm does not exist" && exit 1;
fi

# collect training data from WSJ0
touch $dir/train.gz
if [ `du -m $dir/train.gz | cut -f 1` -eq 63 ]; then
  echo "Not getting training data again [already exists]";
else
  echo "Collecting training data from $lm_train";
  gunzip -c $lm_train/{87,88,89}/*.z \
    | awk -v voc=$dir/vocab_5k.txt '
      BEGIN{ while((getline<voc)>0) { invoc[$1]=1; }}
      /^</{next}{
        for (x=1;x<=NF;x++) {
          w=toupper($x);
          if (invoc[w]) { printf("%s ",w); } else { printf("<UNK> "); }
        }
        printf("\n");
      }' | gzip -c > $dir/train.gz
fi

# get validation data from the Chime4 dev set
touch $dir/valid.gz
if [ `du -k $dir/valid.gz | cut -f 1` -eq 68 ]; then
  echo "Not getting validation data again [already exists]";
else
  echo "Collecting validation data from $chime4_data/data/transcriptions";
  cut -d" " -f2- $chime4_data/data/transcriptions/dt05_real.trn_all \
    $chime4_data/data/transcriptions/dt05_simu.trn_all \
    | gzip -c > $dir/valid.gz
fi

# train a large n-gram language model
lm_suffix=${order}gkn_5k
if [ -f $dir/lm_${lm_suffix}.arpa.gz ]; then
  echo "A $order-gram language model already exists and is not constructed again"
  echo "To reconstruct, remove $dir/lm_${lm_suffix}.arpa.gz first"
else
  echo "Training a $order-gram language model"
  ngram-count -text $dir/train.gz -order $order \
    -vocab $dir/vocab_5k.txt -unk -map-unk "<UNK>" \
    -gt2min 1 -gt3min 1 -gt4min 2 -gt5min 2 \
    -interpolate -kndiscount \
    -lm $dir/lm_${lm_suffix}.arpa.gz
fi
echo "Checking validation perplexity of the $order-gram language model"
ngram -order $order -ppl $dir/valid.gz -lm $dir/lm_${lm_suffix}.arpa.gz
# e.g. 5-gram perplexity:
# file data/local/local_lm/valid.txt: 3280 sentences, 54239 words, 3 OOVs
# 0 zeroprobs, logprob= -96775.5 ppl= 48.1486 ppl1= 60.8611

# Next, create the corresponding FST and lang_test_* directory.
echo "Preparing language models for test"
tmpdir=data/local/lm_tmp
lexicon=data/local/lang_tmp/lexiconp.txt
mkdir -p $tmpdir

test=data/lang_test_${lm_suffix}
mkdir -p $test
for f in phones.txt words.txt L.fst L_disambig.fst \
    phones; do
  cp -r data/lang/$f $test
done
gunzip -c $dir/lm_${lm_suffix}.arpa.gz | \
  utils/find_arpa_oovs.pl $test/words.txt > $tmpdir/oovs_${lm_suffix}.txt

# grep -v '<s> <s>' because the LM seems to have some strange and useless
# stuff in it with multiple <s>'s in the history. Encountered some other similar
# things in a LM from Geoff. Removing all "illegal" combinations of <s> and </s>,
# which are supposed to occur only at the beginning/end of an utterance. These
# can cause determinization failures of CLG [ends up being epsilon cycles].
gunzip -c $dir/lm_${lm_suffix}.arpa.gz | \
  grep -v '<s> <s>' | \
  grep -v '</s> <s>' | \
  grep -v '</s> </s>' | \
  arpa2fst - | fstprint | \
  utils/remove_oovs.pl $tmpdir/oovs_${lm_suffix}.txt | \
  utils/eps2disambig.pl | utils/s2eps.pl | fstcompile --isymbols=$test/words.txt \
    --osymbols=$test/words.txt --keep_isymbols=false --keep_osymbols=false | \
  fstrmepsilon | fstarcsort --sort_type=ilabel > $test/G.fst
fstisstochastic $test/G.fst
# The output is like:
# 9.14233e-05 -0.259833
# we do expect the first of these 2 numbers to be close to zero (the second is
# nonzero because the backoff weights make the states sum to >1).
# Because of the <s> fiasco for these particular LMs, the first number is not
# as close to zero as it could be.

# Everything below is only for diagnostics.
# Checking that G has no cycles with empty words on them (e.g. <s>, </s>);
# this might cause determinization failure of CLG.
# #0 is treated as an empty word.
mkdir -p $tmpdir/g
awk '{if(NF==1){ printf("0 0 %s %s\n", $1,$1); }} END{print "0 0 #0 #0"; print "0";}' \
  < "$lexicon" > $tmpdir/g/select_empty.fst.txt
fstcompile --isymbols=$test/words.txt --osymbols=$test/words.txt $tmpdir/g/select_empty.fst.txt | \
  fstarcsort --sort_type=olabel | fstcompose - $test/G.fst > $tmpdir/g/empty_words.fst
fstinfo $tmpdir/g/empty_words.fst | grep cyclic | grep -w 'y' &&
  echo "Language model has cycles with empty words" && exit 1
rm -r $tmpdir/g

echo "Succeeded in preparing a large ${order}-gram LM"
rm -r $tmpdir
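A hedged usage sketch, following the script's own usage message ($CHIME4 is a placeholder for your CHiME-4 root; local/run_init.sh must have created data/lang first):

# Illustrative only: train the 5-gram LM and build data/lang_test_5gkn_5k.
local/chime4_train_lms.sh --order 5 $CHIME4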
tools/ASR_2ch_track/local/chime4_train_rnnlms.sh ADDED
@@ -0,0 +1,111 @@
#!/bin/bash

# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori)

# Config:
hidden=300           # number of hidden units
class=200            # number of classes
rnnlm_ver=rnnlm-0.3e # version of RNNLM to use
threads=1            # for RNNLM-HS
bptt=4               # length of BPTT unfolding in RNNLM
bptt_block=10        # block size of BPTT unfolding in RNNLM

. utils/parse_options.sh || exit 1;

. ./path.sh
. ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
           ## This relates to the queue.

if [ $# -ne 1 ]; then
  printf "\nUSAGE: %s <Chime4 root directory>\n\n" `basename $0`
  echo "Please specify a Chime4 root directory"
  echo "If you use the kaldi scripts distributed with the Chime4 data,"
  echo "it would be `pwd`/../.."
  exit 1;
fi

# check data directories
chime4_data=$1
wsj0_data=$chime4_data/data/WSJ0 # directory of WSJ0 in Chime4. You can also specify your own WSJ0 corpus directory
if [ ! -d $chime4_data ]; then
  echo "$chime4_data does not exist. Please specify the chime4 data root correctly" && exit 1
fi
if [ ! -d $wsj0_data ]; then
  echo "$wsj0_data does not exist. Please specify the WSJ0 corpus directory" && exit 1
fi
lm_train=$wsj0_data/wsj0/doc/lng_modl/lm_train/np_data

# lm directories
dir=data/local/local_lm
srcdir=data/local/nist_lm
mkdir -p $dir

# extract the 5k vocabulary from a baseline language model
srclm=$srcdir/lm_tgpr_5k.arpa.gz
if [ -f $srclm ]; then
  echo "Getting vocabulary from a baseline language model";
  gunzip -c $srclm | awk 'BEGIN{unig=0}{
    if(unig==0){
      if($1=="\\1-grams:"){unig=1}}
    else {
      if ($1 != "") {
        if ($1=="\\2-grams:" || $1=="\\end\\") {exit}
        else {print $2}}
    }}' | sed "s/<UNK>/<RNN_UNK>/" > $dir/vocab_5k.rnn
else
  echo "Language model $srclm does not exist" && exit 1;
fi

# collect training data from WSJ0
touch $dir/train.rnn
if [ `du -m $dir/train.rnn | cut -f 1` -eq 223 ]; then
  echo "Not getting training data again [already exists]";
else
  echo "Collecting training data from $lm_train";
  gunzip -c $lm_train/{87,88,89}/*.z \
    | awk -v voc=$dir/vocab_5k.rnn '
      BEGIN{ while((getline<voc)>0) { invoc[$1]=1; }}
      /^</{next}{
        for (x=1;x<=NF;x++) {
          w=toupper($x);
          if (invoc[w]) { printf("%s ",w); } else { printf("<RNN_UNK> "); }
        }
        printf("\n");
      }' > $dir/train.rnn
fi

# get validation data from the Chime4 dev set
touch $dir/valid.rnn
if [ `cat $dir/valid.rnn | wc -w` -eq 54239 ]; then
  echo "Not getting validation data again [already exists]";
else
  echo "Collecting validation data from $chime4_data/data/transcriptions";
  cut -d" " -f2- $chime4_data/data/transcriptions/dt05_real.trn_all \
    $chime4_data/data/transcriptions/dt05_simu.trn_all \
    > $dir/valid.rnn
fi

# RNN language model training
$KALDI_ROOT/tools/extras/check_for_rnnlm.sh "$rnnlm_ver" || exit 1

# train an RNN language model
rnnmodel=$dir/rnnlm_5k_h${hidden}_bptt${bptt}
if [ -f $rnnmodel ]; then
  echo "An RNN language model already exists and is not constructed again"
  echo "To reconstruct, remove $rnnmodel first"
else
  echo "Training an RNN language model with $rnnlm_ver"
  echo "(runtime log is written to $dir/rnnlm.log)"
  $train_cmd $dir/rnnlm.log \
    $KALDI_ROOT/tools/$rnnlm_ver/rnnlm -train $dir/train.rnn -valid $dir/valid.rnn \
      -rnnlm $rnnmodel -hidden $hidden -class $class \
      -rand-seed 1 -independent -debug 1 -bptt $bptt -bptt-block $bptt_block || exit 1;
fi

# store it in an RNNLM directory with the necessary files
rnndir=data/lang_test_rnnlm_5k_h${hidden}
mkdir -p $rnndir
cp $rnnmodel $rnndir/rnnlm
grep -v -e "<s>" -e "</s>" $dir/vocab_5k.rnn > $rnndir/wordlist.rnn
touch $rnndir/unk.probs # make an empty file because we don't know the unk-word probs.
tools/ASR_2ch_track/local/clean_chime4_format_data.sh ADDED
@@ -0,0 +1,85 @@
#!/bin/bash

# Copyright 2012 Microsoft Corporation  Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0

# This script takes data prepared in a corpus-dependent way
# in data/local/, and converts it into the "canonical" form,
# in various subdirectories of data/, e.g. data/lang, data/lang_test_ug,
# data/train_si84, etc.

# Modified from the script for the Chime2 baseline
# Shinji Watanabe 02/13/2015

. ./path.sh || exit 1;

echo "Preparing train and test data"
srcdir=data/local/data
lmdir=data/local/nist_lm
tmpdir=data/local/lm_tmp
lexicon=data/local/lang_tmp/lexiconp.txt
mkdir -p $tmpdir

for x in et05_orig_clean dt05_orig_clean tr05_orig_clean; do
  mkdir -p data/$x
  cp $srcdir/${x}_wav.scp data/$x/wav.scp || exit 1;
  cp $srcdir/$x.txt data/$x/text || exit 1;
  cp $srcdir/$x.spk2utt data/$x/spk2utt || exit 1;
  cp $srcdir/$x.utt2spk data/$x/utt2spk || exit 1;
  utils/filter_scp.pl data/$x/spk2utt $srcdir/spk2gender > data/$x/spk2gender || exit 1;
done


# Next, for each type of language model, create the corresponding FST
# and the corresponding lang_test_* directory.

echo "Preparing language models for test"

for lm_suffix in tgpr_5k; do
  test=data/lang_test_${lm_suffix}
  mkdir -p $test
  for f in phones.txt words.txt L.fst L_disambig.fst \
      phones; do
    cp -r data/lang/$f $test
  done
  gunzip -c $lmdir/lm_${lm_suffix}.arpa.gz | \
    utils/find_arpa_oovs.pl $test/words.txt > $tmpdir/oovs_${lm_suffix}.txt

  # grep -v '<s> <s>' because the LM seems to have some strange and useless
  # stuff in it with multiple <s>'s in the history. Encountered some other similar
  # things in a LM from Geoff. Removing all "illegal" combinations of <s> and </s>,
  # which are supposed to occur only at the beginning/end of an utterance. These
  # can cause determinization failures of CLG [ends up being epsilon cycles].
  gunzip -c $lmdir/lm_${lm_suffix}.arpa.gz | \
    grep -v '<s> <s>' | \
    grep -v '</s> <s>' | \
    grep -v '</s> </s>' | \
    arpa2fst - | fstprint | \
    utils/remove_oovs.pl $tmpdir/oovs_${lm_suffix}.txt | \
    utils/eps2disambig.pl | utils/s2eps.pl | fstcompile --isymbols=$test/words.txt \
      --osymbols=$test/words.txt --keep_isymbols=false --keep_osymbols=false | \
    fstrmepsilon | fstarcsort --sort_type=ilabel > $test/G.fst
  fstisstochastic $test/G.fst
  # The output is like:
  # 9.14233e-05 -0.259833
  # we do expect the first of these 2 numbers to be close to zero (the second is
  # nonzero because the backoff weights make the states sum to >1).
  # Because of the <s> fiasco for these particular LMs, the first number is not
  # as close to zero as it could be.

  # Everything below is only for diagnostics.
  # Checking that G has no cycles with empty words on them (e.g. <s>, </s>);
  # this might cause determinization failure of CLG.
  # #0 is treated as an empty word.
  mkdir -p $tmpdir/g
  awk '{if(NF==1){ printf("0 0 %s %s\n", $1,$1); }} END{print "0 0 #0 #0"; print "0";}' \
    < "$lexicon" > $tmpdir/g/select_empty.fst.txt
  fstcompile --isymbols=$test/words.txt --osymbols=$test/words.txt $tmpdir/g/select_empty.fst.txt | \
    fstarcsort --sort_type=olabel | fstcompose - $test/G.fst > $tmpdir/g/empty_words.fst
  fstinfo $tmpdir/g/empty_words.fst | grep cyclic | grep -w 'y' &&
    echo "Language model has cycles with empty words" && exit 1
  rm -r $tmpdir/g
done

echo "Succeeded in formatting data."
rm -r $tmpdir
tools/ASR_2ch_track/local/clean_wsj0_data_prep.sh ADDED
@@ -0,0 +1,152 @@
#!/bin/bash
set -e

# Copyright 2009-2012 Microsoft Corporation  Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0.

# This is modified from the script in the standard Kaldi recipe to account
# for the way the WSJ data is structured on the Edinburgh systems.
# - Arnab Ghoshal, 29/05/12

# Modified from the script for the CHiME2 baseline
# Shinji Watanabe 02/13/2015

if [ $# -ne 1 ]; then
  printf "\nUSAGE: %s <original WSJ0 corpus-directory>\n\n" `basename $0`
  echo "The argument should be the top-level WSJ corpus directory."
  echo "It is assumed that there will be a 'wsj0' and a 'wsj1' subdirectory"
  echo "within the top-level corpus directory."
  exit 1;
fi

wsj0=$1

dir=`pwd`/data/local/data
lmdir=`pwd`/data/local/nist_lm
mkdir -p $dir $lmdir
local=`pwd`/local
utils=`pwd`/utils

. ./path.sh # Needed for KALDI_ROOT
sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
if [ ! -x $sph2pipe ]; then
  echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
  exit 1;
fi

if [ -z $IRSTLM ] ; then
  export IRSTLM=$KALDI_ROOT/tools/irstlm/
fi
export PATH=${PATH}:$IRSTLM/bin
if ! command -v prune-lm >/dev/null 2>&1 ; then
  echo "$0: Error: IRSTLM is not available or not compiled" >&2
  echo "$0: Error: We used to install it by default, but" >&2
  echo "$0: Error: this is no longer the case." >&2
  echo "$0: Error: To install it, go to $KALDI_ROOT/tools" >&2
  echo "$0: Error: and run extras/install_irstlm.sh" >&2
  exit 1
fi

cd $dir

# This version for SI-84
cat $wsj0/wsj0/doc/indices/train/tr_s_wv1.ndx \
  | $local/cstr_ndx2flist.pl $wsj0 | sort -u > tr05_orig_clean.flist

# Now for the test sets.
# $wsj0/wsj1/doc/indices/readme.doc
# describes all the different test sets.
# Note: each test-set seems to come in multiple versions depending
# on different vocabulary sizes, verbalized vs. non-verbalized
# pronunciations, etc. We use the largest vocab and non-verbalized
# pronunciations.
# The most normal one seems to be the "baseline 60k test set", which
# is h1_p0.

# Nov'92 (330 utts, 5k vocab)
cat $wsj0/wsj0/doc/indices/test/nvp/si_et_05.ndx | \
  $local/cstr_ndx2flist.pl $wsj0 | sort > et05_orig_clean.flist

# Note: the case-insensitive grep below is needed because this data
# sometimes gets copied from the CDs with upcased filenames; don't know
# why (could be older versions of the disks).
find $wsj0/wsj0/si_dt_05 -print | grep -i ".wv1" | sort > dt05_orig_clean.flist

# Finding the transcript files:
find -L $wsj0 -iname '*.dot' > dot_files.flist

# Convert the transcripts into our format (no normalization yet),
# adding a suffix to utt_id
# 0 for clean condition
for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do
  $local/flist2scp.pl $x.flist | sort > ${x}_sph_tmp.scp
  cat ${x}_sph_tmp.scp | awk '{print $1}' \
    | $local/find_transcripts.pl dot_files.flist > ${x}_tmp.trans1
  cat ${x}_sph_tmp.scp | awk '{printf("%s %s\n", $1, $2);}' > ${x}_sph.scp
  cat ${x}_tmp.trans1 | awk '{printf("%s ", $1); for(i=2;i<=NF;i++) printf("%s ", $i); printf("\n");}' > ${x}.trans1
done

# Do some basic normalization steps. At this point we don't remove OOVs--
# that will be done inside the training scripts, as we'd like to make the
# data-preparation stage independent of the specific lexicon used.
noiseword="<NOISE>";
for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do
  cat $x.trans1 | $local/normalize_transcript.pl $noiseword \
    | sort > $x.txt || exit 1;
done

# Create scp's with wav's. (the wv1 in the distribution is not really wav, it is sph.)
for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do
  awk '{printf("%s '$sph2pipe' -f wav %s |\n", $1, $2);}' < ${x}_sph.scp \
    > ${x}_wav.scp
done

# Make the utt2spk and spk2utt files.
for x in tr05_orig_clean et05_orig_clean dt05_orig_clean; do
  cat ${x}_sph.scp | awk '{print $1}' \
    | perl -ane 'chop; m:^...:; print "$_ $&\n";' > $x.utt2spk
  cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
done

# in case we want to limit the LMs to the most frequent words, copy the LM training word frequency list
cp $wsj0/wsj0/doc/lng_modl/vocab/wfl_64.lst $lmdir
chmod u+w $lmdir/*.lst # had weird permissions on source.

# The 5K vocab language model without verbalized pronunciations.
# This is used for the 3rd CHiME challenge.
# trigram would be: !only closed vocabulary here!
cp $wsj0/wsj0/doc/lng_modl/base_lm/tcb05cnp.z $lmdir/lm_tg_5k.arpa.gz || exit 1;
chmod u+rw $lmdir/lm_tg_5k.arpa.gz
gunzip $lmdir/lm_tg_5k.arpa.gz
tail -n 4328839 $lmdir/lm_tg_5k.arpa | gzip -c -f > $lmdir/lm_tg_5k.arpa.gz
rm $lmdir/lm_tg_5k.arpa

prune-lm --threshold=1e-7 $lmdir/lm_tg_5k.arpa.gz $lmdir/lm_tgpr_5k.arpa || exit 1;
gzip -f $lmdir/lm_tgpr_5k.arpa || exit 1;


if [ ! -f wsj0-train-spkrinfo.txt ] || [ `cat wsj0-train-spkrinfo.txt | wc -l` -ne 134 ]; then
  rm -f wsj0-train-spkrinfo.txt
  wget http://www.ldc.upenn.edu/Catalog/docs/LDC93S6A/wsj0-train-spkrinfo.txt \
    || ( echo "Getting wsj0-train-spkrinfo.txt from backup location" && \
         wget --no-check-certificate https://sourceforge.net/projects/kaldi/files/wsj0-train-spkrinfo.txt );
fi

if [ ! -f wsj0-train-spkrinfo.txt ]; then
  echo "Could not get the spkrinfo.txt file from the LDC website (moved)?"
  echo "This is possibly omitted from the training disks; couldn't find it."
  echo "Everything else may have worked; we just may be missing gender info"
  echo "which is only needed for VTLN-related diagnostics anyway."
  exit 1
fi
# Note: wsj0-train-spkrinfo.txt doesn't seem to be on the disks but the
# LDC put it on the web. Perhaps it was accidentally omitted from the
# disks.

cat $wsj0/wsj0/doc/spkrinfo.txt \
    ./wsj0-train-spkrinfo.txt | \
  perl -ane 'tr/A-Z/a-z/; m/^;/ || print;' | \
  awk '{print $1, $2}' | grep -v -- -- | sort | uniq > spk2gender


echo "Data preparation succeeded"
tools/ASR_2ch_track/local/cstr_ndx2flist.pl ADDED
@@ -0,0 +1,54 @@
#!/usr/bin/env perl

# Copyright 2010-2011 Microsoft Corporation

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.

# This is modified from the script in the standard Kaldi recipe to account
# for the way the WSJ data is structured on the Edinburgh systems.
# - Arnab Ghoshal, 12/1/12

# This program takes as its standard input an .ndx file from the WSJ corpus that looks
# like this:
#;; File: tr_s_wv1.ndx, updated 04/26/94
#;;
#;; Index for WSJ0 SI-short Sennheiser training data
#;; Data is read WSJ sentences, Sennheiser mic.
#;; Contains 84 speakers X (~100 utts per speaker MIT/SRI and ~50 utts
#;; per speaker TI) = 7236 utts
#;;
#11_1_1:wsj0/si_tr_s/01i/01ic0201.wv1
#11_1_1:wsj0/si_tr_s/01i/01ic0202.wv1
#11_1_1:wsj0/si_tr_s/01i/01ic0203.wv1

# and as its command-line argument it takes the name of the WSJ disk location, e.g.
# /group/corpora/public/wsjcam0/data on DICE machines.
# It outputs a list of absolute pathnames.

$wsj_dir = $ARGV[0];

while(<STDIN>){
  if(m/^;/){ next; } # Comment. Ignore it.
  else {
    m/^([0-9_]+):\s*(\S+)$/ || die "Could not parse line $_";
    $filename = $2; # as a subdirectory of the distributed disk.
    if ($filename !~ m/\.wv1$/) { $filename .= ".wv1"; }
    $filename = "$wsj_dir/$filename";
    if (-e $filename) {
      print "$filename\n";
    } else {
      print STDERR "File $filename found in the index but not on disk\n";
    }
  }
}
tools/ASR_2ch_track/local/find_noisy_transcripts.pl ADDED
@@ -0,0 +1,65 @@
#!/usr/bin/env perl
# Copyright 2010-2011 Microsoft Corporation

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.


# This program takes on its standard input a list of utterance
# id's, one per line (e.g. 4k0c030a is an utterance id).
# It takes as its command-line argument a list of dot files, and
# extracts from the dot files the transcripts for a given
# dataset (represented by that file list).

@ARGV == 1 || die "find_noisy_transcripts.pl dot_files_flist < utterance_ids > transcripts";
$dot_flist = shift @ARGV;

open(L, "<$dot_flist") || die "Opening file list of dot files: $dot_flist\n";
while(<L>){
  chop;
  m:\S+/(\w{6})00.dot: || die "Bad line in dot file list: $_";
  $spk = $1;
  $spk2dot{$spk} = $_;
}

while(<STDIN>){
  chop;
  $uttid_orig = $_;
  $uttid = substr $uttid_orig, 0, 8;
  $uttid =~ m:(\w{6})\w\w: || die "Bad utterance id $_";
  $spk = $1;
  if($spk ne $curspk) {
    %utt2trans = (); # Don't keep all the transcripts in memory...
    $curspk = $spk;
    $dotfile = $spk2dot{$spk};
    defined $dotfile || die "No dot file for speaker $spk\n";
    open(F, "<$dotfile") || die "Error opening dot file $dotfile\n";
    while(<F>) {
      $_ =~ m:(.+)\((\w{8})\)\s*$: || die "Bad line $_ in dot file $dotfile (line $.)\n";
      $trans = $1;
      $utt = $2;
      $utt2trans{$utt} = $trans;
    }
  }
  if(!defined $utt2trans{$uttid}) {
    print STDERR "No transcript for utterance $uttid (current dot file is $dotfile)\n";
  } else {
    print "$uttid_orig $utt2trans{$uttid}\n";
  }
}
tools/ASR_2ch_track/local/find_transcripts.pl ADDED
@@ -0,0 +1,64 @@
#!/usr/bin/env perl
# Copyright 2010-2011 Microsoft Corporation

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.


# This program takes on its standard input a list of utterance
# id's, one per line (e.g. 4k0c030a is an utterance id).
# It takes as its command-line argument a list of dot files, and
# extracts from the dot files the transcripts for a given
# dataset (represented by that file list).

@ARGV == 1 || die "find_transcripts.pl dot_files_flist < utterance_ids > transcripts";
$dot_flist = shift @ARGV;

open(L, "<$dot_flist") || die "Opening file list of dot files: $dot_flist\n";
while(<L>){
  chop;
  m:\S+/(\w{6})00.dot: || die "Bad line in dot file list: $_";
  $spk = $1;
  $spk2dot{$spk} = $_;
}

while(<STDIN>){
  chop;
  $uttid = $_;
  $uttid =~ m:(\w{6})\w\w: || die "Bad utterance id $_";
  $spk = $1;
  if($spk ne $curspk) {
    %utt2trans = (); # Don't keep all the transcripts in memory...
    $curspk = $spk;
    $dotfile = $spk2dot{$spk};
    defined $dotfile || die "No dot file for speaker $spk\n";
    open(F, "<$dotfile") || die "Error opening dot file $dotfile\n";
    while(<F>) {
      $_ =~ m:(.+)\((\w{8})\)\s*$: || die "Bad line $_ in dot file $dotfile (line $.)\n";
      $trans = $1;
      $utt = $2;
      $utt2trans{$utt} = $trans;
    }
  }
  if(!defined $utt2trans{$uttid}) {
    print STDERR "No transcript for utterance $uttid (current dot file is $dotfile)\n";
  } else {
    print "$uttid $utt2trans{$uttid}\n";
  }
}
tools/ASR_2ch_track/local/flist2scp.pl ADDED
@@ -0,0 +1,31 @@
#!/usr/bin/env perl
# Copyright 2010-2011 Microsoft Corporation

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.


# Takes in a file list with lines like
# /mnt/matylda2/data/WSJ1/13-16.1/wsj1/si_dt_20/4k0/4k0c030a.wv1
# and outputs an scp in kaldi format with lines like
# 4k0c030a /mnt/matylda2/data/WSJ1/13-16.1/wsj1/si_dt_20/4k0/4k0c030a.wv1
# (the first thing is the utterance-id, which is the same as the basename of the file.)

while(<>){
  m:^\S+/(\w+)\.[wW][vV]1$: || die "Bad line $_";
  $id = $1;
  $id =~ tr/A-Z/a-z/; # Necessary because of weirdness on disk 13-16.1 (uppercase filenames)
  print "$id $_";
}
tools/ASR_2ch_track/local/normalize_transcript.pl ADDED
@@ -0,0 +1,59 @@
#!/usr/bin/env perl
# Copyright 2010-2011 Microsoft Corporation

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.


# This takes data from the standard input that's unnormalized transcripts in the format
# 4k2c0308 Of course there isn\'t any guarantee the company will keep its hot hand [misc_noise]
# 4k2c030a [loud_breath] And new hardware such as the set of personal computers I\. B\. M\. introduced last week can lead to unexpected changes in the software business [door_slam]
# and outputs normalized transcripts.
# c.f. /mnt/matylda2/data/WSJ0/11-10.1/wsj0/transcrp/doc/dot_spec.doc

@ARGV == 1 || die "usage: normalize_transcript.pl noise_word < transcript > transcript2";
$noise_word = shift @ARGV;

while(<STDIN>) {
  $_ =~ m:^(\S+) (.+): || die "bad line $_";
  $utt = $1;
  $trans = $2;
  print "$utt";
  foreach $w (split (" ",$trans)) {
    $w =~ tr:a-z:A-Z:; # Upcase everything to match the CMU dictionary.
    $w =~ s:\\::g;     # Remove backslashes. We don't need the quoting.
    $w =~ s:^\%PERCENT$:PERCENT:; # Normalization for Nov'93 test transcripts.
    $w =~ s:^\.POINT$:POINT:;     # Normalization for Nov'93 test transcripts.
    if($w =~ m:^\[\<\w+\]$: || # E.g. [<door_slam], this means a door slammed in the preceding word. Delete.
       $w =~ m:^\[\w+\>\]$: || # E.g. [door_slam>], this means a door slammed in the next word. Delete.
       $w =~ m:\[\w+/\]$: ||   # E.g. [phone_ring/], which indicates the start of this phenomenon.
       $w =~ m:\[\/\w+]$: ||   # E.g. [/phone_ring], which indicates the end of this phenomenon.
       $w eq "~" ||            # This is used to indicate truncation of an utterance. Not a word.
       $w eq ".") {            # "." is used to indicate a pause. Silence is optional anyway so not much
                               # point including this in the transcript.
      next; # we won't print this word.
    } elsif($w =~ m:\[\w+\]:) { # Other noises, e.g. [loud_breath].
      print " $noise_word";
    } elsif($w =~ m:^\<([\w\']+)\>$:) {
      # e.g. replace <and> with and (the <> means verbal deletion of a word)... but it's pronounced.
      print " $1";
    } elsif($w eq "--DASH") {
      print " -DASH"; # This is a common issue; the CMU dictionary has it as -DASH.
    # } elsif($w =~ m:(.+)\-DASH$:) { # E.g. INCORPORATED-DASH... seems the DASH gets combined with the previous word
    #   print " $1 -DASH";
    } else {
      print " $w";
    }
  }
  print "\n";
}
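To make the normalization concrete, a hedged before/after sketch using the first example line from the header comment (the output follows from the rules in the loop: upcasing, backslash removal, and mapping bracketed noises to the noise word):

echo "4k2c0308 Of course there isn\\'t any guarantee the company will keep its hot hand [misc_noise]" \
  | local/normalize_transcript.pl "<NOISE>"
# -> 4k2c0308 OF COURSE THERE ISN'T ANY GUARANTEE THE COMPANY WILL KEEP ITS HOT HAND <NOISE>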
tools/ASR_2ch_track/local/real_enhan_chime4_data_prep.sh ADDED
@@ -0,0 +1,103 @@
#!/bin/bash
set -e

# Copyright 2009-2012 Microsoft Corporation  Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0.

# This is modified from the script in the standard Kaldi recipe to account
# for the way the WSJ data is structured on the Edinburgh systems.
# - Arnab Ghoshal, 29/05/12

# Modified from the script for the CHiME2 baseline
# Shinji Watanabe 02/13/2015

# Config:
eval_flag=true # make it true when the evaluation data are released

. utils/parse_options.sh || exit 1;

if [ $# -ne 2 ]; then
  printf "\nUSAGE: %s <enhancement-name> <enhanced-speech-directory>\n\n" `basename $0`
  echo "The second argument should be the directory that contains only the enhanced speech data."
  exit 1;
fi

echo "$0 $@" # Print the command line for logging

enhan=$1
audio_dir=$2

dir=`pwd`/data/local/data
mkdir -p $dir
local=`pwd`/local
utils=`pwd`/utils
odir=`pwd`/data

. ./path.sh # Needed for KALDI_ROOT
export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin
sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
if [ ! -x $sph2pipe ]; then
  echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
  exit 1;
fi

if $eval_flag; then
  list_set="tr05_real_$enhan dt05_real_$enhan et05_real_$enhan"
else
  list_set="tr05_real_$enhan dt05_real_$enhan"
fi

cd $dir

find $audio_dir/ -name '*.wav' | grep 'tr05_bus_real\|tr05_caf_real\|tr05_ped_real\|tr05_str_real' | sort -u > tr05_real_$enhan.flist
find $audio_dir/ -name '*.wav' | grep 'dt05_bus_real\|dt05_caf_real\|dt05_ped_real\|dt05_str_real' | sort -u > dt05_real_$enhan.flist
if $eval_flag; then
  find $audio_dir/ -name '*.wav' | grep 'et05_bus_real\|et05_caf_real\|et05_ped_real\|et05_str_real' | sort -u > et05_real_$enhan.flist
fi

# make a scp file from the file list
for x in $list_set; do
  cat $x.flist | awk -F'[/]' '{print $NF}' | sed -e 's/\.wav/_REAL/' > ${x}_wav.ids
  paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp
done

# make a transcription from the dot files
cat tr05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_REAL"}' > tr05_real_$enhan.ids
cat tr05_real.dot | sed -e 's/(.*)//' > tr05_real_$enhan.txt
paste -d" " tr05_real_$enhan.ids tr05_real_$enhan.txt | sort -k 1 > tr05_real_$enhan.trans1
cat dt05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_REAL"}' > dt05_real_$enhan.ids
cat dt05_real.dot | sed -e 's/(.*)//' > dt05_real_$enhan.txt
paste -d" " dt05_real_$enhan.ids dt05_real_$enhan.txt | sort -k 1 > dt05_real_$enhan.trans1
if $eval_flag; then
  cat et05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_REAL"}' > et05_real_$enhan.ids
  cat et05_real.dot | sed -e 's/(.*)//' > et05_real_$enhan.txt
  paste -d" " et05_real_$enhan.ids et05_real_$enhan.txt | sort -k 1 > et05_real_$enhan.trans1
fi

# Do some basic normalization steps. At this point we don't remove OOVs--
# that will be done inside the training scripts, as we'd like to make the
# data-preparation stage independent of the specific lexicon used.
noiseword="<NOISE>";
for x in $list_set; do
  cat $x.trans1 | $local/normalize_transcript.pl $noiseword \
    | sort > $x.txt || exit 1;
done

# Make the utt2spk and spk2utt files.
for x in $list_set; do
  cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk
  cat ${x}_wav.scp | awk '{print $1}' > $x.utt
  paste -d" " $x.utt $x.spk > $x.utt2spk
  cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
done

# copying data to data/...
for x in $list_set; do
  mkdir -p $odir/$x
  cp ${x}_wav.scp $odir/$x/wav.scp || exit 1;
  cp ${x}.txt $odir/$x/text || exit 1;
  cp ${x}.spk2utt $odir/$x/spk2utt || exit 1;
  cp ${x}.utt2spk $odir/$x/utt2spk || exit 1;
done

echo "Data preparation succeeded"
tools/ASR_2ch_track/local/real_noisy_chime4_data_prep.sh ADDED
@@ -0,0 +1,114 @@
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ # Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
5
+ # Apache 2.0.
6
+
7
+ # This is modified from the script in standard Kaldi recipe to account
8
+ # for the way the WSJ data is structured on the Edinburgh systems.
9
+ # - Arnab Ghoshal, 29/05/12
10
+
11
+ # Modified from the script for CHiME2 baseline
12
+ # Shinji Watanabe 02/13/2015
13
+
14
+ # Config:
15
+ eval_flag=true # make it true when the evaluation data are released
16
+
17
+ . utils/parse_options.sh || exit 1;
18
+
19
+ if [ $# -ne 1 ]; then
20
+ printf "\nUSAGE: %s <corpus-directory>\n\n" `basename $0`
21
+ echo "The argument should be a the top-level Chime4 directory."
22
+ echo "It is assumed that there will be a 'data' subdirectory"
23
+ echo "within the top-level corpus directory."
24
+ exit 1;
25
+ fi
26
+
27
+ echo "$0 $@" # Print the command line for logging
28
+
29
+ audio_dir=$1/data/audio/16kHz/isolated
30
+ trans_dir=$1/data/transcriptions
31
+
32
+ echo "extract 5th channel (CH5.wav, the center bottom edge in the front of the tablet) for noisy data"
33
+
34
+ dir=`pwd`/data/local/data
35
+ lmdir=`pwd`/data/local/nist_lm
36
+ mkdir -p $dir $lmdir
37
+ local=`pwd`/local
38
+ utils=`pwd`/utils
39
+
40
+ . ./path.sh # Needed for KALDI_ROOT
41
+ export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin
42
+ sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
43
+ if [ ! -x $sph2pipe ]; then
44
+ echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
45
+ exit 1;
46
+ fi
47
+
48
+ if $eval_flag; then
49
+ list_set="tr05_real_noisy dt05_real_noisy et05_real_noisy"
50
+ else
51
+ list_set="tr05_real_noisy dt05_real_noisy"
52
+ fi
53
+
54
+ cd $dir
55
+
56
+ find $audio_dir -name '*CH5.wav' | grep 'tr05_bus_real\|tr05_caf_real\|tr05_ped_real\|tr05_str_real' | sort -u > tr05_real_noisy.flist
57
+ find $audio_dir -name '*CH5.wav' | grep 'dt05_bus_real\|dt05_caf_real\|dt05_ped_real\|dt05_str_real' | sort -u > dt05_real_noisy.flist
58
+ if $eval_flag; then
59
+ find $audio_dir -name '*CH5.wav' | grep 'et05_bus_real\|et05_caf_real\|et05_ped_real\|et05_str_real' | sort -u > et05_real_noisy.flist
60
+ fi
61
+
62
+ # make a dot format from json annotation files
63
+ cp $trans_dir/tr05_real.dot_all tr05_real.dot
64
+ cp $trans_dir/dt05_real.dot_all dt05_real.dot
65
+ if $eval_flag; then
66
+ cp $trans_dir/et05_real.dot_all et05_real.dot
67
+ fi
68
+
69
+ # make a scp file from file list
70
+ for x in $list_set; do
71
+ cat $x.flist | awk -F'[/]' '{print $NF}'| sed -e 's/\.wav/_REAL/' > ${x}_wav.ids
72
+ paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp
73
+ done
74
+
75
+ #make a transcription from dot
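+ # (each .dot line has the form "TRANSCRIPT (utt_id)"; the sed strips the
+ # parentheses and awk keeps the trailing utt_id, appending the channel suffix)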
+ cat tr05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_REAL"}'> tr05_real_noisy.ids
+ cat tr05_real.dot | sed -e 's/(.*)//' > tr05_real_noisy.txt
+ paste -d" " tr05_real_noisy.ids tr05_real_noisy.txt | sort -k 1 > tr05_real_noisy.trans1
+ cat dt05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_REAL"}'> dt05_real_noisy.ids
+ cat dt05_real.dot | sed -e 's/(.*)//' > dt05_real_noisy.txt
+ paste -d" " dt05_real_noisy.ids dt05_real_noisy.txt | sort -k 1 > dt05_real_noisy.trans1
+ if $eval_flag; then
+ cat et05_real.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_REAL"}'> et05_real_noisy.ids
+ cat et05_real.dot | sed -e 's/(.*)//' > et05_real_noisy.txt
+ paste -d" " et05_real_noisy.ids et05_real_noisy.txt | sort -k 1 > et05_real_noisy.trans1
+ fi
+
+ # Do some basic normalization steps. At this point we don't remove OOVs--
+ # that will be done inside the training scripts, as we'd like to make the
+ # data-preparation stage independent of the specific lexicon used.
+ noiseword="<NOISE>";
+ for x in $list_set;do
+ cat $x.trans1 | $local/normalize_transcript.pl $noiseword \
+ | sort > $x.txt || exit 1;
+ done
+
+ # Make the utt2spk and spk2utt files.
+ for x in $list_set; do
+ cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk
+ cat ${x}_wav.scp | awk '{print $1}' > $x.utt
+ paste -d" " $x.utt $x.spk > $x.utt2spk
+ cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
+ done
+
+ # copying data to data/...
+ for x in $list_set; do
+ mkdir -p ../../$x
+ cp ${x}_wav.scp ../../$x/wav.scp || exit 1;
+ cp ${x}.txt ../../$x/text || exit 1;
+ cp ${x}.spk2utt ../../$x/spk2utt || exit 1;
+ cp ${x}.utt2spk ../../$x/utt2spk || exit 1;
+ done
+
+ echo "Data preparation succeeded"
tools/ASR_2ch_track/local/run_beamform_2ch_track.sh ADDED
@@ -0,0 +1,83 @@
+ #!/bin/bash
+
+ # Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Shinji Watanabe)
+
+ . ./cmd.sh
+ . ./path.sh
+
+ # Config:
+ nj=10
+ cmd=run.pl
+
+ . utils/parse_options.sh || exit 1;
+
+ if [ $# != 2 ]; then
+ echo "Wrong #arguments ($#, expected 2)"
+ echo "Usage: local/run_beamform_2ch_track.sh [options] <wav-in-dir> <wav-out-dir>"
+ echo "main options (for others, see top of script file)"
+ echo " --nj <nj> # number of parallel jobs"
+ echo " --cmd <cmd> # Command to run in parallel with"
+ exit 1;
+ fi
+
+ sdir=$1
+ odir=$2
+
+ wdir=data/beamforming_2ch_track
+
+ if [ -z $BEAMFORMIT ] ; then
+ export BEAMFORMIT=$KALDI_ROOT/tools/BeamformIt
+ fi
+ export PATH=${PATH}:$BEAMFORMIT
+ ! hash BeamformIt && echo "Missing BeamformIt, run 'cd ../../../tools/; make beamformit;'" && exit 1
+
+ # Set bash to 'debug' mode, it will exit on :
+ # -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+ set -e
+ set -u
+ set -o pipefail
+
+ mkdir -p $odir
+ mkdir -p $wdir/log
+
+ allwavs=`find $sdir/ | grep "\.wav" | tr ' ' '\n' | awk -F '/' '{print $(NF-1)"/"$NF}'`
+
+ # wavfiles.list can be used as the name of the output files
+ output_wavfiles=$wdir/wavfiles.list
+ echo $allwavs | tr ' ' '\n' | awk -F '.' '{print $1}' | sort | uniq > $output_wavfiles
+
+ # channel list
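+ # (after sorting, the CH1/CH2 files of each utterance alternate, so the odd
+ # lines (NR%2==1) are the first channels and the even lines (NR%2==0) the second)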
+ input_arrays=$wdir/channels
+ echo $allwavs | tr ' ' '\n' | sort | awk 'NR%2==1' > $wdir/channels.1st
+ echo $allwavs | tr ' ' '\n' | sort | awk 'NR%2==0' > $wdir/channels.2nd
+ paste -d" " $output_wavfiles $wdir/channels.1st $wdir/channels.2nd > $input_arrays
+
+ # split the list for parallel processing
+ split_wavfiles=""
+ for n in `seq $nj`; do
+ split_wavfiles="$split_wavfiles $output_wavfiles.$n"
+ done
+ utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1;
+
+ echo -e "Beamforming\n"
+ # making a shell script for each job
+ for n in `seq $nj`; do
+ cat << EOF > $wdir/log/beamform.$n.sh
+ while read line; do
+ $BEAMFORMIT/BeamformIt -s \$line -c $input_arrays \
+ --config_file `pwd`/conf/chime4.cfg \
+ --source_dir $sdir \
+ --result_dir $odir
+ done < $output_wavfiles.$n
+ EOF
+ done
+ # making a subdirectory for the output wav files
+ for x in `awk -F '/' '{print $1}' $output_wavfiles | sort | uniq`; do
+ mkdir -p $odir/$x
+ done
+
+ chmod a+x $wdir/log/beamform.*.sh
+ $cmd JOB=1:$nj $wdir/log/beamform.JOB.log \
+ $wdir/log/beamform.JOB.sh
+
+ echo "`basename $0` Done."
tools/ASR_2ch_track/local/run_beamform_6ch_track.sh ADDED
@@ -0,0 +1,100 @@
+ #!/bin/bash
+
+ # Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Shinji Watanabe)
+
+ . ./cmd.sh
+ . ./path.sh
+
+ # Config:
+ nj=10
+ cmd=run.pl
+ bmf="1 3 4 5 6"
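+ # (channel 2 is the microphone on the rear of the tablet, facing away from
+ # the speaker, which is why it is left out of the default beamforming set)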
+ eval_flag=true # make it true when the evaluation data are released
+
+ . utils/parse_options.sh || exit 1;
+
+ if [ $# != 2 ]; then
+ echo "Wrong #arguments ($#, expected 2)"
+ echo "Usage: local/run_beamform_6ch_track.sh [options] <wav-in-dir> <wav-out-dir>"
+ echo "main options (for others, see top of script file)"
+ echo " --nj <nj> # number of parallel jobs"
+ echo " --cmd <cmd> # Command to run in parallel with"
+ echo " --bmf \"1 3 4 5 6\" # microphones used for beamforming (2nd mic is omitted by default)"
+ exit 1;
+ fi
+
+ sdir=$1
+ odir=$2
+ wdir=data/beamforming_`echo $bmf | tr ' ' '_'`
+
+ if [ -z $BEAMFORMIT ] ; then
+ export BEAMFORMIT=$KALDI_ROOT/tools/BeamformIt
+ fi
+ export PATH=${PATH}:$BEAMFORMIT
+ ! hash BeamformIt && echo "Missing BeamformIt, run 'cd ../../../tools/; make beamformit;'" && exit 1
+
+ # Set bash to 'debug' mode, it will exit on :
+ # -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+ set -e
+ set -u
+ set -o pipefail
+
+ mkdir -p $odir
+ mkdir -p $wdir/log
+
+ echo "Will use the following channels: $bmf"
+ # number of channels
+ numch=`echo $bmf | tr ' ' '\n' | wc -l`
+ echo "the number of channels: $numch"
+
+ # wavfiles.list can be used as the name of the output files
+ # we only process dev and eval waves
+ output_wavfiles=$wdir/wavfiles.list
+ if $eval_flag; then
+ find $sdir/{dt,et}*{simu,real}/ | grep CH1.wav \
+ | awk -F '/' '{print $(NF-1) "/" $NF}' | sed -e "s/\.CH1\.wav//" | sort > $output_wavfiles
+ else
+ find $sdir/dt*{simu,real}/ | grep CH1.wav \
+ | awk -F '/' '{print $(NF-1) "/" $NF}' | sed -e "s/\.CH1\.wav//" | sort > $output_wavfiles
+ fi
+
+ # this is an input file list of the microphones
+ # format: 1st_wav 2nd_wav ... nth_wav
+ input_arrays=$wdir/channels_$numch
+ for x in `cat $output_wavfiles`; do
+ echo -n "$x"
+ for ch in $bmf; do
+ echo -n " $x.CH$ch.wav"
+ done
+ echo ""
+ done > $input_arrays
+
+ # split the list for parallel processing
+ split_wavfiles=""
+ for n in `seq $nj`; do
+ split_wavfiles="$split_wavfiles $output_wavfiles.$n"
+ done
+ utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1;
+
+ echo -e "Beamforming\n"
+ # making a shell script for each job
+ for n in `seq $nj`; do
+ cat << EOF > $wdir/log/beamform.$n.sh
+ while read line; do
+ $BEAMFORMIT/BeamformIt -s \$line -c $input_arrays \
+ --config_file `pwd`/conf/chime4.cfg \
+ --source_dir $sdir \
+ --result_dir $odir
+ done < $output_wavfiles.$n
+ EOF
+ done
+ # making a subdirectory for the output wav files
+ for x in `awk -F '/' '{print $1}' $output_wavfiles | sort | uniq`; do
+ mkdir -p $odir/$x
+ done
+
+ chmod a+x $wdir/log/beamform.*.sh
+ $cmd JOB=1:$nj $wdir/log/beamform.JOB.log \
+ $wdir/log/beamform.JOB.sh
+
+ echo "`basename $0` Done."
tools/ASR_2ch_track/local/run_dnn.sh ADDED
@@ -0,0 +1,230 @@
+ #!/bin/bash
+
+ # Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer)
+ # Inria (Emmanuel Vincent)
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ # This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2
+ # made by Chao Weng
+
+ . ./path.sh
+ . ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
+ ## This relates to the queue.
+
+ # Config:
+ nj=30
+ stage=0 # resume training with --stage=N
+ train=noisy
+ eval_flag=true # make it true when the evaluation data are released
+
+ . utils/parse_options.sh || exit 1;
+
+ # This is a shell script, but it's recommended that you run the commands one by
+ # one by copying and pasting into the shell.
+
+ if [ $# -ne 1 ]; then
+ printf "\nUSAGE: %s <enhancement method>\n\n" `basename $0`
+ echo "First argument specifies a unique name for different enhancement method"
+ exit 1;
+ fi
+
+ # set enhanced data
+ enhan=$1
+
+ # Set bash to 'debug' mode, it will exit on :
+ # -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+ set -e
+ set -u
+ set -o pipefail
+
+ # check whether run_init is executed
+ if [ ! -d data/lang ]; then
+ echo "error, execute local/run_init.sh, first"
+ exit 1;
+ fi
+
+ # check whether run_gmm is executed
+ if [ ! -d exp/tri3b_tr05_multi_${train} ]; then
+ echo "error, execute local/run_gmm.sh, first"
+ exit 1;
+ fi
+
+ # get alignments
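+ # (forced alignments from the SAT GMM system (tri3b) provide the senone
+ # targets for the DNN cross-entropy training below)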
+ if [ $stage -le 0 ]; then
+ steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" \
+ data/tr05_multi_${train} data/lang exp/tri3b_tr05_multi_${train} exp/tri3b_tr05_multi_${train}_ali
+ steps/align_fmllr.sh --nj 4 --cmd "$train_cmd" \
+ data/dt05_multi_$enhan data/lang exp/tri3b_tr05_multi_${train} exp/tri3b_tr05_multi_${train}_ali_dt05
+ fi
+
+ # make fmllr feature for training multi = simu + real
+ gmmdir=exp/tri3b_tr05_multi_${train}_ali
+ data_fmllr=data-fmllr-tri3b
+ mkdir -p $data_fmllr
+ fmllrdir=fmllr-tri3b/${train}
+ if [ $stage -le 1 ]; then
+ for x in tr05_real_${train} tr05_simu_${train}; do
+ steps/nnet/make_fmllr_feats.sh --nj 4 --cmd "$train_cmd" \
+ --transform-dir $gmmdir \
+ $data_fmllr/$x data/$x $gmmdir exp/make_fmllr_tri3b/$x $fmllrdir
+ done
+ fi
+
+ # make fmllr feature for dev and eval
+ gmmdir=exp/tri3b_tr05_multi_${train}
+ fmllrdir=fmllr-tri3b/$enhan
+ if [ $stage -le 2 ]; then
+ if $eval_flag; then
+ tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan"
+ else
+ tasks="dt05_real_$enhan dt05_simu_$enhan"
+ fi
+ for x in $tasks; do
+ steps/nnet/make_fmllr_feats.sh --nj 4 --cmd "$train_cmd" \
+ --transform-dir $gmmdir/decode_tgpr_5k_$x \
+ $data_fmllr/$x data/$x $gmmdir exp/make_fmllr_tri3b/$x $fmllrdir
+ done
+ fi
+
+ # make mixed training set from real and simulation enhanced data
+ # multi = simu + real
+ if [ $stage -le 3 ]; then
+ utils/combine_data.sh $data_fmllr/tr05_multi_${train} $data_fmllr/tr05_simu_${train} $data_fmllr/tr05_real_${train}
+ utils/combine_data.sh $data_fmllr/dt05_multi_$enhan $data_fmllr/dt05_simu_$enhan $data_fmllr/dt05_real_$enhan
+ if $eval_flag; then
+ utils/combine_data.sh $data_fmllr/et05_multi_$enhan $data_fmllr/et05_simu_$enhan $data_fmllr/et05_real_$enhan
+ fi
+ fi
+
+ # pre-train dnn
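+ # (unsupervised RBM pre-training: a 7-layer DBN initializes the DNN weights
+ # before the supervised cross-entropy training in the next stage)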
+ dir=exp/tri4a_dnn_pretrain_tr05_multi_${train}
+ if [ $stage -le 4 ]; then
+ $cuda_cmd $dir/_pretrain_dbn.log \
+ steps/nnet/pretrain_dbn.sh --nn-depth 7 --rbm-iter 3 $data_fmllr/tr05_multi_${train} $dir
+ fi
+
+ # train dnn
+ dir=exp/tri4a_dnn_tr05_multi_${train}
+ ali=exp/tri3b_tr05_multi_${train}_ali
+ ali_dev=exp/tri3b_tr05_multi_${train}_ali_dt05
+ feature_transform=exp/tri4a_dnn_pretrain_tr05_multi_${train}/final.feature_transform
+ dbn=exp/tri4a_dnn_pretrain_tr05_multi_${train}/7.dbn
+ if [ $stage -le 5 ]; then
+ $cuda_cmd $dir/_train_nnet.log \
+ steps/nnet/train.sh --feature-transform $feature_transform --dbn $dbn --hid-layers 0 --learn-rate 0.008 \
+ $data_fmllr/tr05_multi_${train} $data_fmllr/dt05_multi_$enhan data/lang $ali $ali_dev $dir
+ fi
+
+ # decode enhanced speech
+ if [ $stage -le 6 ]; then
+ utils/mkgraph.sh data/lang_test_tgpr_5k $dir $dir/graph_tgpr_5k
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --acwt 0.10 --config conf/decode_dnn.config \
+ $dir/graph_tgpr_5k $data_fmllr/dt05_real_$enhan $dir/decode_tgpr_5k_dt05_real_$enhan &
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --acwt 0.10 --config conf/decode_dnn.config \
+ $dir/graph_tgpr_5k $data_fmllr/dt05_simu_$enhan $dir/decode_tgpr_5k_dt05_simu_$enhan &
+ if $eval_flag; then
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --acwt 0.10 --config conf/decode_dnn.config \
+ $dir/graph_tgpr_5k $data_fmllr/et05_real_$enhan $dir/decode_tgpr_5k_et05_real_$enhan &
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --acwt 0.10 --config conf/decode_dnn.config \
+ $dir/graph_tgpr_5k $data_fmllr/et05_simu_$enhan $dir/decode_tgpr_5k_et05_simu_$enhan &
+ fi
+ wait;
+ fi
+
+ # Sequence training using the sMBR criterion; we do stochastic GD
+ # with per-utterance updates, using the usually-good acwt of 0.1.
+ # Lattices are re-generated after 1st epoch, to get faster convergence.
+ dir=exp/tri4a_dnn_tr05_multi_${train}_smbr
+ srcdir=exp/tri4a_dnn_tr05_multi_${train}
+ acwt=0.1
+
+ # First we generate lattices and alignments:
+ # gawk must be installed to perform awk -v FS="/" '{ print gensub(".gz","","",$NF)" gunzip -c "$0" |"; }' in
+ # steps/nnet/make_denlats.sh
+ if [ $stage -le 7 ]; then
+ steps/nnet/align.sh --nj $nj --cmd "$train_cmd" \
+ $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali
+ steps/nnet/make_denlats.sh --nj $nj --cmd "$decode_cmd" --config conf/decode_dnn.config --acwt $acwt \
+ $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_denlats
+ fi
+
+ # Re-train the DNN by 1 iteration of sMBR
+ if [ $stage -le 8 ]; then
+ steps/nnet/train_mpe.sh --cmd "$cuda_cmd" --num-iters 1 --acwt $acwt --do-smbr true \
+ $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali ${srcdir}_denlats $dir
+ fi
+
+ # Decode (reuse HCLG graph)
+ if [ $stage -le 9 ]; then
+ for ITER in 1; do
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_real_${enhan} $dir/decode_tgpr_5k_dt05_real_${enhan}_it${ITER} &
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_simu_${enhan} $dir/decode_tgpr_5k_dt05_simu_${enhan}_it${ITER} &
+ if $eval_flag; then
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_real_${enhan} $dir/decode_tgpr_5k_et05_real_${enhan}_it${ITER} &
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_simu_${enhan} $dir/decode_tgpr_5k_et05_simu_${enhan}_it${ITER} &
+ fi
+ done
+ fi
+
+ # Re-generate lattices, run 4 more sMBR iterations
+ dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats
+ srcdir=exp/tri4a_dnn_tr05_multi_${train}_smbr
+ acwt=0.1
+
+ # Generate lattices and alignments:
+ if [ $stage -le 10 ]; then
+ steps/nnet/align.sh --nj $nj --cmd "$train_cmd" \
+ $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali
+ steps/nnet/make_denlats.sh --nj $nj --cmd "$decode_cmd" --config conf/decode_dnn.config --acwt $acwt \
+ $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_denlats
+ fi
+
+ # Re-train the DNN by 4 iterations of sMBR
+ if [ $stage -le 11 ]; then
+ steps/nnet/train_mpe.sh --cmd "$cuda_cmd" --num-iters 4 --acwt $acwt --do-smbr true \
+ $data_fmllr/tr05_multi_${train} data/lang $srcdir ${srcdir}_ali ${srcdir}_denlats $dir || exit 1
+ fi
+
+ # Decode (reuse HCLG graph)
+ if [ $stage -le 12 ]; then
+ for ITER in 1 2 3 4; do
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_real_${enhan} $dir/decode_tgpr_5k_dt05_real_${enhan}_it${ITER} &
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_simu_${enhan} $dir/decode_tgpr_5k_dt05_simu_${enhan}_it${ITER} &
+ if $eval_flag; then
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_real_${enhan} $dir/decode_tgpr_5k_et05_real_${enhan}_it${ITER} &
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_simu_${enhan} $dir/decode_tgpr_5k_et05_simu_${enhan}_it${ITER} &
+ fi
+ done
+ wait
+ fi
+
+ # scoring
+ if [ $stage -le 13 ]; then
+ # decoded results of enhanced speech using the cross-entropy DNN AMs
+ local/chime4_calc_wers.sh exp/tri4a_dnn_tr05_multi_${train} $enhan exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k \
+ > exp/tri4a_dnn_tr05_multi_${train}/best_wer_$enhan.result
+ head -n 15 exp/tri4a_dnn_tr05_multi_${train}/best_wer_$enhan.result
+ # decoded results of enhanced speech using the sequence-trained (sMBR) DNN
+ ./local/chime4_calc_wers_smbr.sh exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats ${enhan} exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k \
+ > exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats/best_wer_${enhan}.result
+ head -n 15 exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats/best_wer_${enhan}.result
+ fi
+
+ echo "`basename $0` Done."
tools/ASR_2ch_track/local/run_dnn_recog.sh ADDED
@@ -0,0 +1,143 @@
+ #!/bin/bash
+
+ # Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer)
+ # Inria (Emmanuel Vincent)
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ # This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2
+ # made by Chao Weng
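+ # This "recog" variant does no training: it copies the pre-trained GMM/DNN
+ # models from <model dir> and only runs feature extraction, decoding and scoring.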
10
+
11
+ . ./path.sh
12
+ . ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
13
+ ## This relates to the queue.
14
+
15
+ # Config:
16
+ nj=30
17
+ stage=0 # resume training with --stage=N
18
+ train=noisy
19
+ eval_flag=true # make it true when the evaluation data are released
20
+
21
+ . utils/parse_options.sh || exit 1;
22
+
23
+ # This is a shell script, but it's recommended that you run the commands one by
24
+ # one by copying and pasting into the shell.
25
+
26
+ if [ $# -ne 2 ]; then
27
+ printf "\nUSAGE: %s <enhancement method> <model dir>\n\n" `basename $0`
28
+ echo "First argument specifies a unique name for different enhancement method"
29
+ echo "Second argument specifies acoustic and language model directory"
30
+ exit 1;
31
+ fi
32
+
33
+ # set enhanced data
34
+ enhan=$1
35
+ # set model directory
36
+ mdir=$2
37
+
38
+ # Set bash to 'debug' mode, it will exit on :
39
+ # -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
40
+ set -e
41
+ set -u
42
+ set -o pipefail
43
+
44
+ # check data/loca/data
45
+ if [ ! -d $mdir/data/local/data ]; then
46
+ echo "error, set $mdir correctly"
47
+ exit 1;
48
+ elif [ ! -d data/local/data ]; then
49
+ echo "copy $mdir/data/local/data"
50
+ mkdir -p data/local
51
+ cp -r $mdir/data/local/data data/local/
52
+ fi
53
+
54
+ # check gmm model
55
+ if [ ! -d $mdir/exp/tri3b_tr05_multi_${train} ]; then
56
+ echo "error, set $mdir correctly"
57
+ exit 1;
58
+ elif [ ! -d exp/tri3b_tr05_multi_${train} ]; then
59
+ echo "copy $mdir/exp/tri3b_tr05_multi_${train}"
60
+ mkdir -p exp
61
+ cp -r $mdir/exp/tri3b_tr05_multi_${train} exp/
62
+ fi
63
+
64
+ # check dnn graph
65
+ if [ ! -d $mdir/exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then
66
+ echo "error, set $mdir correctly"
67
+ exit 1;
68
+ elif [ ! -d exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then
69
+ echo "copy $mdir/exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k"
70
+ mkdir -p exp/tri4a_dnn_tr05_multi_${train}
71
+ cp -r $mdir/exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k exp/tri4a_dnn_tr05_multi_${train}/
72
+ fi
73
+
74
+ # check dnn smbr model
75
+ if [ ! -d $mdir/exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats ]; then
76
+ echo "error, set $mdir correctly"
77
+ exit 1;
78
+ elif [ ! -d exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats ]; then
79
+ echo "copy $mdir/exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats"
80
+ mkdir -p exp
81
+ cp -r $mdir/exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats exp/
82
+ fi
83
+
84
+ # make fmllr feature for dev and eval
85
+ gmmdir=exp/tri3b_tr05_multi_${train}
86
+ data_fmllr=data-fmllr-tri3b
87
+ mkdir -p $data_fmllr
88
+ fmllrdir=fmllr-tri3b/$enhan
89
+ if [ $stage -le 4 ]; then
90
+ if $eval_flag; then
91
+ tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan"
92
+ else
93
+ tasks="dt05_real_$enhan dt05_simu_$enhan"
94
+ fi
95
+ for x in $tasks; do
96
+ steps/nnet/make_fmllr_feats.sh --nj 4 --cmd "$train_cmd" \
97
+ --transform-dir $gmmdir/decode_tgpr_5k_$x \
98
+ $data_fmllr/$x data/$x $gmmdir exp/make_fmllr_tri3b/$x $fmllrdir
99
+ done
100
+ fi
101
+
102
+ # make mixed training set from real and simulation enhanced data
103
+ # multi = simu + real
104
+ if [ $stage -le 5 ]; then
105
+ utils/combine_data.sh $data_fmllr/dt05_multi_$enhan $data_fmllr/dt05_simu_$enhan $data_fmllr/dt05_real_$enhan
106
+ if $eval_flag; then
107
+ utils/combine_data.sh $data_fmllr/et05_multi_$enhan $data_fmllr/et05_simu_$enhan $data_fmllr/et05_real_$enhan
108
+ fi
109
+ fi
110
+
111
+ # Re-generate lattices, run 4 more sMBR iterations
112
+ dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats
113
+ acwt=0.1
114
+
115
+ # Decode (reuse HCLG graph)
116
+ if [ $stage -le 6 ]; then
117
+ for ITER in 1 2 3 4; do
118
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
119
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
120
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_real_${enhan} $dir/decode_tgpr_5k_dt05_real_${enhan}_it${ITER} &
121
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
122
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
123
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/dt05_simu_${enhan} $dir/decode_tgpr_5k_dt05_simu_${enhan}_it${ITER} &
124
+ if $eval_flag; then
125
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
126
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
127
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_real_${enhan} $dir/decode_tgpr_5k_et05_real_${enhan}_it${ITER} &
128
+ steps/nnet/decode.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" --config conf/decode_dnn.config \
129
+ --nnet $dir/${ITER}.nnet --acwt $acwt \
130
+ exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k $data_fmllr/et05_simu_${enhan} $dir/decode_tgpr_5k_et05_simu_${enhan}_it${ITER} &
131
+ fi
132
+ wait
133
+ done
134
+ fi
135
+
136
+ # scoring
137
+ if [ $stage -le 7 ]; then
138
+ # decoded results of enhanced speech using sequence-training DNN
139
+ ./local/chime4_calc_wers_smbr.sh $dir ${enhan} exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k > $dir/best_wer_${enhan}.result
140
+ head -n 15 $dir/best_wer_${enhan}.result
141
+ fi
142
+
143
+ echo "`basename $0` Done."
tools/ASR_2ch_track/local/run_gmm.sh ADDED
@@ -0,0 +1,186 @@
+ #!/bin/bash
+
+ # Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer)
+ # Inria (Emmanuel Vincent)
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ # This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2
+ # made by Chao Weng
+
+ . ./path.sh
+ . ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
+ ## This relates to the queue.
+
+ # Config:
+ nj=30
+ stage=0 # resume training with --stage=N
+ train=noisy # noisy data multi-condition training
+ eval_flag=true # make it true when the evaluation data are released
+
+ . utils/parse_options.sh || exit 1;
+
+ # This is a shell script, but it's recommended that you run the commands one by
+ # one by copying and pasting into the shell.
+
+ if [ $# -ne 3 ]; then
+ printf "\nUSAGE: %s <enhancement method> <enhanced speech directory> <chime4 root directory>\n\n" `basename $0`
+ echo "First argument specifies a unique name for different enhancement method"
+ echo "Second argument specifies the directory of enhanced wav files"
+ echo "Third argument specifies the CHiME4 root directory"
+ exit 1;
+ fi
+
+ # set enhanced data
+ enhan=$1
+ enhan_data=$2
+ # set chime4 data
+ chime4_data=$3
+
+ # Set bash to 'debug' mode, it will exit on :
+ # -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+ set -e
+ set -u
+ set -o pipefail
+
+ # check whether run_init is executed
+ if [ ! -d data/lang ]; then
+ echo "error, execute local/run_init.sh, first"
+ exit 1;
+ fi
+
+ #######################
+ #### training #########
+ if [ $stage -le 1 ]; then
+ # process for distant talking speech for real and simulated data
+ local/real_noisy_chime4_data_prep.sh $chime4_data
+ local/simu_noisy_chime4_data_prep.sh $chime4_data
+ fi
+
+ # Now make MFCC features for the noisy data
+ # mfccdir should be some place with a largish disk where you
+ # want to store MFCC features.
+ mfccdir=mfcc
+ if [ $stage -le 2 ]; then
+ if $eval_flag; then
+ tasks="tr05_real_${train} dt05_real_${train} tr05_simu_${train} dt05_simu_${train} et05_real_${train} et05_simu_${train}"
+ else
+ tasks="tr05_real_${train} dt05_real_${train} tr05_simu_${train} dt05_simu_${train}"
+ fi
+ for x in $tasks; do
+ steps/make_mfcc.sh --nj 8 --cmd "$train_cmd" \
+ data/$x exp/make_mfcc/$x $mfccdir
+ steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir
+ done
+ fi
+
+ # make mixed training set from real and simulation training data
+ # multi = simu + real
+ if [ $stage -le 3 ]; then
+ utils/combine_data.sh data/tr05_multi_${train} data/tr05_simu_${train} data/tr05_real_${train}
+ utils/combine_data.sh data/dt05_multi_${train} data/dt05_simu_${train} data/dt05_real_${train}
+ if $eval_flag; then
+ utils/combine_data.sh data/et05_multi_${train} data/et05_simu_${train} data/et05_real_${train}
+ fi
+ fi
+
+ # training models for noisy data
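+ # (standard Kaldi GMM ladder: monophone -> tri1 (deltas) -> tri2b (LDA+MLLT)
+ # -> tri3b (SAT/fMLLR), each stage re-aligned with the previous model)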
+ if [ $stage -le 4 ]; then
+ nspk=`wc -l data/tr05_multi_${train}/spk2utt | awk '{print $1}'`
+ if [ $nj -gt $nspk ]; then
+ nj2=$nspk
+ else
+ nj2=$nj
+ fi
+ # training monophone model
+ steps/train_mono.sh --boost-silence 1.25 --nj $nj2 --cmd "$train_cmd" \
+ data/tr05_multi_${train} data/lang exp/mono0a_tr05_multi_${train}
+ steps/align_si.sh --boost-silence 1.25 --nj $nj2 --cmd "$train_cmd" \
+ data/tr05_multi_${train} data/lang exp/mono0a_tr05_multi_${train} exp/mono0a_ali_tr05_multi_${train}
+
+ # training triphone models: delta features first, then LDA+MLLT
+ steps/train_deltas.sh --boost-silence 1.25 --cmd "$train_cmd" \
+ 2000 10000 data/tr05_multi_${train} data/lang exp/mono0a_ali_tr05_multi_${train} exp/tri1_tr05_multi_${train}
+ steps/align_si.sh --nj $nj2 --cmd "$train_cmd" \
+ data/tr05_multi_${train} data/lang exp/tri1_tr05_multi_${train} exp/tri1_ali_tr05_multi_${train}
+
+ steps/train_lda_mllt.sh --cmd "$train_cmd" \
+ --splice-opts "--left-context=3 --right-context=3" \
+ 2500 15000 data/tr05_multi_${train} data/lang exp/tri1_ali_tr05_multi_${train} exp/tri2b_tr05_multi_${train}
+ steps/align_si.sh --nj $nj2 --cmd "$train_cmd" \
+ --use-graphs true data/tr05_multi_${train} data/lang exp/tri2b_tr05_multi_${train} exp/tri2b_ali_tr05_multi_${train}
+
+ steps/train_sat.sh --cmd "$train_cmd" \
+ 2500 15000 data/tr05_multi_${train} data/lang exp/tri2b_ali_tr05_multi_${train} exp/tri3b_tr05_multi_${train}
+ utils/mkgraph.sh data/lang_test_tgpr_5k exp/tri3b_tr05_multi_${train} exp/tri3b_tr05_multi_${train}/graph_tgpr_5k
+ fi
+ #### training done ####
+ #######################
+
+
+ #####################
+ #### testing ########
+ # process for enhanced data
+ if [ $stage -le 5 ]; then
+ if [ ! -d data/dt05_real_$enhan ] || [ ! -d data/et05_real_$enhan ]; then
+ local/real_enhan_chime4_data_prep.sh $enhan $enhan_data
+ local/simu_enhan_chime4_data_prep.sh $enhan $enhan_data
+ fi
+ fi
+
+ # Now make MFCC features for enhanced data
+ # mfccdir should be some place with a largish disk where you
+ # want to store MFCC features.
+ mfccdir=mfcc/$enhan
+ if [ $stage -le 6 ]; then
+ if $eval_flag; then
+ tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan"
+ else
+ tasks="dt05_real_$enhan dt05_simu_$enhan"
+ fi
+ for x in $tasks; do
+ if [ ! -e data/$x/feats.scp ]; then
+ steps/make_mfcc.sh --nj 8 --cmd "$train_cmd" \
+ data/$x exp/make_mfcc/$x $mfccdir
+ steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir
+ fi
+ done
+ fi
+
+ # make mixed training set from real and simulation enhanced data
+ # multi = simu + real
+ if [ $stage -le 7 ]; then
+ if [ ! -d data/dt05_multi_$enhan ]; then
+ utils/combine_data.sh data/dt05_multi_$enhan data/dt05_simu_$enhan data/dt05_real_$enhan
+ if $eval_flag; then
+ utils/combine_data.sh data/et05_multi_$enhan data/et05_simu_$enhan data/et05_real_$enhan
+ fi
+ fi
+ fi
+
+ # decode enhanced speech using the multi-condition tri3b AMs
+ if [ $stage -le 8 ]; then
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_real_$enhan &
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_simu_$enhan &
+ if $eval_flag; then
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_real_$enhan &
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_simu_$enhan &
+ fi
+ wait;
+ fi
+
+ # scoring
+ if [ $stage -le 9 ]; then
+ # decoded results of enhanced speech using the multi-condition tri3b AMs
+ local/chime4_calc_wers.sh exp/tri3b_tr05_multi_${train} $enhan exp/tri3b_tr05_multi_${train}/graph_tgpr_5k \
+ > exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result
+ head -n 15 exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result
+ fi
+ #### testing done ####
+ #####################
+
+ echo "`basename $0` Done."
tools/ASR_2ch_track/local/run_gmm_recog.sh ADDED
@@ -0,0 +1,127 @@
+ #!/bin/bash
+
+ # Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer)
+ # Inria (Emmanuel Vincent)
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ # This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2
+ # made by Chao Weng
+
+ . ./path.sh
+ . ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
+ ## This relates to the queue.
+
+ # Config:
+ nj=30
+ stage=0 # resume training with --stage=N
+ train=noisy
+ eval_flag=true # make it true when the evaluation data are released
+
+ . utils/parse_options.sh || exit 1;
+
+ # This is a shell script, but it's recommended that you run the commands one by
+ # one by copying and pasting into the shell.
+
+ if [ $# -ne 3 ]; then
+ printf "\nUSAGE: %s <enhancement method> <enhanced speech directory> <model dir>\n\n" `basename $0`
+ echo "First argument specifies a unique name for different enhancement method"
+ echo "Second argument specifies the directory of enhanced wav files"
+ echo "Third argument specifies acoustic and language model directory"
+ exit 1;
+ fi
+
+ # set enhanced data
+ enhan=$1
+ enhan_data=$2
+ # set model directory
+ mdir=$3
+
+ # Set bash to 'debug' mode, it will exit on :
+ # -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+ set -e
+ set -u
+ set -o pipefail
+
+ # check data/local/data
+ if [ ! -d $mdir/data/local/data ]; then
+ echo "error, set $mdir correctly"
+ exit 1;
+ elif [ ! -d data/local/data ]; then
+ echo "copy $mdir/data/local/data"
+ mkdir -p data/local
+ cp -r $mdir/data/local/data data/local/
+ fi
+
+ # check gmm model
+ if [ ! -d $mdir/exp/tri3b_tr05_multi_${train} ]; then
+ echo "error, set $mdir correctly"
+ exit 1;
+ elif [ ! -d exp/tri3b_tr05_multi_${train} ]; then
+ echo "copy $mdir/exp/tri3b_tr05_multi_${train}"
+ mkdir -p exp
+ cp -r $mdir/exp/tri3b_tr05_multi_${train} exp/
+ fi
+
+ # process for enhanced data
+ if [ $stage -le 0 ]; then
+ if [ ! -d data/dt05_real_$enhan ] || [ ! -d data/et05_real_$enhan ]; then
+ local/real_enhan_chime4_data_prep.sh $enhan $enhan_data
+ local/simu_enhan_chime4_data_prep.sh $enhan $enhan_data
+ fi
+ fi
+
+ # Now make MFCC features for enhanced data
+ # mfccdir should be some place with a largish disk where you
+ # want to store MFCC features.
+ mfccdir=mfcc/$enhan
+ if [ $stage -le 1 ]; then
+ if $eval_flag; then
+ tasks="dt05_real_$enhan dt05_simu_$enhan et05_real_$enhan et05_simu_$enhan"
+ else
+ tasks="dt05_real_$enhan dt05_simu_$enhan"
+ fi
+ for x in $tasks; do
+ if [ ! -e data/$x/feats.scp ]; then
+ steps/make_mfcc.sh --nj 8 --cmd "$train_cmd" \
+ data/$x exp/make_mfcc/$x $mfccdir
+ steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir
+ fi
+ done
+ fi
+
+ # make mixed training set from real and simulation enhanced data
+ # multi = simu + real
+ if [ $stage -le 2 ]; then
+ if [ ! -d data/dt05_multi_$enhan ]; then
+ utils/combine_data.sh data/dt05_multi_$enhan data/dt05_simu_$enhan data/dt05_real_$enhan
+ if $eval_flag; then
+ utils/combine_data.sh data/et05_multi_$enhan data/et05_simu_$enhan data/et05_real_$enhan
+ fi
+ fi
+ fi
+
+ # decode enhanced speech using the copied multi-condition tri3b AMs
+ if [ $stage -le 3 ]; then
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_real_$enhan &
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/dt05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_dt05_simu_$enhan &
+ if $eval_flag; then
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_real_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_real_$enhan &
+ steps/decode_fmllr.sh --nj 4 --num-threads 3 --cmd "$decode_cmd" \
+ exp/tri3b_tr05_multi_${train}/graph_tgpr_5k data/et05_simu_$enhan exp/tri3b_tr05_multi_${train}/decode_tgpr_5k_et05_simu_$enhan &
+ fi
+ wait;
+ fi
+
+ # scoring
+ if [ $stage -le 4 ]; then
+ # decoded results of enhanced speech using the multi-condition tri3b AMs
+ local/chime4_calc_wers.sh exp/tri3b_tr05_multi_${train} $enhan exp/tri3b_tr05_multi_${train}/graph_tgpr_5k \
+ > exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result
+ head -n 15 exp/tri3b_tr05_multi_${train}/best_wer_$enhan.result
+ fi
+
+ echo "`basename $0` Done."
tools/ASR_2ch_track/local/run_init.sh ADDED
@@ -0,0 +1,52 @@
+ #!/bin/bash
+
+ # Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer)
+ # Inria (Emmanuel Vincent)
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ # Config:
+ nj=30
+ stage=0 # resume training with --stage=N
+ eval_flag=true # make it true when the evaluation data are released
+
+ . utils/parse_options.sh || exit 1;
+
+ # This script is made from the kaldi recipe of the 2nd CHiME Challenge Track 2
+ # made by Chao Weng
+
+ . ./path.sh
+ . ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
+ ## This relates to the queue.
+
+ if [ $# -ne 1 ]; then
+ printf "\nUSAGE: %s <Chime4 root directory>\n\n" `basename $0`
+ echo "Please specify the CHiME4 root directory"
+ echo "If you use scripts distributed in the CHiME4 package,"
+ echo "it would be `pwd`/../.."
+ exit 1;
+ fi
+
+ # This is a shell script, but it's recommended that you run the commands one by
+ # one by copying and pasting into the shell.
+
+ # clean data
+ chime4_data=$1
+ wsj0_data=$chime4_data/data/WSJ0 # directory of WSJ0 in Chime4. You can also specify your WSJ0 corpus directory
+
+ # Set bash to 'debug' mode, it will exit on :
+ # -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
+ set -e
+ set -u
+ set -o pipefail
+
+ if [ $stage -le 0 ]; then
+ # process for clean speech and making LMs etc. from original WSJ0
+ # note that training on clean data means original WSJ0 data only (no booth data)
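+ # (this stage builds data/local/dict, data/lang via prepare_lang.sh, and the
+ # formatted lang_test directories used by the later GMM/DNN stages)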
+ local/clean_wsj0_data_prep.sh $wsj0_data
+ local/wsj_prepare_dict.sh
+ utils/prepare_lang.sh data/local/dict "<SPOKEN_NOISE>" data/local/lang_tmp data/lang
+ local/clean_chime4_format_data.sh
+ fi
+
+ echo "`basename $0` Done."
tools/ASR_2ch_track/local/run_lmrescore.sh ADDED
@@ -0,0 +1,136 @@
+ #!/bin/bash
+
+ # Copyright 2015 University of Sheffield (Jon Barker, Ricard Marxer)
+ # Inria (Emmanuel Vincent)
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ # Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori)
+
+ nj=12
+ stage=1
+ order=5
+ hidden=300
+ rnnweight=0.5
+ nbest=100
+ train=noisy
+ eval_flag=true # make it true when the evaluation data are released
+
+ . utils/parse_options.sh || exit 1;
+
+ . ./path.sh
+ . ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
+ ## This relates to the queue.
+
+ # This is a shell script, but it's recommended that you run the commands one by
+ # one by copying and pasting into the shell.
+
+ if [ $# -ne 2 ]; then
+ printf "\nUSAGE: %s <Chime4 root directory> <enhancement method>\n\n" `basename $0`
+ echo "First argument specifies a root directory of Chime4 data"
+ echo "Second argument specifies a unique name for different enhancement method"
+ exit 1;
+ fi
+
+ # set language models
+ lm_suffix=${order}gkn_5k
+ rnnlm_suffix=rnnlm_5k_h${hidden}
+
+ # data root
+ chime4_data=$1
+ # enhan data
+ enhan=$2
+
+ # check data
+ if [ ! -d $chime4_data ]; then
+ echo "$chime4_data does not exist. Please specify chime4 data root correctly" && exit 1
+ fi
+
+ # check whether run_dnn is executed
+ srcdir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats
+ if [ ! -d $srcdir ]; then
+ echo "error, execute local/run_dnn.sh, first"
+ exit 1;
+ fi
+
+ # train a high-order n-gram language model
+ if [ $stage -le 1 ]; then
+ local/chime4_train_lms.sh $chime4_data || exit 1;
+ fi
+
+ # train an RNN language model
+ if [ $stage -le 2 ]; then
+ local/chime4_train_rnnlms.sh $chime4_data || exit 1;
+ fi
+
+ # preparation
+ dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_lmrescore
+ mkdir -p $dir
+ # make a symbolic link to graph info
+ if [ ! -e $dir/graph_tgpr_5k ]; then
+ if [ ! -e exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then
+ echo "graph is missing, execute local/run_dnn.sh correctly"
+ exit 1;
+ fi
+ pushd . ; cd $dir
+ ln -s ../tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k .
+ popd
+ fi
+
+ # rescore lattices by a high-order N-gram
+ if [ $stage -le 3 ]; then
+ # check the best iteration
+ if [ ! -f $srcdir/log/best_wer_$enhan ]; then
+ echo "error, execute local/run_dnn.sh, first"
+ exit 1;
+ fi
+ it=`cut -f 1 -d" " $srcdir/log/best_wer_$enhan | awk -F'[_]' '{print $1}'`
+ # rescore lattices
+ if $eval_flag; then
+ tasks="dt05_simu dt05_real et05_simu et05_real"
+ else
+ tasks="dt05_simu dt05_real"
+ fi
+ for t in $tasks; do
+ steps/lmrescore.sh --mode 3 \
+ data/lang_test_tgpr_5k \
+ data/lang_test_${lm_suffix} \
+ data-fmllr-tri3b/${t}_$enhan \
+ $srcdir/decode_tgpr_5k_${t}_${enhan}_it$it \
+ $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix}
+ done
+ # rescored results by high-order n-gram LM
+ mkdir -p $dir/log
+ local/chime4_calc_wers.sh $dir ${enhan}_${lm_suffix} $dir/graph_tgpr_5k \
+ > $dir/best_wer_${enhan}_${lm_suffix}.result
+ head -n 15 $dir/best_wer_${enhan}_${lm_suffix}.result
+ fi
+
+ # N-best rescoring using an RNNLM
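+ # (the RNNLM score is interpolated with the n-gram LM score with weight
+ # $rnnweight over the $nbest best hypotheses extracted from each lattice)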
+ if [ $stage -le 4 ]; then
+ # check the best lmw
+ if [ ! -f $dir/log/best_wer_${enhan}_${lm_suffix} ]; then
+ echo "error, rescoring with a high-order n-gram seems to have failed"
+ exit 1;
+ fi
+ lmw=`cut -f 1 -d" " $dir/log/best_wer_${enhan}_${lm_suffix} | awk -F'[_]' '{print $NF}'`
+ # rescore n-best list for all sets
+ if $eval_flag; then
+ tasks="dt05_simu dt05_real et05_simu et05_real"
+ else
+ tasks="dt05_simu dt05_real"
+ fi
+ for t in $tasks; do
+ steps/rnnlmrescore.sh --inv-acwt $lmw --N $nbest --use-phi true \
+ $rnnweight \
+ data/lang_test_${lm_suffix} \
+ data/lang_test_${rnnlm_suffix} \
+ data-fmllr-tri3b/${t}_$enhan \
+ $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix} \
+ $dir/decode_tgpr_5k_${t}_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}
+ done
+ # calc wers for RNNLM results
+ local/chime4_calc_wers.sh $dir ${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest} $dir/graph_tgpr_5k \
+ > $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result
+ head -n 15 $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result
+ fi
tools/ASR_2ch_track/local/run_lmrescore_recog.sh ADDED
@@ -0,0 +1,121 @@
+ #!/bin/bash
+
+ # Copyright 2015 University of Sheffield (Jon Barker, Ricard Marxer)
+ # Inria (Emmanuel Vincent)
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ # Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Takaaki Hori)
+
+ nj=12
+ stage=1
+ order=5
+ hidden=300
+ rnnweight=0.5
+ nbest=100
+ train=noisy
+ eval_flag=true # make it true when the evaluation data are released
+
+ . utils/parse_options.sh || exit 1;
+
+ . ./path.sh
+ . ./cmd.sh ## You'll want to change cmd.sh to something that will work on your system.
+ ## This relates to the queue.
+
+ # This is a shell script, but it's recommended that you run the commands one by
+ # one by copying and pasting into the shell.
+
+ if [ $# -ne 2 ]; then
+ printf "\nUSAGE: %s <enhancement method> <model dir>\n\n" `basename $0`
+ echo "First argument specifies a unique name for different enhancement method"
+ echo "Second argument specifies acoustic and language model directory"
+ exit 1;
+ fi
+
+ # set language models
+ lm_suffix=${order}gkn_5k
+ rnnlm_suffix=rnnlm_5k_h${hidden}
+
+ # enhan data
+ enhan=$1
+ # set model directory
+ mdir=$2
+ srcdir=exp/tri4a_dnn_tr05_multi_${train}_smbr_i1lats
+
+ # check language models
+ if [ ! -d $mdir/data/lang ]; then
+ echo "error, set $mdir correctly"
+ exit 1;
+ fi
+
+ # preparation
+ dir=exp/tri4a_dnn_tr05_multi_${train}_smbr_lmrescore
+ mkdir -p $dir
+ # make a symbolic link to graph info
+ if [ ! -e $dir/graph_tgpr_5k ]; then
+ if [ ! -e exp/tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k ]; then
+ echo "graph is missing, execute local/run_dnn.sh correctly"
+ exit 1;
+ fi
+ pushd . ; cd $dir
+ ln -s ../tri4a_dnn_tr05_multi_${train}/graph_tgpr_5k .
+ popd
+ fi
+
+ # rescore lattices by a high-order N-gram
+ if [ $stage -le 3 ]; then
+ # check the best iteration
+ if [ ! -f $srcdir/log/best_wer_$enhan ]; then
+ echo "$0: error, $srcdir/log/best_wer_$enhan not found; execute local/run_dnn.sh first"
+ exit 1;
+ fi
+ it=`cut -f 1 -d" " $srcdir/log/best_wer_$enhan | awk -F'[_]' '{print $1}'`
+ # rescore lattices
+ if $eval_flag; then
+ tasks="dt05_simu dt05_real et05_simu et05_real"
+ else
+ tasks="dt05_simu dt05_real"
+ fi
+ for t in $tasks; do
+ steps/lmrescore.sh --mode 3 \
+ $mdir/data/lang_test_tgpr_5k \
+ $mdir/data/lang_test_${lm_suffix} \
+ data-fmllr-tri3b/${t}_$enhan \
+ $srcdir/decode_tgpr_5k_${t}_${enhan}_it$it \
+ $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix}
+ done
+ # rescored results by high-order n-gram LM
+ mkdir -p $dir/log
+ local/chime4_calc_wers.sh $dir ${enhan}_${lm_suffix} $dir/graph_tgpr_5k \
+ > $dir/best_wer_${enhan}_${lm_suffix}.result
+ head -n 15 $dir/best_wer_${enhan}_${lm_suffix}.result
+ fi
+
+ # N-best rescoring using an RNNLM
+ if [ $stage -le 4 ]; then
+ # check the best lmw
+ if [ ! -f $dir/log/best_wer_${enhan}_${lm_suffix} ]; then
+ echo "error, rescoring with a high-order n-gram seems to have failed"
+ exit 1;
+ fi
+ lmw=`cut -f 1 -d" " $dir/log/best_wer_${enhan}_${lm_suffix} | awk -F'[_]' '{print $NF}'`
+ # rescore n-best list for all sets
+ if $eval_flag; then
+ tasks="dt05_simu dt05_real et05_simu et05_real"
+ else
+ tasks="dt05_simu dt05_real"
+ fi
+ for t in $tasks; do
+ steps/rnnlmrescore.sh --inv-acwt $lmw --N $nbest --use-phi true \
+ $rnnweight \
+ $mdir/data/lang_test_${lm_suffix} \
+ $mdir/data/lang_test_${rnnlm_suffix} \
+ data-fmllr-tri3b/${t}_$enhan \
+ $dir/decode_tgpr_5k_${t}_${enhan}_${lm_suffix} \
+ $dir/decode_tgpr_5k_${t}_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}
+ done
+ # calc wers for RNNLM results
+ local/chime4_calc_wers.sh $dir ${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest} $dir/graph_tgpr_5k \
+ > $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result
+ head -n 15 $dir/best_wer_${enhan}_${rnnlm_suffix}_w${rnnweight}_n${nbest}.result
+ fi
tools/ASR_2ch_track/local/score.sh ADDED
@@ -0,0 +1,67 @@
+ #!/bin/bash
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
+ # Apache 2.0
+
+ [ -f ./path.sh ] && . ./path.sh
+
+ # begin configuration section.
+ cmd=run.pl
+ stage=0
+ decode_mbr=true
+ reverse=false
+ word_ins_penalty=0.0
+ min_lmwt=5
+ max_lmwt=20
+ #end configuration section.
+
+ [ -f ./path.sh ] && . ./path.sh
+ . parse_options.sh || exit 1;
+
+ if [ $# -ne 3 ]; then
+ echo "Usage: local/score.sh [--cmd (run.pl|queue.pl...)] <data-dir> <lang-dir|graph-dir> <decode-dir>"
+ echo " Options:"
+ echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes."
+ echo " --stage (0|1|2) # start scoring script from part-way through."
+ echo " --decode_mbr (true/false) # minimum Bayes risk decoding (confusion network)."
+ echo " --min_lmwt <int> # minimum LM-weight for lattice rescoring "
+ echo " --max_lmwt <int> # maximum LM-weight for lattice rescoring "
+ echo " --reverse (true/false) # score with time reversed features "
+ exit 1;
+ fi
+
+ data=$1
+ lang_or_graph=$2
+ dir=$3
+
+ symtab=$lang_or_graph/words.txt
+
+ for f in $symtab $dir/lat.1.gz $data/text; do
+ [ ! -f $f ] && echo "score.sh: no such file $f" && exit 1;
+ done
+
+ mkdir -p $dir/scoring/log
+
+ cat $data/text | sed 's:<NOISE>::g' | sed 's:<SPOKEN_NOISE>::g' > $dir/scoring/test_filt.txt
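+ # (sweep the LM weight, i.e. the inverse acoustic scale, from $min_lmwt to
+ # $max_lmwt; the per-weight wer_* files let the caller pick the best setting)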
+
+ $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/best_path.LMWT.log \
+ lattice-scale --inv-acoustic-scale=LMWT "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \
+ lattice-add-penalty --word-ins-penalty=$word_ins_penalty ark:- ark:- \| \
+ lattice-best-path --word-symbol-table=$symtab \
+ ark:- ark,t:$dir/scoring/LMWT.tra || exit 1;
+
+ if $reverse; then
+ for lmwt in `seq $min_lmwt $max_lmwt`; do
+ mv $dir/scoring/$lmwt.tra $dir/scoring/$lmwt.tra.orig
+ awk '{ printf("%s ",$1); for(i=NF; i>1; i--){ printf("%s ",$i); } printf("\n"); }' \
+ <$dir/scoring/$lmwt.tra.orig >$dir/scoring/$lmwt.tra
+ done
+ fi
+
+ # Note: the double level of quoting for the sed command
+ $cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/score.LMWT.log \
+ cat $dir/scoring/LMWT.tra \| \
+ utils/int2sym.pl -f 2- $symtab \| sed 's:\<UNK\>::g' \| \
+ compute-wer --text --mode=present \
+ ark:$dir/scoring/test_filt.txt ark,p:- ">&" $dir/wer_LMWT || exit 1;
+
+ exit 0;
tools/ASR_2ch_track/local/simu_enhan_chime4_data_prep.sh ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ # Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
5
+ # Apache 2.0.
6
+
7
+ # This is modified from the script in standard Kaldi recipe to account
8
+ # for the way the WSJ data is structured on the Edinburgh systems.
9
+ # - Arnab Ghoshal, 29/05/12
10
+
11
+ # Modified from the script for CHiME2 baseline
12
+ # Shinji Watanabe 02/13/2015
13
+
14
+ # Config:
15
+ eval_flag=true # make it true when the evaluation data are released
16
+
17
+ . utils/parse_options.sh || exit 1;
18
+
19
+ if [ $# -ne 2 ]; then
20
+ printf "\nUSAGE: %s <enhancement-name> <enhanced-speech-directory>\n\n" `basename $0`
21
+ echo "The argument should be a the directory that only contains enhanced speech data."
22
+ exit 1;
23
+ fi
24
+
25
+ echo "$0 $@" # Print the command line for logging
26
+
27
+ enhan=$1
28
+ audio_dir=$2
29
+
30
+ dir=`pwd`/data/local/data
31
+ mkdir -p $dir
32
+ local=`pwd`/local
33
+ utils=`pwd`/utils
34
+ odir=`pwd`/data
35
+
36
+ . ./path.sh # Needed for KALDI_ROOT
37
+ export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin
38
+ sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
39
+ if [ ! -x $sph2pipe ]; then
40
+ echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
41
+ exit 1;
42
+ fi
43
+
44
+ if $eval_flag; then
45
+ list_set="tr05_simu_$enhan dt05_simu_$enhan et05_simu_$enhan"
46
+ else
47
+ list_set="tr05_simu_$enhan dt05_simu_$enhan"
48
+ fi
49
+
50
+ cd $dir
51
+
52
+ find $audio_dir/ -name '*.wav' | grep 'tr05_bus_simu\|tr05_caf_simu\|tr05_ped_simu\|tr05_str_simu' | sort -u > tr05_simu_$enhan.flist
53
+ find $audio_dir/ -name '*.wav' | grep 'dt05_bus_simu\|dt05_caf_simu\|dt05_ped_simu\|dt05_str_simu' | sort -u > dt05_simu_$enhan.flist
54
+ if $eval_flag; then
55
+ find $audio_dir/ -name '*.wav' | grep 'et05_bus_simu\|et05_caf_simu\|et05_ped_simu\|et05_str_simu' | sort -u > et05_simu_$enhan.flist
56
+ fi
57
+
58
+ # make a scp file from file list
59
+ for x in $list_set; do
60
+ cat $x.flist | awk -F'[/]' '{print $NF}'| sed -e 's/\.wav/_SIMU/' > ${x}_wav.ids
61
+ paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp
62
+ done
63
+
64
+ # make a transcription from dot
65
+ # simulation training data extract dot file from original WSJ0 data
66
+ # since it is generated from these data
67
+ if [ ! -e dot_files.flist ]; then
68
+ echo "Could not find $dir/dot_files.flist files, first run local/clean_wsj0_data_prep.sh";
69
+ exit 1;
70
+ fi
71
+ cat tr05_simu_${enhan}_wav.scp | awk -F'[_]' '{print $2}' | tr '[A-Z]' '[a-z]' \
72
+ | $local/find_noisy_transcripts.pl dot_files.flist | cut -f 2- -d" " > tr05_simu_$enhan.txt
73
+ cat tr05_simu_${enhan}_wav.scp | cut -f 1 -d" " > tr05_simu_$enhan.ids
74
+ paste -d" " tr05_simu_$enhan.ids tr05_simu_$enhan.txt | sort -k 1 > tr05_simu_$enhan.trans1
75
+ # dt05 and et05 simulation data are generated from the CHiME4 booth recording
76
+ # and we use CHiME4 dot files
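+ # each dot line has the form "<transcript> (<utterance-id>)"; strip the
+ # parentheses to get the ID and append the _SIMU suffix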
77
+ cat dt05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_SIMU"}'> dt05_simu_$enhan.ids
78
+ cat dt05_simu.dot | sed -e 's/(.*)//' > dt05_simu_$enhan.txt
79
+ paste -d" " dt05_simu_$enhan.ids dt05_simu_$enhan.txt | sort -k 1 > dt05_simu_$enhan.trans1
80
+ if $eval_flag; then
81
+ cat et05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF "_SIMU"}'> et05_simu_$enhan.ids
82
+ cat et05_simu.dot | sed -e 's/(.*)//' > et05_simu_$enhan.txt
83
+ paste -d" " et05_simu_$enhan.ids et05_simu_$enhan.txt | sort -k 1 > et05_simu_$enhan.trans1
84
+ fi
85
+
86
+ # Do some basic normalization steps. At this point we don't remove OOVs--
87
+ # that will be done inside the training scripts, as we'd like to make the
88
+ # data-preparation stage independent of the specific lexicon used.
89
+ noiseword="<NOISE>";
90
+ for x in $list_set;do
91
+ cat $x.trans1 | $local/normalize_transcript.pl $noiseword \
92
+ | sort > $x.txt || exit 1;
93
+ done
94
+
95
+ # Make the utt2spk and spk2utt files.
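+ # (the speaker ID is the first '_'-separated field of each utterance ID)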
96
+ for x in $list_set; do
97
+ cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk
98
+ cat ${x}_wav.scp | awk '{print $1}' > $x.utt
99
+ paste -d" " $x.utt $x.spk > $x.utt2spk
100
+ cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
101
+ done
102
+
103
+ # copying data to data/...
104
+ for x in $list_set; do
105
+ mkdir -p $odir/$x
106
+ cp ${x}_wav.scp $odir/$x/wav.scp || exit 1;
107
+ cp ${x}.txt $odir/$x/text || exit 1;
108
+ cp ${x}.spk2utt $odir/$x/spk2utt || exit 1;
109
+ cp ${x}.utt2spk $odir/$x/utt2spk || exit 1;
110
+ done
111
+
112
+ echo "Data preparation succeeded"
tools/ASR_2ch_track/local/simu_noisy_chime4_data_prep.sh ADDED
@@ -0,0 +1,122 @@
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ # Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
5
+ # Apache 2.0.
6
+
7
+ # This is modified from the script in standard Kaldi recipe to account
8
+ # for the way the WSJ data is structured on the Edinburgh systems.
9
+ # - Arnab Ghoshal, 29/05/12
10
+
11
+ # Modified from the script for CHiME2 baseline
12
+ # Shinji Watanabe 02/13/2015
13
+
14
+ # Config:
15
+ eval_flag=true # make it true when the evaluation data are released
16
+
17
+ . utils/parse_options.sh || exit 1;
18
+
19
+ if [ $# -ne 1 ]; then
20
+ printf "\nUSAGE: %s <corpus-directory>\n\n" `basename $0`
21
+ echo "The argument should be the top-level CHiME4 directory."
22
+ echo "It is assumed that there will be a 'data' subdirectory"
23
+ echo "within the top-level corpus directory."
24
+ exit 1;
25
+ fi
26
+
27
+ echo "$0 $@" # Print the command line for logging
28
+
29
+ audio_dir=$1/data/audio/16kHz/isolated
30
+ trans_dir=$1/data/transcriptions
31
+
32
+ echo "extracting the 5th channel (CH5.wav, at the center of the bottom edge on the front of the tablet) for the noisy data"
33
+
34
+ dir=`pwd`/data/local/data
35
+ lmdir=`pwd`/data/local/nist_lm
36
+ mkdir -p $dir $lmdir
37
+ local=`pwd`/local
38
+ utils=`pwd`/utils
39
+
40
+ . ./path.sh # Needed for KALDI_ROOT
41
+ export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin
42
+ sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
43
+ if [ ! -x $sph2pipe ]; then
44
+ echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
45
+ exit 1;
46
+ fi
47
+
48
+ if $eval_flag; then
49
+ list_set="tr05_simu_noisy dt05_simu_noisy et05_simu_noisy"
50
+ else
51
+ list_set="tr05_simu_noisy dt05_simu_noisy"
52
+ fi
53
+
54
+ cd $dir
55
+
56
+ find $audio_dir -name '*CH5.wav' | grep 'tr05_bus_simu\|tr05_caf_simu\|tr05_ped_simu\|tr05_str_simu' | sort -u > tr05_simu_noisy.flist
57
+ find $audio_dir -name '*CH5.wav' | grep 'dt05_bus_simu\|dt05_caf_simu\|dt05_ped_simu\|dt05_str_simu' | sort -u > dt05_simu_noisy.flist
58
+ if $eval_flag; then
59
+ find $audio_dir -name '*CH5.wav' | grep 'et05_bus_simu\|et05_caf_simu\|et05_ped_simu\|et05_str_simu' | sort -u > et05_simu_noisy.flist
60
+ fi
61
+
62
+ # copy the dot-format transcription files for the dt05/et05 simulated data
63
+ cp $trans_dir/dt05_simu.dot_all dt05_simu.dot
64
+ if $eval_flag; then
65
+ cp $trans_dir/et05_simu.dot_all et05_simu.dot
66
+ fi
67
+
68
+ # make a scp file from file list
69
+ for x in $list_set; do
70
+ cat $x.flist | awk -F'[/]' '{print $NF}'| sed -e 's/\.wav/_SIMU/' > ${x}_wav.ids
71
+ paste -d" " ${x}_wav.ids $x.flist | sort -k 1 > ${x}_wav.scp
72
+ done
73
+
74
+ # make transcriptions from the dot files;
75
+ # for the simulated training data, extract the dot files from the original WSJ0 data,
76
+ # since the simulated data were generated from them
77
+ if [ ! -e dot_files.flist ]; then
78
+ echo "Could not find $dir/dot_files.flist; first run local/clean_wsj0_data_prep.sh";
79
+ exit 1;
80
+ fi
81
+ cat tr05_simu_noisy_wav.scp | awk -F'[_]' '{print $2}' | tr '[A-Z]' '[a-z]' \
82
+ | $local/find_noisy_transcripts.pl dot_files.flist | cut -f 2- -d" " > tr05_simu_noisy.txt
83
+ cat tr05_simu_noisy_wav.scp | cut -f 1 -d" " > tr05_simu_noisy.ids
84
+ paste -d" " tr05_simu_noisy.ids tr05_simu_noisy.txt | sort -k 1 > tr05_simu_noisy.trans1
85
+ # dt05 and et05 simulation data are generated from the CHiME4 booth recording
86
+ # and we use CHiME4 dot files
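+ # as for the enhanced data, but here the IDs keep the ".CH5" channel
+ # suffix so that they match the CH5 wav file names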
87
+ cat dt05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_SIMU"}'> dt05_simu_noisy.ids
88
+ cat dt05_simu.dot | sed -e 's/(.*)//' > dt05_simu_noisy.txt
89
+ paste -d" " dt05_simu_noisy.ids dt05_simu_noisy.txt | sort -k 1 > dt05_simu_noisy.trans1
90
+ if $eval_flag; then
91
+ cat et05_simu.dot | sed -e 's/(\(.*\))/\1/' | awk '{print $NF ".CH5_SIMU"}'> et05_simu_noisy.ids
92
+ cat et05_simu.dot | sed -e 's/(.*)//' > et05_simu_noisy.txt
93
+ paste -d" " et05_simu_noisy.ids et05_simu_noisy.txt | sort -k 1 > et05_simu_noisy.trans1
94
+ fi
95
+
96
+ # Do some basic normalization steps. At this point we don't remove OOVs--
97
+ # that will be done inside the training scripts, as we'd like to make the
98
+ # data-preparation stage independent of the specific lexicon used.
99
+ noiseword="<NOISE>";
100
+ for x in $list_set;do
101
+ cat $x.trans1 | $local/normalize_transcript.pl $noiseword \
102
+ | sort > $x.txt || exit 1;
103
+ done
104
+
105
+ # Make the utt2spk and spk2utt files.
106
+ for x in $list_set; do
107
+ cat ${x}_wav.scp | awk -F'_' '{print $1}' > $x.spk
108
+ cat ${x}_wav.scp | awk '{print $1}' > $x.utt
109
+ paste -d" " $x.utt $x.spk > $x.utt2spk
110
+ cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
111
+ done
112
+
113
+ # copying data to data/...
114
+ for x in $list_set; do
115
+ mkdir -p ../../$x
116
+ cp ${x}_wav.scp ../../$x/wav.scp || exit 1;
117
+ cp ${x}.txt ../../$x/text || exit 1;
118
+ cp ${x}.spk2utt ../../$x/spk2utt || exit 1;
119
+ cp ${x}.utt2spk ../../$x/utt2spk || exit 1;
120
+ done
121
+
122
+ echo "Data preparation succeeded"
tools/ASR_2ch_track/local/wsj_prepare_dict.sh ADDED
@@ -0,0 +1,86 @@
1
+ #!/bin/bash
2
+
3
+ # Copyright 2010-2012 Microsoft Corporation
4
+ # 2012-2014 Johns Hopkins University (Author: Daniel Povey)
5
+
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
13
+ # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
14
+ # WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
15
+ # MERCHANTABLITY OR NON-INFRINGEMENT.
16
+ # See the Apache 2 License for the specific language governing permissions and
17
+ # limitations under the License.
18
+
19
+ # Call this script from one level above, e.g. from the s3/ directory. It puts
20
+ # its output in data/local/.
21
+
22
+ # The parts of the output of this that will be needed are
23
+ # [in data/local/dict/ ]
24
+ # lexicon.txt
25
+ # extra_questions.txt
26
+ # nonsilence_phones.txt
27
+ # optional_silence.txt
28
+ # silence_phones.txt
29
+
30
+ # run this from ../
31
+ dir=data/local/dict
32
+ mkdir -p $dir
33
+
34
+
35
+ # (1) Get the CMU dictionary
36
+ svn co https://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict \
37
+ $dir/cmudict || exit 1;
38
+
39
+ # can add -r 10966 for strict compatibility.
40
+
41
+
42
+ #(2) Dictionary preparation:
43
+
44
+
45
+ # Make phones symbol-table (adding in silence and verbal and non-verbal noises at this point).
46
+ # We are adding suffixes _B, _E, _S for beginning, ending, and singleton phones.
47
+
48
+ # silence phones, one per line.
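+ # (SIL = silence, SPN = spoken noise, NSN = non-spoken noise)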
49
+ (echo SIL; echo SPN; echo NSN) > $dir/silence_phones.txt
50
+ echo SIL > $dir/optional_silence.txt
51
+
52
+ # nonsilence phones; on each line is a list of phones that correspond
53
+ # really to the same base phone.
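+ # e.g. the stress variants AA0, AA1 and AA2 are grouped onto one line for the base phone AA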
54
+ cat $dir/cmudict/cmudict.0.7a.symbols | perl -ane 's:\r::; print;' | \
55
+ perl -e 'while(<>){
56
+ chop; m:^([^\d]+)(\d*)$: || die "Bad phone $_";
57
+ $phones_of{$1} .= "$_ "; }
58
+ foreach $list (values %phones_of) {print $list . "\n"; } ' \
59
+ | sort > $dir/nonsilence_phones.txt || exit 1;
60
+
61
+ # A few extra questions that will be added to those obtained by automatically clustering
62
+ # the "real" phones. These ask about stress; there's also one for silence.
63
+ cat $dir/silence_phones.txt| awk '{printf("%s ", $1);} END{printf "\n";}' > $dir/extra_questions.txt || exit 1;
64
+ cat $dir/nonsilence_phones.txt | perl -e 'while(<>){ foreach $p (split(" ", $_)) {
65
+ $p =~ m:^([^\d]+)(\d*)$: || die "Bad phone $_"; $q{$2} .= "$p "; } } foreach $l (values %q) {print "$l\n";}' \
66
+ >> $dir/extra_questions.txt || exit 1;
67
+
68
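+ # strip cmudict's alternate-pronunciation markers, e.g. "WORD(2) ..." becomes "WORD ..."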
+ grep -v ';;;' $dir/cmudict/cmudict.0.7a | \
69
+ perl -ane 'if(!m:^;;;:){ s:(\S+)\(\d+\) :$1 :; print; }' \
70
+ > $dir/lexicon1_raw_nosil.txt || exit 1;
71
+
72
+ # Add to cmudict the silences, noises etc.
73
+
74
+ # the sort | uniq is to remove a duplicated pron from cmudict.
75
+ (echo '!SIL SIL'; echo '<SPOKEN_NOISE> SPN'; echo '<UNK> SPN'; echo '<NOISE> NSN'; ) | \
76
+ cat - $dir/lexicon1_raw_nosil.txt | sort | uniq > $dir/lexicon2_raw.txt || exit 1;
77
+
78
+
79
+ # lexicon.txt is without the _B, _E, _S, _I markers.
80
+ # This is the input to wsj_format_data.sh
81
+ cp $dir/lexicon2_raw.txt $dir/lexicon.txt
82
+
83
+ rm $dir/lexiconp.txt 2>/dev/null
84
+
85
+ echo "Dictionary preparation succeeded"
86
+
tools/ASR_2ch_track/path.sh ADDED
@@ -0,0 +1,10 @@
1
+ if [ -z "$KALDI_ROOT" ]; then
2
+ echo "Environment variable KALDI_ROOT is unset. Setting it to default /data1/tools/kaldi"
3
+ export KALDI_ROOT=/data1/tools/kaldi
4
+ fi
5
+
6
+ [ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh
7
+ export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/tools/irstlm/bin/:$KALDI_ROOT/tools/kaldi_lm/:$PWD:$PATH
8
+ [ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1
9
+ . $KALDI_ROOT/tools/config/common_path.sh
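+ # LC_ALL=C gives byte-wise sorting, which Kaldi's sorted-file checks expect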
10
+ export LC_ALL=C
tools/ASR_2ch_track/run.sh ADDED
@@ -0,0 +1,93 @@
1
+ #!/bin/bash
2
+
3
+ # Kaldi ASR baseline for the CHiME-4 Challenge (2ch track: 2 channel track)
4
+ #
5
+ # Copyright 2016 University of Sheffield (Jon Barker, Ricard Marxer)
6
+ # Inria (Emmanuel Vincent)
7
+ # Mitsubishi Electric Research Labs (Shinji Watanabe)
8
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
9
+
10
+ . ./path.sh
11
+ . ./cmd.sh
12
+
13
+ # Config:
14
+ stage=0 # resume training with --stage=N
15
+ flatstart=false
16
+
17
+ . utils/parse_options.sh || exit 1;
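+ # e.g. "./run.sh --flatstart true" trains all models from scratch, while
+ # "./run.sh --stage 3" resumes from the DNN stage using existing models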
18
+
19
+ # Set bash to 'debug' mode; it will exit on:
20
+ # -e 'error', -u 'undefined variable', -o pipefail 'error in pipeline'
21
+ set -e
22
+ set -u
23
+ set -o pipefail
24
+
25
+ #####check data and model paths################
26
+ # Set a main root directory of the CHiME4 data
27
+ # If you use scripts distributed in the CHiME4 package,
28
+ chime4_data=`pwd`/../..
29
+ # Otherwise, please specify it, e.g.,
30
+ # chime4_data=/db/laputa1/data/original/public/CHiME4
31
+ if [ ! -d $chime4_data ]; then
32
+ echo "$chime4_data does not exist. Please specify chime4 data root correctly" && exit 1
33
+ fi
34
+ # Set a model directory for the CHiME4 data.
35
+ modeldir=$chime4_data/tools/ASR_models
36
+ for d in $modeldir $modeldir/data/{lang,lang_test_tgpr_5k,lang_test_5gkn_5k,lang_test_rnnlm_5k_h300,local} \
37
+ $modeldir/exp/{tri3b_tr05_multi_noisy,tri4a_dnn_tr05_multi_noisy,tri4a_dnn_tr05_multi_noisy_smbr_i1lats}; do
38
+ [ ! -d $d ] && echo "$0: no such directory $d. specify models correctly or execute './run.sh --flatstart true' first" && exit 1;
39
+ done
40
+ #####check data and model paths finished#######
41
+
42
+
43
+ #####main program start################
44
+ # You only need to execute run_init.sh once.
45
+ # This creates 3-gram LM, FSTs, and basic task files
46
+ if [ $stage -le 0 ] && $flatstart; then
47
+ local/run_init.sh $chime4_data
48
+ fi
49
+
50
+ # Using Beamformit
51
+ # See Hori et al, "The MERL/SRI system for the 3rd CHiME challenge using beamforming,
52
+ # robust feature extraction, and advanced speech recognition," in Proc. ASRU'15
53
+ # note that beamformed wav files are generated in the following directory
54
+ enhancement_method=beamformit_2mics
55
+ enhancement_data=`pwd`/enhan/$enhancement_method
56
+ if [ $stage -le 1 ]; then
57
+ local/run_beamform_2ch_track.sh --cmd "$train_cmd" --nj 20 $chime4_data/data/audio/16kHz/isolated_2ch_track $enhancement_data
58
+ fi
59
+
60
+ # GMM based ASR experiment without "retraining"
61
+ # Please set a directory of your speech enhancement method.
62
+ # run_gmm_recog.sh can be executed each time you change the speech enhancement technique.
63
+ # The directory structure and audio files must follow the attached baseline enhancement directory
64
+ if [ $stage -le 2 ]; then
65
+ if $flatstart; then
66
+ local/run_gmm.sh $enhancement_method $enhancement_data $chime4_data
67
+ else
68
+ local/run_gmm_recog.sh $enhancement_method $enhancement_data $modeldir
69
+ fi
70
+ fi
71
+
72
+ # DNN based ASR experiment
73
+ # Since it takes time to evaluate a DNN, we provide the GMM and DNN scripts separately.
74
+ # You may execute it after you have obtained promising results from the GMM-based ASR experiments.
75
+ if [ $stage -le 3 ]; then
76
+ if $flatstart; then
77
+ local/run_dnn.sh $enhancement_method
78
+ else
79
+ local/run_dnn_recog.sh $enhancement_method $modeldir
80
+ fi
81
+ fi
82
+
83
+ # LM-rescoring experiment with 5-gram and RNN LMs
84
+ # It takes a few days to train an RNNLM.
85
+ if [ $stage -le 4 ]; then
86
+ if $flatstart; then
87
+ local/run_lmrescore.sh $chime4_data $enhancement_method
88
+ else
89
+ local/run_lmrescore_recog.sh $enhancement_method $modeldir
90
+ fi
91
+ fi
92
+
93
+ echo "Done."
tools/ASR_2ch_track/steps/align_basis_fmllr.sh ADDED
@@ -0,0 +1,153 @@
1
+ #!/bin/bash
2
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
3
+ # Copyright 2013 GoVivace Inc (Author: Nagendra Goel)
4
+ # Apache 2.0
5
+
6
+ # Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta)
7
+ # + fMLLR (probably with SAT models).
8
+ # It first computes an alignment with the final.alimdl (or the final.mdl if final.alimdl
9
+ # is not present), then does 2 iterations of fMLLR estimation.
10
+
11
+ # If you supply the --use-graphs option, it will use the training
12
+ # graphs from the source directory (where the model is). In this
13
+ # case the number of jobs must match the source directory.
14
+
15
+
16
+ # Begin configuration section.
17
+ stage=0
18
+ nj=4
19
+ cmd=run.pl
20
+ use_graphs=false
21
+ # Begin configuration.
22
+ scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
23
+ beam=10
24
+ retry_beam=40
25
+ boost_silence=1.5 # factor by which to boost silence during alignment.
26
+ fmllr_update_type=full
27
+ # End configuration options.
28
+
29
+ echo "$0 $@" # Print the command line for logging
30
+
31
+ [ -f path.sh ] && . ./path.sh # source the path.
32
+ . parse_options.sh || exit 1;
33
+
34
+ if [ $# != 4 ]; then
35
+ echo "usage: steps/align_basis_fmllr.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
36
+ echo "e.g.: steps/align_basis_fmllr.sh data/train data/lang exp/tri1 exp/tri1_ali"
37
+ echo "main options (for others, see top of script file)"
38
+ echo " --config <config-file> # config containing options"
39
+ echo " --nj <nj> # number of parallel jobs"
40
+ echo " --use-graphs true # use graphs in src-dir"
41
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
42
+ echo " --fmllr-update-type (full|diag|offset|none) # default full."
43
+ exit 1;
44
+ fi
45
+
46
+ data=$1
47
+ lang=$2
48
+ srcdir=$3
49
+ dir=$4
50
+ graphdir=$dir
51
+
52
+ oov=`cat $lang/oov.int` || exit 1;
53
+ silphonelist=`cat $lang/phones/silence.csl` || exit 1;
54
+ sdata=$data/split$nj
55
+
56
+ mkdir -p $dir/log
57
+ echo $nj > $dir/num_jobs
58
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
59
+
60
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
61
+ cp $srcdir/final.occs $dir;
62
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
63
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
64
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
65
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
66
+ delta_opts=`cat $srcdir/delta_opts 2>/dev/null`
67
+ cp $srcdir/delta_opts $dir 2>/dev/null
68
+
69
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
70
+ echo "$0: feature type is $feat_type"
71
+
72
+ case $feat_type in
73
+ delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
74
+ lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
75
+ cp $srcdir/final.mat $dir
76
+ ;;
77
+ *) echo "Invalid feature type $feat_type" && exit 1;
78
+ esac
79
+
80
+ ## Set up model and alignment model.
81
+ mdl=$srcdir/final.mdl
82
+ if [ -f $srcdir/final.alimdl ]; then
83
+ alimdl=$srcdir/final.alimdl
84
+ else
85
+ alimdl=$srcdir/final.mdl
86
+ fi
87
+ [ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1;
88
+ alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |"
89
+ mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |"
90
+
91
+ ## Work out where we're getting the graphs from.
92
+ if $use_graphs; then
93
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
94
+ echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1;
95
+ [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1;
96
+ graphdir=$srcdir
97
+ else
98
+ graphdir=$dir
99
+ if [ $stage -le 0 ]; then
100
+ echo "$0: compiling training graphs"
101
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
102
+ $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
103
+ compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \
104
+ "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
105
+ fi
106
+ fi
107
+
108
+
109
+ if [ $stage -le 1 ]; then
110
+ echo "$0: aligning data in $data using $alimdl and speaker-independent features."
111
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
112
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \
113
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1;
114
+ fi
115
+
116
+ if [ $stage -le 2 ]; then
117
+ echo "$0: computing fMLLR transforms"
118
+ if [ "$alimdl" != "$mdl" ]; then
119
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
120
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
121
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
122
+ gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \
123
+ gmm-est-basis-fmllr-gpost --fmllr-min-count=22 --num-iters=10 \
124
+ --size-scale=0.2 --step-size-iters=3 \
125
+ --write-weights=ark:$dir/pre_wgt.JOB \
126
+ $mdl $srcdir/fmllr.basis "$sifeats" ark,s,cs:- \
127
+ ark:$dir/trans.JOB || exit 1;
128
+ # else
129
+ # $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
130
+ # ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
131
+ # weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
132
+ # gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \
133
+ # --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \
134
+ # ark,s,cs:- ark:$dir/trans.JOB || exit 1;
135
+ fi
136
+ fi
137
+
138
+ feats="$sifeats transform-feats ark:$dir/trans.JOB ark:- ark:- |"
139
+
140
+ if [ $stage -le 3 ]; then
141
+ echo "$0: doing final alignment."
142
+ $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \
143
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \
144
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
145
+ fi
146
+
147
+ #rm $dir/pre_ali.*.gz
148
+
149
+ echo "$0: done aligning data."
150
+
151
+ utils/summarize_warnings.pl $dir/log
152
+
153
+ exit 0;
tools/ASR_2ch_track/steps/align_fmllr.sh ADDED
@@ -0,0 +1,153 @@
1
+ #!/bin/bash
2
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
3
+ # Apache 2.0
4
+
5
+ # Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta)
6
+ # + fMLLR (probably with SAT models).
7
+ # It first computes an alignment with the final.alimdl (or the final.mdl if final.alimdl
8
+ # is not present), then does 2 iterations of fMLLR estimation.
9
+
10
+ # If you supply the --use-graphs option, it will use the training
11
+ # graphs from the source directory (where the model is). In this
12
+ # case the number of jobs must match the source directory.
13
+
14
+
15
+ # Begin configuration section.
16
+ stage=0
17
+ nj=4
18
+ cmd=run.pl
19
+ use_graphs=false
20
+ # Begin configuration.
21
+ scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
22
+ beam=10
23
+ retry_beam=40
24
+ careful=false
25
+ boost_silence=1.0 # factor by which to boost silence during alignment.
26
+ fmllr_update_type=full
27
+ # End configuration options.
28
+
29
+ echo "$0 $@" # Print the command line for logging
30
+
31
+ [ -f path.sh ] && . ./path.sh # source the path.
32
+ . parse_options.sh || exit 1;
33
+
34
+ if [ $# != 4 ]; then
35
+ echo "usage: steps/align_fmllr.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
36
+ echo "e.g.: steps/align_fmllr.sh data/train data/lang exp/tri1 exp/tri1_ali"
37
+ echo "main options (for others, see top of script file)"
38
+ echo " --config <config-file> # config containing options"
39
+ echo " --nj <nj> # number of parallel jobs"
40
+ echo " --use-graphs true # use graphs in src-dir"
41
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
42
+ echo " --fmllr-update-type (full|diag|offset|none) # default full."
43
+ exit 1;
44
+ fi
45
+
46
+ data=$1
47
+ lang=$2
48
+ srcdir=$3
49
+ dir=$4
50
+
51
+ oov=`cat $lang/oov.int` || exit 1;
52
+ silphonelist=`cat $lang/phones/silence.csl` || exit 1;
53
+ sdata=$data/split$nj
54
+
55
+ mkdir -p $dir/log
56
+ echo $nj > $dir/num_jobs
57
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
58
+
59
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
60
+ cp $srcdir/final.alimdl $dir 2>/dev/null
61
+ cp $srcdir/final.occs $dir;
62
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
63
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
64
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
65
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
66
+ delta_opts=`cat $srcdir/delta_opts 2>/dev/null`
67
+ cp $srcdir/delta_opts $dir 2>/dev/null
68
+
69
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
70
+ echo "$0: feature type is $feat_type"
71
+
72
+ case $feat_type in
73
+ delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
74
+ lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
75
+ cp $srcdir/final.mat $dir
76
+ cp $srcdir/full.mat $dir 2>/dev/null
77
+ ;;
78
+ *) echo "Invalid feature type $feat_type" && exit 1;
79
+ esac
80
+
81
+ ## Set up model and alignment model.
82
+ mdl=$srcdir/final.mdl
83
+ if [ -f $srcdir/final.alimdl ]; then
84
+ alimdl=$srcdir/final.alimdl
85
+ else
86
+ alimdl=$srcdir/final.mdl
87
+ fi
88
+ [ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1;
89
+ alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |"
90
+ mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |"
91
+
92
+
93
+ ## Work out where we're getting the graphs from.
94
+ if $use_graphs; then
95
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
96
+ echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1;
97
+ [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1;
98
+ graphdir=$srcdir
99
+ else
100
+ graphdir=$dir
101
+ if [ $stage -le 0 ]; then
102
+ echo "$0: compiling training graphs"
103
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
104
+ $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
105
+ compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \
106
+ "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
107
+ fi
108
+ fi
109
+
110
+
111
+ if [ $stage -le 1 ]; then
112
+ echo "$0: aligning data in $data using $alimdl and speaker-independent features."
113
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
114
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$alimdl_cmd" \
115
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1;
116
+ fi
117
+
118
+ if [ $stage -le 2 ]; then
119
+ echo "$0: computing fMLLR transforms"
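+ # if a separate speaker-independent alignment model exists, estimate the
+ # transforms from Gaussian-level posteriors (gpost) obtained with it;
+ # otherwise estimate them directly with the single model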
120
+ if [ "$alimdl" != "$mdl" ]; then
121
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
122
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
123
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
124
+ gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \
125
+ gmm-est-fmllr-gpost --fmllr-update-type=$fmllr_update_type \
126
+ --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \
127
+ ark,s,cs:- ark:$dir/trans.JOB || exit 1;
128
+ else
129
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
130
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
131
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
132
+ gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \
133
+ --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \
134
+ ark,s,cs:- ark:$dir/trans.JOB || exit 1;
135
+ fi
136
+ fi
137
+
138
+ feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |"
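+ # (the per-speaker fMLLR transforms are applied on top of the speaker-independent features)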
139
+
140
+ if [ $stage -le 3 ]; then
141
+ echo "$0: doing final alignment."
142
+ $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \
143
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl_cmd" \
144
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
145
+ fi
146
+
147
+ rm $dir/pre_ali.*.gz
148
+
149
+ echo "$0: done aligning data."
150
+
151
+ utils/summarize_warnings.pl $dir/log
152
+
153
+ exit 0;
tools/ASR_2ch_track/steps/align_fmllr_lats.sh ADDED
@@ -0,0 +1,160 @@
1
+ #!/bin/bash
2
+ #
3
+ # Copyright 2012-2015 Johns Hopkins University (Author: Daniel Povey)
4
+ # Apache 2.0
5
+
6
+ # Version of align_fmllr.sh that generates lattices (lat.*.gz) with
7
+ # alignments of alternative pronunciations in them. Mainly intended
8
+ # as a precursor to CTC training for now.
9
+
10
+ # Begin configuration section.
11
+ stage=0
12
+ nj=4
13
+ cmd=run.pl
14
+ # Begin configuration.
15
+ scale_opts="--transition-scale=1.0 --self-loop-scale=0.1"
16
+ acoustic_scale=0.1
17
+ beam=10
18
+ retry_beam=40
19
+ final_beam=20 # For the lattice-generation phase there is no retry-beam. This
20
+ # is a limitation of gmm-latgen-faster. We just use an
21
+ # intermediate beam. We'll lose a little data and it will be
22
+ # slightly slower. (however, the min-active of 200 that
23
+ # gmm-latgen-faster defaults to may help.)
24
+ boost_silence=1.0 # factor by which to boost silence during alignment.
25
+ fmllr_update_type=full
26
+ # End configuration options.
27
+
28
+ echo "$0 $@" # Print the command line for logging
29
+
30
+ [ -f path.sh ] && . ./path.sh # source the path.
31
+ . parse_options.sh || exit 1;
32
+
33
+ if [ $# != 4 ]; then
34
+ echo "usage: steps/align_fmllr_lats.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
35
+ echo "e.g.: steps/align_fmllr_lats.sh data/train data/lang exp/tri1 exp/tri1_lats"
36
+ echo "main options (for others, see top of script file)"
37
+ echo " --config <config-file> # config containing options"
38
+ echo " --nj <nj> # number of parallel jobs"
39
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
40
+ echo " --fmllr-update-type (full|diag|offset|none) # default full."
41
+ exit 1;
42
+ fi
43
+
44
+ data=$1
45
+ lang=$2
46
+ srcdir=$3
47
+ dir=$4
48
+
49
+ oov=`cat $lang/oov.int` || exit 1;
50
+ silphonelist=`cat $lang/phones/silence.csl` || exit 1;
51
+ sdata=$data/split$nj
52
+
53
+ mkdir -p $dir/log
54
+ echo $nj > $dir/num_jobs
55
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
56
+
57
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
58
+ cp $srcdir/final.alimdl $dir 2>/dev/null
59
+ cp $srcdir/final.occs $dir;
60
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
61
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
62
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
63
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
64
+ delta_opts=`cat $srcdir/delta_opts 2>/dev/null`
65
+ cp $srcdir/delta_opts $dir 2>/dev/null
66
+
67
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
68
+ echo "$0: feature type is $feat_type"
69
+
70
+ case $feat_type in
71
+ delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
72
+ lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
73
+ cp $srcdir/final.mat $dir
74
+ cp $srcdir/full.mat $dir 2>/dev/null
75
+ ;;
76
+ *) echo "Invalid feature type $feat_type" && exit 1;
77
+ esac
78
+
79
+ ## Set up model and alignment model.
80
+ mdl=$srcdir/final.mdl
81
+ if [ -f $srcdir/final.alimdl ]; then
82
+ alimdl=$srcdir/final.alimdl
83
+ else
84
+ alimdl=$srcdir/final.mdl
85
+ fi
86
+ [ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1;
87
+ alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |"
88
+ mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |"
89
+
90
+
91
+ ## because gmm-latgen-faster doesn't support adding the transition-probs to the
92
+ ## graph itself, we need to bake them into the compiled graphs. This means we can't reuse previously compiled graphs,
93
+ ## because the other scripts write them without transition probs.
94
+ if [ $stage -le 0 ]; then
95
+ echo "$0: compiling training graphs"
96
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
97
+ $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
98
+ compile-train-graphs $scale_opts $dir/tree $dir/final.mdl $lang/L.fst "$tra" \
99
+ "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
100
+ fi
101
+
102
+
103
+ if [ $stage -le 1 ]; then
104
+ # Note: we need to set --transition-scale=0.0 --self-loop-scale=0.0 because,
105
+ # as explained above, we compiled the transition probs into the training
106
+ # graphs.
107
+ echo "$0: aligning data in $data using $alimdl and speaker-independent features."
108
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
109
+ gmm-align-compiled --transition-scale=0.0 --self-loop-scale=0.0 --acoustic-scale=$acoustic_scale \
110
+ --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \
111
+ "ark:gunzip -c $dir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1;
112
+ fi
113
+
114
+ if [ $stage -le 2 ]; then
115
+ echo "$0: computing fMLLR transforms"
116
+ if [ "$alimdl" != "$mdl" ]; then
117
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
118
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
119
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
120
+ gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \
121
+ gmm-est-fmllr-gpost --fmllr-update-type=$fmllr_update_type \
122
+ --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \
123
+ ark,s,cs:- ark:$dir/trans.JOB || exit 1;
124
+ else
125
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
126
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
127
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
128
+ gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \
129
+ --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$sifeats" \
130
+ ark,s,cs:- ark:$dir/trans.JOB || exit 1;
131
+ fi
132
+ fi
133
+
134
+ feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |"
135
+
136
+ if [ $stage -le 3 ]; then
137
+ # Warning: gmm-latgen-faster doesn't support a retry-beam so you may get more
138
+ # alignment errors (however, it does have a default min-active=200 so this
139
+ # will tend to reduce alignment errors).
140
+ # --allow_partial=false makes sure we reach the end of the decoding graph.
141
+ # --word-determinize=false makes sure we retain the alternative pronunciations of
142
+ # words (including alternatives regarding optional silences).
143
+ # --lattice-beam=$final_beam keeps all the alternatives that were within the beam,
144
+ # it means we do no pruning of the lattice (lattices from a training transcription
145
+ # will be small anyway).
146
+ echo "$0: generating lattices containing alternate pronunciations."
147
+ $cmd JOB=1:$nj $dir/log/generate_lattices.JOB.log \
148
+ gmm-latgen-faster --acoustic-scale=$acoustic_scale --beam=$final_beam \
149
+ --lattice-beam=$final_beam --allow-partial=false --word-determinize=false \
150
+ "$mdl_cmd" "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \
151
+ "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1;
152
+ fi
153
+
154
+ rm $dir/pre_ali.*.gz
155
+
156
+ echo "$0: done generating lattices from training transcripts."
157
+
158
+ utils/summarize_warnings.pl $dir/log
159
+
160
+ exit 0;
tools/ASR_2ch_track/steps/align_lvtln.sh ADDED
@@ -0,0 +1,196 @@
1
+ #!/bin/bash
2
+
3
+ # Copyright 2014 Vimal Manohar
4
+
5
+ # Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta)
6
+ # Will ignore fMLLR.
7
+ # Will estimate VTLN warping factors
8
+ # as a by product, which can be used to extract VTLN-warped features.
9
+
10
+ # Begin configuration section
11
+ stage=0
12
+ nj=4
13
+ cmd=run.pl
14
+ use_graphs=false
15
+ # Begin configuration.
16
+ scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
17
+ beam=10.0
18
+ retry_beam=40
19
+ boost_silence=1.0 # factor by which to boost silence during alignment.
20
+ logdet_scale=1.0
21
+ cleanup=false
22
+
23
+ # End configuration section
24
+ echo "$0 $@" # Print the command line for logging
25
+
26
+ [ -f ./path.sh ] && . ./path.sh; # source the path.
27
+ . parse_options.sh || exit 1;
28
+
29
+ if [ $# != 4 ]; then
30
+ echo "Wrong #arguments ($#, expected 4)"
31
+ echo "Usage: steps/align_lvtln.sh [options] <data-dir> <lang-dir> <src-dir> <align-dir>"
32
+ echo " e.g.: steps/align_lvtln.sh data/train data/lang exp/tri2c exp/tri2c_ali"
33
+ echo "main options (for others, see top of script file)"
34
+ echo " --config <config-file> # config containing options"
35
+ echo " --nj <nj> # number of parallel jobs"
36
+ echo " --use-graphs true # use graphs in src-dir"
37
+ echo " --cmd <cmd> # Command to run in parallel with"
38
+ exit 1;
39
+ fi
40
+
41
+ data=$1
42
+ lang=$2
43
+ srcdir=$3
44
+ dir=$4
45
+
46
+ oov=`cat $lang/oov.int` || exit 1;
47
+ silphonelist=`cat $lang/phones/silence.csl` || exit 1;
48
+ sdata=$data/split$nj
49
+
50
+ if [ -f $data/spk2warp ]; then
51
+ echo "$0: file $data/spk2warp exists. This script expects non-VTLN features"
52
+ exit 1;
53
+ fi
54
+
55
+ mkdir -p $dir/log
56
+ echo $nj > $dir/num_jobs
57
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
58
+
59
+ cp $srcdir/{tree,final.mdl,final.lvtln} $dir || exit 1;
60
+ cp $srcdir/final.occs $dir;
61
+
62
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
63
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
64
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
65
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
66
+
67
+ ## Set up the unadapted features "$sifeats"
68
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
69
+ echo "$0: feature type is $feat_type";
70
+ case $feat_type in
71
+ delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";;
72
+ lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
73
+ cp $srcdir/final.mat $srcdir/full.mat $dir
74
+ ;;
75
+ *) echo "Invalid feature type $feat_type" && exit 1;
76
+ esac
77
+
78
+ ## Set up model and alignment model.
79
+ mdl=$srcdir/final.mdl
80
+ if [ -f $srcdir/final.alimdl ]; then
81
+ alimdl=$srcdir/final.alimdl
82
+ else
83
+ alimdl=$srcdir/final.mdl
84
+ fi
85
+ [ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1;
86
+ alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |"
87
+ mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |"
88
+
89
+ ## Work out where we're getting the graphs from.
90
+ if $use_graphs; then
91
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
92
+ echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1;
93
+ [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1;
94
+ graphdir=$srcdir
95
+ else
96
+ graphdir=$dir
97
+ if [ $stage -le 0 ]; then
98
+ echo "$0: compiling training graphs"
99
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
100
+ $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
101
+ compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \
102
+ "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
103
+ fi
104
+ fi
105
+
106
+ if [ $stage -le 1 ]; then
107
+ echo "$0: aligning data in $data using $alimdl and speaker-independent features."
108
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
109
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \
110
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1;
111
+ fi
112
+
113
+ if [ -f $data/segments ]; then
114
+ subset_utts="ark:extract-segments scp:$sdata/JOB/wav.scp $sdata/JOB/segments ark:- |"
115
+ else
116
+ echo "$0 [info]: no segments file exists: using wav.scp directly."
117
+ subset_utts="ark:wav-copy scp:$sdata/JOB/wav.scp ark:- |"
118
+ fi
119
+
120
+ ## Get the first-pass LVTLN transforms
121
+ if [ $stage -le 2 ]; then
122
+ echo "$0: computing first-pass LVTLN transforms."
123
+ $cmd JOB=1:$nj $dir/log/lvtln_pass1.JOB.log \
124
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
125
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
126
+ gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \
127
+ gmm-est-lvtln-trans --verbose=1 --spk2utt=ark:$sdata/JOB/spk2utt --logdet-scale=$logdet_scale \
128
+ $mdl $dir/final.lvtln "$sifeats" ark,s,cs:- ark:$dir/trans_pass1.JOB \
129
+ ark,t:$dir/warp_pass1.JOB || exit 1;
130
+ fi
131
+ ##
132
+
133
+ feats1="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans_pass1.JOB ark:- ark:- |"
134
+
135
+ ## Do a second pass of estimating the LVTLN transform.
136
+
137
+ if [ $stage -le 3 ]; then
138
+ echo "$0: realigning with transformed features"
139
+ $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \
140
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \
141
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats1" "ark:|gzip -c >$dir/ali_pass2.JOB.gz" || exit 1;
142
+ fi
143
+
144
+ if [ $stage -le 4 ]; then
145
+ echo "$0: re-estimating LVTLN transforms"
146
+ $cmd JOB=1:$nj $dir/log/lvtln_pass2.JOB.log \
147
+ ali-to-post "ark:gunzip -c $dir/ali_pass2.JOB.gz|" ark:- \| \
148
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
149
+ gmm-post-to-gpost $alimdl "$feats1" ark:- ark:- \| \
150
+ gmm-est-lvtln-trans --verbose=1 --spk2utt=ark:$sdata/JOB/spk2utt --logdet-scale=$logdet_scale \
151
+ $mdl $dir/final.lvtln "$sifeats" ark,s,cs:- ark:$dir/trans.JOB \
152
+ ark,t:$dir/warp.JOB || exit 1;
153
+ fi
154
+
155
+ feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |"
156
+
157
+ if [ $stage -le 5 ]; then
158
+ # This second alignment does not affect the transforms.
159
+ echo "$0: realigning with the second-pass LVTLN transforms"
160
+ $cmd JOB=1:$nj $dir/log/align.JOB.log \
161
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \
162
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
163
+ fi
164
+
165
+ if [ -f $dir/warp.1 ]; then
166
+ for j in $(seq $nj); do cat $dir/warp_pass1.$j; done > $dir/0.warp || exit 1;
167
+ for j in $(seq $nj); do cat $dir/warp.$j; done > $dir/final.warp || exit 1;
168
+ ns1=$(cat $dir/0.warp | wc -l)
169
+ ns2=$(cat $dir/final.warp | wc -l)
170
+ ! [ "$ns1" == "$ns2" ] && echo "$0: Number of speakers differ pass1 vs pass2, $ns1 != $ns2" && exit 1;
171
+
172
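+ # list the speakers whose warp factor moved by more than 0.01 between the two passes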
+ paste $dir/0.warp $dir/final.warp | awk '{x=$2 - $4; if ((x>0?x:-x) > 0.010001) { print $1, $2, $4; }}' > $dir/warp_changed
173
+ nc=$(cat $dir/warp_changed | wc -l)
174
+ echo "$0: For $nc speakers out of $ns1, warp changed pass1 vs pass2 by >0.01, see $dir/warp_changed for details"
175
+ fi
176
+
177
+ if true; then # Diagnostics
178
+ if [ -f $data/spk2gender ]; then
179
+ # To make it easier to eyeball the male and female speakers' warps
180
+ # separately, separate them out.
181
+ for g in m f; do # means: for gender in male female
182
+ cat $dir/final.warp | \
183
+ utils/filter_scp.pl <(grep -w $g $data/spk2gender | awk '{print $1}') > $dir/final.warp.$g
184
+ echo -n "The last few warp factors for gender $g are: "
185
+ tail -n 10 $dir/final.warp.$g | awk '{printf("%s ", $2);}';
186
+ echo
187
+ done
188
+ fi
189
+ fi
190
+
191
+ if $cleanup; then
192
+ rm $dir/pre_ali.*.gz $dir/ali_pass?.*.gz $dir/trans_pass1.* $dir/warp_pass1.* $dir/warp.*
193
+ fi
194
+
195
+ exit 0;
196
+
tools/ASR_2ch_track/steps/align_raw_fmllr.sh ADDED
@@ -0,0 +1,146 @@
1
+ #!/bin/bash
2
+ # Copyright 2012-2013 Johns Hopkins University (Author: Daniel Povey)
3
+ # Apache 2.0
4
+
5
+ # Computes training alignments; assumes features are (LDA+MLLT or delta+delta-delta)
6
+ # + fMLLR (probably with SAT models).
7
+ # It first computes an alignment with the final.alimdl (or the final.mdl if final.alimdl
8
+ # is not present), then does 2 iterations of fMLLR estimation.
9
+
10
+ # If you supply the --use-graphs option, it will use the training
11
+ # graphs from the source directory (where the model is). In this
12
+ # case the number of jobs must match the source directory.
13
+
14
+
15
+ # Begin configuration section.
16
+ stage=0
17
+ nj=4
18
+ cmd=run.pl
19
+ use_graphs=false
20
+ # Begin configuration.
21
+ scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
22
+ beam=10
23
+ retry_beam=40
24
+ boost_silence=1.0 # factor by which to boost silence during alignment.
25
+ # End configuration options.
26
+
27
+ echo "$0 $@" # Print the command line for logging
28
+
29
+ [ -f path.sh ] && . ./path.sh # source the path.
30
+ . parse_options.sh || exit 1;
31
+
32
+ if [ $# != 4 ]; then
33
+ echo "usage: steps/align_raw_fmllr.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
34
+ echo "e.g.: steps/align_raw_fmllr.sh data/train data/lang exp/tri1 exp/tri1_ali"
35
+ echo "main options (for others, see top of script file)"
36
+ echo " --config <config-file> # config containing options"
37
+ echo " --nj <nj> # number of parallel jobs"
38
+ echo " --use-graphs true # use graphs in src-dir"
39
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
40
+ exit 1;
41
+ fi
42
+
43
+ data=$1
44
+ lang=$2
45
+ srcdir=$3
46
+ dir=$4
47
+
48
+ oov=`cat $lang/oov.int` || exit 1;
49
+ silphonelist=`cat $lang/phones/silence.csl` || exit 1;
50
+ sdata=$data/split$nj
51
+
52
+ mkdir -p $dir/log
53
+ echo $nj > $dir/num_jobs
54
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
55
+
56
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
57
+ cp $srcdir/final.occs $dir;
58
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
59
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
60
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
61
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
62
+
63
+ if [[ ! -f $srcdir/final.mat || ! -f $srcdir/full.mat ]]; then
64
+ echo "$0: we require final.mat and full.mat in the source directory $srcdir" && exit 1;
65
+ fi
66
+
67
+ full_lda_mat="get-full-lda-mat --print-args=false $srcdir/final.mat $srcdir/full.mat -|"
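+ # get-full-lda-mat reconstructs the full (square) LDA matrix; raw fMLLR
+ # is estimated in the original pre-splice/LDA feature space, so the
+ # reduced-dimension final.mat alone is not enough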
68
+ cp $srcdir/full.mat $srcdir/final.mat $dir
69
+
70
+ raw_dim=$(feat-to-dim scp:$data/feats.scp -) || exit 1;
71
+ ! [ "$raw_dim" -gt 0 ] && echo "raw feature dim not set" && exit 1;
72
+
73
+ splicedfeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- |"
74
+ sifeats="$splicedfeats transform-feats $srcdir/final.mat ark:- ark:- |"
75
+
76
+ ## Set up model and alignment model.
77
+ mdl=$srcdir/final.mdl
78
+ if [ -f $srcdir/final.alimdl ]; then
79
+ alimdl=$srcdir/final.alimdl
80
+ else
81
+ alimdl=$srcdir/final.mdl
82
+ fi
83
+ [ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1;
84
+ alimdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $alimdl - |"
85
+ mdl_cmd="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $mdl - |"
86
+
87
+
88
+ ## Work out where we're getting the graphs from.
89
+ if $use_graphs; then
90
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
91
+ echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1;
92
+ [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1;
93
+ graphdir=$srcdir
94
+ else
95
+ graphdir=$dir
96
+ if [ $stage -le 0 ]; then
97
+ echo "$0: compiling training graphs"
98
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
99
+ $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
100
+ compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \
101
+ "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
102
+ fi
103
+ fi
104
+
105
+
106
+ if [ $stage -le 1 ]; then
107
+ echo "$0: aligning data in $data using $alimdl and speaker-independent features."
108
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
109
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$alimdl_cmd" \
110
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$sifeats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1;
111
+ fi
112
+
113
+ if [ $stage -le 2 ]; then
114
+ echo "$0: computing fMLLR transforms"
115
+ if [ "$alimdl" != "$mdl" ]; then
116
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
117
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
118
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
119
+ gmm-post-to-gpost $alimdl "$sifeats" ark:- ark:- \| \
120
+ gmm-est-fmllr-raw-gpost --raw-feat-dim=$raw_dim --spk2utt=ark:$sdata/JOB/spk2utt \
121
+ $mdl "$full_lda_mat" "$splicedfeats" ark,s,cs:- ark:$dir/raw_trans.JOB || exit 1;
122
+ else
123
+ $cmd JOB=1:$nj $dir/log/fmllr.JOB.log \
124
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
125
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
126
+ gmm-est-fmllr-raw --raw-feat-dim=$raw_dim --spk2utt=ark:$sdata/JOB/spk2utt $mdl "$full_lda_mat" \
127
+ "$splicedfeats" ark,s,cs:- ark:$dir/raw_trans.JOB || exit 1;
128
+ fi
129
+ fi
130
+
131
+ feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/raw_trans.JOB ark:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
132
+
133
+ if [ $stage -le 3 ]; then
134
+ echo "$0: doing final alignment."
135
+ $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \
136
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl_cmd" \
137
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
138
+ fi
139
+
140
+ rm $dir/pre_ali.*.gz
141
+
142
+ echo "$0: done aligning data."
143
+
144
+ utils/summarize_warnings.pl $dir/log
145
+
146
+ exit 0;
tools/ASR_2ch_track/steps/align_sgmm.sh ADDED
@@ -0,0 +1,195 @@
1
+ #!/bin/bash
2
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
3
+ # Apache 2.0
4
+
5
+ # Computes training alignments and (if needed) speaker-vectors, given an
6
+ # SGMM system. If the system is built on top of SAT, you should supply
7
+ # transforms with the --transform-dir option.
8
+
9
+ # If you supply the --use-graphs option, it will use the training
10
+ # graphs from the source directory.
11
+
12
+ # Begin configuration section.
13
+ stage=0
14
+ nj=4
15
+ cmd=run.pl
16
+ use_graphs=false # use graphs from srcdir
17
+ use_gselect=false # use gselect info from srcdir [regardless, we use
18
+ # Gaussian-selection info, we might have to compute it though.]
19
+ gselect=15 # Number of Gaussian-selection indices for SGMMs.
20
+ # Begin configuration.
21
+ scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
22
+ beam=10
23
+ retry_beam=40
24
+ transform_dir= # directory to find fMLLR transforms in.
25
+ # End configuration options.
26
+
27
+ echo "$0 $@" # Print the command line for logging
28
+
29
+ [ -f path.sh ] && . ./path.sh # source the path.
30
+ . parse_options.sh || exit 1;
31
+
32
+ if [ $# != 4 ]; then
33
+ echo "usage: steps/align_sgmm.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
34
+ echo "e.g.: steps/align_sgmm.sh --transform-dir exp/tri3b data/train data/lang \\"
35
+ echo " exp/sgmm4a exp/sgmm4a_ali"
36
+ echo "main options (for others, see top of script file)"
37
+ echo " --config <config-file> # config containing options"
38
+ echo " --nj <nj> # number of parallel jobs"
39
+ echo " --use-graphs true # use graphs in src-dir"
40
+ echo " --transform-dir <transform-dir> # directory to find fMLLR transforms"
41
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
42
+ exit 1;
43
+ fi
44
+
45
+ data=$1
46
+ lang=$2
47
+ srcdir=$3
48
+ dir=$4
49
+
50
+ oov=`cat $lang/oov.int` || exit 1;
51
+ silphonelist=`cat $lang/phones/silence.csl` || exit 1;
52
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
53
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
54
+ sdata=$data/split$nj
55
+
56
+ mkdir -p $dir/log
57
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
58
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
59
+ echo $nj > $dir/num_jobs
60
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
61
+
62
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
63
+ [ -f $srcdir/final.alimdl ] && cp $srcdir/final.alimdl $dir
64
+ cp $srcdir/final.occs $dir;
65
+
66
+ ## Set up features.
67
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
68
+ echo "$0: feature type is $feat_type"
69
+
70
+ case $feat_type in
71
+ delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";;
72
+ lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
73
+ cp $srcdir/final.mat $dir
74
+ ;;
75
+ *) echo "Invalid feature type $feat_type" && exit 1;
76
+ esac
77
+ if [ ! -z "$transform_dir" ]; then
78
+ echo "$0: using transforms from $transform_dir"
79
+ [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1;
80
+ [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \
81
+ && echo "$0: #jobs mismatch with transform-dir." && exit 1;
82
+ feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |"
83
+ elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then
84
+ echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms,"
85
+ echo " but you are not providing the --transform-dir option during alignment."
86
+ fi
87
+ ##
88
+
89
+ ## Set up model and alignment model.
90
+ mdl=$srcdir/final.mdl
91
+ if [ -f $srcdir/final.alimdl ]; then
92
+ alimdl=$srcdir/final.alimdl
93
+ else
94
+ alimdl=$srcdir/final.mdl
95
+ fi
96
+ [ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1;
97
+
98
+ ## Work out where we're getting the graphs from.
99
+ if $use_graphs; then
100
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
101
+ echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1;
102
+ [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1;
103
+ graphdir=$srcdir
104
+ ln.pl $srcdir/fsts.*.gz $dir
105
+ else
106
+ graphdir=$dir
107
+ if [ $stage -le 0 ]; then
108
+ echo "$0: compiling training graphs"
109
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
110
+ $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
111
+ compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \
112
+ "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
113
+ fi
114
+ fi
115
+
116
+ ## Work out where we're getting the Gaussian-selection info from
117
+ if $use_gselect; then
118
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
119
+ echo "$0: you specified --use-gselect true, but #jobs mismatch." && exit 1;
120
+ [ ! -f $srcdir/gselect.1.gz ] && echo "No gselect info in $srcdir" && exit 1;
121
+ graphdir=$srcdir
122
+ gselect_opt="--gselect=ark,s,cs:gunzip -c $srcdir/gselect.JOB.gz|"
123
+ ln.pl $srcdir/gselect.*.gz $dir
124
+ else
125
+ graphdir=$dir
126
+ if [ $stage -le 1 ]; then
127
+ echo "$0: computing Gaussian-selection info"
128
+ # Note: doesn't matter whether we use $alimdl or $mdl, they will
129
+ # have the same gselect info.
130
+ $cmd JOB=1:$nj $dir/log/gselect.JOB.log \
131
+ sgmm-gselect --full-gmm-nbest=$gselect $alimdl \
132
+ "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1;
133
+ fi
134
+ gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|"
135
+ fi
136
+
137
+
138
+ if [ $alimdl == $mdl ]; then
139
+ # Speaker-independent decoding-- just one pass. Not normal.
140
+ T=`sgmm-info $mdl | grep 'speaker vector space' | awk '{print $NF}'` || exit 1;
141
+ [ "$T" -ne 0 ] && echo "No alignment model, yet speaker vector space nonempty" && exit 1;
142
+
143
+ if [ $stage -le 2 ]; then
144
+ echo "$0: aligning data in $data using model $mdl (no speaker-vectors)"
145
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
146
+ sgmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam $alimdl \
147
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
148
+ fi
149
+ echo "$0: done aligning data."
150
+ exit 0;
151
+ fi
152
+
153
+ # Continue with system with speaker vectors.
154
+ if [ $stage -le 2 ]; then
155
+ echo "$0: aligning data in $data using model $alimdl"
156
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
157
+ sgmm-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam $alimdl \
158
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1;
159
+ fi
160
+
161
+ if [ $stage -le 3 ]; then
162
+ echo "$0: computing speaker vectors (1st pass)"
163
+ $cmd JOB=1:$nj $dir/log/spk_vecs1.JOB.log \
164
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
165
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
166
+ sgmm-post-to-gpost "$gselect_opt" $alimdl "$feats" ark:- ark:- \| \
167
+ sgmm-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \
168
+ $mdl "$feats" ark,s,cs:- ark:$dir/pre_vecs.JOB || exit 1;
169
+ fi
170
+
171
+ if [ $stage -le 4 ]; then
172
+ echo "$0: computing speaker vectors (2nd pass)"
173
+ $cmd JOB=1:$nj $dir/log/spk_vecs2.JOB.log \
174
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
175
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
176
+ sgmm-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" \
177
+ --spk-vecs=ark:$dir/pre_vecs.JOB $mdl "$feats" ark,s,cs:- ark:$dir/vecs.JOB || exit 1;
178
+ rm $dir/pre_vecs.*
179
+ fi
180
+
181
+ if [ $stage -le 5 ]; then
182
+ echo "$0: doing final alignment."
183
+ $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \
184
+ sgmm-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam \
185
+ --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \
186
+ $mdl "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
187
+ fi
188
+
189
+ rm $dir/pre_ali.*.gz
190
+
191
+ echo "$0: done aligning data."
192
+
193
+ utils/summarize_warnings.pl $dir/log
194
+
195
+ exit 0;
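# Example (directory names are hypothetical): aligning training data with an
# SGMM system built on top of SAT, supplying the fMLLR transforms from the
# tri3b alignment directory as the comments above require:
#
#   steps/align_sgmm.sh --nj 8 --cmd run.pl --transform-dir exp/tri3b_ali \
#     data/train data/lang exp/sgmm4a exp/sgmm4a_ali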
tools/ASR_2ch_track/steps/align_sgmm2.sh ADDED
@@ -0,0 +1,195 @@
1
+ #!/bin/bash
2
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
3
+ # Apache 2.0
4
+
5
+ # Computes training alignments and (if needed) speaker-vectors, given an
6
+ # SGMM system. If the system is built on top of SAT, you should supply
7
+ # transforms with the --transform-dir option.
8
+
9
+ # If you supply the --use-graphs option, it will use the training
10
+ # graphs from the source directory.
11
+
12
+ # Begin configuration section.
13
+ stage=0
14
+ nj=4
15
+ cmd=run.pl
16
+ use_graphs=false # use graphs from srcdir
17
+ use_gselect=false # use gselect info from srcdir [either way we use
18
+ # Gaussian-selection info; we may just have to compute it here.]
19
+ gselect=15 # Number of Gaussian-selection indices for SGMMs.
20
+ # Begin configuration.
21
+ scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
22
+ beam=10
23
+ retry_beam=40
24
+ transform_dir= # directory to find fMLLR transforms in.
25
+ # End configuration options.
26
+
27
+ echo "$0 $@" # Print the command line for logging
28
+
29
+ [ -f path.sh ] && . ./path.sh # source the path.
30
+ . parse_options.sh || exit 1;
31
+
32
+ if [ $# != 4 ]; then
33
+ echo "usage: steps/align_sgmm.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
34
+ echo "e.g.: steps/align_sgmm.sh --transform-dir exp/tri3b data/train data/lang \\"
35
+ echo " exp/sgmm4a exp/sgmm5a_ali"
36
+ echo "main options (for others, see top of script file)"
37
+ echo " --config <config-file> # config containing options"
38
+ echo " --nj <nj> # number of parallel jobs"
39
+ echo " --use-graphs true # use graphs in src-dir"
40
+ echo " --transform-dir <transform-dir> # directory to find fMLLR transforms"
41
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
42
+ exit 1;
43
+ fi
44
+
45
+ data=$1
46
+ lang=$2
47
+ srcdir=$3
48
+ dir=$4
49
+
50
+ oov=`cat $lang/oov.int` || exit 1;
51
+ silphonelist=`cat $lang/phones/silence.csl` || exit 1;
52
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
53
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
54
+ sdata=$data/split$nj
55
+
56
+ mkdir -p $dir/log
57
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
58
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
59
+ echo $nj > $dir/num_jobs
60
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
61
+
62
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
63
+ [ -f $srcdir/final.alimdl ] && cp $srcdir/final.alimdl $dir
64
+ cp $srcdir/final.occs $dir;
65
+
66
+ ## Set up features.
67
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
68
+ echo "$0: feature type is $feat_type"
69
+
70
+ case $feat_type in
71
+ delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";;
72
+ lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
73
+ cp $srcdir/final.mat $dir
74
+ ;;
75
+ *) echo "Invalid feature type $feat_type" && exit 1;
76
+ esac
77
+ if [ ! -z "$transform_dir" ]; then
78
+ echo "$0: using transforms from $transform_dir"
79
+ [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1;
80
+ [ "$nj" -ne "`cat $transform_dir/num_jobs`" ] \
81
+ && echo "$0: #jobs mismatch with transform-dir." && exit 1;
82
+ feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$transform_dir/trans.JOB ark:- ark:- |"
83
+ elif grep 'transform-feats --utt2spk' $srcdir/log/acc.0.1.log 2>/dev/null; then
84
+ echo "$0: **WARNING**: you seem to be using an SGMM system trained with transforms,"
85
+ echo " but you are not providing the --transform-dir option during alignment."
86
+ fi
87
+ ##
88
+
89
+ ## Set up model and alignment model.
90
+ mdl=$srcdir/final.mdl
91
+ if [ -f $srcdir/final.alimdl ]; then
92
+ alimdl=$srcdir/final.alimdl
93
+ else
94
+ alimdl=$srcdir/final.mdl
95
+ fi
96
+ [ ! -f $mdl ] && echo "$0: no such model $mdl" && exit 1;
97
+
98
+ ## Work out where we're getting the graphs from.
99
+ if $use_graphs; then
100
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
101
+ echo "$0: you specified --use-graphs true, but #jobs mismatch." && exit 1;
102
+ [ ! -f $srcdir/fsts.1.gz ] && echo "No graphs in $srcdir" && exit 1;
103
+ graphdir=$srcdir
104
+ ln.pl $srcdir/fsts.*.gz $dir
105
+ else
106
+ graphdir=$dir
107
+ if [ $stage -le 0 ]; then
108
+ echo "$0: compiling training graphs"
109
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
110
+ $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
111
+ compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" \
112
+ "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
113
+ fi
114
+ fi
115
+
116
+ ## Work out where we're getting the Gaussian-selection info from
117
+ if $use_gselect; then
118
+ [ "$nj" != "`cat $srcdir/num_jobs`" ] && \
119
+ echo "$0: you specified --use-gselect true, but #jobs mismatch." && exit 1;
120
+ [ ! -f $srcdir/gselect.1.gz ] && echo "No gselect info in $srcdir" && exit 1;
121
+ graphdir=$srcdir
122
+ gselect_opt="--gselect=ark,s,cs:gunzip -c $srcdir/gselect.JOB.gz|"
123
+ ln.pl $srcdir/gselect.*.gz $dir
124
+ else
125
+ graphdir=$dir
126
+ if [ $stage -le 1 ]; then
127
+ echo "$0: computing Gaussian-selection info"
128
+ # Note: doesn't matter whether we use $alimdl or $mdl, they will
129
+ # have the same gselect info.
130
+ $cmd JOB=1:$nj $dir/log/gselect.JOB.log \
131
+ sgmm2-gselect --full-gmm-nbest=$gselect $alimdl \
132
+ "$feats" "ark:|gzip -c >$dir/gselect.JOB.gz" || exit 1;
133
+ fi
134
+ gselect_opt="--gselect=ark,s,cs:gunzip -c $dir/gselect.JOB.gz|"
135
+ fi
136
+
137
+
138
+ if [ $alimdl == $mdl ]; then
139
+ # Speaker-independent decoding-- just one pass. Not normal.
140
+ T=`sgmm2-info $mdl | grep 'speaker vector space' | awk '{print $NF}'` || exit 1;
141
+ [ "$T" -ne 0 ] && echo "No alignment model, yet speaker vector space nonempty" && exit 1;
142
+
143
+ if [ $stage -le 2 ]; then
144
+ echo "$0: aligning data in $data using model $mdl (no speaker-vectors)"
145
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
146
+ sgmm2-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam $alimdl \
147
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
148
+ fi
149
+ echo "$0: done aligning data."
150
+ exit 0;
151
+ fi
152
+
153
+ # Continue with system with speaker vectors.
154
+ if [ $stage -le 2 ]; then
155
+ echo "$0: aligning data in $data using model $alimdl"
156
+ $cmd JOB=1:$nj $dir/log/align_pass1.JOB.log \
157
+ sgmm2-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam $alimdl \
158
+ "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/pre_ali.JOB.gz" || exit 1;
159
+ fi
160
+
161
+ if [ $stage -le 3 ]; then
162
+ echo "$0: computing speaker vectors (1st pass)"
163
+ $cmd JOB=1:$nj $dir/log/spk_vecs1.JOB.log \
164
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
165
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
166
+ sgmm2-post-to-gpost "$gselect_opt" $alimdl "$feats" ark:- ark:- \| \
167
+ sgmm2-est-spkvecs-gpost --spk2utt=ark:$sdata/JOB/spk2utt \
168
+ $mdl "$feats" ark,s,cs:- ark:$dir/pre_vecs.JOB || exit 1;
169
+ fi
170
+
171
+ if [ $stage -le 4 ]; then
172
+ echo "$0: computing speaker vectors (2nd pass)"
173
+ $cmd JOB=1:$nj $dir/log/spk_vecs2.JOB.log \
174
+ ali-to-post "ark:gunzip -c $dir/pre_ali.JOB.gz|" ark:- \| \
175
+ weight-silence-post 0.0 $silphonelist $alimdl ark:- ark:- \| \
176
+ sgmm2-est-spkvecs --spk2utt=ark:$sdata/JOB/spk2utt "$gselect_opt" \
177
+ --spk-vecs=ark:$dir/pre_vecs.JOB $mdl "$feats" ark,s,cs:- ark:$dir/vecs.JOB || exit 1;
178
+ rm $dir/pre_vecs.*
179
+ fi
180
+
181
+ if [ $stage -le 5 ]; then
182
+ echo "$0: doing final alignment."
183
+ $cmd JOB=1:$nj $dir/log/align_pass2.JOB.log \
184
+ sgmm2-align-compiled $scale_opts "$gselect_opt" --beam=$beam --retry-beam=$retry_beam \
185
+ --utt2spk=ark:$sdata/JOB/utt2spk --spk-vecs=ark:$dir/vecs.JOB \
186
+ $mdl "ark:gunzip -c $graphdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
187
+ fi
188
+
189
+ rm $dir/pre_ali.*.gz
190
+
191
+ echo "$0: done aligning data."
192
+
193
+ utils/summarize_warnings.pl $dir/log
194
+
195
+ exit 0;
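# Note: this file is expected to mirror align_sgmm.sh above line for line,
# except that it calls the sgmm2-* binaries (sgmm2-gselect,
# sgmm2-align-compiled, sgmm2-post-to-gpost, sgmm2-est-spkvecs[-gpost],
# sgmm2-info) and names itself in the usage message; assuming both files are
# present, a plain diff of the two scripts should show only those differences.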
tools/ASR_2ch_track/steps/align_si.sh ADDED
@@ -0,0 +1,101 @@
1
+ #!/bin/bash
2
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
3
+ # Apache 2.0
4
+
5
+ # Computes training alignments using a model with delta or
6
+ # LDA+MLLT features.
7
+
8
+ # If you supply the "--use-graphs true" option, it will use the training
9
+ # graphs from the source directory (where the model is). In this
10
+ # case the number of jobs must match with the source directory.
11
+
12
+
13
+ # Begin configuration section.
14
+ nj=4
15
+ cmd=run.pl
16
+ use_graphs=false
17
+ # Begin configuration.
18
+ scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
19
+ beam=10
20
+ retry_beam=40
21
+ careful=false
22
+ boost_silence=1.0 # Factor by which to boost silence during alignment.
23
+ # End configuration options.
24
+
25
+ echo "$0 $@" # Print the command line for logging
26
+
27
+ [ -f path.sh ] && . ./path.sh # source the path.
28
+ . parse_options.sh || exit 1;
29
+
30
+ if [ $# != 4 ]; then
31
+ echo "usage: steps/align_si.sh <data-dir> <lang-dir> <src-dir> <align-dir>"
32
+ echo "e.g.: steps/align_si.sh data/train data/lang exp/tri1 exp/tri1_ali"
33
+ echo "main options (for others, see top of script file)"
34
+ echo " --config <config-file> # config containing options"
35
+ echo " --nj <nj> # number of parallel jobs"
36
+ echo " --use-graphs true # use graphs in src-dir"
37
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
38
+ exit 1;
39
+ fi
40
+
41
+ data=$1
42
+ lang=$2
43
+ srcdir=$3
44
+ dir=$4
45
+
46
+
47
+ for f in $data/text $lang/oov.int $srcdir/tree $srcdir/final.mdl; do
48
+ [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1;
49
+ done
50
+
51
+ oov=`cat $lang/oov.int` || exit 1;
52
+ mkdir -p $dir/log
53
+ echo $nj > $dir/num_jobs
54
+ sdata=$data/split$nj
55
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
56
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
57
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
58
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
59
+ delta_opts=`cat $srcdir/delta_opts 2>/dev/null`
60
+ cp $srcdir/delta_opts $dir 2>/dev/null
61
+
62
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
63
+
64
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
65
+ cp $srcdir/final.occs $dir;
66
+
67
+
68
+
69
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
70
+ echo "$0: feature type is $feat_type"
71
+
72
+ case $feat_type in
73
+ delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
74
+ lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
75
+ cp $srcdir/final.mat $srcdir/full.mat $dir
76
+ ;;
77
+ *) echo "$0: invalid feature type $feat_type" && exit 1;
78
+ esac
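+ # Illustration (hypothetical paths, empty cmvn/delta options): for
+ # feat_type=delta and JOB=1, $feats expands to one Kaldi rspecifier
+ # (shown wrapped here) roughly like
+ #   ark,s,cs:apply-cmvn --utt2spk=ark:data/train/split4/1/utt2spk
+ #     scp:data/train/split4/1/cmvn.scp scp:data/train/split4/1/feats.scp ark:- |
+ #     add-deltas ark:- ark:- |
+ # i.e. each aligner job computes its features on the fly rather than reading
+ # a dumped copy from disk.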
79
+
80
+ echo "$0: aligning data in $data using model from $srcdir, putting alignments in $dir"
81
+
82
+ mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/final.mdl - |"
83
+
84
+ if $use_graphs; then
85
+ [ $nj != "`cat $srcdir/num_jobs`" ] && echo "$0: mismatch in num-jobs" && exit 1;
86
+ [ ! -f $srcdir/fsts.1.gz ] && echo "$0: no such file $srcdir/fsts.1.gz" && exit 1;
87
+
88
+ $cmd JOB=1:$nj $dir/log/align.JOB.log \
89
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" \
90
+ "ark:gunzip -c $srcdir/fsts.JOB.gz|" "$feats" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
91
+ else
92
+ tra="ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|";
93
+ # We could just use gmm-align in the next line, but it's less efficient as it compiles the
94
+ # training graphs one by one.
95
+ $cmd JOB=1:$nj $dir/log/align.JOB.log \
96
+ compile-train-graphs $dir/tree $dir/final.mdl $lang/L.fst "$tra" ark:- \| \
97
+ gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" ark:- \
98
+ "$feats" "ark,t:|gzip -c >$dir/ali.JOB.gz" || exit 1;
99
+ fi
100
+
101
+ echo "$0: done aligning data."
tools/ASR_2ch_track/steps/append_feats.sh ADDED
@@ -0,0 +1,85 @@
1
+ #!/bin/bash
2
+
3
+ # Copyright 2014 Brno University of Technology (Author: Karel Vesely)
4
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
5
+ # Apache 2.0
6
+ # This script appends the features in two or more data directories.
7
+
8
+ # To be run from .. (one directory up from here)
9
+ # see ../run.sh for example
10
+
11
+ # Begin configuration section.
12
+ cmd=run.pl
13
+ nj=4
14
+ length_tolerance=10 # length tolerance in frames (trim to shortest)
15
+ compress=true
16
+ # End configuration section.
17
+
18
+ echo "$0 $@" # Print the command line for logging
19
+
20
+ if [ -f path.sh ]; then . ./path.sh; fi
21
+ . parse_options.sh || exit 1;
22
+
23
+ if [ $# -lt 5 ]; then
24
+ echo "usage: $0 [options] <src-data-dir1> <src-data-dir2> [<src-data-dirN>] <dest-data-dir> <log-dir> <path-to-storage-dir>";
25
+ echo "e.g.: $0 data/train_mfcc data/train_bottleneck data/train_combined exp/append_mfcc_plp mfcc"
26
+ echo "options: "
27
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
28
+ exit 1;
29
+ fi
30
+
31
+ data_src_arr=(${@:1:$(($#-3))}) #array of source data-dirs
32
+ data=${@: -3: 1}
33
+ logdir=${@: -2: 1}
34
+ ark_dir=${@: -1: 1} #last arg.
35
+
36
+ data_src_first=${data_src_arr[0]} # get 1st src dir
37
+
38
+ # make $ark_dir an absolute pathname.
39
+ ark_dir=`perl -e '($dir,$pwd)= @ARGV; if($dir!~m:^/:) { $dir = "$pwd/$dir"; } print $dir; ' $ark_dir ${PWD}`
40
+
41
+ for data_src in ${data_src_arr[@]}; do
42
+ utils/split_data.sh $data_src $nj || exit 1;
43
+ done
44
+
45
+ mkdir -p $ark_dir $logdir
46
+
47
+ mkdir -p $data
48
+ cp $data_src_first/* $data/ 2>/dev/null # so we get the other files, such as utt2spk.
49
+ rm $data/cmvn.scp 2>/dev/null
50
+ rm $data/feats.scp 2>/dev/null
51
+
52
+ # use "name" as part of name of the archive.
53
+ name=`basename $data`
54
+
55
+ # get list of source scp's for pasting
56
+ data_src_args=
57
+ for data_src in ${data_src_arr[@]}; do
58
+ data_src_args="$data_src_args scp:$data_src/split$nj/JOB/feats.scp"
59
+ done
60
+
61
+ for n in $(seq $nj); do
62
+ # the next command does nothing unless $ark_dir/storage/ exists; see
63
+ # utils/create_data_link.pl for more info.
64
+ utils/create_data_link.pl $ark_dir/pasted_$name.$n.ark
65
+ done
66
+
67
+ $cmd JOB=1:$nj $logdir/append.JOB.log \
68
+ paste-feats --length-tolerance=$length_tolerance $data_src_args ark:- \| \
69
+ copy-feats --compress=$compress ark:- \
70
+ ark,scp:$ark_dir/pasted_$name.JOB.ark,$ark_dir/pasted_$name.JOB.scp || exit 1;
71
+
72
+ # concatenate the .scp files together.
73
+ for ((n=1; n<=nj; n++)); do
74
+ cat $ark_dir/pasted_$name.$n.scp || exit 1;
75
+ done > $data/feats.scp || exit 1;
76
+
77
+
78
+ nf=`cat $data/feats.scp | wc -l`
79
+ nu=`cat $data/utt2spk | wc -l`
80
+ if [ $nf -ne $nu ]; then
81
+ echo "It seems not all of the feature files were successfully processed ($nf != $nu);"
82
+ echo "consider using utils/fix_data_dir.sh $data"
83
+ fi
84
+
85
+ echo "Succeeded pasting features for $name into $data"
tools/ASR_2ch_track/steps/cleanup/combine_short_segments.py ADDED
@@ -0,0 +1,312 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2016 Vijayaditya Peddinti
4
+ # Apache 2.0
5
+
6
+ import argparse
7
+ import sys
8
+ import os
9
+ import subprocess
10
+ import errno
11
+ import copy
12
+ import shutil
13
+
14
+ def GetArgs():
15
+ # we add compulsory arguments as named arguments for readability
16
+ parser = argparse.ArgumentParser(description="""
17
+ This script concatenates segments in the input_data_dir to ensure that"""
18
+ " the segments in the output_data_dir have a specified minimum length.",
19
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
20
+
21
+ parser.add_argument("--minimum-duration", type=float, required = True,
22
+ help="Minimum duration of the segments in the output directory")
23
+ parser.add_argument("--input-data-dir", type=str, required = True)
24
+ parser.add_argument("--output-data-dir", type=str, required = True)
25
+
26
+ print(' '.join(sys.argv))
27
+ args = parser.parse_args()
28
+ return args
29
+
30
+ def RunKaldiCommand(command, wait = True):
31
+ """ Runs commands frequently seen in Kaldi scripts. These are usually a
32
+ sequence of commands connected by pipes, so we use shell=True """
33
+ p = subprocess.Popen(command, shell = True,
34
+ stdout = subprocess.PIPE,
35
+ stderr = subprocess.PIPE)
36
+
37
+ if wait:
38
+ [stdout, stderr] = p.communicate()
39
+ if p.returncode != 0:
40
+ raise Exception("There was an error while running the command {0}\n".format(command)+"-"*10+"\n"+stderr)
41
+ return stdout, stderr
42
+ else:
43
+ return p
44
+
45
+ def MakeDir(dir):
46
+ try:
47
+ os.mkdir(dir)
48
+ except OSError as exc:
49
+ if exc.errno != errno.EEXIST:
50
+ raise exc
51
+ raise Exception("Directory {0} already exists".format(dir))
52
+ pass
53
+
54
+ def CheckFiles(input_data_dir):
55
+ for file_name in ['spk2utt', 'text', 'utt2spk', 'feats.scp']:
56
+ file_name = '{0}/{1}'.format(input_data_dir, file_name)
57
+ if not os.path.exists(file_name):
58
+ raise Exception("There is no such file {0}".format(file_name))
59
+
60
+ def ParseFileToDict(file, assert2fields = False, value_processor = None):
61
+ if value_processor is None:
62
+ value_processor = lambda x: x[0]
63
+
64
+ dict = {}
65
+ for line in open(file, 'r'):
66
+ parts = line.split()
67
+ if assert2fields:
68
+ assert(len(parts) == 2)
69
+
70
+ dict[parts[0]] = value_processor(parts[1:])
71
+ return dict
72
+
73
+ def WriteDictToFile(dict, file_name):
74
+ file = open(file_name, 'w')
75
+ keys = dict.keys()
76
+ keys.sort()
77
+ for key in keys:
78
+ value = dict[key]
79
+ if type(value) in [list, tuple] :
80
+ if type(value) is tuple:
81
+ value = list(value)
82
+ value.sort()
83
+ value = ' '.join(value)
84
+ file.write('{0}\t{1}\n'.format(key, value))
85
+ file.close()
86
+
87
+
88
+ def ParseDataDirInfo(data_dir):
89
+ data_dir_file = lambda file_name: '{0}/{1}'.format(data_dir, file_name)
90
+
91
+ utt2spk = ParseFileToDict(data_dir_file('utt2spk'))
92
+ spk2utt = ParseFileToDict(data_dir_file('spk2utt'), value_processor = lambda x: x)
93
+ text = ParseFileToDict(data_dir_file('text'), value_processor = lambda x: " ".join(x))
94
+ # we want to assert feats.scp has just 2 fields, as we don't know how
95
+ # to process it otherwise
96
+ feat = ParseFileToDict(data_dir_file('feats.scp'), assert2fields = True)
97
+ utt2dur = ParseFileToDict(data_dir_file('utt2dur'), value_processor = lambda x: float(x[0]))
98
+ utt2uniq = None
99
+ if os.path.exists(data_dir_file('utt2uniq')):
100
+ utt2uniq = ParseFileToDict(data_dir_file('utt2uniq'))
101
+ return utt2spk, spk2utt, text, feat, utt2dur, utt2uniq
102
+
103
+
104
+ def GetCombinedUttIndexRange(utt_index, utts, utt_durs, minimum_duration):
105
+ # We want the minimum number of concatenations
106
+ # to reach the minimum_duration. If two concatenations satisfy
107
+ # the minimum duration constraint we choose the shorter one.
108
+ left_index = utt_index - 1
109
+ right_index = utt_index + 1
110
+ num_remaining_segments = len(utts) - 1
111
+ cur_utt_dur = utt_durs[utts[utt_index]]
112
+
113
+ while num_remaining_segments > 0:
114
+
115
+ left_utt_dur = 0
116
+ if left_index >= 0:
117
+ left_utt_dur = utt_durs[utts[left_index]]
118
+ right_utt_dur = 0
119
+ if right_index <= len(utts) - 1:
120
+ right_utt_dur = utt_durs[utts[right_index]]
121
+
122
+ right_combined_utt_dur = cur_utt_dur + right_utt_dur
123
+ left_combined_utt_dur = cur_utt_dur + left_utt_dur
124
+ left_right_combined_utt_dur = cur_utt_dur + left_utt_dur + right_utt_dur
125
+
126
+ combine_left_exit = False
127
+ combine_right_exit = False
128
+ if right_combined_utt_dur >= minimum_duration:
129
+ if left_combined_utt_dur >= minimum_duration:
130
+ if left_combined_utt_dur <= right_combined_utt_dur:
131
+ combine_left_exit = True
132
+ else:
133
+ combine_right_exit = True
134
+ else:
135
+ combine_right_exit = True
136
+ elif left_combined_utt_dur >= minimum_duration:
137
+ combine_left_exit = True
138
+ elif left_right_combined_utt_dur >= minimum_duration :
139
+ combine_left_exit = True
140
+ combine_right_exit = True
141
+
142
+ if combine_left_exit and combine_right_exit:
143
+ cur_utt_dur = left_right_combined_utt_dur
144
+ break
145
+ elif combine_left_exit:
146
+ cur_utt_dur = left_combined_utt_dur
147
+ # move back the right_index as we don't need to combine it
148
+ right_index = right_index - 1
149
+ break
150
+ elif combine_right_exit:
151
+ cur_utt_dur = right_combined_utt_dur
152
+ # move back the left_index as we don't need to combine it
153
+ left_index = left_index + 1
154
+ break
155
+
156
+ # couldn't satisfy minimum duration requirement so continue search
157
+ if left_index >= 0:
158
+ num_remaining_segments = num_remaining_segments - 1
159
+ if right_index <= len(utts) - 1:
160
+ num_remaining_segments = num_remaining_segments - 1
161
+
162
+ left_index = left_index - 1
163
+ right_index = right_index + 1
164
+
165
+ cur_utt_dur = left_right_combined_utt_dur
166
+ left_index = max(0, left_index)
167
+ right_index = min(len(utts)-1, right_index)
168
+ return left_index, right_index, cur_utt_dur
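+ # Worked illustration (made-up durations): with minimum_duration=1.55 and
+ # utterance durations [0.8, 0.4, 1.2] for utts[0..2], a call with utt_index=1
+ # finds right_combined = 0.4 + 1.2 = 1.6 >= 1.55 while left_combined =
+ # 0.4 + 0.8 = 1.2 < 1.55, so only the right neighbour is taken and the
+ # function returns (1, 2, 1.6), i.e. utts[1] and utts[2] get merged.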
169
+
170
+
171
+ def WriteCombinedDirFiles(output_dir, utt2spk, spk2utt, text, feat, utt2dur, utt2uniq):
172
+ out_dir_file = lambda file_name: '{0}/{1}'.format(output_dir, file_name)
173
+ total_combined_utt_list = []
174
+ for speaker in spk2utt.keys():
175
+ utts = spk2utt[speaker]
176
+ for utt in utts:
177
+ if type(utt) is tuple:
178
+ #this is a combined utt
179
+ total_combined_utt_list.append((speaker, utt))
180
+
181
+ for speaker, combined_utt_tuple in total_combined_utt_list:
182
+ combined_utt_list = list(combined_utt_tuple)
183
+ combined_utt_list.sort()
184
+ new_utt_name = "-".join(combined_utt_list)+'-appended'
185
+
186
+ # updating the utt2spk dict
187
+ for utt in combined_utt_list:
188
+ spk_name = utt2spk.pop(utt)
189
+ utt2spk[new_utt_name] = spk_name
190
+
191
+ # updating the spk2utt dict
192
+ spk2utt[speaker].remove(combined_utt_tuple)
193
+ spk2utt[speaker].append(new_utt_name)
194
+
195
+ # updating the text dict
196
+ combined_text = []
197
+ for utt in combined_utt_list:
198
+ combined_text.append(text.pop(utt))
199
+ text[new_utt_name] = ' '.join(combined_text)
200
+
201
+ # updating the feat dict
202
+ combined_feat = []
203
+ for utt in combined_utt_list:
204
+ combined_feat.append(feat.pop(utt))
205
+ feat_command = "concat-feats --print-args=false {feats} - |".format(feats = " ".join(combined_feat))
206
+ feat[new_utt_name] = feat_command
207
+
208
+ # updating utt2dur
209
+ combined_dur = 0
210
+ for utt in combined_utt_list:
211
+ combined_dur += utt2dur.pop(utt)
212
+ utt2dur[new_utt_name] = combined_dur
213
+
214
+ # updating utt2uniq
215
+ if utt2uniq is not None:
216
+ combined_uniqs = []
217
+ for utt in combined_utt_list:
218
+ combined_uniqs.append(utt2uniq.pop(utt))
219
+ # The utt2uniq file is used to map perturbed data back to the original
220
+ # unperturbed versions, so that the training and cross-validation sets can
221
+ # avoid overlapping data. However, if the perturbation changes the length of
222
+ # an utterance (e.g. speed perturbation), the utterance combinations in each
223
+ # perturbation of the original recording can be very different, so there
224
+ # is no good way to find the utt2uniq mapping that would avoid the
225
+ # overlap.
226
+ utt2uniq[new_utt_name] = combined_uniqs[0]
227
+
228
+
229
+ WriteDictToFile(utt2spk, out_dir_file('utt2spk'))
230
+ WriteDictToFile(spk2utt, out_dir_file('spk2utt'))
231
+ WriteDictToFile(feat, out_dir_file('feats.scp'))
232
+ WriteDictToFile(text, out_dir_file('text'))
233
+ if utt2uniq is not None:
234
+ WriteDictToFile(utt2uniq, out_dir_file('utt2uniq'))
235
+ WriteDictToFile(utt2dur, out_dir_file('utt2dur'))
236
+
237
+
238
+ def CombineSegments(input_dir, output_dir, minimum_duration):
239
+ utt2spk, spk2utt, text, feat, utt2dur, utt2uniq = ParseDataDirInfo(input_dir)
240
+ total_combined_utt_list = []
241
+
242
+ # copy the duration dictionary so that we can modify it
243
+ utt_durs = copy.deepcopy(utt2dur)
244
+ speakers = spk2utt.keys()
245
+ speakers.sort()
246
+ for speaker in speakers:
247
+
248
+ utts = spk2utt[speaker] # this is an assignment of the reference
249
+ # In WriteCombinedDirFiles the values of spk2utt will have the list
250
+ # of combined utts which will be used as reference
251
+
252
+ # we make the assumption that the sorted utterance list corresponds
253
+ # to contiguous segments. This is true only if utterance naming
254
+ # follows the accepted conventions, so it is an easily violated
255
+ # assumption; we may have to think of a better
256
+ # way to do this.
257
+ utts.sort()
258
+ utt_index = 0
259
+ while utt_index < len(utts):
260
+ if utt_durs[utts[utt_index]] < minimum_duration:
261
+ left_index, right_index, cur_utt_dur = GetCombinedUttIndexRange(utt_index, utts, utt_durs, minimum_duration)
262
+ if not cur_utt_dur >= minimum_duration:
263
+ # this is a rare occurrence, better make the user aware of this
264
+ # situation and let them deal with it
265
+ raise Exception('Speaker {0} does not have enough utterances to satisfy the minimum duration constraint'.format(speaker))
266
+
267
+ combined_duration = 0
268
+ combined_utts = []
269
+ # update the utts_dur dictionary
270
+ for utt in utts[left_index:right_index + 1]:
271
+ combined_duration += utt_durs.pop(utt)
272
+ if type(utt) is tuple:
273
+ for item in utt:
274
+ combined_utts.append(item)
275
+ else:
276
+ combined_utts.append(utt)
277
+ combined_utts = tuple(combined_utts) # converting to immutable type to use as dictionary key
278
+ assert(cur_utt_dur == combined_duration)
279
+
280
+ # now modify the utts list
281
+ combined_indices = range(left_index, right_index + 1)
282
+ # start popping from the largest index so that the lower
283
+ # indexes are valid
284
+ for i in combined_indices[::-1]:
285
+ utts.pop(i)
286
+ utts.insert(left_index, combined_utts)
287
+ utt_durs[combined_utts] = combined_duration
288
+ utt_index = left_index
289
+ utt_index = utt_index + 1
290
+ WriteCombinedDirFiles(output_dir, utt2spk, spk2utt, text, feat, utt2dur, utt2uniq)
291
+
292
+ def Main():
293
+ args = GetArgs()
294
+
295
+ CheckFiles(args.input_data_dir)
296
+ MakeDir(args.output_data_dir)
297
+ feat_lengths = {}
298
+ segments_file = '{0}/segments'.format(args.input_data_dir)
299
+
300
+ RunKaldiCommand("utils/data/get_utt2dur.sh {0}".format(args.input_data_dir))
301
+
302
+ CombineSegments(args.input_data_dir, args.output_data_dir, args.minimum_duration)
303
+
304
+ RunKaldiCommand("utils/utt2spk_to_spk2utt.pl {od}/utt2spk > {od}/spk2utt".format(od = args.output_data_dir))
305
+ if os.path.exists('{0}/cmvn.scp'.format(args.input_data_dir)):
306
+ shutil.copy('{0}/cmvn.scp'.format(args.input_data_dir), args.output_data_dir)
307
+
308
+ RunKaldiCommand("utils/fix_data_dir.sh {0}".format(args.output_data_dir))
309
+ if __name__ == "__main__":
310
+ Main()
311
+
312
+
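# Example (paths and the threshold value are hypothetical): merging segments
# shorter than the minimum duration with neighbouring segments of the same
# speaker, writing the result to a fresh data directory:
#
#   python steps/cleanup/combine_short_segments.py --minimum-duration 1.55 \
#     --input-data-dir data/train --output-data-dir data/train_min1.55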
tools/ASR_2ch_track/steps/cleanup/create_segments_from_ctm.pl ADDED
@@ -0,0 +1,471 @@
1
+ #!/usr/bin/env perl
2
+
3
+ # Copyright 2014 Guoguo Chen; 2015 Nagendra Kumar Goel
4
+ # Apache 2.0
5
+
6
+ use strict;
7
+ use warnings;
8
+ use Getopt::Long;
9
+
10
+ # $SIG{__WARN__} = sub { $DB::single = 1 };
11
+
12
+ my $Usage = <<'EOU';
13
+ This script creates the segments file and text file for a data directory with
14
+ new segmentation. It takes a ctm file and an "alignment" file. The ctm file
15
+ corresponds to the audio that we want to make segmentations for, and is created
16
+ by decoding the audio using existing in-domain models. The "alignment" file is
17
+ generated by the binary align-text, and is Levenshtein alignment between the
18
+ original transcript and the decoded output.
19
+
20
+ Internally, the script first tries to find silence regions (gaps in the CTM).
21
+ If a silence region is found, and the neighboring words are free of errors
22
+ according to the alignment file, then this silence region will be taken as
23
+ a split point, and a new segment will be created. If the new segment we are going
24
+ to output is too long (longer than --max-seg-length), the script will split
25
+ the long segments into smaller pieces with length roughly --max-seg-length.
26
+ If you are going to use --wer-cutoff to filter out segments with high WER, make
27
+ sure you set it to a reasonable value. If the value you set is higher than the
28
+ WER from your alignment file, then most of the segments will be filtered out.
29
+
30
+ Usage: steps/cleanup/create_segments_from_ctm.pl [options] \
31
+ <ctm> <aligned.txt> <segments> <text>
32
+ e.g.: steps/cleanup/create_segments_from_ctm.pl \
33
+ train_si284_split.ctm train_si284_split.aligned.txt \
34
+ data/train_si284_reseg/segments data/train_si284_reseg/text
35
+
36
+ Allowed options:
37
+ --max-seg-length : Maximum length of new segments (default = 10.0)
38
+ --min-seg-length : Minimum length of new segments (default = 2.0)
39
+ --min-sil-length : Minimum length of silence as split point (default = 0.5)
40
+ --separator : Separator for aligned pairs (default = ";")
41
+ --special-symbol : Special symbol to aligned with inserted or deleted words
42
+ (default = "<***>")
43
+ --wer-cutoff : Ignore segments with WER higher than the specified value.
44
+ -1 means no segment will be ignored. (default = -1)
45
+ --use-silence-midpoints : Set to 1 if you want to use silence midpoints
46
+ instead of min_sil_length for silence overhang. (default 0)
47
+ --force-correct-boundary-words : Set to zero if the segments will not be
48
+ required to have boundary words to be correct. Default 1
49
+ --aligned-ctm-filename : If set, the intermediate aligned ctm
50
+ is saved to this file
51
+ EOU
52
+
53
+ my $max_seg_length = 10.0;
54
+ my $min_seg_length = 2.0;
55
+ my $min_sil_length = 0.5;
56
+ my $separator = ";";
57
+ my $special_symbol = "<***>";
58
+ my $wer_cutoff = -1;
59
+ my $use_silence_midpoints = 0;
60
+ my $force_correct_boundary_words = 1;
61
+ my $aligned_ctm_filename = "";
62
+ GetOptions(
63
+ 'wer-cutoff=f' => \$wer_cutoff,
64
+ 'max-seg-length=f' => \$max_seg_length,
65
+ 'min-seg-length=f' => \$min_seg_length,
66
+ 'min-sil-length=f' => \$min_sil_length,
67
+ 'use-silence-midpoints=f' => \$use_silence_midpoints,
68
+ 'force-correct-boundary-words=f' => \$force_correct_boundary_words,
69
+ 'aligned-ctm-filename=s' => \$aligned_ctm_filename,
70
+ 'separator=s' => \$separator,
71
+ 'special-symbol=s' => \$special_symbol);
72
+
73
+ if (@ARGV != 4) {
74
+ die $Usage;
75
+ }
76
+
77
+ my ($ctm_in, $align_in, $segments_out, $text_out) = @ARGV;
78
+
79
+ open(CI, "<$ctm_in") || die "Error: fail to open $ctm_in\n";
80
+ open(AI, "<$align_in") || die "Error: fail to open $align_in\n";
81
+ open(my $SO, ">$segments_out") || die "Error: fail to open $segments_out\n";
82
+ open(my $TO, ">$text_out") || die "Error: fail to open $text_out\n";
83
+ my $ACT= undef;
84
+ if ($aligned_ctm_filename ne "") {
85
+ open($ACT, ">$aligned_ctm_filename");
86
+ }
87
+ # Prints the current segment to file.
88
+ sub PrintSegment {
89
+ my ($aligned_ctm, $wav_id, $min_sil_length, $min_seg_length,
90
+ $seg_start_index, $seg_end_index, $seg_count, $SO, $TO) = @_;
91
+
92
+ if ($seg_start_index > $seg_end_index) {
93
+ return -1;
94
+ }
95
+
96
+ # Removes the surrounding silence.
97
+ while ($seg_start_index < scalar(@{$aligned_ctm}) &&
98
+ $aligned_ctm->[$seg_start_index]->[0] eq "<eps>") {
99
+ $seg_start_index += 1;
100
+ }
101
+ while ($seg_end_index >= 0 &&
102
+ $aligned_ctm->[$seg_end_index]->[0] eq "<eps>") {
103
+ $seg_end_index -= 1;
104
+ }
105
+ if ($seg_start_index > $seg_end_index) {
106
+ return -1;
107
+ }
108
+
109
+ # Filters out segments with high WER.
110
+ if ($wer_cutoff != -1) {
111
+ my $num_errors = 0; my $num_words = 0;
112
+ for (my $i = $seg_start_index; $i <= $seg_end_index; $i += 1) {
113
+ if ($aligned_ctm->[$i]->[0] ne "<eps>") {
114
+ $num_words += 1;
115
+ }
116
+ $num_errors += $aligned_ctm->[$i]->[3];
117
+ }
118
+ if ($num_words < 1 || $num_errors / $num_words > $wer_cutoff) {
119
+ return -1;
120
+ }
121
+ }
122
+
123
+ # Works out the surrounding silence.
124
+ my $index = $seg_start_index - 1;
125
+ while ($index >= 0 && $aligned_ctm->[$index]->[0] eq
126
+ "<eps>" && $aligned_ctm->[$index]->[3] == 0) {
127
+ $index -= 1;
128
+ }
129
+ my $left_of_segment_has_deletion = "false";
130
+ $left_of_segment_has_deletion = "true"
131
+ if ($index > 0 && $aligned_ctm->[$index-1]->[0] ne "<eps>"
132
+ && $aligned_ctm->[$index-1]->[3] == 0);
133
+
134
+ my $pad_start_sil = ($aligned_ctm->[$seg_start_index]->[1] -
135
+ $aligned_ctm->[$index + 1]->[1]) / 2.0;
136
+ if (($left_of_segment_has_deletion eq "true") || !$use_silence_midpoints) {
137
+ if ($pad_start_sil > $min_sil_length / 2.0) {
138
+ $pad_start_sil = $min_sil_length / 2.0;
139
+ }
140
+ }
141
+ my $right_of_segment_has_deletion = "false";
142
+ $index = $seg_end_index + 1;
143
+ while ($index < scalar(@{$aligned_ctm}) &&
144
+ $aligned_ctm->[$index]->[0] eq "<eps>" &&
145
+ $aligned_ctm->[$index]->[3] == 0) {
146
+ $index += 1;
147
+ }
148
+ $right_of_segment_has_deletion = "true"
149
+ if ($index < scalar(@{$aligned_ctm})-1 && $aligned_ctm->[$index+1]->[0] ne
150
+ "<eps>" && $aligned_ctm->[$index - 1]->[3] > 0);
151
+ my $pad_end_sil = ($aligned_ctm->[$index - 1]->[1] +
152
+ $aligned_ctm->[$index - 1]->[2] -
153
+ $aligned_ctm->[$seg_end_index]->[1] -
154
+ $aligned_ctm->[$seg_end_index]->[2]) / 2.0;
155
+ if (($right_of_segment_has_deletion eq "true") || !$use_silence_midpoints) {
156
+ if ($pad_end_sil > $min_sil_length / 2.0) {
157
+ $pad_end_sil = $min_sil_length / 2.0;
158
+ }
159
+ }
160
+
161
+ my $seg_start = $aligned_ctm->[$seg_start_index]->[1] - $pad_start_sil;
162
+ my $seg_end = $aligned_ctm->[$seg_end_index]->[1] +
163
+ $aligned_ctm->[$seg_end_index]->[2] + $pad_end_sil;
164
+ if ($seg_end - $seg_start < $min_seg_length) {
165
+ return -1;
166
+ }
167
+
168
+ $seg_start = sprintf("%.2f", $seg_start);
169
+ $seg_end = sprintf("%.2f", $seg_end);
170
+ my $seg_id = $wav_id . "_" . sprintf("%05d", $seg_count);
171
+ print $SO "$seg_id $wav_id $seg_start $seg_end\n";
172
+
173
+ print $TO "$seg_id ";
174
+ for (my $x = $seg_start_index; $x <= $seg_end_index; $x += 1) {
175
+ if ($aligned_ctm->[$x]->[0] ne "<eps>") {
176
+ print $TO "$aligned_ctm->[$x]->[0] ";
177
+ }
178
+ }
179
+ print $TO "\n";
180
+ return 0;
181
+ }
182
+
183
+ # Computes split point.
184
+ sub GetSplitPoint {
185
+ my ($aligned_ctm, $seg_start_index, $seg_end_index, $max_seg_length) = @_;
186
+
187
+ # Scan in the reversed order so we can maximize the length.
188
+ my $split_point = $seg_start_index;
189
+ for (my $x = $seg_end_index; $x > $seg_start_index; $x -= 1) {
190
+ my $current_seg_length = $aligned_ctm->[$x]->[1] +
191
+ $aligned_ctm->[$x]->[2] -
192
+ $aligned_ctm->[$seg_start_index]->[1];
193
+ if ($current_seg_length <= $max_seg_length) {
194
+ $split_point = $x;
195
+ last;
196
+ }
197
+ }
198
+ return $split_point;
199
+ }
200
+
201
+ # Computes segment length without surrounding silence.
202
+ sub GetSegmentLengthNoSil {
203
+ my ($aligned_ctm, $seg_start_index, $seg_end_index) = @_;
204
+ while ($seg_start_index < scalar(@{$aligned_ctm}) &&
205
+ $aligned_ctm->[$seg_start_index]->[0] eq "<eps>") {
206
+ $seg_start_index += 1;
207
+ }
208
+ while ($seg_end_index >= 0 &&
209
+ $aligned_ctm->[$seg_end_index]->[0] eq "<eps>") {
210
+ $seg_end_index -= 1;
211
+ }
212
+ if ($seg_start_index > $seg_end_index) {
213
+ return 0;
214
+ }
215
+ my $current_seg_length = $aligned_ctm->[$seg_end_index]->[1] +
216
+ $aligned_ctm->[$seg_end_index]->[2] -
217
+ $aligned_ctm->[$seg_start_index]->[1];
218
+ return $current_seg_length;
219
+ }
220
+
221
+ # Force splits long segments.
222
+ sub SplitLongSegment {
223
+ my ($aligned_ctm, $wav_id, $max_seg_length, $min_sil_length,
224
+ $seg_start_index, $seg_end_index, $current_seg_count, $SO, $TO) = @_;
225
+ # If the segment is too long, we manually split it. We make sure that the
226
+ # resulting segments are at least ($max_seg_length / 2) seconds long.
227
+ my $current_seg_length = $aligned_ctm->[$seg_end_index]->[1] +
228
+ $aligned_ctm->[$seg_end_index]->[2] -
229
+ $aligned_ctm->[$seg_start_index]->[1];
230
+ my $current_seg_index = $seg_start_index;
231
+ my $aligned_ctm_size = keys($aligned_ctm);
232
+ while ($current_seg_length > 1.5 * $max_seg_length && $current_seg_index < $aligned_ctm_size-1) {
233
+ my $split_point = GetSplitPoint($aligned_ctm, $current_seg_index,
234
+ $seg_end_index, $max_seg_length);
235
+ my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length,
236
+ $min_seg_length, $current_seg_index, $split_point,
237
+ $current_seg_count, $SO, $TO);
238
+ $current_seg_count += 1 if ($ans != -1);
239
+ $current_seg_index = $split_point + 1;
240
+ $current_seg_length = $aligned_ctm->[$seg_end_index]->[1] +
241
+ $aligned_ctm->[$seg_end_index]->[2] -
242
+ $aligned_ctm->[$current_seg_index]->[1];
243
+ }
244
+
245
+ if ($current_seg_index eq $aligned_ctm_size-1) {
246
+ my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length,
247
+ $min_seg_length, $current_seg_index, $current_seg_index,
248
+ $current_seg_count, $SO, $TO);
249
+ $current_seg_count += 1 if ($ans != -1);
250
+ return ($current_seg_count, $current_seg_index);
251
+ }
252
+
253
+ if ($current_seg_length > $max_seg_length) {
254
+ my $split_point = GetSplitPoint($aligned_ctm, $current_seg_index,
255
+ $seg_end_index,
256
+ $current_seg_length / 2.0 + 0.01);
257
+ my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length,
258
+ $min_seg_length, $current_seg_index, $split_point,
259
+ $current_seg_count, $SO, $TO);
260
+ $current_seg_count += 1 if ($ans != -1);
261
+ $current_seg_index = $split_point + 1;
262
+ }
263
+
264
+ my $split_point = GetSplitPoint($aligned_ctm, $current_seg_index,
265
+ $seg_end_index, $max_seg_length + 0.01);
266
+ my $ans = PrintSegment($aligned_ctm, $wav_id, $min_sil_length,
267
+ $min_seg_length, $current_seg_index, $split_point,
268
+ $current_seg_count, $SO, $TO);
269
+ $current_seg_count += 1 if ($ans != -1);
270
+ $current_seg_index = $split_point + 1;
271
+
272
+ return ($current_seg_count, $current_seg_index);
273
+ }
274
+
275
+ # Processes each wav file.
276
+ sub ProcessWav {
277
+ my ($max_seg_length, $min_seg_length, $min_sil_length, $special_symbol,
278
+ $current_ctm, $current_align, $SO, $TO, $ACT) = @_;
279
+
280
+ my $wav_id = $current_ctm->[0]->[0];
281
+ my $channel_id = $current_ctm->[0]->[1];
282
+ defined($wav_id) || die "Error: empty wav section\n";
283
+
284
+ # First, we have to align the ctm file to the Levenshtein alignment.
285
+ # @aligned_ctm is a list of the following:
286
+ # [word, start_time, duration, num_errors]
287
+ my $ctm_index = 0;
288
+ my @aligned_ctm = ();
289
+ foreach my $entry (@{$current_align}) {
290
+ my $ref_word = $entry->[0];
291
+ my $hyp_word = $entry->[1];
292
+ if ($hyp_word eq $special_symbol) {
293
+ # Case 1: deletion, $hyp does not correspond to a word in the ctm file.
294
+ my $start = 0.0; my $dur = 0.0;
295
+ if (defined($aligned_ctm[-1])) {
296
+ $start = $aligned_ctm[-1]->[1] + $aligned_ctm[-1]->[2];
297
+ }
298
+ push(@aligned_ctm, [$ref_word, $start, $dur, 1]);
299
+ } else {
300
+ # Case 2: non-deletion, now $hyp corresponds to a word in ctm file.
301
+ while ($current_ctm->[$ctm_index]->[4] eq "<eps>") {
302
+ # Case 2.1: ctm contains silence at the corresponding place.
303
+ push(@aligned_ctm, ["<eps>", $current_ctm->[$ctm_index]->[2],
304
+ $current_ctm->[$ctm_index]->[3], 0]);
305
+ $ctm_index += 1;
306
+ }
307
+ my $ctm_word = $current_ctm->[$ctm_index]->[4];
308
+ $hyp_word eq $ctm_word ||
309
+ die "Error: got word $hyp_word in alignment but $ctm_word in ctm\n";
310
+ my $start = $current_ctm->[$ctm_index]->[2];
311
+ my $dur = $current_ctm->[$ctm_index]->[3];
312
+ if ($ref_word ne $ctm_word) {
313
+ if ($ref_word eq $special_symbol) {
314
+ # Case 2.2: insertion, we propagate the duration and error to the
315
+ # previous one.
316
+ if (defined($aligned_ctm[-1])) {
317
+ $aligned_ctm[-1]->[2] += $dur;
318
+ $aligned_ctm[-1]->[3] += 1;
319
+ } else {
320
+ push(@aligned_ctm, ["<eps>", $start, $dur, 1]);
321
+ }
322
+ } else {
323
+ # Case 2.3: substitution.
324
+ push(@aligned_ctm, [$ref_word, $start, $dur, 1]);
325
+ }
326
+ } else {
327
+ # Case 2.4: correct.
328
+ push(@aligned_ctm, [$ref_word, $start, $dur, 0]);
329
+ }
330
+ $ctm_index += 1;
331
+ }
332
+ }
333
+
334
+ # Save the aligned CTM if needed
335
+ if(defined($ACT)){
336
+ for (my $i = 0; $i <= $#aligned_ctm; $i++) {
337
+ print $ACT "$wav_id $channel_id $aligned_ctm[$i][1] $aligned_ctm[$i][2] ";
338
+ print $ACT "$aligned_ctm[$i][0] $aligned_ctm[$i][3]\n";
339
+ }
340
+ }
341
+
342
+ # Second, we create segments from @aligned_ctm, using a simple greedy method.
343
+ my $current_seg_index = 0;
344
+ my $current_seg_count = 0;
345
+ for (my $x = 0; $x < @aligned_ctm; $x += 1) {
346
+ my $lcorrect = "true"; my $rcorrect = "true";
347
+ $lcorrect = "false" if ($x > 0 && $aligned_ctm[$x - 1]->[3] > 0);
348
+ $rcorrect = "false" if ($x < @aligned_ctm - 1 &&
349
+ $aligned_ctm[$x + 1]->[3] > 0);
350
+
351
+ my $current_seg_length = GetSegmentLengthNoSil(\@aligned_ctm,
352
+ $current_seg_index, $x);
353
+
354
+ # We split the audio if the silence is longer than the requested silence
355
+ # length, and if there are no alignment errors around it. We also make sure
356
+ # that the segment contains actual words, instead of pure silence.
357
+ if ($aligned_ctm[$x]->[0] eq "<eps>" &&
358
+ $aligned_ctm[$x]->[2] >= $min_sil_length
359
+ && (($force_correct_boundary_words && $lcorrect eq "true" &&
360
+ $rcorrect eq "true") || !$force_correct_boundary_words)) {
361
+ if ($current_seg_length <= $max_seg_length &&
362
+ $current_seg_length >= $min_seg_length) {
363
+ my $ans = PrintSegment(\@aligned_ctm, $wav_id, $min_sil_length,
364
+ $min_seg_length, $current_seg_index, $x,
365
+ $current_seg_count, $SO, $TO);
366
+ $current_seg_count += 1 if ($ans != -1);
367
+ $current_seg_index = $x + 1;
368
+ } elsif ($current_seg_length > $max_seg_length) {
369
+ ($current_seg_count, $current_seg_index)
370
+ = SplitLongSegment(\@aligned_ctm, $wav_id, $max_seg_length,
371
+ $min_sil_length, $current_seg_index, $x,
372
+ $current_seg_count, $SO, $TO);
373
+ }
374
+ }
375
+ }
376
+
377
+ # Last segment.
378
+ if ($current_seg_index <= @aligned_ctm - 1) {
379
+ SplitLongSegment(\@aligned_ctm, $wav_id, $max_seg_length, $min_sil_length,
380
+ $current_seg_index, @aligned_ctm - 1,
381
+ $current_seg_count, $SO, $TO);
382
+ }
383
+ }
384
+
385
+ # Insert <eps> as silence so the downstream processing will be easier. Example:
386
+ #
387
+ # Input ctm:
388
+ # 011 A 3.39 0.23 SELL
389
+ # 011 A 3.62 0.18 OFF
390
+ # 011 A 3.83 0.45 ASSETS
391
+ #
392
+ # Output ctm:
393
+ # 011 A 3.39 0.23 SELL
394
+ # 011 A 3.62 0.18 OFF
395
+ # 011 A 3.80 0.03 <eps>
396
+ # 011 A 3.83 0.45 ASSETS
397
+ sub InsertSilence {
398
+ my ($ctm_in, $ctm_out) = @_;
399
+ for (my $x = 1; $x < @{$ctm_in}; $x += 1) {
400
+ push(@{$ctm_out}, $ctm_in->[$x - 1]);
401
+
402
+ my $new_start = sprintf("%.2f",
403
+ $ctm_in->[$x - 1]->[2] + $ctm_in->[$x - 1]->[3]);
404
+ if ($new_start < $ctm_in->[$x]->[2]) {
405
+ my $new_dur = sprintf("%.2f", $ctm_in->[$x]->[2] - $new_start);
406
+ push(@{$ctm_out}, [$ctm_in->[$x - 1]->[0], $ctm_in->[$x - 1]->[1],
407
+ $new_start, $new_dur, "<eps>"]);
408
+ }
409
+ }
410
+ push(@{$ctm_out}, $ctm_in->[@{$ctm_in} - 1]);
411
+ }
412
+
413
+ # Reads the alignment.
414
+ my %aligned = ();
415
+ while (<AI>) {
416
+ chomp;
417
+ my @col = split;
418
+ @col >= 2 || die "Error: bad line $_\n";
419
+ my $wav = shift @col;
420
+ my @pairs = split(" $separator ", join(" ", @col));
421
+ for (my $x = 0; $x < @pairs; $x += 1) {
422
+ my @col1 = split(" ", $pairs[$x]);
423
+ @col1 == 2 || die "Error: bad pair $pairs[$x]\n";
424
+ $pairs[$x] = \@col1;
425
+ }
426
+ ! defined($aligned{$wav}) || die "Error: $wav has already been processed\n";
427
+ $aligned{$wav} = \@pairs;
428
+ }
429
+
430
+ # Reads the ctm file and creates the segmentation.
431
+ my $previous_wav_id = "";
432
+ my $previous_channel_id = "";
433
+ my @current_wav = ();
434
+ while (<CI>) {
435
+ chomp;
436
+ my @col = split;
437
+ @col >= 5 || die "Error: bad line $_\n";
438
+ if ($previous_wav_id eq $col[0] && $previous_channel_id eq $col[1]) {
439
+ push(@current_wav, \@col);
440
+ } else {
441
+ if (@current_wav > 0) {
442
+ defined($aligned{$previous_wav_id}) ||
443
+ die "Error: no alignment info for $previous_wav_id\n";
444
+ my @current_wav_silence = ();
445
+ InsertSilence(\@current_wav, \@current_wav_silence);
446
+ ProcessWav($max_seg_length, $min_seg_length, $min_sil_length,
447
+ $special_symbol, \@current_wav_silence,
448
+ $aligned{$previous_wav_id}, $SO, $TO, $ACT);
449
+ }
450
+ @current_wav = ();
451
+ push(@current_wav, \@col);
452
+ $previous_wav_id = $col[0];
453
+ $previous_channel_id = $col[1];
454
+ }
455
+ }
456
+
457
+ # The last wav file.
458
+ if (@current_wav > 0) {
459
+ defined($aligned{$previous_wav_id}) ||
460
+ die "Error: no alignment info for $previous_wav_id\n";
461
+ my @current_wav_silence = ();
462
+ InsertSilence(\@current_wav, \@current_wav_silence);
463
+ ProcessWav($max_seg_length, $min_seg_length, $min_sil_length, $special_symbol,
464
+ \@current_wav_silence, $aligned{$previous_wav_id}, $SO, $TO, $ACT);
465
+ }
466
+
467
+ close(CI);
468
+ close(AI);
469
+ close($SO);
470
+ close($TO);
471
+ close($ACT) if defined($ACT);
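# Example (file names are hypothetical): turning a decoded ctm plus the
# align-text output into a resegmented data directory, dropping segments whose
# WER against the reference exceeds 50%:
#
#   steps/cleanup/create_segments_from_ctm.pl --wer-cutoff 0.5 \
#     train_split.ctm train_split.aligned.txt \
#     data/train_reseg/segments data/train_reseg/text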
tools/ASR_2ch_track/steps/cleanup/debug_lexicon.sh ADDED
@@ -0,0 +1,200 @@
+ #!/bin/bash
+ # Copyright 2014 Johns Hopkins University (Author: Daniel Povey)
+ # Apache 2.0
+
+ # this script gets some stats that will help you debug the lexicon.
+
+ # Begin configuration section.
+ stage=1
+ remove_stress=false
+ nj=10 # number of jobs for various decoding-type things that we run.
+ cmd=run.pl
+ alidir=
+ # End configuration section
+
+ echo "$0 $@" # Print the command line for logging
+
+ [ -f path.sh ] && . ./path.sh # source the path.
+ . parse_options.sh || exit 1;
+
+ if [ $# != 5 ]; then
+ echo "usage: $0 <data-dir> <lang-dir> <src-dir> <src-dict> <dir>"
+ echo "e.g.: $0 data/train data/lang exp/tri4b data/local/dict/lexicon.txt exp/debug_lexicon"
+ echo "main options (for others, see top of script file)"
+ echo " --nj <nj> # number of parallel jobs"
+ echo " --cmd <cmd> # command to run jobs, e.g. run.pl,queue.pl"
+ echo " --stage <stage> # use to control partial reruns."
+ echo " --remove-stress <true|false> # if true, remove stress before printing analysis"
+ echo " # note: if you change this, you only have to rerun"
+ echo " # from stage 10."
+ echo " --alidir <alignment-dir> # if supplied, training-data alignments and transforms"
+ echo " # are obtained from here instead of being generated."
+ exit 1;
+ fi
+
+ data=$1
+ lang=$2
+ src=$3
+ srcdict=$4
+ dir=$5
+
+ set -e
+
+ for f in $data/feats.scp $lang/phones.txt $src/final.mdl $srcdict; do
+ [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1;
+ done
+
+ if [ -z $alidir ]; then
+ alidir=${src}_ali_$(basename $data)
+ if [ $stage -le 1 ]; then
+ steps/align_fmllr.sh --cmd "$cmd" --nj $nj $data $lang $src $alidir
+ fi
+ fi
+
+ phone_lang=data/$(basename $lang)_phone_bg
+
+ if [ $stage -le 2 ]; then
+ utils/make_phone_bigram_lang.sh $lang $alidir $phone_lang
+ fi
+
+ if [ $stage -le 3 ]; then
+ utils/mkgraph.sh $phone_lang $src $src/graph_phone_bg
+ fi
+
+ if [ $stage -le 4 ]; then
+ steps/decode_si.sh --skip-scoring true \
+ --cmd "$cmd" --nj $nj --transform-dir $alidir \
+ --acwt 0.25 --beam 10.0 --lattice-beam 5.0 --max-active 2500 \
+ $src/graph_phone_bg $data $src/decode_$(basename $data)_phone_bg
+ fi
+
+ if [ $stage -le 5 ]; then
+ steps/get_train_ctm.sh --print-silence true --use-segments false \
+ --cmd "$cmd" $data $lang $alidir
+ fi
+
+ if [ $stage -le 6 ]; then
+ steps/get_ctm.sh --use-segments false --cmd "$cmd" --min-lmwt 3 --max-lmwt 8 \
+ $data $phone_lang $src/decode_$(basename $data)_phone_bg
+ fi
+
+ if [ $stage -le 7 ]; then
+ mkdir -p $dir
+ # lmwt=4 corresponds to the scale we decoded at.
+ cp $src/decode_$(basename $data)_phone_bg/score_4/$(basename $data).ctm $dir/phone.ctm
+
+ cp $alidir/ctm $dir/word.ctm
+ fi
+
+ if [ $stage -le 8 ]; then
+ # we'll use 'sort' to do most of the heavy lifting when processing the data.
+ # suppose word.ctm has an entry like
+ # sw02054 A 213.32 0.24 and
+ # we'll convert it into two entries like this, with the start and end separately:
+ # sw02054-A 0021332 START and
+ # sw02054-A 0021356 END and
+ #
+ # and suppose phone.ctm has lines like
+ # sw02054 A 213.09 0.24 sil
+ # sw02054 A 213.33 0.13 ae_B
+ # we'll convert them into lines where the time is derived from the midpoint of the phone, like
+ # sw02054-A 0021321 PHONE sil
+ # sw02054-A 0021340 PHONE ae_B
+ # and then we'll remove the optional-silence phones and, if needed, the word-boundary markers from
+ # the phones, to get just
+ # sw02054-A 0021340 PHONE ae
+ # then after sorting and merge-sorting the two ctm files we can easily
+ # work out, for each word, what the phones were during that time.
+
+ grep -v '<eps>' $phone_lang/phones.txt | awk '{print $1, $1}' | \
+ sed 's/_B$//' | sed 's/_I$//' | sed 's/_E$//' | sed 's/_S$//' >$dir/phone_map.txt
+
+ cat $dir/phone.ctm | utils/apply_map.pl -f 5 $dir/phone_map.txt > $dir/phone_mapped.ctm
+
+ export LC_ALL=C
+
+ cat $dir/word.ctm | awk '{printf("%s-%s %09d START %s\n", $1, $2, 100*$3, $5); printf("%s-%s %09d END %s\n", $1, $2, 100*($3+$4), $5);}' | \
+ sort >$dir/word_processed.ctm
+
+ cat $dir/phone_mapped.ctm | awk '{printf("%s-%s %09d PHONE %s\n", $1, $2, 100*($3+(0.5*$4)), $5);}' | \
+ sort >$dir/phone_processed.ctm
+
+ # merge-sort both ctm's
+ sort -m $dir/word_processed.ctm $dir/phone_processed.ctm > $dir/combined.ctm
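+ # For example (illustrative, continuing the entries above), the stretch of
+ # combined.ctm covering the word "and" would interleave as:
+ # sw02054-A 0021332 START and
+ # sw02054-A 0021340 PHONE ae
+ # sw02054-A 0021346 PHONE n
+ # sw02054-A 0021351 PHONE d
+ # sw02054-A 0021356 END and
+ # so every PHONE line falling between a START/END pair gives the
+ # pronunciation that was actually used for that word token.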
+
+ fi
+
+ if [ $stage -le 9 ]; then
+ awk '{print $3, $4}' $dir/combined.ctm | \
+ perl -e ' while (<>) { chop; @A = split(" ", $_); ($a,$b) = @A;
+ if ($a eq "START") { $cur_word = $b; @phones = (); }
+ if ($a eq "END") { print $cur_word, " ", join(" ", @phones), "\n"; }
+ if ($a eq "PHONE") { push @phones, $b; }} ' | sort | uniq -c | sort -nr > $dir/prons.txt
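+ # Each line of prons.txt has the form "<count> <word> <observed-phones>",
+ # e.g. "1234 and ae n d" (count illustrative): how often that word token
+ # was decoded with that particular phone sequence.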
+ fi
+
+ if [ $stage -le 10 ]; then
+ if $remove_stress; then
+ perl -e 'while(<>) { @A=split(" ", $_); for ($n=1;$n<@A;$n++) { $A[$n] =~ s/[0-9]$//; } print join(" ", @A) . "\n"; } ' \
+ <$srcdict >$dir/lexicon.txt
+ else
+ cp $srcdict $dir/lexicon.txt
+ fi
+ silphone=$(cat $phone_lang/phones/optional_silence.txt)
+ echo "<eps> $silphone" >> $dir/lexicon.txt
+
+ awk '{count[$2] += $1;} END {for (w in count){print w, count[w];}}' \
+ <$dir/prons.txt >$dir/counts.txt
+
+
+
+ cat $dir/prons.txt | \
+ if $remove_stress; then
+ perl -e 'while(<>) { @A=split(" ", $_); for ($n=1;$n<@A;$n++) { $A[$n] =~ s/[0-9]$//; } print join(" ", @A) . "\n"; } '
+ else
+ cat
+ fi | perl -e '
+ print ";; <count-of-this-pron> <rank-of-this-pron> <frequency-of-this-pron> CORRECT|INCORRECT <word> <pron>\n";
+ open(D, "<$ARGV[0]") || die "opening dict file $ARGV[0]";
+ # create a hash of all reference pronunciations, and for each word, record
+ # a list of the prons, separated by " | ".
+ while (<D>) {
+ @A = split(" ", $_); $is_pron{join(" ",@A)} = 1;
+ $w = shift @A;
+ if (!defined $prons{$w}) { $prons{$w} = join(" ", @A); }
+ else { $prons{$w} = $prons{$w} . " | " . join(" ", @A); }
+ }
+ open(C, "<$ARGV[1]") || die "opening counts file $ARGV[1];";
+ while (<C>) { @A = split(" ", $_); $word_count{$A[0]} = $A[1]; }
+ while (<STDIN>) { @A = split(" ", $_);
+ $count = shift @A; $word = $A[0]; $freq = sprintf("%0.2f", $count / $word_count{$word});
+ $rank = ++$wcount{$word}; # 1 if top observed pron of word, 2 if second...
+ $str = (defined $is_pron{join(" ", @A)} ? "CORRECT" : "INCORRECT");
+ shift @A;
+ print "$count $rank $freq $str $word \"" . join(" ", @A) . "\", ref = \"$prons{$word}\"\n";
+ } ' $dir/lexicon.txt $dir/counts.txt >$dir/pron_info.txt
+
+ grep -v '^;;' $dir/pron_info.txt | \
+ awk '{ word=$5; count=$1; if (tot[word] == 0) { first_line[word] = $0; }
+ corr[word] += ($4 == "CORRECT" ? count : 0); tot[word] += count; }
+ END {for (w in tot) { printf("%s\t%s\t%s\t\t%s\n", tot[w], w, (corr[w]/tot[w]), first_line[w]); }} ' \
+ | sort -k1 -nr | cat <( echo ';; <total-count-of-word> <word> <correct-proportion> <first-corresponding-line-in-pron_info.txt>') - \
+ > $dir/word_info.txt
+ fi
+
+ if [ $stage -le 11 ]; then
+ echo "$0: some of the more interesting stuff in $dir/pron_info.txt follows."
+ echo "# grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | head -n 20"
+
+ grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | head -n 20
+
+ echo "$0: here are some other interesting things..."
+ echo "# grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | awk '\$3 > 0.4 && \$1 > 10' | head -n 20"
+ grep -w INCORRECT $dir/pron_info.txt | grep -w 1 | awk '$3 > 0.4 && $1 > 10' | head -n 20
+
+ echo "$0: here are some high-frequency words whose reference pronunciations rarely show up."
+ echo "# awk '\$3 < 0.1' $dir/word_info.txt | head -n 20"
+ awk '$3 < 0.1 || $1 == ";;"' $dir/word_info.txt | head -n 20
+
+
+ fi
+
tools/ASR_2ch_track/steps/cleanup/decode_segmentation.sh ADDED
@@ -0,0 +1,133 @@
+ #!/bin/bash
+
+ # Copyright 2014 Guoguo Chen, 2015 GoVivace Inc. (Nagendra Goel)
+ # Apache 2.0
+
+ # Begin configuration section.
+ transform_dir= # this option won't normally be used, but it can be used if you
+ # want to supply existing fMLLR transforms when decoding.
+ iter=
+ model= # You can specify the model to use (e.g. if you want to use the .alimdl)
+ stage=0
+ nj=4
+ cmd=run.pl
+ max_active=7000
+ beam=13.0
+ lattice_beam=6.0
+ acwt=0.083333 # note: only really affects pruning (scoring is on lattices).
+ num_threads=1 # if >1, will use gmm-latgen-faster-parallel
+ parallel_opts= # ignored now.
+ scoring_opts=
+ # note: there are no more min-lmwt and max-lmwt options, instead use
+ # e.g. --scoring-opts "--min-lmwt 1 --max-lmwt 20"
+ skip_scoring=false
+ # End configuration section.
+
+ echo "$0 $@" # Print the command line for logging
+
+ [ -f ./path.sh ] && . ./path.sh; # source the path.
+ . parse_options.sh || exit 1;
+
+ if [ $# != 3 ]; then
+ echo "This is a special decoding script for segmentation where we use one"
+ echo "decoding graph for each segment. We assume a file HCLG.fsts.scp exists"
+ echo "which is the scp file of the graphs for each segment."
+ echo ""
+ echo "Usage: $0 [options] <graph-dir> <data-dir> <decode-dir>"
+ echo " e.g.: $0 exp/tri2b/graph_train_si284_split \\"
+ echo " data/train_si284_split exp/tri2b/decode_train_si284_split"
+ echo ""
+ echo "where <decode-dir> is assumed to be a sub-directory of the directory"
+ echo "where the model is."
+ echo ""
+ echo "main options (for others, see top of script file)"
+ echo " --config <config-file> # config containing options"
+ echo " --nj <nj> # number of parallel jobs"
+ echo " --iter <iter> # Iteration of model to test."
+ echo " --model <model> # which model to use (e.g. to"
+ echo " # specify the final.alimdl)"
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+ echo " --transform-dir <trans-dir> # dir to find fMLLR transforms "
+ echo " --acwt <float> # acoustic scale used for lattice generation "
+ echo " --scoring-opts <string> # options to local/score.sh"
+ echo " --num-threads <n> # number of threads to use, default 1."
+ exit 1;
+ fi
+
+
+ graphdir=$1
+ data=$2
+ dir=$3
+ srcdir=`dirname $dir`; # The model directory is one level up from decoding directory.
+ sdata=$data/split$nj;
+
+ mkdir -p $dir/log
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
+ echo $nj > $dir/num_jobs
+
+ if [ -z "$model" ]; then # if --model <mdl> was not specified on the command line...
+ if [ -z $iter ]; then model=$srcdir/final.mdl;
+ else model=$srcdir/$iter.mdl; fi
+ fi
+
+ for f in $sdata/1/feats.scp $sdata/1/cmvn.scp $model $graphdir/HCLG.fsts.scp; do
+ [ ! -f $f ] && echo "$0: no such file $f" && exit 1;
+ done
+
+ # Figures out the proper HCLG.
+ sort -k1,1 -u < $graphdir/HCLG.fsts.scp > $graphdir/HCLG.fsts.scp.sorted
+ mv $graphdir/HCLG.fsts.scp.sorted $graphdir/HCLG.fsts.scp
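+ # Each line of HCLG.fsts.scp is an ordinary Kaldi scp entry mapping an
+ # utterance id to the rxfilename its graph is read from, e.g.
+ # "utt1 graphs/HCLG.fsts.ark:12345" (file name and offset illustrative).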
+ for x in `seq 1 $nj`; do
+ cat $graphdir/HCLG.fsts.scp |\
+ utils/filter_scp.pl -f 1 $sdata/$x/feats.scp > $sdata/$x/graphs.scp
+ num_feats=`cat $sdata/$x/feats.scp | wc -l`
+ num_graphs=`cat $sdata/$x/graphs.scp | wc -l`
+ if [ $num_graphs -ne $num_feats ]; then
+ echo "$0: number of graphs is not consistent with number of features."
+ exit 1
+ fi
+ done
+ HCLG="scp:$sdata/JOB/graphs.scp"
+
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
+ echo "$0: feature type is $feat_type";
+
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
+ delta_opts=`cat $srcdir/delta_opts 2>/dev/null`
+
+ thread_string=
+ [ $num_threads -gt 1 ] && thread_string="-parallel --num-threads=$num_threads"
+
+ case $feat_type in
+ delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";;
+ lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |";;
+ *) echo "Invalid feature type $feat_type" && exit 1;
+ esac
+ if [ ! -z "$transform_dir" ]; then # add transforms to features...
+ echo "Using fMLLR transforms from $transform_dir"
+ [ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist."
+ [ "`cat $transform_dir/num_jobs`" -ne $nj ] && \
+ echo "Mismatch in number of jobs with $transform_dir";
+ feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |"
+ fi
+
+ if [ $stage -le 0 ]; then
+ if [ -f "$graphdir/num_pdfs" ]; then
+ [ "`cat $graphdir/num_pdfs`" -eq `am-info --print-args=false $model | grep pdfs | awk '{print $NF}'` ] || \
+ { echo "Mismatch in number of pdfs with $model"; exit 1; }
+ fi
+ $cmd --num-threads $num_threads JOB=1:$nj $dir/log/decode.JOB.log \
+ gmm-latgen-faster$thread_string --max-active=$max_active --beam=$beam --lattice-beam=$lattice_beam \
+ --acoustic-scale=$acwt --allow-partial=true --word-symbol-table=$graphdir/words.txt \
+ $model "$HCLG" "$feats" "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1;
+ fi
+
+ if ! $skip_scoring ; then
+ [ ! -x local/score.sh ] && \
+ echo "Not scoring because local/score.sh does not exist or is not executable." && exit 1;
+ local/score.sh --cmd "$cmd" $scoring_opts $data $graphdir $dir ||
+ { echo "$0: Scoring failed. (ignore by '--skip-scoring true')"; exit 1; }
+ fi
+
+ exit 0;
tools/ASR_2ch_track/steps/cleanup/find_bad_utts.sh ADDED
@@ -0,0 +1,193 @@
+ #!/bin/bash
+ # Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey)
+ # Apache 2.0
+
+ # Computes training alignments using a model with delta or
+ # LDA+MLLT features. This version, rather than just using the
+ # text to align, computes mini-language models (unigram) from the text
+ # and a few common words in the LM.
+
+ # Begin configuration section.
+ nj=4
+ cmd=run.pl
+ use_graphs=false
+ # Begin configuration.
+ scale_opts="--transition-scale=1.0 --self-loop-scale=0.1"
+ acoustic_scale=0.1
+ beam=15.0
+ lattice_beam=8.0
+ max_active=750
+ transform_dir= # directory to find fMLLR transforms in.
+ top_n_words=100 # Number of common words that we compile into each graph (most frequent
+ # in $lang/text).
+ stage=-1
+ cleanup=true
+ # End configuration options.
+
+ echo "$0 $@" # Print the command line for logging
+
+ [ -f path.sh ] && . ./path.sh # source the path.
+ . parse_options.sh || exit 1;
+
+ if [ $# != 4 ]; then
+ echo "usage: $0 <data-dir> <lang-dir> <src-dir> <dir>"
+ echo "e.g.: $0 data/train data/lang exp/tri1 exp/tri1_debug"
+ echo "main options (for others, see top of script file)"
+ echo " --config <config-file> # config containing options"
+ echo " --nj <nj> # number of parallel jobs"
+ echo " --use-graphs true # use graphs in src-dir"
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+ exit 1;
+ fi
+
+ data=$1
+ lang=$2
+ srcdir=$3
+ dir=$4
+
+ for f in $data/text $lang/oov.int $srcdir/tree $srcdir/final.mdl \
+ $lang/L_disambig.fst $lang/phones/disambig.int; do
+ [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1;
+ done
+
+ oov=`cat $lang/oov.int` || exit 1;
+ mkdir -p $dir/log
+ echo $nj > $dir/num_jobs
+ sdata=$data/split$nj
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
+
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
+
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
+ cp $srcdir/final.occs $dir;
+
+
+ if [ $stage -le 0 ]; then
+ utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt <$data/text | \
+ awk '{for(x=2;x<=NF;x++) print $x;}' | sort | uniq -c | \
+ sort -rn > $dir/word_counts.int || exit 1;
+ num_words=$(awk '{x+=$1} END{print x}' < $dir/word_counts.int) || exit 1;
+ # print top-n words with their unigram probabilities.
+
+ head -n $top_n_words $dir/word_counts.int | awk -v tot=$num_words '{print $1/tot, $2;}' >$dir/top_words.int
+ utils/int2sym.pl -f 2 $lang/words.txt <$dir/top_words.int >$dir/top_words.txt
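+ # Each line of top_words.int has the form "<unigram-probability> <word-id>",
+ # e.g. "0.025 54" (values illustrative). steps/cleanup/make_utterance_fsts.pl
+ # mixes these common words into the per-utterance unigram graphs compiled in
+ # stage 1 below, so the decoder is not restricted to the transcript words.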
+ fi
+
+ if [ -f $srcdir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
+ echo "$0: feature type is $feat_type"
+
+ case $feat_type in
+ delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";;
+ lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $srcdir/final.mat ark:- ark:- |"
+ cp $srcdir/final.mat $srcdir/full.mat $dir
+ ;;
+ *) echo "$0: invalid feature type $feat_type" && exit 1;
+ esac
+ if [ -z "$transform_dir" ] && [ -f $srcdir/trans.1 ]; then
+ transform_dir=$srcdir
+ fi
+ if [ ! -z "$transform_dir" ]; then
+ echo "$0: using transforms from $transform_dir"
+ [ ! -f $transform_dir/trans.1 ] && echo "$0: no such file $transform_dir/trans.1" && exit 1;
+ nj_orig=$(cat $transform_dir/num_jobs)
+ if [ $nj -ne $nj_orig ]; then
+ # Copy the transforms into an archive with an index.
+ for n in $(seq $nj_orig); do cat $transform_dir/trans.$n; done | \
+ copy-feats ark:- ark,scp:$dir/trans.ark,$dir/trans.scp || exit 1;
+ feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/trans.scp ark:- ark:- |"
+ else
+ # number of jobs matches with alignment dir.
+ feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |"
+ fi
+ elif [ -f $srcdir/final.alimdl ]; then
+ echo "$0: **WARNING**: you seem to be using an fMLLR system as input,"
+ echo " but you are not providing the --transform-dir option during alignment."
+ fi
+
+
+ if [ $stage -le 1 ]; then
+ echo "$0: decoding $data using utterance-specific decoding graphs using model from $srcdir, output in $dir"
+
+ rm $dir/edits.*.txt $dir/aligned_ref.*.txt 2>/dev/null
+
+ $cmd JOB=1:$nj $dir/log/decode.JOB.log \
+ utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text \| \
+ steps/cleanup/make_utterance_fsts.pl $dir/top_words.int \| \
+ compile-train-graphs-fsts $scale_opts --read-disambig-syms=$lang/phones/disambig.int \
+ $dir/tree $dir/final.mdl $lang/L_disambig.fst ark:- ark:- \| \
+ gmm-latgen-faster --acoustic-scale=$acoustic_scale --beam=$beam \
+ --max-active=$max_active --lattice-beam=$lattice_beam \
+ --word-symbol-table=$lang/words.txt \
+ $dir/final.mdl ark:- "$feats" ark:- \| \
+ lattice-oracle ark:- "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|" \
+ ark,t:- ark,t:$dir/edits.JOB.txt \| \
+ utils/int2sym.pl -f 2- $lang/words.txt '>' $dir/aligned_ref.JOB.txt || exit 1;
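+ # lattice-oracle writes one line per utterance to edits.JOB.txt, of the form
+ # "<utt-id> <number-of-oracle-errors>", and the oracle word sequence itself
+ # (the path through the lattice closest to the reference) ends up, via
+ # int2sym, in aligned_ref.JOB.txt.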
+ fi
+
+
+ if [ $stage -le 2 ]; then
+ if [ -f $dir/edits.1.txt ]; then
+ # the awk commands below are to ensure that partially-written files don't confuse us.
+ for x in $(seq $nj); do cat $dir/edits.$x.txt; done | awk '{if(NF==2){print;}}' > $dir/edits.txt
+ for x in $(seq $nj); do cat $dir/aligned_ref.$x.txt; done | awk '{if(NF>=1){print;}}' > $dir/aligned_ref.txt
+ else
+ echo "$0: warning: no file $dir/edits.1.txt, using previously concatenated file if present."
+ fi
+
+ # in case any utterances failed to align, get filtered copy of $data/text
+ utils/filter_scp.pl $dir/edits.txt < $data/text > $dir/text
+ cat $dir/text | awk '{print $1, (NF-1);}' > $dir/length.txt
+
+ n1=$(wc -l < $dir/edits.txt)
+ n2=$(wc -l < $dir/aligned_ref.txt)
+ n3=$(wc -l < $dir/text)
+ n4=$(wc -l < $dir/length.txt)
+ if [ $n1 -ne $n2 ] || [ $n2 -ne $n3 ] || [ $n3 -ne $n4 ]; then
+ echo "$0: mismatch in lengths of files:"
+ wc $dir/edits.txt $dir/aligned_ref.txt $dir/text $dir/length.txt
+ exit 1;
+ fi
+
+ # note: the format of all_info.txt is:
+ # <utterance-id> <number of errors> <reference-length> <decoded-output> <reference>
+ # with the fields separated by tabs, e.g.
+ # adg04_sr009_trn 1 12 SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED AT SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED
+
+ paste $dir/edits.txt \
+ <(awk '{print $2}' $dir/length.txt) \
+ <(awk '{$1="";print;}' <$dir/aligned_ref.txt) \
+ <(awk '{$1="";print;}' <$dir/text) > $dir/all_info.txt
+
+ sort -nr -k2 $dir/all_info.txt > $dir/all_info.sorted.txt
+
+ if $cleanup; then
+ rm $dir/edits.*.txt $dir/aligned_ref.*.txt
+ fi
+
+ fi
+
+ if [ $stage -le 3 ]; then
+ ###
+ # These stats might help people figure out what is wrong with the data:
+ # a) human-friendly and machine-parsable alignment in the file per_utt_details.txt
+ # b) evaluation of per-speaker performance, to possibly find speakers with
+ # distinctive accents/speech disorders and similar
+ # c) global analysis of (Ins/Del/Sub) operations, which might be used to figure
+ # out if there is a systematic issue with the lexicon, pronunciations or phonetic confusability
+
+ mkdir -p $dir/analysis
+ align-text --special-symbol="***" ark:$dir/text ark:$dir/aligned_ref.txt ark,t:- | \
+ utils/scoring/wer_per_utt_details.pl --special-symbol "***" > $dir/analysis/per_utt_details.txt
+
+ cat $dir/analysis/per_utt_details.txt | \
+ utils/scoring/wer_per_spk_details.pl $data/utt2spk > $dir/analysis/per_spk_details.txt
+
+ cat $dir/analysis/per_utt_details.txt | \
+ utils/scoring/wer_ops_details.pl --special-symbol "***" | \
+ sort -i -b -k1,1 -k4,4nr -k2,2 -k3,3 > $dir/analysis/ops_details.txt
+
+ fi
+
tools/ASR_2ch_track/steps/cleanup/find_bad_utts_nnet.sh ADDED
@@ -0,0 +1,162 @@
+ #!/bin/bash
+ # Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey)
+ # 2016 Ilya Platonov
+ # Apache 2.0
+ #
+ # Tweaked version of find_bad_utts.sh to work with nnet2 baseline models.
+ #
+ # Begin configuration section.
+ nj=32
+ cmd=run.pl
+ use_graphs=false
+ # Begin configuration.
+ scale_opts="--transition-scale=1.0 --self-loop-scale=0.1"
+ acoustic_scale=0.1
+ beam=15.0
+ lattice_beam=8.0
+ max_active=750
+ transform_dir= # directory to find fMLLR transforms in.
+ top_n_words=100 # Number of common words that we compile into each graph (most frequent
+ # in $lang/text).
+ stage=-1
+ cleanup=true
+ # End configuration options.
+
+ echo "$0 $@" # Print the command line for logging
+
+ [ -f path.sh ] && . ./path.sh # source the path.
+ . parse_options.sh || exit 1;
+
+ if [ $# != 4 ]; then
+ echo "usage: $0 <data-dir> <lang-dir> <src-dir> <dir>"
+ echo "e.g.: $0 data/train data/lang exp/tri1 exp/tri1_debug"
+ echo "main options (for others, see top of script file)"
+ echo " --config <config-file> # config containing options"
+ echo " --nj <nj> # number of parallel jobs"
+ echo " --use-graphs true # use graphs in src-dir"
+ echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+ exit 1;
+ fi
+
+ data=$1
+ lang=$2
+ srcdir=$3
+ dir=$4
+
+ for f in $data/text $lang/oov.int $srcdir/tree $srcdir/final.mdl \
+ $lang/L_disambig.fst $lang/phones/disambig.int; do
+ [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1;
+ done
+
+ oov=`cat $lang/oov.int` || exit 1;
+ mkdir -p $dir/log
+ echo $nj > $dir/num_jobs
+ sdata=$data/split$nj
+ splice_opts=`cat $srcdir/splice_opts 2>/dev/null` # frame-splicing options.
+ cp $srcdir/splice_opts $dir 2>/dev/null # frame-splicing options.
+ cmvn_opts=`cat $srcdir/cmvn_opts 2>/dev/null`
+ cp $srcdir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option.
+
+ [[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
+
+ cp $srcdir/{tree,final.mdl} $dir || exit 1;
+
+
+ if [ $stage -le 0 ]; then
+ utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt <$data/text | \
+ awk '{for(x=2;x<=NF;x++) print $x;}' | sort | uniq -c | \
+ sort -rn > $dir/word_counts.int || exit 1;
+ num_words=$(awk '{x+=$1} END{print x}' < $dir/word_counts.int) || exit 1;
+ # print top-n words with their unigram probabilities.
+
+ head -n $top_n_words $dir/word_counts.int | awk -v tot=$num_words '{print $1/tot, $2;}' >$dir/top_words.int
+ utils/int2sym.pl -f 2 $lang/words.txt <$dir/top_words.int >$dir/top_words.txt
+ fi
+
+ echo "$0: feature type is raw"
+
+ feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |";
+
+ if [ $stage -le 1 ]; then
+ echo "$0: decoding $data using utterance-specific decoding graphs using model from $srcdir, output in $dir"
+
+ rm $dir/edits.*.txt $dir/aligned_ref.*.txt 2>/dev/null
+
+ $cmd JOB=1:$nj $dir/log/decode.JOB.log \
+ utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text \| \
+ steps/cleanup/make_utterance_fsts.pl $dir/top_words.int \| \
+ compile-train-graphs-fsts $scale_opts --read-disambig-syms=$lang/phones/disambig.int \
+ $dir/tree $dir/final.mdl $lang/L_disambig.fst ark:- ark:- \| \
+ nnet-latgen-faster --acoustic-scale=$acoustic_scale --beam=$beam \
+ --max-active=$max_active --lattice-beam=$lattice_beam \
+ --word-symbol-table=$lang/words.txt \
+ $dir/final.mdl ark:- "$feats" ark:- \| \
+ lattice-oracle ark:- "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt $sdata/JOB/text|" \
+ ark,t:- ark,t:$dir/edits.JOB.txt \| \
+ utils/int2sym.pl -f 2- $lang/words.txt '>' $dir/aligned_ref.JOB.txt || exit 1;
+ fi
+
+
+ if [ $stage -le 2 ]; then
+ if [ -f $dir/edits.1.txt ]; then
+ # the awk commands below are to ensure that partially-written files don't confuse us.
+ for x in $(seq $nj); do cat $dir/edits.$x.txt; done | awk '{if(NF==2){print;}}' > $dir/edits.txt
+ for x in $(seq $nj); do cat $dir/aligned_ref.$x.txt; done | awk '{if(NF>=1){print;}}' > $dir/aligned_ref.txt
+ else
+ echo "$0: warning: no file $dir/edits.1.txt, using previously concatenated file if present."
+ fi
+
+ # in case any utterances failed to align, get filtered copy of $data/text
+ utils/filter_scp.pl $dir/edits.txt < $data/text > $dir/text
+ cat $dir/text | awk '{print $1, (NF-1);}' > $dir/length.txt
+
+ n1=$(wc -l < $dir/edits.txt)
+ n2=$(wc -l < $dir/aligned_ref.txt)
+ n3=$(wc -l < $dir/text)
+ n4=$(wc -l < $dir/length.txt)
+ if [ $n1 -ne $n2 ] || [ $n2 -ne $n3 ] || [ $n3 -ne $n4 ]; then
+ echo "$0: mismatch in lengths of files:"
+ wc $dir/edits.txt $dir/aligned_ref.txt $dir/text $dir/length.txt
+ exit 1;
+ fi
+
+ # note: the format of all_info.txt is:
+ # <utterance-id> <number of errors> <reference-length> <decoded-output> <reference>
+ # with the fields separated by tabs, e.g.
+ # adg04_sr009_trn 1 12 SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED AT SHOW THE GRIDLEY+S TRACK IN BRIGHT ORANGE WITH HORNE+S IN DIM RED
+
+ paste $dir/edits.txt \
+ <(awk '{print $2}' $dir/length.txt) \
+ <(awk '{$1="";print;}' <$dir/aligned_ref.txt) \
+ <(awk '{$1="";print;}' <$dir/text) > $dir/all_info.txt
+
+ sort -nr -k2 $dir/all_info.txt > $dir/all_info.sorted.txt
+
+ if $cleanup; then
+ rm $dir/edits.*.txt $dir/aligned_ref.*.txt
+ fi
+
+ fi
+
+ if [ $stage -le 3 ]; then
+ ###
+ # These stats might help people figure out what is wrong with the data:
+ # a) human-friendly and machine-parsable alignment in the file per_utt_details.txt
+ # b) evaluation of per-speaker performance, to possibly find speakers with
+ # distinctive accents/speech disorders and similar
+ # c) global analysis of (Ins/Del/Sub) operations, which might be used to figure
+ # out if there is a systematic issue with the lexicon, pronunciations or phonetic confusability
+
+ mkdir -p $dir/analysis
+ align-text --special-symbol="***" ark:$dir/text ark:$dir/aligned_ref.txt ark,t:- | \
+ utils/scoring/wer_per_utt_details.pl --special-symbol "***" > $dir/analysis/per_utt_details.txt
+
+ cat $dir/analysis/per_utt_details.txt | \
+ utils/scoring/wer_per_spk_details.pl $data/utt2spk > $dir/analysis/per_spk_details.txt
+
+ cat $dir/analysis/per_utt_details.txt | \
+ utils/scoring/wer_ops_details.pl --special-symbol "***" | \
+ sort -i -b -k1,1 -k4,4nr -k2,2 -k3,3 > $dir/analysis/ops_details.txt
+
+ fi
+
tools/ASR_2ch_track/steps/cleanup/make_segmentation_data_dir.sh ADDED
@@ -0,0 +1,206 @@
+ #!/bin/bash
+
+ # Copyright 2014 Guoguo Chen
+ # Apache 2.0
+
+ # Begin configuration section.
+ max_seg_length=10
+ min_seg_length=2
+ min_sil_length=0.5
+ time_precision=0.05
+ special_symbol="<***>"
+ separator=";"
+ wer_cutoff=-1
+ # End configuration section.
+
+ set -e
+
+ echo "$0 $@"
+
+ [ -f ./path.sh ] && . ./path.sh
+ . parse_options.sh || exit 1;
+
+ if [ $# -ne 3 ]; then
+ echo "This script takes the ctm file that corresponds to the data directory"
+ echo "created by steps/cleanup/split_long_utterance.sh, works out a new"
+ echo "segmentation and creates a new data directory for the new segmentation."
+ echo ""
+ echo "Usage: $0 [options] <ctm-file> <old-data-dir> <new-data-dir>"
+ echo " e.g.: $0 train_si284_split.ctm \\"
+ echo " data/train_si284_split data/train_si284_reseg"
+ echo "Options:"
+ echo " --wer-cutoff # ignore segments with WER higher than the"
+ echo " # specified value. -1 means no segment will"
+ echo " # be ignored."
+ echo " --max-seg-length # maximum length of new segments"
+ echo " --min-seg-length # minimum length of new segments"
+ echo " --min-sil-length # minimum length of silence as split point"
+ echo " --time-precision # precision for determining \"same time\""
+ echo " --special-symbol # special symbol to be aligned with"
+ echo " # inserted or deleted words"
+ echo " --separator # separator for aligned pairs"
+ exit 1;
+ fi
+
+ ctm=$1
+ old_data_dir=$2
+ new_data_dir=$3
+
+ for f in $ctm $old_data_dir/text.orig $old_data_dir/utt2spk \
+ $old_data_dir/wav.scp $old_data_dir/segments; do
+ if [ ! -f $f ]; then
+ echo "$0: expected $f to exist"
+ exit 1;
+ fi
+ done
+
+ mkdir -p $new_data_dir/tmp/
+ cp -f $old_data_dir/wav.scp $new_data_dir
+ [ -f $old_data_dir/spk2gender ] && cp -f $old_data_dir/spk2gender $new_data_dir
+
+ # Removes the overlapping region (in utils/split_long_utterance.sh we create
+ # the segmentation with overlapping regions).
+ #
+ # Note that for each audio file, we expect its segments to have been sorted in
+ # time ascending order (if we ignore the overlap).
+ cat $ctm | perl -e '
+ $precision = $ARGV[0];
+ @ctm = ();
+ %processed_ids = ();
+ $previous_id = "";
+ while (<STDIN>) {
+ chomp;
+ my @current = split;
+ @current >= 5 || die "Error: bad line $_\n";
+ $id = join("_", ($current[0], $current[1]));
+ @previous = @{$ctm[-1]};
+
+ # Start of a new audio file.
+ if ($previous_id ne $id) {
+ # Prints existing information.
+ if (@ctm > 0) {
+ foreach $line (@ctm) {
+ print "$line->[0] $line->[1] $line->[2] $line->[3] $line->[4]\n";
+ }
+ }
+
+ # Checks if the ctm file is sorted.
+ if (defined($processed_ids{$id})) {
+ die "Error: \"$current[0] $current[1]\" has already been processed\n";
+ } else {
+ $processed_ids{$id} = 1;
+ }
+
+ @ctm = ();
+ push(@ctm, \@current);
+ $previous_id = $id;
+ next;
+ }
+
+ $new_start = sprintf("%.2f", $previous[2] + $previous[3]);
+
+ if ($new_start > $current[2]) {
+ # Case 2: scans for a splice point.
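+ # The incoming entry overlaps the tail of the previous segment: walk
+ # backwards through @ctm looking for a word that duplicates the incoming
+ # one (same word, with start time and duration equal within $precision).
+ # If found, drop everything after that duplicate and skip the incoming
+ # copy, so the two overlapping segments are spliced together cleanly.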
+ $index = -1;
+ while (defined($ctm[$index])
+ && $ctm[$index]->[2] + $ctm[$index]->[3] > $current[2]) {
+ if ($ctm[$index]->[4] eq $current[4]
+ && abs($ctm[$index]->[2] - $current[2]) < $precision
+ && abs($ctm[$index]->[3] - $current[3]) < $precision) {
+ pop @ctm for 2..abs($index);
+ last;
+ } else {
+ $index -= 1;
+ }
+ }
+ } else {
+ push(@ctm, \@current);
+ }
+ }
+
+ if (@ctm > 0) {
+ foreach $line (@ctm) {
+ print "$line->[0] $line->[1] $line->[2] $line->[3] $line->[4]\n";
+ }
+ }' $time_precision > $new_data_dir/tmp/ctm
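+ # tmp/ctm now holds one overlap-free line per word, in the standard ctm
+ # layout "<wav-id> <channel> <start> <duration> <word>", e.g.
+ # "sw02054 A 213.32 0.24 and" (entry illustrative).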
+
+ # Creates a text file from the ctm, which will be used in Levenshtein alignment.
+ # Note that we remove <eps> in the text file.
+ cat $new_data_dir/tmp/ctm | perl -e '
+ $previous_wav = "";
+ $previous_channel = "";
+ $text = "";
+ while (<STDIN>) {
+ chomp;
+ @col = split;
+ @col >= 5 || die "Error: bad line $_\n";
+ if ($previous_wav eq $col[0]) {
+ $previous_channel eq $col[1] ||
+ die "Error: more than one channel detected\n";
+ if ($col[4] ne "<eps>") {
+ $text .= " $col[4]";
+ }
+ } else {
+ if ($text ne "") {
+ print "$previous_wav $text\n";
+ }
+ $text = $col[4];
+ $previous_wav = $col[0];
+ $previous_channel = $col[1];
+ }
+ }
+ if ($text ne "") {
+ print "$previous_wav $text\n";
+ }' > $new_data_dir/tmp/text
+
+ # Computes the Levenshtein alignment.
+ align-text --special-symbol=$special_symbol --separator=$separator \
+ ark:$old_data_dir/text.orig ark:$new_data_dir/tmp/text \
+ ark,t:$new_data_dir/tmp/aligned.txt
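+ # Each line of tmp/aligned.txt pairs the original transcript with the
+ # re-decoded text, e.g. "utt1 hello;hello there;<***> world;world"
+ # (illustrative): every pair is <ref>;<hyp> (using the configured separator),
+ # with the special symbol standing in for the missing side of an insertion
+ # or deletion.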
+
+ # Creates new segmentation.
+ steps/cleanup/create_segments_from_ctm.pl \
+ --max-seg-length $max_seg_length --min-seg-length $min_seg_length \
+ --min-sil-length $min_sil_length \
+ --separator $separator --special-symbol $special_symbol \
+ --wer-cutoff $wer_cutoff \
+ $new_data_dir/tmp/ctm $new_data_dir/tmp/aligned.txt \
+ $new_data_dir/segments $new_data_dir/text
+
+ # Now creates the new utt2spk and spk2utt file.
+ cat $old_data_dir/utt2spk | perl -e '
+ ($old_seg_file, $new_seg_file, $utt2spk_file_out) = @ARGV;
+ open(OS, "<$old_seg_file") || die "Error: fail to open $old_seg_file\n";
+ open(NS, "<$new_seg_file") || die "Error: fail to open $new_seg_file\n";
+ open(UO, ">$utt2spk_file_out") ||
+ die "Error: fail to open $utt2spk_file_out\n";
+ while (<STDIN>) {
+ chomp;
+ @col = split;
+ @col == 2 || die "Error: bad line $_\n";
+ $utt2spk{$col[0]} = $col[1];
+ }
+ while (<OS>) {
+ chomp;
+ @col = split;
+ @col == 4 || die "Error: bad line $_\n";
+ if (defined($wav2spk{$col[1]})) {
+ $wav2spk{$col[1]} eq $utt2spk{$col[0]} ||
+ die "Error: multiple speakers detected for wav file $col[1]\n";
+ } else {
+ $wav2spk{$col[1]} = $utt2spk{$col[0]};
+ }
+ }
+ while (<NS>) {
+ chomp;
+ @col = split;
+ @col == 4 || die "Error: bad line $_\n";
+ defined($wav2spk{$col[1]}) ||
+ die "Error: could not find speaker for wav file $col[1]\n";
+ print UO "$col[0] $wav2spk{$col[1]}\n";
+ } ' $old_data_dir/segments $new_data_dir/segments $new_data_dir/utt2spk
+ utils/utt2spk_to_spk2utt.pl $new_data_dir/utt2spk > $new_data_dir/spk2utt
+
+ utils/fix_data_dir.sh $new_data_dir
+
+ exit 0;