JinchuanTian committed · verified
Commit e5a8fdd · 1 Parent(s): 347b635

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +33 -0
  2. UniAudio/UniAudio/download.sh +8 -0
  3. UniAudio/UniAudio/egs/SE/cmd.sh +110 -0
  4. UniAudio/UniAudio/egs/SE/data_scripts/create_data_json.py +42 -0
  5. UniAudio/UniAudio/egs/SE/data_scripts/filter_pt.py +17 -0
  6. UniAudio/UniAudio/egs/SE/data_scripts/filter_scp.py +29 -0
  7. UniAudio/UniAudio/egs/SE/data_scripts/find_peer_utts.py +45 -0
  8. UniAudio/UniAudio/egs/SE/data_scripts/merge_then_split.py +105 -0
  9. UniAudio/UniAudio/egs/SE/data_scripts/offline_tokenization.py +161 -0
  10. UniAudio/UniAudio/egs/SE/data_scripts/select_spk2utt.py +31 -0
  11. UniAudio/UniAudio/egs/SE/local/data.sh +97 -0
  12. UniAudio/UniAudio/egs/SE/local/data_prep.sh +83 -0
  13. UniAudio/UniAudio/egs/SE/local/download_and_untar.sh +95 -0
  14. UniAudio/UniAudio/egs/SE/local/trim_all_silence.py +113 -0
  15. UniAudio/UniAudio/egs/SE/make_noisy.py +205 -0
  16. UniAudio/UniAudio/egs/SE/path.sh +9 -0
  17. UniAudio/UniAudio/egs/SE/readme.md +5 -0
  18. UniAudio/UniAudio/egs/SE/run.sh +206 -0
  19. UniAudio/UniAudio/egs/SE/utils/add_disambig.pl +58 -0
  20. UniAudio/UniAudio/egs/SE/utils/add_lex_disambig.pl +196 -0
  21. UniAudio/UniAudio/egs/SE/utils/analyze_segments.pl +43 -0
  22. UniAudio/UniAudio/egs/SE/utils/apply_map.pl +97 -0
  23. UniAudio/UniAudio/egs/SE/utils/best_wer.sh +32 -0
  24. UniAudio/UniAudio/egs/SE/utils/build_const_arpa_lm.sh +53 -0
  25. UniAudio/UniAudio/egs/SE/utils/build_kenlm_model_from_arpa.sh +44 -0
  26. UniAudio/UniAudio/egs/SE/utils/combine_data.sh +146 -0
  27. UniAudio/UniAudio/egs/SE/utils/convert_ctm.pl +96 -0
  28. UniAudio/UniAudio/egs/SE/utils/convert_slf.pl +302 -0
  29. UniAudio/UniAudio/egs/SE/utils/convert_slf_parallel.sh +71 -0
  30. UniAudio/UniAudio/egs/SE/utils/copy_data_dir.sh +149 -0
  31. UniAudio/UniAudio/egs/SE/utils/create_data_link.pl +132 -0
  32. UniAudio/UniAudio/egs/SE/utils/create_split_dir.pl +92 -0
  33. UniAudio/UniAudio/egs/SE/utils/ctm/convert_ctm.pl +96 -0
  34. UniAudio/UniAudio/egs/SE/utils/ctm/fix_ctm.sh +32 -0
  35. UniAudio/UniAudio/egs/SE/utils/ctm/resolve_ctm_overlaps.py +324 -0
  36. UniAudio/UniAudio/egs/SE/utils/data/combine_data.sh +146 -0
  37. UniAudio/UniAudio/egs/SE/utils/data/combine_short_segments.sh +187 -0
  38. UniAudio/UniAudio/egs/SE/utils/data/convert_data_dir_to_whole.sh +62 -0
  39. UniAudio/UniAudio/egs/SE/utils/data/copy_data_dir.sh +149 -0
  40. UniAudio/UniAudio/egs/SE/utils/data/extend_segment_times.py +118 -0
  41. UniAudio/UniAudio/egs/SE/utils/data/extract_wav_segments_data_dir.sh +59 -0
  42. UniAudio/UniAudio/egs/SE/utils/data/fix_data_dir.sh +215 -0
  43. UniAudio/UniAudio/egs/SE/utils/data/fix_subsegment_feats.pl +106 -0
  44. UniAudio/UniAudio/egs/SE/utils/data/get_allowed_durations.py +219 -0
  45. UniAudio/UniAudio/egs/SE/utils/data/get_frame_shift.sh +74 -0
  46. UniAudio/UniAudio/egs/SE/utils/data/get_num_frames.sh +25 -0
  47. UniAudio/UniAudio/egs/SE/utils/data/get_reco2dur.sh +143 -0
  48. UniAudio/UniAudio/egs/SE/utils/data/get_reco2utt_for_data.sh +21 -0
  49. UniAudio/UniAudio/egs/SE/utils/data/get_segments_for_data.sh +29 -0
  50. UniAudio/UniAudio/egs/SE/utils/data/get_uniform_subsegments.py +121 -0
.gitattributes CHANGED
@@ -57,3 +57,36 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ UniAudio/UniAudio/tools/tokenizer/phone/lexicon.txt filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.1.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.10.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.11.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.12.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.13.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.14.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.15.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.16.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.2.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.3.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.4.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.5.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.6.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.7.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.8.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_UniAudio.9.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.1.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.10.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.11.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.12.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.13.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.14.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.15.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.16.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.2.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.3.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.4.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.5.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.6.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.7.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.8.ark filter=lfs diff=lfs merge=lfs -text
+ dump/raw_codec_ssl_tts_librispeech/test_clean/data/wav_codec_ssl_UniAudio.9.ark filter=lfs diff=lfs merge=lfs -text
UniAudio/UniAudio/download.sh ADDED
@@ -0,0 +1,8 @@
+ mkdir UniAudio/checkpoints
+ cd UniAudio/checkpoints
+ wget https://huggingface.co/Dongchao/UniAudio/resolve/main/lang_nosp.zip
+ unzip lang_nosp.zip
+ wget https://huggingface.co/Dongchao/UniAudio/resolve/main/universal_model.zip
+ unzip universal_model.zip
+ wget https://huggingface.co/Dongchao/UniAudio/resolve/main/hubert_base_ls960.pt
+ wget https://huggingface.co/Dongchao/UniAudio/resolve/main/hubert_base_ls960_L9_km500.bin
UniAudio/UniAudio/egs/SE/cmd.sh ADDED
@@ -0,0 +1,110 @@
+ # ====== About run.pl, queue.pl, slurm.pl, and ssh.pl ======
+ # Usage: <cmd>.pl [options] JOB=1:<nj> <log> <command...>
+ # e.g.
+ #     run.pl --mem 4G JOB=1:10 echo.JOB.log echo JOB
+ #
+ # Options:
+ #     --time <time>: Limit the maximum time to execute.
+ #     --mem <mem>: Limit the maximum memory usage.
+ #     --max-jobs-run <njob>: Limit the number of parallel jobs. This is ignored for non-array jobs.
+ #     --num-threads <ngpu>: Specify the number of CPU cores.
+ #     --gpu <ngpu>: Specify the number of GPU devices.
+ #     --config: Change the configuration file from the default.
+ #
+ # "JOB=1:10" is used for "array jobs" and it can control the number of parallel jobs.
+ # The left string of "=", i.e. "JOB", is replaced by <N> (the Nth job) in the command and the log file name,
+ # e.g. "echo JOB" is changed to "echo 3" for the 3rd job and "echo 8" for the 8th job respectively.
+ # Note that the number must start with a positive number, so you can't use "JOB=0:10" for example.
+ #
+ # run.pl, queue.pl, slurm.pl, and ssh.pl have a unified interface that does not depend on the backend.
+ # These options are mapped to the specific options for each backend and
+ # are configured by "conf/queue.conf" and "conf/slurm.conf" by default.
+ # If jobs fail, your configuration might be wrong for your environment.
+ #
+ #
+ # The official documentation for run.pl, queue.pl, slurm.pl, and ssh.pl:
+ #     "Parallelization in Kaldi": http://kaldi-asr.org/doc/queue.html
+ # =========================================================
+
+
+ # Select the backend used by run.sh from "local", "stdout", "sge", "slurm", or "ssh"
+ cmd_backend='local'
+
+ # Local machine, without any job scheduling system
+ if [ "${cmd_backend}" = local ]; then
+
+     # The other usage
+     export train_cmd="run.pl"
+     # Used for "*_train.py": "--gpu" is appended optionally by run.sh
+     export cuda_cmd="run.pl"
+     # Used for "*_recog.py"
+     export decode_cmd="run.pl"
+
+ # Local machine logging to stdout and a log file, without any job scheduling system
+ elif [ "${cmd_backend}" = stdout ]; then
+
+     # The other usage
+     export train_cmd="stdout.pl"
+     # Used for "*_train.py": "--gpu" is appended optionally by run.sh
+     export cuda_cmd="stdout.pl"
+     # Used for "*_recog.py"
+     export decode_cmd="stdout.pl"
+
+
+ # "qsub" (Sun Grid Engine, or a derivative of it)
+ elif [ "${cmd_backend}" = sge ]; then
+     # The default setting is written in conf/queue.conf.
+     # You must change "-q g.q" to the "queue" for your environment.
+     # To see the "queue" names, type "qhost -q".
+     # Note that to use "--gpu *", you have to set up "complex_value" for the system scheduler.
+
+     export train_cmd="queue.pl"
+     export cuda_cmd="queue.pl"
+     export decode_cmd="queue.pl"
+
+
+ # "qsub" (Torque/PBS)
+ elif [ "${cmd_backend}" = pbs ]; then
+     # The default setting is written in conf/pbs.conf.
+
+     export train_cmd="pbs.pl"
+     export cuda_cmd="pbs.pl"
+     export decode_cmd="pbs.pl"
+
+
+ # "sbatch" (Slurm)
+ elif [ "${cmd_backend}" = slurm ]; then
+     # The default setting is written in conf/slurm.conf.
+     # You must change "-p cpu" and "-p gpu" to the "partition" for your environment.
+     # To see the "partition" names, type "sinfo".
+     # You can use "--gpu *" by default for Slurm and it is interpreted as "--gres gpu:*".
+     # The devices are allocated exclusively using "${CUDA_VISIBLE_DEVICES}".
+
+     export train_cmd="slurm.pl"
+     export cuda_cmd="slurm.pl"
+     export decode_cmd="slurm.pl"
+
+ elif [ "${cmd_backend}" = ssh ]; then
+     # You have to create ".queue/machines" to specify the hosts on which to execute jobs.
+     # e.g. .queue/machines
+     #     host1
+     #     host2
+     #     host3
+     # This assumes you can log in to them without a password, i.e. you have to set up ssh keys.
+
+     export train_cmd="ssh.pl"
+     export cuda_cmd="ssh.pl"
+     export decode_cmd="ssh.pl"
+
+ # This is an example of specifying several unique options in the JHU CLSP cluster setup.
+ # Users can modify/add their own command options according to their cluster environments.
+ elif [ "${cmd_backend}" = jhu ]; then
+
+     export train_cmd="queue.pl --mem 2G"
+     export cuda_cmd="queue-freegpu.pl --mem 2G --gpu 1 --config conf/queue.conf"
+     export decode_cmd="queue.pl --mem 4G"
+
+ else
+     echo "$0: Error: Unknown cmd_backend=${cmd_backend}" 1>&2
+     return 1
+ fi
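For reference, the exported variables are consumed exactly as the header comment describes. A minimal local-backend sketch (the log path and command here are illustrative, not part of this commit):

```sh
# runs 4 array jobs on the local machine, logging to exp/log/demo.<N>.log
${train_cmd} JOB=1:4 exp/log/demo.JOB.log echo "running job JOB"
```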
UniAudio/UniAudio/egs/SE/data_scripts/create_data_json.py ADDED
@@ -0,0 +1,42 @@
+ import json
+ import argparse
+ import sys
+ import os
+ from utils.task_definition import task_formats
+
+ def main(cmd):
+     # parse the task first
+     possible_tasks = list(task_formats.keys())
+     parser = argparse.ArgumentParser(
+         description="Build the data json file for data loading",
+         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+     )
+     parser.add_argument("--task", type=str, choices=possible_tasks, help="exact task")
+     parser.add_argument("--out-json", type=str, help="output json file")
+     args, _ = parser.parse_known_args(cmd)
+
+     # add arguments according to the task
+     task_format = task_formats[args.task]
+     required_files = task_format['keys'] + task_format['features']
+     for key in required_files:
+         parser.add_argument(f"--{key}", type=str, required=True)
+     args = parser.parse_args(cmd)
+
+     # replace the key / feature list by the corresponding key-value dict
+     keys = {k: getattr(args, k) for k in task_format['keys']}
+     features = {k: getattr(args, k) for k in task_format['features']}
+
+     task_format['keys'] = keys
+     task_format['features'] = features
+     task_format["task"] = args.task
+
+     # save as a json
+     with open(args.out_json, 'wb') as f:
+         f.write(
+             json.dumps(
+                 task_format, indent=4, ensure_ascii=False, sort_keys=False
+             ).encode("utf_8")
+         )
+
+ if __name__ == "__main__":
+     main(sys.argv[1:])
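For orientation, a hypothetical output for `--task SE` might look roughly like the sketch below, assuming `utils/task_definition.py` declares `noise_seq` and `audio_seq` entries for SE (consistent with the flags passed in run.sh stage 4 later in this commit). The actual split between "keys" and "features" depends on that task definition, which is not part of this diff:

```json
{
    "keys": { "noise_seq": "data/train/8splits/noise_codec.1.pt" },
    "features": { "audio_seq": "data/train/8splits/clean_codec.1.pt" },
    "task": "SE"
}
```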
UniAudio/UniAudio/egs/SE/data_scripts/filter_pt.py ADDED
@@ -0,0 +1,17 @@
+ import torch
+ import sys
+
+ ref_file = sys.argv[1]
+ out_pt_file = sys.argv[2]
+ in_pt_files = sys.argv[3:]
+
+ ref_dict = open(ref_file).readlines()
+ ref_dict = {line.strip().split()[0]: None for line in ref_dict}
+
+ pt = {}
+ for f in in_pt_files:
+     this_dict = torch.load(f)
+     this_dict = {k: v for k, v in this_dict.items() if k in ref_dict}
+     pt.update(this_dict)
+
+ torch.save(pt, out_pt_file)
UniAudio/UniAudio/egs/SE/data_scripts/filter_scp.py ADDED
@@ -0,0 +1,29 @@
+ import sys
+
+ ref_f = sys.argv[1]
+ in_f = sys.argv[2]
+ try:
+     writer = open(sys.argv[3], 'w', encoding='utf-8')
+     stream_out = False
+ except:
+     stream_out = True
+
+ # output is in the order of ref_f
+ ref = []
+ for line in open(ref_f, encoding='utf-8'):
+     uttid = line.strip().split()[0]
+     ref.append(uttid)
+
+ in_dic = {}
+ for line in open(in_f, encoding='utf-8'):
+     elems = line.strip().split()
+     uttid = elems[0]
+     ctx = " ".join(elems[1:])
+     in_dic[uttid] = ctx
+
+ for e in ref:
+     if e in in_dic:
+         if stream_out:
+             print(f"{e} {in_dic[e]}")
+         else:
+             writer.write(f"{e} {in_dic[e]}\n")
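As run.sh stage 3 later in this commit shows, the script takes three positional arguments: a reference scp, an input scp, and an optional output path (it prints to stdout when the third argument is omitted). A hypothetical invocation with illustrative paths:

```sh
# keep only the noise.scp entries whose utterance ids appear in wav.1.scp,
# writing them out in wav.1.scp order
python3 data_scripts/filter_scp.py data/train/8splits/wav.1.scp \
    data/train/noise.scp data/train/8splits/noise.1.scp
```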
UniAudio/UniAudio/egs/SE/data_scripts/find_peer_utts.py ADDED
@@ -0,0 +1,45 @@
+ import sys
+ import argparse
+
+ def get_parser():
+     parser = argparse.ArgumentParser(
+         description="output a file that specifies all peer utterances of each utt",
+         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+     )
+     parser.add_argument("--utt2spk", type=str, help="original utt2spk file")
+     parser.add_argument("--peer-utts", type=str, help="output peer-utterance file")
+     parser.add_argument("--subset-list", type=str, help="list of utt subset")
+     return parser
+
+ def main(args):
+     args = get_parser().parse_args(args)
+
+     # subset utts
+     subset_utts = open(args.subset_list).readlines()
+     subset_utts = [line.strip().split()[0] for line in subset_utts]
+
+     # utt2spk
+     utt2spk = open(args.utt2spk).readlines()
+     utt2spk = [line.strip().split() for line in utt2spk]
+     utt2spk = {line[0]: line[1] for line in utt2spk}
+
+     # invert utt2spk into spk2utt
+     spk2utt = {}
+     for utt, spk in utt2spk.items():
+         if spk not in spk2utt:
+             spk2utt[spk] = []
+         spk2utt[spk].append(utt)
+
+     writer = open(args.peer_utts, 'w')
+     for utt in utt2spk.keys():
+         spk = utt2spk[utt]
+         peer_utts = spk2utt[spk]
+
+         if len(peer_utts) <= 1:
+             print(f'warning: utt {utt} has no peer speech except itself')
+
+         out_str = " ".join([utt] + peer_utts)
+         writer.write(out_str + "\n")
+
+ if __name__ == "__main__":
+     main(sys.argv[1:])
UniAudio/UniAudio/egs/SE/data_scripts/merge_then_split.py ADDED
@@ -0,0 +1,105 @@
+ # UniAudio Teams
+ # NOTE: this script appears to be an unfinished variant of offline_tokenization.py:
+ # `tokenizer`, `key` (in the tar branch), and the `tar_file` / `tar_key_word` /
+ # `wav_scp` / `segments` / `output_file` arguments are referenced below but never
+ # defined in this file.
+ import os
+ import sys
+ import torch
+ import argparse
+ import logging
+ import tarfile
+ import mmap
+ import pickle
+ import librosa
+ from io import BytesIO
+ from kaldiio import ReadHelper
+ from tools.tokenizer.soundstream.AudioTokenizer import AudioTokenizer
+ from tools.tokenizer.soundstream.EncodecTokenizer import EncodecTokenizer
+ from tools.tokenizer.Text2Phone.Text2PhoneTokenizer import Text2PhoneTokenizer
+ from tools.tokenizer.BPE.SPTokenizer import BPETokenizer
+ from tools.tokenizer.Semantic.Semantic_tokenizer import SemanticTokenizer
+ from tools.tokenizer.phone.phone_tokenizer import PhoneTokenizer
+ from tools.tokenizer.Text_Image.BPETokenizer import BPETokenizer as ClipBPETokenizer
+
+ def get_parser():
+     parser = argparse.ArgumentParser(
+         description="convert a data list, do tokenization and save as a torch .pt file",
+         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+     )
+     parser.add_argument("--input-file", type=str, default=None, help="the previous .pt save path")
+     parser.add_argument("--key-word", type=str, default=None, help="the key word to choose file")
+     parser.add_argument("--split-file", type=str, default=None, help="the reference file for split operation")
+     return parser
+
+ def main(args):
+     logging.basicConfig(
+         stream=sys.stdout,
+         level=logging.DEBUG,
+         format=f"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s",
+     )
+     args = get_parser().parse_args(args)
+     data_dict = {}
+
+     names = os.listdir(args.input_file)
+     for name in names:
+
+         if args.input_file is not None:
+             for i, line in enumerate(open(args.input_file)):
+                 line = line.strip().split()
+                 key, value = line[0], " ".join(line[1:])
+                 value = tokenizer.tokenize(value)
+                 if value is None:
+                     logging.error(f"an error instance: {key} {value}")
+                     continue
+                 if isinstance(value, torch.Tensor):
+                     # may not be true for continuous tokenizers;
+                     # keep this assertion at the moment.
+                     assert value.dim() == 1
+                     value = value.cpu()
+                 data_dict[key] = value
+                 if i > 0 and i % 1000 == 0:
+                     logging.info(f"processed {i} examples")
+         elif args.tar_file is not None:
+             # we use tar files as chunks
+             for i, line in enumerate(open(args.input_file)):
+                 tar_path = line.strip()
+                 tar_o = tarfile.open(tar_path, mode='r')
+                 for info in tar_o:  # get the speech info
+                     if info.name.split(".")[-1] == args.tar_key_word:
+                         cur_f = tar_o.extractfile(info)
+                         try:
+                             cur, _ = librosa.load(BytesIO(pickle.load(cur_f).item()), sr=16000)
+                         except Exception:
+                             logging.error(f"an error instance: {tar_path} {info.name}")
+                             cur = None
+                         if cur is None:
+                             continue
+                         wav = torch.from_numpy(cur).unsqueeze(0)  # transfer to (1, len)
+                         value = tokenizer.tokenize(wav)
+                         if value is None:
+                             logging.error(f"an error instance: {key} {value}")
+                             continue
+                         if isinstance(value, torch.Tensor):
+                             # may not be true for continuous tokenizers;
+                             # keep this assertion at the moment.
+                             assert value.dim() == 1
+                             value = value.cpu()
+                         data_dict[key] = value
+                 if i > 0 and i % 100 == 0:
+                     logging.info(f"processed {i} examples")
+         else:
+             # kaldiio format
+             assert isinstance(tokenizer, AudioTokenizer)
+             iterator = ReadHelper('scp:' + args.wav_scp, args.segments)
+             count = 0
+             for key, (sr, value) in iterator:
+                 value = torch.from_numpy(value.copy()) / 32768  # [channel, samples]
+                 value = value.unsqueeze(0)
+                 value = tokenizer.tokenize(value)
+                 data_dict[key] = value
+                 if count > 0 and count % 100 == 0:
+                     logging.info(f"processed {count} examples")
+                 count += 1
+
+     torch.save(data_dict, args.output_file)
+
+ if __name__ == "__main__":
+     main(sys.argv[1:])
UniAudio/UniAudio/egs/SE/data_scripts/offline_tokenization.py ADDED
@@ -0,0 +1,161 @@
+ # Author: UniAudio Teams
+
+ import sys
+ import torch
+ import argparse
+ import logging
+ import tarfile
+ import mmap
+ import pickle
+ import librosa
+ from io import BytesIO
+ from kaldiio import ReadHelper
+ from tools.tokenizer.soundstream.AudioTokenizer import AudioTokenizer
+ from tools.tokenizer.soundstream.EncodecTokenizer import EncodecTokenizer
+ from tools.tokenizer.BPE.SPTokenizer import BPETokenizer
+ from tools.tokenizer.Semantic.Semantic_tokenizer import SemanticTokenizer
+ from tools.tokenizer.phone.phone_tokenizer import PhoneTokenizer
+
+ def get_parser():
+     parser = argparse.ArgumentParser(
+         description="convert a data list, do tokenization and save as a torch .pt file",
+         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+     )
+     parser.add_argument("--input-file", type=str, default=None, help="text file in the format <example_id> <content>")
+     parser.add_argument("--tar-file", type=str, default=None, help="we use tar chunks to save audio information")
+     parser.add_argument("--tar-key-word", type=str, default=None, help="the key word to find files in the tar")
+     parser.add_argument("--tar-info", type=str, default=None, help="the file to save tar information")
+     parser.add_argument("--wav-scp", type=str, default=None, help="kaldi wav.scp file")
+     parser.add_argument("--segments", type=str, default=None, help="kaldi segments file")
+     parser.add_argument("--output-file", type=str, help="output .pt dict")
+     parser.add_argument("--tokenizer", type=str, choices=['audio', 'g2p', 'bpe', 'semantic', 'encodec', 'alignment'], help="what tokenizer to use")
+     parser.add_argument("--rank", type=int, help="local GPU rank, if applicable")
+     parser.add_argument("--batch-size", type=int, default=1, help="for batch tokenization")
+     return parser
+
+ def main(args):
+     logging.basicConfig(
+         stream=sys.stdout,
+         level=logging.DEBUG,
+         format=f"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s",
+     )
+     args = get_parser().parse_args(args)
+     args.rank -= 1  # run.pl starts from 1 but the exact jobid / gpuid starts from 0
+     max_gpu = torch.cuda.device_count()
+     args.rank = (args.rank % max_gpu)
+
+     if args.tokenizer in ['audio', 'semantic', 'encodec']:
+         device = torch.device(f"cuda:{args.rank}")
+     else:
+         device = torch.device('cpu')
+     logging.info(f"Using device: {device}")
+
+     if args.tokenizer in ['g2p', 'bpe', 'alignment', "clipbpe"]:
+         assert args.wav_scp is None, f"when using {args.tokenizer}"
+         assert args.batch_size == 1, f"{args.tokenizer} doesn't support batch tokenization"
+
+     # GPU tokenizers
+     if args.tokenizer == "audio":
+         tokenizer = AudioTokenizer(device=device)
+     elif args.tokenizer == "encodec":
+         tokenizer = EncodecTokenizer(device=device)
+     elif args.tokenizer == "semantic":
+         tokenizer = SemanticTokenizer(device=device)
+     # CPU tokenizers
+     elif args.tokenizer == "g2p":
+         pass
+         #tokenizer = Text2PhoneTokenizer()
+     elif args.tokenizer == "bpe":
+         tokenizer = BPETokenizer()
+     elif args.tokenizer == "alignment":
+         tokenizer = PhoneTokenizer(duplicate=True)
+     else:
+         raise NotImplementedError
+     tokenizer = tokenizer.to(device)
+     logging.info('tokenizer built')
+     data_dict = {}
+     assert not (args.input_file is not None and args.wav_scp is not None)
+     # TODO: support batch inference
+     if args.input_file is not None:
+         s_cnt = 0
+         for i, line in enumerate(open(args.input_file)):
+             try:
+                 line = line.strip().split()
+                 key, value = line[0], " ".join(line[1:])
+                 value = tokenizer.tokenize(value)
+                 if value is None:
+                     logging.error(f"an error instance: {key} {value}")
+                     continue
+                 if isinstance(value, torch.Tensor):
+                     # may not be true for continuous tokenizers;
+                     # keep this assertion at the moment.
+                     assert value.dim() == 1
+                     value = value.cpu()
+                 data_dict[key] = value
+                 s_cnt += 1
+                 if i > 0 and i % 1000 == 0:
+                     logging.info(f"processed {s_cnt} examples")
+             except Exception:
+                 logging.error(f"an error instance: {line}")
+     elif args.tar_file is not None:
+         # we use tar files as chunks
+         f_info = open(args.tar_info, 'w')
+         for i, line in enumerate(open(args.tar_file, 'r')):
+             tar_path = line.strip().split(' ')[-1]
+             try:
+                 tar_o = tarfile.open(tar_path, mode='r')
+                 for info in tar_o:  # get the speech info
+                     if info.name.split(".")[-1] == args.tar_key_word:
+                         key = ' '.join(info.name.split(".")[:-1])
+                         tar_new_name = tar_path.replace('', '')
+                         tar_new_name = tar_new_name.replace('/', '_')[:-4]
+                         key = tar_new_name + '_' + key
+                         print('key ', key, info.name)
+                         cur_f = tar_o.extractfile(info)
+                         try:
+                             cur = pickle.load(cur_f)
+                             #cur, _ = librosa.load(BytesIO(pickle.load(cur_f).item()), sr=16000)
+                         except Exception:
+                             logging.error(f"an error instance: {tar_path} {info.name}")
+                             cur = None
+                         if cur is None:
+                             continue
+                         wav = torch.from_numpy(cur).unsqueeze(0).float()  # transfer to (1, len)
+                         value = tokenizer.tokenize(wav)
+                         if value is None:
+                             logging.error(f"an error instance: {key} {value}")
+                             continue
+                         if isinstance(value, torch.Tensor):
+                             # may not be true for continuous tokenizers;
+                             # keep this assertion at the moment.
+                             assert value.dim() == 1
+                             value = value.cpu()
+                         data_dict[key] = value
+                         f_info.write(key + '\n')
+                 if i > 0 and i % 1 == 0:
+                     logging.info(f"processed {i} examples")
+                 tar_o.close()
+             except Exception:
+                 logging.error(f"an error instance: {tar_path}")
+     else:
+         # kaldiio format
+         assert isinstance(tokenizer, AudioTokenizer)
+         iterator = ReadHelper('scp:' + args.wav_scp, args.segments)
+         count = 0
+         for key, (sr, value) in iterator:
+             value = torch.from_numpy(value.copy()) / 32768  # [channel, samples]
+             value = value.unsqueeze(0)
+             value = tokenizer.tokenize(value)
+             data_dict[key] = value
+             if count > 0 and count % 100 == 0:
+                 logging.info(f"processed {count} examples")
+             count += 1
+     torch.save(data_dict, args.output_file)
+
+ if __name__ == "__main__":
+     main(sys.argv[1:])
UniAudio/UniAudio/egs/SE/data_scripts/select_spk2utt.py ADDED
@@ -0,0 +1,31 @@
+ import sys
+ import argparse
+
+ def get_parser():
+     parser = argparse.ArgumentParser(
+         description="Revise the spk2utt file so that it only contains a subset of the utts",
+         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+     )
+     parser.add_argument("--in-spk2utt", type=str, help="original spk2utt file")
+     parser.add_argument("--out-spk2utt", type=str, help="revised spk2utt file")
+     parser.add_argument("--subset-list", type=str, help="list of utt subset")
+     return parser
+
+ def main(args):
+     args = get_parser().parse_args(args)
+
+     utts = open(args.subset_list).readlines()
+     utts = [line.strip().split()[0] for line in utts]
+     utts = {x: None for x in utts}
+
+     writer = open(args.out_spk2utt, 'w')
+     for line in open(args.in_spk2utt):
+         line = line.strip().split()
+         spk_id, spk_utts = line[0], line[1:]
+         spk_utts = [utt for utt in spk_utts if utt in utts]
+
+         out_str = " ".join([spk_id] + spk_utts)
+         writer.write(out_str + "\n")
+
+ if __name__ == "__main__":
+     main(sys.argv[1:])
UniAudio/UniAudio/egs/SE/local/data.sh ADDED
@@ -0,0 +1,97 @@
+ #!/usr/bin/env bash
+
+ set -e
+ set -u
+ set -o pipefail
+
+ log() {
+     local fname=${BASH_SOURCE[1]##*/}
+     echo -e "$(date '+%Y-%m-%dT%H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
+ }
+ SECONDS=0
+
+ stage=-1
+ stop_stage=1
+ trim_all_silence=false
+
+ log "$0 $*"
+ . utils/parse_options.sh
+
+ if [ $# -ne 0 ]; then
+     log "Error: No positional arguments are required."
+     exit 2
+ fi
+
+ . ./path.sh || exit 1;
+ . ./cmd.sh || exit 1;
+
+ if [ -z "${LIBRITTS}" ]; then
+     log "Fill the value of 'LIBRITTS' in db.sh"
+     exit 1
+ fi
+ db_root=${LIBRITTS}
+ data_url=www.openslr.org/resources/60
+
+ if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
+     log "stage -1: local/download_and_untar.sh"
+     # download the original corpus
+     if [ ! -e "${db_root}"/LibriTTS/.complete ]; then
+         for part in dev-clean dev-other test-clean test-other train-clean-100 train-clean-360 train-other-500; do
+             local/download_and_untar.sh "${db_root}" "${data_url}" "${part}" &
+         done; wait
+         touch "${db_root}/LibriTTS/.complete"
+     else
+         log "Already done. Skipped."
+     fi
+
+     # download the additional labels
+     if [ ! -e "${db_root}"/LibriTTS/.lab_complete ]; then
+         # git clone https://github.com/kan-bayashi/LibriTTSCorpusLabel.git "${db_root}/LibriTTSCorpusLabel"
+         cat "${db_root}"/LibriTTSCorpusLabel/lab.tar.gz-* > "${db_root}/LibriTTS/lab.tar.gz"
+         cwd=$(pwd)
+         cd "${db_root}/LibriTTS"
+         for part in dev-clean dev-other test-clean test-other train-clean-100 train-clean-360 train-other-500; do
+             gunzip -c lab.tar.gz | tar xvf - "lab/phone/${part}" --strip-components=2 &
+         done; wait
+         touch .lab_complete
+         # rm -rf lab.tar.gz
+         cd "${cwd}"
+     else
+         log "Already done. Skipped."
+     fi
+ fi
+
+ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+     log "stage 0: local/data_prep.sh"
+     if "${trim_all_silence}"; then
+         [ ! -e data/local ] && mkdir -p data/local
+         cp ${db_root}/LibriTTS/SPEAKERS.txt data/local
+     fi
+     for name in "dev-clean" "test-clean" "train-clean-100" "train-clean-360" "train-other-500"; do
+         (if "${trim_all_silence}"; then
+             # Remove all silence and re-create the wav files
+             local/trim_all_silence.py "${db_root}/LibriTTS/${name}" data/local/${name}
+
+             # Copy normalized txt files while keeping the directory structure
+             cwd=$(pwd)
+             cd "${db_root}/LibriTTS/${name}"
+             find . -follow -name "*.normalized.txt" -print0 \
+                 | tar c --null -T - -f - | tar xf - -C "${cwd}/data/local/${name}"
+             cd "${cwd}"
+
+             # Create a kaldi data directory with the trimmed audio
+             local/data_prep.sh "data/local/${name}" "data/${name}"
+         else
+             # Create a kaldi data directory with the original audio
+             local/data_prep.sh "${db_root}/LibriTTS/${name}" "data/${name}"
+         fi
+         utils/fix_data_dir.sh "data/${name}") &
+     done; wait
+ fi
+
+ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+     log "stage 1: utils/combine_data.sh"
+     utils/combine_data.sh data/train-960 data/train-clean-100 data/train-clean-360 data/train-other-500
+ fi
+
+ log "Successfully finished. [elapsed=${SECONDS}s]"
UniAudio/UniAudio/egs/SE/local/data_prep.sh ADDED
@@ -0,0 +1,83 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2014 Vassil Panayotov
+ #           2014 Johns Hopkins University (author: Daniel Povey)
+ # Modifications Copyright 2019 Nagoya University (author: Takenori Yoshimura)
+ # Apache 2.0
+
+ if [ "$#" -ne 2 ]; then
+     echo "Usage: $0 <src-dir> <dst-dir>"
+     echo "e.g.: $0 /export/a15/vpanayotov/data/LibriTTS/dev-clean data/dev-clean"
+     exit 1
+ fi
+
+ src=$1
+ dst=$2
+
+ spk_file=$src/../SPEAKERS.txt
+
+ mkdir -p $dst || exit 1
+
+ [ ! -d $src ] && echo "$0: no such directory $src" && exit 1
+ [ ! -f $spk_file ] && echo "$0: expected file $spk_file to exist" && exit 1
+
+
+ wav_scp=$dst/wav.scp; [[ -f "$wav_scp" ]] && rm $wav_scp
+ trans=$dst/text; [[ -f "$trans" ]] && rm $trans
+ utt2spk=$dst/utt2spk; [[ -f "$utt2spk" ]] && rm $utt2spk
+ spk2gender=$dst/spk2gender; [[ -f $spk2gender ]] && rm $spk2gender
+
+ for reader_dir in $(find -L $src -mindepth 1 -maxdepth 1 -type d | sed -e "s/$/_/" | sort); do
+     reader_dir=$(echo $reader_dir | sed -e "s/_$//")
+     reader=$(basename $reader_dir)
+     if ! [ $reader -eq $reader ]; then  # not an integer
+         echo "$0: unexpected subdirectory name $reader"
+         exit 1
+     fi
+
+     reader_gender=$(egrep "^$reader[ ]+\|" $spk_file | awk -F'|' '{gsub(/[ ]+/, ""); print tolower($2)}')
+     if [ "$reader_gender" != 'm' ] && [ "$reader_gender" != 'f' ]; then
+         echo "Unexpected gender: '$reader_gender'"
+         exit 1
+     fi
+
+     for chapter_dir in $(find -L $reader_dir/ -mindepth 1 -maxdepth 1 -type d | sort); do
+         chapter=$(basename $chapter_dir)
+         if ! [ "$chapter" -eq "$chapter" ]; then
+             echo "$0: unexpected chapter-subdirectory name $chapter"
+             exit 1
+         fi
+
+         spk="${reader}_${chapter}"
+
+         find -L $chapter_dir/ -iname "*.wav" | sort | while read -r wav_file; do
+             id=$(basename $wav_file .wav)
+             echo "$id $wav_file" >>$wav_scp
+
+             txt=$(cat $(echo $wav_file | sed -e "s/\.wav$/.normalized.txt/"))
+             echo "$id $txt" >>$trans
+
+             # NOTE: For now we are using per-chapter utt2spk. That is, each chapter is considered
+             #       to be a different speaker. This is done for simplicity and because we want
+             #       e.g. the CMVN to be calculated per-chapter
+             echo "$id $spk" >>$utt2spk
+         done
+
+         # reader -> gender map (again using per-chapter granularity)
+         echo "$spk $reader_gender" >>$spk2gender
+     done
+ done
+
+ spk2utt=$dst/spk2utt
+ utils/utt2spk_to_spk2utt.pl <$utt2spk >$spk2utt || exit 1
+
+ ntrans=$(wc -l <$trans)
+ nutt2spk=$(wc -l <$utt2spk)
+ ! [ "$ntrans" -eq "$nutt2spk" ] && \
+     echo "Inconsistent #transcripts($ntrans) and #utt2spk($nutt2spk)" && exit 1
+
+ utils/validate_data_dir.sh --no-feats $dst || exit 1
+
+ echo "$0: successfully prepared data in $dst"
+
+ exit 0
UniAudio/UniAudio/egs/SE/local/download_and_untar.sh ADDED
@@ -0,0 +1,95 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2014 Johns Hopkins University (author: Daniel Povey)
+ # Modifications Copyright 2019 Nagoya University (author: Takenori Yoshimura)
+ # Apache 2.0
+
+ remove_archive=false
+
+ if [ "$1" == --remove-archive ]; then
+     remove_archive=true
+     shift
+ fi
+
+ if [ $# -ne 3 ]; then
+     echo "Usage: $0 [--remove-archive] <data-base> <url-base> <corpus-part>"
+     echo "e.g.: $0 /export/a15/vpanayotov/data www.openslr.org/resources/60 dev-clean"
+     echo "With --remove-archive it will remove the archive after successfully un-tarring it."
+     echo "<corpus-part> can be one of: dev-clean, test-clean, dev-other, test-other,"
+     echo "  train-clean-100, train-clean-360, train-other-500."
+     exit 1
+ fi
+
+ data=$1
+ url=$2
+ part=$3
+
+ if [ ! -d "$data" ]; then
+     echo "$0: no such directory $data"
+     exit 1
+ fi
+
+ part_ok=false
+ list="dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500"
+ for x in $list; do
+     if [ "$part" == $x ]; then part_ok=true; fi
+ done
+ if ! $part_ok; then
+     echo "$0: expected <corpus-part> to be one of $list, but got '$part'"
+     exit 1
+ fi
+
+ if [ -z "$url" ]; then
+     echo "$0: empty URL base."
+     exit 1
+ fi
+
+ if [ -f $data/LibriTTS/$part/.complete ]; then
+     echo "$0: data part $part was already successfully extracted, nothing to do."
+     exit 0
+ fi
+
+
+ # sizes of the archive files in bytes
+ sizes="1291469655 924804676 1230670113 964502297 7723686890 27504073644 44565031479"
+
+ if [ -f $data/$part.tar.gz ]; then
+     size=$(/bin/ls -l $data/$part.tar.gz | awk '{print $5}')
+     size_ok=false
+     for s in $sizes; do if [ $s == $size ]; then size_ok=true; fi; done
+     if ! $size_ok; then
+         echo "$0: removing existing file $data/$part.tar.gz because its size in bytes $size"
+         echo "does not equal the size of one of the archives."
+         rm $data/$part.tar.gz
+     else
+         echo "$data/$part.tar.gz exists and appears to be complete."
+     fi
+ fi
+
+ if [ ! -f $data/$part.tar.gz ]; then
+     if ! which wget >/dev/null; then
+         echo "$0: wget is not installed."
+         exit 1
+     fi
+     full_url=$url/$part.tar.gz
+     echo "$0: downloading data from $full_url. This may take some time, please be patient."
+
+     if ! wget -P $data --no-check-certificate $full_url; then
+         echo "$0: error executing wget $full_url"
+         exit 1
+     fi
+ fi
+
+ if ! tar -C $data -xvzf $data/$part.tar.gz; then
+     echo "$0: error un-tarring archive $data/$part.tar.gz"
+     exit 1
+ fi
+
+ touch $data/LibriTTS/$part/.complete
+
+ echo "$0: Successfully downloaded and un-tarred $data/$part.tar.gz"
+
+ if $remove_archive; then
+     echo "$0: removing $data/$part.tar.gz file since --remove-archive option was supplied."
+     rm $data/$part.tar.gz
+ fi
UniAudio/UniAudio/egs/SE/local/trim_all_silence.py ADDED
@@ -0,0 +1,113 @@
+ #!/usr/bin/env python3
+
+ # Copyright 2020 Tomoki Hayashi
+ # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+
+ """Trim silence in the audio and re-create wav files."""
+
+ import argparse
+ import fnmatch
+ import logging
+ import os
+ import sys
+
+ import numpy as np
+ import soundfile as sf
+
+
+ def find_files(root_dir, query="*.wav", include_root_dir=True):
+     """Find files recursively.
+
+     Args:
+         root_dir (str): Root directory to search.
+         query (str): Query to find.
+         include_root_dir (bool): If False, the root_dir name is not included.
+
+     Returns:
+         list: List of found filenames.
+
+     """
+     files = []
+     for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
+         for filename in fnmatch.filter(filenames, query):
+             files.append(os.path.join(root, filename))
+     if not include_root_dir:
+         files = [file_.replace(root_dir + "/", "") for file_ in files]
+
+     return files
+
+
+ def main():
+     """Run trimming."""
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--max_sp_length",
+         default=0.05,
+         type=float,
+         help="maximum length of a short pause in the middle of utterances",
+     )
+     parser.add_argument(
+         "--after_sp_length",
+         default=0.05,
+         type=float,
+         help="length of the offset after a short pause",
+     )
+     parser.add_argument("db_root", type=str, help="root path of the corpus")
+     parser.add_argument(
+         "target_dir", type=str, help="directory to save the trimmed audio"
+     )
+     args = parser.parse_args()
+
+     logging.basicConfig(
+         level=logging.DEBUG,
+         stream=sys.stdout,
+         format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
+     )
+
+     db_root = args.db_root
+     target_dir = args.target_dir
+     max_sp_length = args.max_sp_length
+     after_sp_length = args.after_sp_length
+     wavfiles = sorted(find_files(db_root, include_root_dir=False))
+     labfiles = [f.replace(".wav", ".lab") for f in wavfiles]
+
+     for idx, (wavfile, labfile) in enumerate(zip(wavfiles, labfiles)):
+         if not os.path.exists(f"{db_root}/{labfile}"):
+             logging.warning(f"{labfile} does not exist. skipped.")
+             continue
+         x, fs = sf.read(f"{db_root}/{wavfile}")
+         with open(f"{db_root}/{labfile}") as f:
+             lines = [line.replace("\n", "") for line in f.readlines()]
+             start_times = [float(line.split("\t")[0]) for line in lines]
+             end_times = [float(line.split("\t")[1]) for line in lines]
+             labels = [line.split("\t")[2] for line in lines]
+             start_idxs = [int(t * fs) for t in start_times]
+             end_idxs = [int(t * fs) for t in end_times]
+
+         new_x = []
+         prev_label = None
+         prev_sp_length = 0.0
+         for start_idx, end_idx, label in zip(start_idxs, end_idxs, labels):
+             if label == "sil" or len(label) == 0:
+                 continue
+             elif label in ["sp", "spn"]:
+                 sp_length = (end_idx - start_idx) / fs
+                 if sp_length > max_sp_length:
+                     end_idx = start_idx + int(max_sp_length * fs)
+                 prev_sp_length = sp_length
+             elif prev_label in ["sp", "spn"]:
+                 if prev_sp_length > after_sp_length + max_sp_length:
+                     start_idx -= int(after_sp_length * fs)
+             new_x += [x[start_idx:end_idx]]
+             prev_label = label
+         new_x = np.concatenate(new_x).reshape(-1)
+         write_path = f"{target_dir}/{wavfile}"
+         os.makedirs(os.path.dirname(write_path), exist_ok=True)
+         sf.write(write_path, new_x, fs)
+
+         if (idx + 1) % 1000 == 0:
+             logging.info(f"Now processing... ({idx + 1}/{len(wavfiles)})")
+
+
+ if __name__ == "__main__":
+     main()
UniAudio/UniAudio/egs/SE/make_noisy.py ADDED
@@ -0,0 +1,205 @@
+ # This code prepares SE training data: it mixes clean waveforms with noise waveforms to simulate noisy speech.
+ import os
+ import io
+ import csv
+ import math
+ import mmap
+ import random
+ import argparse
+ import librosa
+ import numpy as np
+ import soundfile as sf
+
+ def mmap_read(path: str, offset: int, length: int) -> bytes:
+     with open(path, "rb") as f:
+         with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_o:
+             data = mmap_o[offset : offset + length]
+     return data
+
+ def read_from_stored_zip(zip_path: str, offset: int, length: int) -> bytes:
+     return mmap_read(zip_path, offset, length)
+
+ def get_parser():
+     parser = argparse.ArgumentParser(
+         description="helping to simulate speech enhancement data",
+         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+     )
+     parser.add_argument("--clean-file", type=str, default=None, help="clean file in the format <example_id> <path>")
+     parser.add_argument("--noise-file", type=str, default=None, help="noise file in the format <example_id> <path>")
+     parser.add_argument("--noisy-path", type=str, default=None, help="the path to save noisy audio")
+     parser.add_argument("--clean-path", type=str, default=None, help="the path to save clean audio")
+     parser.add_argument("--output-file", type=str, help="the output file, which records the noisy and clean information")
+     return parser
+
+ def parse_filelist(file_path):
+     '''Parse a txt file of paths.'''
+     files = []
+     f = open(file_path, 'r')
+     for d in f.readlines():
+         if d[-1] == "\n":
+             files.append(d[:-1])
+         else:
+             files.append(d)
+     return files
+
+ def load_audio_from_path(file_path):
+     '''Load an audio file from a path.'''
+     audio, sr = librosa.load(file_path, sr=16000)
+     assert sr == 16000
+     return audio
+
+ def sparse_chunk_file(file_path):
+     # a chunk entry has the form <zip_path>:<offset>:<length>
+     ans = file_path.split(':')
+     return ans[0], ans[1], ans[2]
+
+ def load_audio_from_chunks(file_path):
+     ph, st, ed = sparse_chunk_file(file_path)
+     byte_data = read_from_stored_zip(ph, int(st), int(ed))
+     wav_path = io.BytesIO(byte_data)
+     waveform, sample_rate = sf.read(wav_path, dtype="float32")
+     return waveform
+
+ def split_wave_into2g(waveform):
+     # split a waveform into two segments of at most 7 s each
+     if waveform.shape[0] > 14 * 16000:
+         waveform1 = waveform[:7 * 16000]
+         waveform2 = waveform[7 * 16000:14 * 16000]
+     else:
+         waveform1 = waveform[:int(waveform.shape[0]) // 2]
+         waveform2 = waveform[int(waveform.shape[0]) // 2:]
+     return waveform1, waveform2
+
+ def save_audio_to_path(audio, store_path):
+     '''Save audio to the given path.'''
+     sf.write(store_path, audio, 16000, subtype='PCM_16')
+
+ def SNR2scale(clean, noise, snr):
+     clean_power = (clean**2).mean()
+     noise_power = (noise**2).mean()
+
+     scale = (
+         10 ** (-snr / 20)
+         * np.sqrt(clean_power)
+         / np.sqrt(max(noise_power, 1e-10))
+     )
+     return scale
+
+ def mix_clean_noise_full(clean, noise, snr):
+     '''
+     Given a clean utterance and a noise clip, align them
+     and mix at the given SNR.
+     '''
+     mix_audio = np.zeros(clean.shape)
+
+     print('mix:', mix_audio.shape, 'noise:', noise.shape, 'clean:', clean.shape)
+
+     if clean.shape[0] >= noise.shape[0]:  # if the clean audio is longer than the noise
+         offset = np.random.randint(0, 2 * clean.shape[-1] - noise.shape[-1])
+         # Repeat the noise: first pad it to twice the clean length
+         noise = np.pad(noise, [(offset, 2 * clean.shape[-1] - noise.shape[-1] - offset)], mode="wrap")
+     # All cases then go through this branch:
+     # the noise is now longer than the clean audio, so pick a random start and crop
+     start = np.random.randint(0, noise.shape[-1] - clean.shape[-1])
+     noise = noise[start:start + clean.shape[-1]]
+     scale = SNR2scale(clean, noise, snr)  # calculated after aligning the noise with the clean audio
+     noise = scale * noise
+     # store the noise audio
+
+     mix_audio = clean + noise
+     return mix_audio, noise
+
+ def main_full_with_path():
+     # write the log in a csv file
+     args = get_parser().parse_args()
+     mix_tr_rt = args.noisy_path
+     clean_tr_rt = args.clean_path
+     tr_csv = args.output_file
+     f = open(tr_csv, 'w', encoding='utf-8', newline="")
+     csv_write = csv.writer(f)
+     csv_write.writerow(['clean', 'noise', 'snr', 'clean_path', 'noisy_path'])
+
+     clean_rt = args.clean_file
+     noise_rt = args.noise_file
+
+     clean_lists = parse_filelist(clean_rt)
+     noise_lists = parse_filelist(noise_rt)
+
+     noise_nums = len(noise_lists)
+
+     SNR_ends = [-20, 40]
+     cnt = 0
+     set_duration = 7
+     #SNRs = [-20,-16,-12,-8,-4,0,4,8,12,16,20,24,28,32,36,40]
+     for clean_path in clean_lists:
+         clean_audio = load_audio_from_chunks(clean_path)
+         clean_audio1, clean_audio2 = split_wave_into2g(clean_audio)
+         if clean_audio1.shape[0] < set_duration * 16000:
+             continue
+         noise_indexes = set()
+         clean_audios = [clean_audio1, clean_audio2]
+         for i, c_a in enumerate(clean_audios):
+             noise_idx = random.randint(0, noise_nums - 1)
+             noise_path = noise_lists[noise_idx]
+             noise = load_audio_from_path(noise_path)
+             n_zero = np.zeros(set_duration * 16000)
+             c_zero = np.zeros(set_duration * 16000)
+             if noise.shape[0] > set_duration * 16000:
+                 idx = random.randint(0, noise.shape[0] - set_duration * 16000 - 1)
+                 n_zero = noise[idx:idx + set_duration * 16000]  # use at most set_duration seconds
+             else:
+                 n_zero[:noise.shape[0]] = noise
+             if c_a.shape[0] > set_duration * 16000:
+                 idx = random.randint(0, c_a.shape[0] - set_duration * 16000 - 1)
+                 c_zero = c_a[idx:idx + set_duration * 16000]
+             else:
+                 c_zero[:c_a.shape[0]] = c_a
+             # select an SNR
+             snr = random.randint(SNR_ends[0], SNR_ends[1])
+             # TODO: the mix function now returns two values, noisy and noise
+             mix_audio = mix_clean_noise_full(c_zero, n_zero, snr)[0]
+             ph = clean_path.replace(':', '_')  # transfer the ':' into '_'
+             clean_name = ph.split('/')[-1]  # get the last item
+             print('clean_name ', clean_name)
+             new_clean_name = clean_name + '_' + str(i)  # append the segment order
+             # clean_name and noise_name are separated by '_'
+             mix_name = os.path.join(mix_tr_rt, new_clean_name + '_' + noise_path.split('/')[-1])
+             print('mix_name:', mix_name)
+             clean_name = os.path.join(clean_tr_rt, new_clean_name) + '.wav'
+             save_audio_to_path(mix_audio, mix_name)
+             save_audio_to_path(c_zero, clean_name)
+             csv_write.writerow([clean_name[:-4], noise_path.split('/')[-1][:-4], snr, clean_name, mix_name])
+     f.close()
+
+ if __name__ == "__main__":
+     main_full_with_path()
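A quick sanity check on SNR2scale above: if the noise is scaled by $s$ and the mixture must sit at a target SNR (in dB), then

$$\mathrm{SNR} = 10\log_{10}\frac{P_{\text{clean}}}{s^{2}\,P_{\text{noise}}} \quad\Longrightarrow\quad s = \sqrt{\frac{P_{\text{clean}}}{P_{\text{noise}}}}\cdot 10^{-\mathrm{SNR}/20},$$

which matches `10 ** (-snr / 20) * np.sqrt(clean_power) / np.sqrt(noise_power)`, with the `max(noise_power, 1e-10)` guard protecting against near-silent noise clips.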
UniAudio/UniAudio/egs/SE/path.sh ADDED
@@ -0,0 +1,9 @@
+ export LC_ALL=C
+ export PYTHONIOENCODING=UTF-8
+ export OMP_NUM_THREADS=1
+
+ # executable bins
+ export PATH=$PATH:utils:../../tools/data_scripts/
+
+ # python import root
+ export PYTHONPATH=${PYTHONPATH}:../../
UniAudio/UniAudio/egs/SE/readme.md ADDED
@@ -0,0 +1,5 @@
+ ## Recipe for SE
+ First, prepare your data pairs <noisy_audio, clean_audio>. You can refer to make_noisy.py to prepare your own data.
+
+ Once you have the clean and noisy data, record their waveform paths in clean.scp and noise.scp files, one entry per line: <br>
+ ```name wav_path```
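For example, matching clean.scp and noise.scp files might look like this (ids and paths are illustrative; the utterance ids in the two files must line up, since run.sh pairs them with filter_scp.py):

```
# clean.scp
utt0001 /data/SE/clean/utt0001.wav
utt0002 /data/SE/clean/utt0002.wav

# noise.scp
utt0001 /data/SE/noisy/utt0001.wav
utt0002 /data/SE/noisy/utt0002.wav
```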
UniAudio/UniAudio/egs/SE/run.sh ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # A demo recipe for SE
3
+ # SE: noise audio --> clean audio
4
+ . ./path.sh
5
+
6
+ pip3 install fairseq==0.12.2 einops==0.6.0 sentencepiece encodec
7
+
8
+ stage=1
9
+ stop_stage=100
10
+ ngpu=1 # how many GPUs, you want to use to train the model
11
+
12
+ train_set="train"
13
+ valid_set="val"
14
+ test_sets=""
15
+
16
+ # training config
17
+ seed=999
18
+ debug=false
19
+ batch_scale=8000 # the total number of tokens in one batch
20
+ learning_rate=0.005 # the learning rate
21
+ port=12345
22
+ train_opts=
23
+ inference_opts=
24
+ tag=
25
+ inference_tag=default
26
+ resume=
27
+ data_tag=
28
+ TASK='SE'
29
+
30
+ if [ ! -d "utils" ]; then
31
+ ln -s ../tools/kaldi/utils ./
32
+ fi
33
+ if [ ! -d "data_scripts" ]; then
34
+ ln -s ../tools/data_scripts ./
35
+ fi
36
+
37
+ . utils/parse_options.sh
38
+
39
+ if [ ! -z $resume ]; then
40
+ train_opts="--resume $resume"
41
+ inference_opts="--resume $resume"
42
+ fi
43
+
44
+ if [ $debug == true ]; then
45
+ export HOST_GPU_NUM=1
46
+ export HOST_NUM=1
47
+ export NODE_NUM=1
48
+ export INDEX=0
49
+ export CHIEF_IP="localhost"
50
+ train_opts="$train_opts"
51
+
52
+ else
53
+ export HOST_GPU_NUM=8
54
+ export HOST_NUM=1
55
+ export NODE_NUM=1
56
+ export INDEX=0
57
+ export CHIEF_IP="localhost"
58
+ train_opts="$train_opts"
59
+ fi
60
+
61
+ ### stage 1-3: data preparation ###
62
+
63
+ # Prepare data following Espnet and split
64
+ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
65
+ echo "Prepare SE dataset"
66
+ # this part aims to get the information about the dataset.
67
+ # prepare wav.scp and noise.scp
68
+ fi
69
+
70
+ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
71
+ echo "split the data for $ngpu GPUs"
72
+
73
+ for part in $test_sets $valid_set $train_set; do
74
+ mkdir -p data/${part}/${ngpu}splits
75
+ # extra shuf to ensure balance across GPUs
76
+ # So the generated data cannot be reproduced due to the shuffle randomness
77
+ cat data/${part}/wav.scp | shuf > data/${part}/wav.scp.shuf
78
+ split_scp=
79
+ for n in `seq 1 $ngpu`; do
80
+ split_scp="$split_scp data/${part}/${ngpu}splits/wav.${n}.scp"
81
+ done
82
+ utils/split_scp.pl data/${part}/wav.scp.shuf $split_scp
83
+
84
+
85
+ done
86
+ fi
87
+
88
+ # Plain TTS requires 2 data keys: phone_seq, audio_seq
89
+ # stage 2-3 process audio_seq and phone_seq respectively
90
+ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
91
+ echo "Prepare audio sequence"
92
+ for part in $valid_set $train_set; do
93
+ # for part in $valid_set; do
94
+ echo "prepare $part ... "
95
+
96
+ # split noise.scp based on clean.scp
97
+ utils/run.pl JOB=1:$ngpu data/${part}/${ngpu}splits/log/filter_noise.JOB.log \
98
+ python3 data_scripts/filter_scp.py \
99
+ data/${part}/${ngpu}splits/wav.JOB.scp data/${part}/noise.scp \
100
+ data/${part}/${ngpu}splits/noise.JOB.scp || exit 1;
101
+
102
+ # clean Audio
103
+ utils/run.pl JOB=1:$ngpu data/${part}/${ngpu}splits/log/audio_codec_dump.JOB.log \
104
+ python3 data_scripts/offline_tokenization.py \
105
+ --input-file data/${part}/${ngpu}splits/wav.JOB.scp \
106
+ --output-file data/${part}/${ngpu}splits/clean_codec.JOB.pt \
107
+ --tokenizer audio --rank JOB || exit 1;
108
+
109
+ # noise Audio
110
+ utils/run.pl JOB=1:$ngpu data/${part}/${ngpu}splits/log/noise_codec_dump.JOB.log \
111
+ python3 data_scripts/offline_tokenization.py \
112
+ --input-file data/${part}/${ngpu}splits/noise.JOB.scp \
113
+ --output-file data/${part}/${ngpu}splits/noise_codec.JOB.pt \
114
+ --tokenizer audio --rank JOB || exit 1;
115
+
116
+ done
117
+ fi
118
+
119
+ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
120
+ echo "create data json"
121
+ # json index from 0 but all data files index from 1
122
+ for part in $valid_set $train_set; do
123
+ for n in `seq 0 $[$ngpu-1]`; do
124
+ # SE
125
+ python3 data_scripts/create_data_json.py \
126
+ --task SE \
127
+ --out-json $PWD/data/${part}/${ngpu}splits/data_se.${n}.json \
128
+ --noise_seq $PWD/data/${part}/${ngpu}splits/noise_codec.$[$n+1].pt \
129
+ --audio_seq $PWD/data/${part}/${ngpu}splits/clean_codec.$[$n+1].pt \
130
+ &
131
+ done; wait
132
+
133
+ done
134
+ fi
135
+
136
+ ### Stage 5: Training ###
137
+ if [ -z "$data_tag" ] && [ $stop_stage -le 4 ]; then
138
+ echo "you should provide a data tag" && exit 1;
139
+ fi
140
+
141
+ train_data_jsons="data/${train_set}/${ngpu}splits/data_se.ALL.json"
142
+ valid_data_jsons="data/${valid_set}/${ngpu}splits/data_se.ALL.json"
143
+
144
+ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
145
+ mkdir -p exp
146
+ if [ -z "$tag" ]; then
147
+ echo "please provide a tag for this experiment" && exit 1;
148
+ fi
149
+ echo "stage 5: training..."
150
+ NCCL_DEBUG=TRACE torchrun \
151
+ --nproc_per_node ${HOST_GPU_NUM} --master_port $port \
152
+ --nnodes=${HOST_NUM} --node_rank=${INDEX} --master_addr=${CHIEF_IP} \
153
+ ../../train.py \
154
+ --exp_dir exp \
155
+ --seed $seed \
156
+ --cudnn_deterministic \
157
+ --train_data_jsons $train_data_jsons \
158
+ --valid_data_jsons $valid_data_jsons \
159
+ --batch_scale $batch_scale \
160
+ --learning_rate $learning_rate \
161
+ --non-acoustic-repeat 3 \
162
+ --audio-tokenizer "soundstream" \
163
+ --audio-prompt-tokenizer "audio_prompt" \
164
+ --phone-tokenizer "alignment" \
165
+ --semantic-tokenizer "hubert" \
166
+ --semantic-tokenizer-duplicate true \
167
+ --singPhoneTokenizer "sing_phone" \
168
+ --singMidiTokenizer "sing_midi" \
169
+ --FrozenT5Embedder "text_t5" \
170
+ --n_layer 24 \
171
+ --n_head 16 \
172
+ --n_embd 1536 \
173
+ $train_opts
174
+ fi
175
+
176
+ # SE inference
177
+ vc_test_sets="se_test"
178
+ inference_tag="se_inference"
179
+ inference_dir=exp/${tag}/inference_${inference_tag}
180
+ ngpu=1
181
+ if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
182
+ echo "stage 2: TTS inference ..."
183
+ mkdir -p ${inference_dir}
184
+ for part in $vc_test_sets; do
185
+ mkdir -p ${inference_dir}/${part}
186
+ echo "inference on set: ${part}"
187
+ data_json="data.JOB.json" # your val set .json file
188
+
189
+ utils/run.pl --max-jobs-run 8 JOB=0:$[${ngpu}-1] \
190
+ ${inference_dir}/${part}/inference.JOB.log \
191
+ python3 ../../infer.py \
192
+ --exp_dir exp/${tag} \
193
+ --rank JOB \
194
+ --inference_mode 'sampling' \
195
+ --n_samples 1 \
196
+ --seed 888 \
198
+ --data_json $data_json \
199
+ --generate_target audio \
200
+ --fixed_length False \
201
+ --maxlen_ratio 7 \
202
+ --minlen_ratio 0.5 \
203
+ --output_dir ${inference_dir}/${part}/JOB \
204
+ $inference_opts
205
+ done
206
+ fi
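For orientation, a sketch of the on-disk layout the data stages above produce; utterance ids and wav paths are illustrative, but the file names follow the script (e.g. ngpu=2, part=train):

    # data/train/wav.scp                lines of "utt-id /path/to/clean.wav"
    # data/train/noise.scp              lines of "utt-id /path/to/noisy.wav"
    # after stage 2 (shuffled split):
    #   data/train/2splits/wav.1.scp    data/train/2splits/wav.2.scp
    # after stage 3, per split n in 1..2:
    #   data/train/2splits/clean_codec.n.pt   noise_codec.n.pt
    # after stage 4 (json indices start at 0):
    #   data/train/2splits/data_se.0.json     data_se.1.json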
UniAudio/UniAudio/egs/SE/utils/add_disambig.pl ADDED
@@ -0,0 +1,58 @@
1
+ #!/usr/bin/env perl
2
+ # Copyright 2010-2011 Microsoft Corporation
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
11
+ # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
12
+ # WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
13
+ # MERCHANTABLITY OR NON-INFRINGEMENT.
14
+ # See the Apache 2 License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ # Adds some specified number of disambig symbols to a symbol table.
19
+ # Adds these as #1, #2, etc.
20
+ # If the --include-zero option is specified, includes an extra one
21
+ # #0.
22
+
23
+ $include_zero = 0;
24
+ if($ARGV[0] eq "--include-zero") {
25
+ $include_zero = 1;
26
+ shift @ARGV;
27
+ }
28
+
29
+ if(@ARGV != 2) {
30
+ die "Usage: add_disambig.pl [--include-zero] symtab.txt num_extra > symtab_out.txt ";
31
+ }
32
+
33
+
34
+ $input = $ARGV[0];
35
+ $nsyms = $ARGV[1];
36
+
37
+ open(F, "<$input") || die "Opening file $input";
38
+
39
+ while(<F>) {
40
+ @A = split(" ", $_);
41
+ @A == 2 || die "Bad line $_";
42
+ $lastsym = $A[1];
43
+ print;
44
+ }
45
+
46
+ if(!defined($lastsym)){
47
+ die "Empty symbol file?";
48
+ }
49
+
50
+ if($include_zero) {
51
+ $lastsym++;
52
+ print "#0 $lastsym\n";
53
+ }
54
+
55
+ for($n = 1; $n <= $nsyms; $n++) {
56
+ $y = $n + $lastsym;
57
+ print "#$n $y\n";
58
+ }
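A small worked example of add_disambig.pl (symbol names are illustrative): the last id in the table is 2, so --include-zero emits #0 with id 3 and the requested symbols follow:

    # words.txt:          # appended by the script:
    #   <eps> 0           #   #0 3
    #   hello 1           #   #1 4
    #   world 2           #   #2 5
    utils/add_disambig.pl --include-zero words.txt 2 > words_disambig.txt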
UniAudio/UniAudio/egs/SE/utils/add_lex_disambig.pl ADDED
@@ -0,0 +1,196 @@
1
+ #!/usr/bin/env perl
2
+ # Copyright 2010-2011 Microsoft Corporation
3
+ # 2013-2016 Johns Hopkins University (author: Daniel Povey)
4
+ # 2015 Hainan Xu
5
+ # 2015 Guoguo Chen
6
+
7
+ # Licensed under the Apache License, Version 2.0 (the "License");
8
+ # you may not use this file except in compliance with the License.
9
+ # You may obtain a copy of the License at
10
+ #
11
+ # http://www.apache.org/licenses/LICENSE-2.0
12
+ #
13
+ # THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
15
+ # WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
16
+ # MERCHANTABLITY OR NON-INFRINGEMENT.
17
+ # See the Apache 2 License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+
21
+ # Adds disambiguation symbols to a lexicon.
22
+ # Outputs still in the normal lexicon format.
23
+ # Disambig syms are numbered #1, #2, #3, etc. (#0
24
+ # reserved for symbol in grammar).
25
+ # Outputs the number of disambig syms to the standard output.
26
+ # With the --pron-probs option, expects the second field
27
+ # of each lexicon line to be a pron-prob.
28
+ # With the --sil-probs option, expects three additional
29
+ # fields after the pron-prob, representing various components
30
+ # of the silence probability model.
31
+
32
+ $pron_probs = 0;
33
+ $sil_probs = 0;
34
+ $first_allowed_disambig = 1;
35
+
36
+ for ($n = 1; $n <= 3 && @ARGV > 0; $n++) {
37
+ if ($ARGV[0] eq "--pron-probs") {
38
+ $pron_probs = 1;
39
+ shift @ARGV;
40
+ }
41
+ if ($ARGV[0] eq "--sil-probs") {
42
+ $sil_probs = 1;
43
+ shift @ARGV;
44
+ }
45
+ if ($ARGV[0] eq "--first-allowed-disambig") {
46
+ $first_allowed_disambig = 0 + $ARGV[1];
47
+ if ($first_allowed_disambig < 1) {
48
+ die "add_lex_disambig.pl: invalid --first-allowed-disambig option: $first_allowed_disambig\n";
49
+ }
50
+ shift @ARGV;
51
+ shift @ARGV;
52
+ }
53
+ }
54
+
55
+ if (@ARGV != 2) {
56
+ die "Usage: add_lex_disambig.pl [opts] <lexicon-in> <lexicon-out>\n" .
57
+ "This script adds disambiguation symbols to a lexicon in order to\n" .
58
+ "make decoding graphs determinizable; it adds pseudo-phone\n" .
59
+ "disambiguation symbols #1, #2 and so on at the ends of phones\n" .
60
+ "to ensure that all pronunciations are different, and that none\n" .
61
+ "is a prefix of another.\n" .
62
+ "It prints to the standard output the number of the largest-numbered" .
63
+ "disambiguation symbol that was used.\n" .
64
+ "\n" .
65
+ "Options: --pron-probs Expect pronunciation probabilities in the 2nd field\n" .
66
+ " --sil-probs [should be with --pron-probs option]\n" .
67
+ " Expect 3 extra fields after the pron-probs, for aspects of\n" .
68
+ " the silence probability model\n" .
69
+ " --first-allowed-disambig <n> The number of the first disambiguation symbol\n" .
70
+ " that this script is allowed to add. By default this is\n" .
71
+ " #1, but you can set this to a larger value using this option.\n" .
72
+ "e.g.:\n" .
73
+ " add_lex_disambig.pl lexicon.txt lexicon_disambig.txt\n" .
74
+ " add_lex_disambig.pl --pron-probs lexiconp.txt lexiconp_disambig.txt\n" .
75
+ " add_lex_disambig.pl --pron-probs --sil-probs lexiconp_silprob.txt lexiconp_silprob_disambig.txt\n";
76
+ }
77
+
78
+
79
+ $lexfn = shift @ARGV;
80
+ $lexoutfn = shift @ARGV;
81
+
82
+ open(L, "<$lexfn") || die "Error opening lexicon $lexfn";
83
+
84
+ # (1) Read in the lexicon.
85
+ @L = ( );
86
+ while(<L>) {
87
+ @A = split(" ", $_);
88
+ push @L, join(" ", @A);
89
+ }
90
+
91
+ # (2) Work out the count of each phone-sequence in the
92
+ # lexicon.
93
+
94
+ foreach $l (@L) {
95
+ @A = split(" ", $l);
96
+ shift @A; # Remove word.
97
+ if ($pron_probs) {
98
+ $p = shift @A;
99
+ if (!($p > 0.0 && $p <= 1.0)) { die "Bad lexicon line $l (expecting pron-prob as second field)"; }
100
+ }
101
+ if ($sil_probs) {
102
+ $silp = shift @A;
103
+ if (!($silp > 0.0 && $silp <= 1.0)) { die "Bad lexicon line $l for silprobs"; }
104
+ $correction = shift @A;
105
+ if ($correction <= 0.0) { die "Bad lexicon line $l for silprobs"; }
106
+ $correction = shift @A;
107
+ if ($correction <= 0.0) { die "Bad lexicon line $l for silprobs"; }
108
+ }
109
+ if (!(@A)) {
110
+ die "Bad lexicon line $1, no phone in phone list";
111
+ }
112
+ $count{join(" ",@A)}++;
113
+ }
114
+
115
+ # (3) For each left sub-sequence of each phone-sequence, note down
116
+ # that it exists (for identifying prefixes of longer strings).
117
+
118
+ foreach $l (@L) {
119
+ @A = split(" ", $l);
120
+ shift @A; # Remove word.
121
+ if ($pron_probs) { shift @A; } # remove pron-prob.
122
+ if ($sil_probs) {
123
+ shift @A; # Remove silprob
124
+ shift @A; # Remove silprob
125
+ shift @A; # Remove silprob; there are three numbers for sil_probs
126
+ }
127
+ while(@A > 0) {
128
+ pop @A; # Remove last phone
129
+ $issubseq{join(" ",@A)} = 1;
130
+ }
131
+ }
132
+
133
+ # (4) For each entry in the lexicon:
134
+ # if the phone sequence is unique and is not a
135
+ # prefix of another word, no disambig symbol.
136
+ # Else output #1, or #2, #3, ... if the same phone-seq
137
+ # has already been assigned a disambig symbol.
138
+
139
+
140
+ open(O, ">$lexoutfn") || die "Opening lexicon file $lexoutfn for writing.\n";
141
+
142
+ # max_disambig will always be the highest-numbered disambiguation symbol that
143
+ # has been used so far.
144
+ $max_disambig = $first_allowed_disambig - 1;
145
+
146
+ foreach $l (@L) {
147
+ @A = split(" ", $l);
148
+ $word = shift @A;
149
+ if ($pron_probs) {
150
+ $pron_prob = shift @A;
151
+ }
152
+ if ($sil_probs) {
153
+ $sil_word_prob = shift @A;
154
+ $word_sil_correction = shift @A;
155
+ $prev_nonsil_correction = shift @A
156
+ }
157
+ $phnseq = join(" ", @A);
158
+ if (!defined $issubseq{$phnseq}
159
+ && $count{$phnseq} == 1) {
160
+ ; # Do nothing.
161
+ } else {
162
+ if ($phnseq eq "") { # need disambig symbols for the empty string
163
+ # that are not used anywhere else.
164
+ $max_disambig++;
165
+ $reserved_for_the_empty_string{$max_disambig} = 1;
166
+ $phnseq = "#$max_disambig";
167
+ } else {
168
+ $cur_disambig = $last_used_disambig_symbol_of{$phnseq};
169
+ if (!defined $cur_disambig) {
170
+ $cur_disambig = $first_allowed_disambig;
171
+ } else {
172
+ $cur_disambig++; # Get a number that has not been used yet for
173
+ # this phone sequence.
174
+ }
175
+ while (defined $reserved_for_the_empty_string{$cur_disambig}) {
176
+ $cur_disambig++;
177
+ }
178
+ if ($cur_disambig > $max_disambig) {
179
+ $max_disambig = $cur_disambig;
180
+ }
181
+ $last_used_disambig_symbol_of{$phnseq} = $cur_disambig;
182
+ $phnseq = $phnseq . " #" . $cur_disambig;
183
+ }
184
+ }
185
+ if ($pron_probs) {
186
+ if ($sil_probs) {
187
+ print O "$word\t$pron_prob\t$sil_word_prob\t$word_sil_correction\t$prev_nonsil_correction\t$phnseq\n";
188
+ } else {
189
+ print O "$word\t$pron_prob\t$phnseq\n";
190
+ }
191
+ } else {
192
+ print O "$word\t$phnseq\n";
193
+ }
194
+ }
195
+
196
+ print $max_disambig . "\n";
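A toy run of add_lex_disambig.pl (hypothetical lexicon) showing both triggers: identical pronunciations receive distinct symbols, and a pronunciation that is a prefix of another receives one as well; the largest symbol used (here 2) goes to stdout:

    # lexicon.txt:         # lexicon_disambig.txt (word and phones tab-separated):
    #   ate ey t           #   ate    ey t #1
    #   eight ey t         #   eight  ey t #2
    #   a ey               #   a      ey #1
    utils/add_lex_disambig.pl lexicon.txt lexicon_disambig.txt    # prints: 2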
UniAudio/UniAudio/egs/SE/utils/analyze_segments.pl ADDED
@@ -0,0 +1,43 @@
1
+ #!/usr/bin/perl
2
+ # Copyright 2015 GoVivace Inc. (Author: Nagendra Kumar Goel)
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
11
+ # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
12
+ # WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
13
+ # MERCHANTABLITY OR NON-INFRINGEMENT.
14
+ # See the Apache 2 License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Analyze a segments file and print important stats on it.
18
+
19
+ $dur = $total = 0;
20
+ $maxDur = 0;
21
+ $minDur = 9999999999;
22
+ $n = 0;
23
+ while(<>){
24
+ chomp;
25
+ @t = split(/\s+/);
26
+ $dur = $t[3] - $t[2];
27
+ $total += $dur;
28
+ if ($dur > $maxDur) {
29
+ $maxSegId = $t[0];
30
+ $maxDur = $dur;
31
+ }
32
+ if ($dur < $minDur) {
33
+ $minSegId = $t[0];
34
+ $minDur = $dur;
35
+ }
36
+ $n++;
37
+ }
38
+ $avg=$total/$n;
39
+ $hrs = $total/3600;
40
+ print "Total $hrs hours of data\n";
41
+ print "Average segment length $avg seconds\n";
42
+ print "Segment $maxSegId has length of $maxDur seconds\n";
43
+ print "Segment $minSegId has length of $minDur seconds\n";
UniAudio/UniAudio/egs/SE/utils/apply_map.pl ADDED
@@ -0,0 +1,97 @@
1
+ #!/usr/bin/env perl
2
+ use warnings; #sed replacement for -w perl parameter
3
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey)
4
+ # Apache 2.0.
5
+
6
+ # This program is a bit like ./sym2int.pl in that it applies a map
7
+ # to things in a file, but it's a bit more general in that it doesn't
8
+ # assume the things being mapped to are single tokens, they could
9
+ # be sequences of tokens. See the usage message.
10
+
11
+
12
+ $permissive = 0;
13
+
14
+ for ($x = 0; $x <= 2; $x++) {
15
+
16
+ if (@ARGV > 0 && $ARGV[0] eq "-f") {
17
+ shift @ARGV;
18
+ $field_spec = shift @ARGV;
19
+ if ($field_spec =~ m/^\d+$/) {
20
+ $field_begin = $field_spec - 1; $field_end = $field_spec - 1;
21
+ }
22
+ if ($field_spec =~ m/^(\d*)[-:](\d*)/) { # accept e.g. 1:10 as a courtesy (properly, 1-10)
23
+ if ($1 ne "") {
24
+ $field_begin = $1 - 1; # Change to zero-based indexing.
25
+ }
26
+ if ($2 ne "") {
27
+ $field_end = $2 - 1; # Change to zero-based indexing.
28
+ }
29
+ }
30
+ if (!defined $field_begin && !defined $field_end) {
31
+ die "Bad argument to -f option: $field_spec";
32
+ }
33
+ }
34
+
35
+ if (@ARGV > 0 && $ARGV[0] eq '--permissive') {
36
+ shift @ARGV;
37
+ # Mapping is optional (missing key is printed to output)
38
+ $permissive = 1;
39
+ }
40
+ }
41
+
42
+ if(@ARGV != 1) {
43
+ print STDERR "Invalid usage: " . join(" ", @ARGV) . "\n";
44
+ print STDERR <<'EOF';
45
+ Usage: apply_map.pl [options] map <input >output
46
+ options: [-f <field-range> ] [--permissive]
47
+ This applies a map to some specified fields of some input text:
48
+ For each line in the map file: the first field is the thing we
49
+ map from, and the remaining fields are the sequence we map it to.
50
+ The -f (field-range) option says which fields of the input file the
51
+ map should apply to.
52
+ If the --permissive option is supplied, fields which are not present
53
+ in the map will be left as they were.
54
+ Applies the map 'map' to all input text, where each line of the map
55
+ is interpreted as a map from the first field to the list of the other fields
56
+ Note: <field-range> can look like 4-5, or 4-, or 5-, or 1, it means the field
57
+ range in the input to apply the map to.
58
+ e.g.: echo A B | apply_map.pl a.txt
59
+ where a.txt is:
60
+ A a1 a2
61
+ B b
62
+ will produce:
63
+ a1 a2 b
64
+ EOF
65
+ exit(1);
66
+ }
67
+
68
+ ($map_file) = @ARGV;
69
+ open(M, "<$map_file") || die "Error opening map file $map_file: $!";
70
+
71
+ while (<M>) {
72
+ @A = split(" ", $_);
73
+ @A >= 1 || die "apply_map.pl: empty line.";
74
+ $i = shift @A;
75
+ $o = join(" ", @A);
76
+ $map{$i} = $o;
77
+ }
78
+
79
+ while(<STDIN>) {
80
+ @A = split(" ", $_);
81
+ for ($x = 0; $x < @A; $x++) {
82
+ if ( (!defined $field_begin || $x >= $field_begin)
83
+ && (!defined $field_end || $x <= $field_end)) {
84
+ $a = $A[$x];
85
+ if (!defined $map{$a}) {
86
+ if (!$permissive) {
87
+ die "apply_map.pl: undefined key $a in $map_file\n";
88
+ } else {
89
+ print STDERR "apply_map.pl: warning! missing key $a in $map_file\n";
90
+ }
91
+ } else {
92
+ $A[$x] = $map{$a};
93
+ }
94
+ }
95
+ }
96
+ print join(" ", @A) . "\n";
97
+ }
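Beyond the example in the usage message, the -f option restricts the mapping to a field range, which keeps key columns intact; a sketch with an illustrative map file:

    # map.txt contains:   A a1 a2
    #                     B b
    echo "utt1 A B" | utils/apply_map.pl -f 2- map.txt
    # -> utt1 a1 a2 b     (field 1, the key, is never looked up)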
UniAudio/UniAudio/egs/SE/utils/best_wer.sh ADDED
@@ -0,0 +1,32 @@
1
+ #!/usr/bin/env bash
2
+ #
3
+ # Copyright 2010-2011 Microsoft Corporation
4
+
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
12
+ # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
13
+ # WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
14
+ # MERCHANTABLITY OR NON-INFRINGEMENT.
15
+ # See the Apache 2 License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ # To be run from one directory above this script.
19
+
20
+ perl -e 'while(<>){
21
+ s/\|(\d)/\| $1/g; s/(\d)\|/$1 \|/g;
22
+ if (m/[WS]ER (\S+)/ && (!defined $bestwer || $bestwer > $1)){ $bestwer = $1; $bestline=$_; } # kaldi "compute-wer" tool.
23
+ elsif (m: (Mean|Sum/Avg|)\s*\|\s*\S+\s+\S+\s+\|\s+\S+\s+\S+\s+\S+\s+\S+\s+(\S+)\s+\S+\s+\|:
24
+ && (!defined $bestwer || $bestwer > $2)){ $bestwer = $2; $bestline=$_; } } # sclite.
25
+ if (defined $bestline){ print $bestline; } ' | \
26
+ awk 'BEGIN{ FS="%WER"; } { if(NF == 2) { print FS$2" "$1; } else { print $0; }}' | \
27
+ awk 'BEGIN{ FS="Sum/Avg"; } { if(NF == 2) { print $2" "$1; } else { print $0; }}' | \
28
+ awk '{ if($1!~/%WER/) { print "%WER "$9" "$0; } else { print $0; }}' | \
29
+ sed -e 's|\s\s*| |g' -e 's|\:$||' -e 's|\:\s*\|\s*$||'
30
+
31
+
32
+
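The usual pattern is to grep the per-LM-weight scoring outputs of a decode directory and pipe them through this script to pick the best operating point; paths and the printed line are illustrative:

    grep WER exp/tri4/decode_dev/wer_* | utils/best_wer.sh
    # %WER 15.3 ... exp/tri4/decode_dev/wer_13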
UniAudio/UniAudio/egs/SE/utils/build_const_arpa_lm.sh ADDED
@@ -0,0 +1,53 @@
1
+ #!/usr/bin/env bash
2
+
3
+ # Copyright 2014 Guoguo Chen
4
+ # Apache 2.0
5
+
6
+ # This script reads in an Arpa format language model, and converts it into the
7
+ # ConstArpaLm format language model.
8
+
9
+ # begin configuration section
10
+ # end configuration section
11
+
12
+ [ -f path.sh ] && . ./path.sh;
13
+
14
+ . utils/parse_options.sh
15
+
16
+ if [ $# != 3 ]; then
17
+ echo "Usage: "
18
+ echo " $0 [options] <arpa-lm-path> <old-lang-dir> <new-lang-dir>"
19
+ echo "e.g.:"
20
+ echo " $0 data/local/lm/3-gram.full.arpa.gz data/lang/ data/lang_test_tgmed"
21
+ echo "Options"
22
+ exit 1;
23
+ fi
24
+
25
+ export LC_ALL=C
26
+
27
+ arpa_lm=$1
28
+ old_lang=$2
29
+ new_lang=$3
30
+
31
+ mkdir -p $new_lang
32
+
34
+ cp -r $old_lang/* $new_lang
35
+
36
+ unk=`cat $old_lang/oov.int`
37
+ bos=`grep "^<s>\s" $old_lang/words.txt | awk '{print $2}'`
38
+ eos=`grep "^</s>\s" $old_lang/words.txt | awk '{print $2}'`
39
+ if [[ -z $bos || -z $eos ]]; then
40
+ echo "$0: <s> and </s> symbols are not in $old_lang/words.txt"
41
+ exit 1
42
+ fi
43
+ if [[ -z $unk ]]; then
44
+ echo "$0: can't find oov symbol id in $old_lang/oov.int"
45
+ exit 1
46
+ fi
47
+
48
+
49
+ arpa-to-const-arpa --bos-symbol=$bos \
50
+ --eos-symbol=$eos --unk-symbol=$unk \
51
+ "gunzip -c $arpa_lm | utils/map_arpa_lm.pl $new_lang/words.txt|" $new_lang/G.carpa || exit 1;
52
+
53
+ exit 0;
UniAudio/UniAudio/egs/SE/utils/build_kenlm_model_from_arpa.sh ADDED
@@ -0,0 +1,44 @@
1
+ #!/usr/bin/env bash
2
+ # 2020 author Jiayu DU
3
+ # Apache 2.0
4
+
5
+ # This script reads in an Arpa format language model, and converts it into the
6
+ # KenLM format language model.
7
+
8
+ [ -f path.sh ] && . ./path.sh;
9
+
10
+ # begin configuration section
11
+ kenlm_opts="" # e.g. "-q 8 -b 8" for 8bits quantization
12
+ model_type="trie" # "trie" or "probing". trie is smaller, probing is faster.
13
+ # end configuration section
14
+
15
+ . utils/parse_options.sh
16
+
17
+ if [ $# != 2 ]; then
18
+ echo "Usage: "
19
+ echo " $0 [options] <arpa-lm-path> <kenlm-path>"
20
+ echo "e.g.:"
21
+ echo " $0 data/local/lm/4gram.arpa data/lang_test/G.trie"
22
+ echo "Options:"
23
+ echo " --model-type can be either \"trie\" or \"probing\""
24
+ echo " --kenlm-opts directly pass through to kenlm"
25
+ echo " e.g. for 8bits quantization, feed \"-q 8 -b 8\""
26
+ exit 1;
27
+ fi
28
+
29
+ export LC_ALL=C
30
+
31
+ arpa_lm=$1
32
+ kenlm=$2
33
+
34
+ if ! which build_binary >& /dev/null ; then
35
+ echo "$0: cannot find KenLM's build_binary tool,"
36
+ echo "check kenlm installation (tools/extras/install_kenlm_query_only.sh)."
37
+ exit 1
38
+ fi
39
+
40
+ mkdir -p $(dirname $kenlm)
41
+ build_binary $kenlm_opts $model_type $arpa_lm $kenlm
42
+
43
+ echo "$0: Successfully built arpa into kenlm format: $kenlm"
44
+ exit 0
UniAudio/UniAudio/egs/SE/utils/combine_data.sh ADDED
@@ -0,0 +1,146 @@
1
+ #!/usr/bin/env bash
2
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
3
+ # 2014 David Snyder
4
+
5
+ # This script combines the data from multiple source directories into
6
+ # a single destination directory.
7
+
8
+ # See http://kaldi-asr.org/doc/data_prep.html#data_prep_data for information
9
+ # about what these directories contain.
10
+
11
+ # Begin configuration section.
12
+ extra_files= # specify additional files in 'src-data-dir' to merge, ex. "file1 file2 ..."
13
+ skip_fix=false # skip the fix_data_dir.sh in the end
14
+ # End configuration section.
15
+
16
+ echo "$0 $@" # Print the command line for logging
17
+
18
+ if [ -f path.sh ]; then . ./path.sh; fi
19
+ . parse_options.sh || exit 1;
20
+
21
+ if [ $# -lt 2 ]; then
22
+ echo "Usage: combine_data.sh [--extra-files 'file1 file2'] <dest-data-dir> <src-data-dir1> <src-data-dir2> ..."
23
+ echo "Note, files that don't appear in all source dirs will not be combined,"
24
+ echo "with the exception of utt2uniq and segments, which are created where necessary."
25
+ exit 1
26
+ fi
27
+
28
+ dest=$1;
29
+ shift;
30
+
31
+ first_src=$1;
32
+
33
+ rm -r $dest 2>/dev/null || true
34
+ mkdir -p $dest;
35
+
36
+ export LC_ALL=C
37
+
38
+ for dir in $*; do
39
+ if [ ! -f $dir/utt2spk ]; then
40
+ echo "$0: no such file $dir/utt2spk"
41
+ exit 1;
42
+ fi
43
+ done
44
+
45
+ # Check that frame_shift are compatible, where present together with features.
46
+ dir_with_frame_shift=
47
+ for dir in $*; do
48
+ if [[ -f $dir/feats.scp && -f $dir/frame_shift ]]; then
49
+ if [[ $dir_with_frame_shift ]] &&
50
+ ! cmp -s $dir_with_frame_shift/frame_shift $dir/frame_shift; then
51
+ echo "$0:error: different frame_shift in directories $dir and " \
52
+ "$dir_with_frame_shift. Cannot combine features."
53
+ exit 1;
54
+ fi
55
+ dir_with_frame_shift=$dir
56
+ fi
57
+ done
58
+
59
+ # W.r.t. utt2uniq file the script has different behavior compared to other files
60
+ # it is not compulsory for it to exist in src directories, but if it exists in
61
+ # even one it should exist in all. We will create the files where necessary
62
+ has_utt2uniq=false
63
+ for in_dir in $*; do
64
+ if [ -f $in_dir/utt2uniq ]; then
65
+ has_utt2uniq=true
66
+ break
67
+ fi
68
+ done
69
+
70
+ if $has_utt2uniq; then
71
+ # we are going to create an utt2uniq file in the destdir
72
+ for in_dir in $*; do
73
+ if [ ! -f $in_dir/utt2uniq ]; then
74
+ # we assume that utt2uniq is a one to one mapping
75
+ cat $in_dir/utt2spk | awk '{printf("%s %s\n", $1, $1);}'
76
+ else
77
+ cat $in_dir/utt2uniq
78
+ fi
79
+ done | sort -k1 > $dest/utt2uniq
80
+ echo "$0: combined utt2uniq"
81
+ else
82
+ echo "$0 [info]: not combining utt2uniq as it does not exist"
83
+ fi
84
+ # some of the old scripts might provide utt2uniq as an extrafile, so just remove it
85
+ extra_files=$(echo "$extra_files"|sed -e "s/utt2uniq//g")
86
+
87
+ # segments are treated similarly to utt2uniq. If it exists in some, but not all
88
+ # src directories, then we generate segments where necessary.
89
+ has_segments=false
90
+ for in_dir in $*; do
91
+ if [ -f $in_dir/segments ]; then
92
+ has_segments=true
93
+ break
94
+ fi
95
+ done
96
+
97
+ if $has_segments; then
98
+ for in_dir in $*; do
99
+ if [ ! -f $in_dir/segments ]; then
100
+ echo "$0 [info]: will generate missing segments for $in_dir" 1>&2
101
+ utils/data/get_segments_for_data.sh $in_dir
102
+ else
103
+ cat $in_dir/segments
104
+ fi
105
+ done | sort -k1 > $dest/segments
106
+ echo "$0: combined segments"
107
+ else
108
+ echo "$0 [info]: not combining segments as it does not exist"
109
+ fi
110
+
111
+ for file in utt2spk utt2lang utt2dur utt2num_frames reco2dur feats.scp text cmvn.scp vad.scp reco2file_and_channel wav.scp spk2gender $extra_files; do
112
+ exists_somewhere=false
113
+ absent_somewhere=false
114
+ for d in $*; do
115
+ if [ -f $d/$file ]; then
116
+ exists_somewhere=true
117
+ else
118
+ absent_somewhere=true
119
+ fi
120
+ done
121
+
122
+ if ! $absent_somewhere; then
123
+ set -o pipefail
124
+ ( for f in $*; do cat $f/$file; done ) | sort -k1 > $dest/$file || exit 1;
125
+ set +o pipefail
126
+ echo "$0: combined $file"
127
+ else
128
+ if ! $exists_somewhere; then
129
+ echo "$0 [info]: not combining $file as it does not exist"
130
+ else
131
+ echo "$0 [info]: **not combining $file as it does not exist everywhere**"
132
+ fi
133
+ fi
134
+ done
135
+
136
+ utils/utt2spk_to_spk2utt.pl <$dest/utt2spk >$dest/spk2utt
137
+
138
+ if [[ $dir_with_frame_shift ]]; then
139
+ cp $dir_with_frame_shift/frame_shift $dest
140
+ fi
141
+
142
+ if ! $skip_fix ; then
143
+ utils/fix_data_dir.sh $dest || exit 1;
144
+ fi
145
+
146
+ exit 0
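A sketch of combining two prepared sets (directory names are illustrative); files present in every source directory are concatenated and re-sorted, and fix_data_dir.sh runs at the end unless --skip-fix true is given:

    utils/combine_data.sh data/train_all data/train_clean data/train_other
    # merging an extra per-utterance file (hypothetical utt2snr) shared by all sources:
    utils/combine_data.sh --extra-files "utt2snr" data/train_all data/train_clean data/train_other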
UniAudio/UniAudio/egs/SE/utils/convert_ctm.pl ADDED
@@ -0,0 +1,96 @@
1
+ #!/usr/bin/env perl
2
+
3
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
4
+
5
+ # This takes as standard input a ctm file that's "relative to the utterance",
6
+ # i.e. times are measured relative to the beginning of the segments, and it
7
+ # uses a "segments" file (format:
8
+ # utterance-id recording-id start-time end-time
9
+ # ) and a "reco2file_and_channel" file (format:
10
+ # recording-id basename-of-file
11
+
12
+ $skip_unknown=undef;
13
+ if ( $ARGV[0] eq "--skip-unknown" ) {
14
+ $skip_unknown=1;
15
+ shift @ARGV;
16
+ }
17
+
18
+ if (@ARGV < 2 || @ARGV > 3) {
19
+ print STDERR "Usage: convert_ctm.pl <segments-file> <reco2file_and_channel-file> [<utterance-ctm>] > real-ctm\n";
20
+ exit(1);
21
+ }
22
+
23
+ $segments = shift @ARGV;
24
+ $reco2file_and_channel = shift @ARGV;
25
+
26
+ open(S, "<$segments") || die "opening segments file $segments";
27
+ while(<S>) {
28
+ @A = split(" ", $_);
29
+ @A == 4 || die "Bad line in segments file: $_";
30
+ ($utt, $recording_id, $begin_time, $end_time) = @A;
31
+ $utt2reco{$utt} = $recording_id;
32
+ $begin{$utt} = $begin_time;
33
+ $end{$utt} = $end_time;
34
+ }
35
+ close(S);
36
+ open(R, "<$reco2file_and_channel") || die "open reco2file_and_channel file $reco2file_and_channel";
37
+ while(<R>) {
38
+ @A = split(" ", $_);
39
+ @A == 3 || die "Bad line in reco2file_and_channel file: $_";
40
+ ($recording_id, $file, $channel) = @A;
41
+ $reco2file{$recording_id} = $file;
42
+ $reco2channel{$recording_id} = $channel;
43
+ }
44
+
45
+
46
+ # Now process the ctm file, which is either the standard input or the third
47
+ # command-line argument.
48
+ $num_done = 0;
49
+ while(<>) {
50
+ @A= split(" ", $_);
51
+ ( @A == 5 || @A == 6 ) || die "Unexpected ctm format: $_";
52
+ # lines look like:
53
+ # <utterance-id> 1 <begin-time> <length> <word> [ confidence ]
54
+ ($utt, $one, $wbegin, $wlen, $w, $conf) = @A;
55
+ $reco = $utt2reco{$utt};
56
+ if (!defined $reco) {
57
+ next if defined $skip_unknown;
58
+ die "Utterance-id $utt not defined in segments file $segments";
59
+ }
60
+ $file = $reco2file{$reco};
61
+ $channel = $reco2channel{$reco};
62
+ if (!defined $file || !defined $channel) {
63
+ die "Recording-id $reco not defined in reco2file_and_channel file $reco2file_and_channel";
64
+ }
65
+ $b = $begin{$utt};
66
+ $e = $end{$utt};
67
+ $wbegin_r = $wbegin + $b; # Make it relative to beginning of the recording.
68
+ $wbegin_r = sprintf("%.2f", $wbegin_r);
69
+ $wlen = sprintf("%.2f", $wlen);
70
+ if (defined $conf) {
71
+ $line = "$file $channel $wbegin_r $wlen $w $conf\n";
72
+ } else {
73
+ $line = "$file $channel $wbegin_r $wlen $w\n";
74
+ }
75
+ if ($wbegin_r + $wlen > $e + 0.01) {
76
+ print STDERR "Warning: word appears to be past end of recording; line is $line";
77
+ }
78
+ print $line; # goes to stdout.
79
+ $num_done++;
80
+ }
81
+
82
+ if ($num_done == 0) { exit 1; } else { exit 0; }
83
+
84
+ __END__
85
+
86
+ # Test example [also test it without the 0.5's]
87
+ echo utt reco 10.0 20.0 > segments
88
+ echo reco file A > reco2file_and_channel
89
+ echo utt 1 8.0 1.0 word 0.5 > ctm_in
90
+ echo file A 18.00 1.00 word 0.5 > ctm_out
91
+ utils/convert_ctm.pl segments reco2file_and_channel ctm_in | cmp - ctm_out || echo error
92
+ rm segments reco2file_and_channel ctm_in ctm_out
93
+
94
+
95
+
96
+
UniAudio/UniAudio/egs/SE/utils/convert_slf.pl ADDED
@@ -0,0 +1,302 @@
1
+ #!/usr/bin/env perl
2
+
3
+ # Copyright 2014 Brno University of Technology (author Karel Vesely)
4
+ # Copyright 2013 Korbinian Riedhammer
5
+
6
+ # Convert a kaldi-lattice to HTK SLF format; if given an output
7
+ # directory, each lattice will be put in an individual gzipped file.
8
+
9
+ # Internal representation of nodes, links:
10
+ # node hash:
11
+ # { W=>[word], t=>[time], n_out_arcs=>[number_of_outgoing_arcs] };
12
+ # (Time internally represented as integer number of frames.)
13
+ # link hash:
14
+ # { S=>[start_node], E=>[end_node], W=>[word], v=>[0], a=>[acoustic_score], l=>[graph_score] }
15
+ #
16
+ # The HTK output supports:
17
+ # - words on links [default],
18
+ # - simpler, same as in kaldi lattices, node-ids in output correspond to kaldi lattices
19
+ # - words on nodes,
20
+ # - apart from original nodes, there are extra nodes containing the words.
21
+ # - each original ark is replaced by word-node and two links, connecting it with original nodes.
22
+
23
+
24
+ use utf8;
25
+ use List::Util qw(max);
26
+
27
+ binmode(STDIN, ":encoding(utf8)");
28
+ binmode(STDOUT, ":encoding(utf8)");
29
+
30
+ # defaults
31
+ $framerate=0.01;
32
+ $wordtonode=0;
33
+
34
+ $usage="Convert kaldi lattices to HTK SLF (v1.1) format.\n".
35
+ "Usage: convert_slf.pl [options] lat-file.txt [out-dir]\n".
36
+ " e.g. lattice-align-words lang/phones/word_boundary.int final.mdl 'ark:gunzip -c lat.gz |' ark,t:- | utils/int2sym.pl -f 3 lang/words.txt | $0 - slf/\n".
37
+ "\n".
38
+ "Options regarding the SLF output:\n".
39
+ " --frame-rate x Frame rate to compute timing information (default: $framerate)\n".
40
+ " --word-to-node Print the word symbols on nodes (adds extra nodes+links; default: words at links)\n".
41
+ "\n";
42
+
43
+ # parse options
44
+ while (@ARGV gt 0 and $ARGV[0] =~ m/^--/) {
45
+ $param = shift @ARGV;
46
+ if ($param eq "--frame-rate") { $framerate = shift @ARGV; }
47
+ elsif ($param eq "--word-to-node") { $wordtonode = 1;}
48
+ else {
49
+ print STDERR "Unknown option $param\n";
50
+ print STDERR;
51
+ print STDERR $usage;
52
+ exit 1;
53
+ }
54
+ }
55
+
56
+ # check positional arg count
57
+ if (@ARGV < 1 || @ARGV > 2) {
58
+ print STDERR $usage;
59
+ exit 1;
60
+ }
61
+
62
+ # store gzipped lattices individually to outdir:
63
+ $outdir = "";
64
+ if (@ARGV == 2) {
65
+ $outdir = pop @ARGV;
66
+ unless (-d $outdir) { system("mkdir -p $outdir"); }
67
+ unless (-d $outdir) {
68
+ print STDERR "Could not create directory $outdir\n";
69
+ exit 1;
70
+ }
71
+ }
72
+ # or we'll print lattices to stdout:
73
+ if ($outdir eq "") {
74
+ open(FH, ">-") or die "Could not write to stdout (???)\n";
75
+ }
76
+
77
+
78
+ ### parse kaldi lattices:
79
+
80
+ $utt = "";
81
+ $arc = 0;
82
+ $latest_time = 0.0;
83
+ @links = ();
84
+ %nodes = ();
85
+ %nodes_extra = ();
86
+ %accepting_states = ();
87
+
88
+ open (FI, $ARGV[0]) or die "Could not read from file\n";
89
+ binmode(FI, ":encoding(utf8)");
90
+
91
+ while(<FI>) {
92
+ chomp;
93
+
94
+ @A = split /\s+/;
95
+
96
+ if (@A == 1 and $utt eq "") {
97
+ # new lattice
98
+ $utt = $A[0];
99
+ $nodes{0} = { W=>"!NULL", t=>0.0, n_out_arcs=>0 }; #initial node
100
+
101
+ } elsif (@A == 1) {
102
+ # accepting node without FST weight, store data for link to terminal super-state
103
+ $accepting_states{$A[0]} = { W=>"!NULL", v=>0, a=>0, l=>0 };
104
+
105
+ } elsif (@A == 2) {
106
+ # accepting state with FST weight on it, again store data for the link
107
+ ($s, $info) = @A;
108
+ ($gs, $as, $ss) = split(/,/, $info);
109
+
110
+ # kaldi saves -log, but HTK does it the other way round
111
+ $gs *= -1;
112
+ $as *= -1;
113
+
114
+ # the state sequence is something like 1_2_4_56_45, get number of tokens after splitting by '_':
115
+ $ss = scalar split(/_/, $ss);
116
+
117
+ # update the end time
118
+ die "Node $s not yet visited, is lattice sorted topologically? $utt" unless exists $nodes{$s}{t};
119
+ $time_end = $nodes{$s}{t} + $ss;
120
+ if ($latest_time < $time_end) { $latest_time = $time_end; }
121
+
122
+ # add the link data
123
+ $accepting_states{$A[0]} = { W=>"!NULL", v=>0, a=>$as, l=>$gs };
124
+
125
+ } elsif (@A == 4 or @A == 3) {
126
+ # FSA arc
127
+ ($s, $e, $w, $info) = @A;
128
+ if ($info ne "") {
129
+ ($gs, $as, $ss) = split(/,/, $info);
130
+ } else {
131
+ $gs = 0; $as = 0; $ss = "";
132
+ }
133
+
134
+ # rename epsilons to null
135
+ $w = "!NULL" if $w eq "<eps>";
136
+
137
+ # kaldi saves -log, but HTK does it the other way round
138
+ $gs *= -1;
139
+ $as *= -1;
140
+
141
+ # the state sequence is something like 1_2_4_56_45, get number of tokens after splitting by '_':
142
+ $ss = scalar split(/_/, $ss);
143
+
144
+ # keep track of the number of outgoing arcs for each node
145
+ # (later, we will connect sinks to the terminal state)
146
+ $nodes{$s}{n_out_arcs} += 1;
147
+
148
+ # keep track of timing
149
+ die "Node $s not yet visited, is lattice sorted topologically? $utt" unless exists $nodes{$s};
150
+ $time_end = $nodes{$s}{t} + $ss;
151
+ if ($latest_time < $time_end) { $latest_time = $time_end; }
152
+
153
+ # sanity check on already existing node
154
+ if (exists $nodes{$e}) {
155
+ die "Node $e previously stored with different time ".$nodes{$e}{t}." now $time_end, $utt.\n"
156
+ if $time_end ne $nodes{$e}{t};
157
+ }
158
+
159
+ # store internal representation of the arc
160
+ if (not $wordtonode) {
161
+ # The words on links, the lattice keeps it's original structure,
162
+ # add node; do not overwrite
163
+ $nodes{$e} = { t=>$time_end, n_out_arcs=>0 } unless defined $nodes{$e};
164
+ # add the link data
165
+ push @links, { S=>$s, E=>$e, W=>$w, v=>0, a=>$as, l=>$gs };
166
+
167
+ } else {
168
+ # The problem here was that, if we have a node with several incoming links,
169
+ # the links can have different words on it, so we cannot simply put word from
170
+ # link into the node.
171
+ #
172
+ # The simple solution is:
173
+ # each FST arc gets replaced by extra node with word and two links,
174
+ # connecting it with original nodes.
175
+ #
176
+ # The lattice gets larger, and it is good to minimize the lattice during importing.
177
+ #
178
+ # During reading the FST, we don't know how many nodes there are in total,
179
+ # so the extra nodes are stored separately, indexed by arc number,
180
+ # and links have flags describing which type of node are they connected to.
181
+
182
+ # add 'extra node' containing the word:
183
+ $nodes_extra{$arc} = { W=>$w, t=>$time_end };
184
+ # add 'original node'; do not overwrite
185
+ $nodes{$e} = { W=>"!NULL", t=>$time_end, n_out_arcs=>0 } unless defined $nodes{$e};
186
+
187
+ # add the link from 'original node' to 'extra node'
188
+ push @links, { S=>$s, E=>$arc, W=>$w, v=>0, a=>$as, l=>$gs, to_extra_node=>1 };
189
+ # add the link from 'extra node' to 'original node'
190
+ push @links, { S=>$arc, E=>$e, W=>$w, v=>0, a=>0, l=>0, from_extra_node=>1 };
191
+
192
+ # increase arc counter
193
+ $arc++;
194
+ }
195
+
196
+ } elsif (@A == 0) { # end of lattice reading, we'll add terminal super-state, and print it soon...
197
+ # find sinks
198
+ %sinks = ();
199
+ for $n (keys %nodes) {
200
+ $sinks{$n} = 1 if ($nodes{$n}{n_out_arcs} == 0);
201
+ }
202
+
203
+ # sanity check: lattices need at least one sink!
204
+ if (scalar keys %sinks == 0) {
205
+ print STDERR "Error: $utt does not have at least one sink node-- cyclic lattice??\n";
206
+ }
207
+
208
+ # add terminal super-state,
209
+ $last_node = max(keys(%nodes)) + 1;
210
+ $nodes{$last_node} = { W=>"!NULL", t=>$latest_time };
211
+
212
+ # connect all accepting states with terminal super-state,
213
+ for $accept (sort { $a <=> $b } keys %accepting_states) {
214
+ %a = %{$accepting_states{$accept}};
215
+ push @links, { S=>$accept, E=>$last_node, W=>$a{W}, v=>$a{v}, a=>$a{a}, l=>$a{l} };
216
+ }
217
+
218
+ # connect also all sinks that are not accepting states,
219
+ for $sink (sort { $a <=> $b } keys %sinks) {
220
+ unless(exists($accepting_states{$sink})) {
221
+ print STDERR "WARNING: detected sink node which is not accepting state in lattice $utt, incomplete lattice?\n";
222
+ push @links, { S=>$sink, E=>$last_node, W=>"!NULL", v=>0, a=>0, l=>0 };
224
+ }
225
+ }
226
+
227
+ # print out the lattice; open file handle first
228
+ unless ($outdir eq "") {
229
+ open(FH, "|-", "gzip -c > $outdir/$utt.lat.gz") or die "Could not write to $outdir/$utt.lat.gz\n";
230
+ binmode(FH, ":encoding(utf8)");
231
+ }
232
+
233
+ if (not $wordtonode) {
234
+ # print lattice with words on links:
235
+
236
+ # header
237
+ print FH "VERSION=1.1\n";
238
+ print FH "UTTERANCE=$utt\n";
239
+ print FH "N=".(keys %nodes)."\tL=".(@links)."\n";
240
+
241
+ # nodes
242
+ for $n (sort { $a <=> $b } keys %nodes) {
243
+ printf FH "I=%d\tt=%.2f\n", $n, $nodes{$n}{t}*$framerate;
244
+ }
245
+
246
+ # links/arks
247
+ for $i (0 .. $#links) {
248
+ %l = %{$links[$i]}; # get hash representing the link...
249
+ printf FH "J=$i\tS=%d\tE=%d\tW=%s\tv=%f\ta=%f\tl=%f\n", $l{S}, $l{E}, $l{W}, $l{v}, $l{a}, $l{l};
250
+ }
251
+
252
+ } else {
253
+ # print lattice with words in the nodes:
254
+
255
+ # header
256
+ print FH "VERSION=1.1\n";
257
+ print FH "UTTERANCE=$utt\n";
258
+ print FH "N=".(scalar(keys(%nodes))+scalar(keys(%nodes_extra)))."\tL=".(@links)."\n";
259
+
260
+ # number of original nodes, offset of extra_nodes
261
+ $node_id_offset = scalar keys %nodes;
262
+
263
+ # nodes
264
+ for $n (sort { $a <=> $b } keys %nodes) {
265
+ printf FH "I=%d\tW=%s\tt=%.2f\n", $n, $nodes{$n}{W}, $nodes{$n}{t}*$framerate;
266
+ }
267
+ # extra nodes
268
+ for $n (sort { $a <=> $b } keys %nodes_extra) {
269
+ printf FH "I=%d\tW=%s\tt=%.2f\n", $n+$node_id_offset, $nodes_extra{$n}{W}, $nodes_extra{$n}{t}*$framerate;
270
+ }
271
+
272
+ # links/arks
273
+ for $i (0 .. $#links) {
274
+ %l = %{$links[$i]}; # get hash representing the link...
275
+ if ($l{from_extra_node}) { $l{S} += $node_id_offset; }
276
+ if ($l{to_extra_node}) { $l{E} += $node_id_offset; }
277
+ printf FH "J=$i\tS=%d\tE=%d\tv=%f\ta=%f\tl=%f\n", $l{S}, $l{E}, $l{v}, $l{a}, $l{l};
278
+ }
279
+ }
280
+
281
+ print FH "\n";
282
+
283
+ # close handle if it was a file
284
+ close(FH) unless ($outdir eq "");
285
+
286
+ # clear data
287
+ $utt = "";
288
+ $arc = 0;
289
+ $latest_time = 0.0;
290
+ @links = ();
291
+ %nodes = ();
292
+ %nodes_extra = ();
293
+ %accepting_states = ();
294
+ } else {
295
+ die "Unexpected column number of input line\n$_";
296
+ }
297
+ }
298
+
299
+ if ($utt != "") {
300
+ print STDERR "Last lattice was not printed as it might be incomplete? Missing empty line?\n";
301
+ }
302
+
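For reference, a tiny sketch of the SLF text this produces in the default words-on-links mode, matching the printf formats above (fields are tab-separated; ids, times and scores are illustrative):

    VERSION=1.1
    UTTERANCE=utt1
    N=3  L=2
    I=0  t=0.00
    I=1  t=0.50
    I=2  t=1.20
    J=0  S=0  E=1  W=hello  v=0.000000  a=-123.400000  l=-4.500000
    J=1  S=1  E=2  W=world  v=0.000000  a=-98.700000   l=-3.200000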
UniAudio/UniAudio/egs/SE/utils/convert_slf_parallel.sh ADDED
@@ -0,0 +1,71 @@
1
+ #!/usr/bin/env bash
2
+ # Copyright Brno University of Technology (Author: Karel Vesely) 2014. Apache 2.0.
3
+
4
+ # This script converts lattices to HTK format compatible with other toolkits.
5
+ # We can choose to put words to nodes or arcs, as both is valid in the SLF format.
6
+
7
+ # begin configuration section.
8
+ cmd=run.pl
9
+ dirname=lats-in-htk-slf
10
+ parallel_opts="--max-jobs-run 50" # We should limit disk stress
11
+ word_to_node=false # Words in arcs or nodes? [default:arcs]
12
+ #end configuration section.
13
+
14
+ echo "$0 $@"
15
+
16
+ [ -f ./path.sh ] && . ./path.sh
17
+ . parse_options.sh || exit 1;
18
+
19
+ if [ $# -ne 3 ]; then
20
+ echo "Usage: $0 [options] <data-dir> <lang-dir|graph-dir> <decode-dir>"
21
+ echo " Options:"
22
+ echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes."
23
+ echo " --word-to-link (true|false) # put word symbols on links or nodes."
24
+ echo " --parallel-opts STR # parallelization options (def.: '--max-jobs-run 50')."
25
+ echo "e.g.:"
26
+ echo "$0 data/dev data/lang exp/tri4a/decode_dev"
27
+ exit 1;
28
+ fi
29
+
30
+ data=$1
31
+ lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied.
32
+ dir=$3
33
+
34
+ model=$(dirname $dir)/final.mdl # assume model one level up from decoding dir.
35
+
36
+ for f in $lang/words.txt $lang/phones/align_lexicon.int $model $dir/lat.1.gz; do
37
+ [ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1;
38
+ done
39
+
40
+ [ ! -d $dir/$dirname/log ] && mkdir -p $dir/$dirname
41
+
42
+ echo "$0: Converting lattices into '$dir/$dirname'"
43
+
44
+ # Words in arcs or nodes? [default:arcs]
45
+ word_to_node_arg=
46
+ $word_to_node && word_to_node_arg="--word-to-node"
47
+
48
+ nj=$(cat $dir/num_jobs)
49
+
50
+ # convert the lattices (individually, gzipped)
51
+ $cmd $parallel_opts JOB=1:$nj $dir/$dirname/log/lat_convert.JOB.log \
52
+ mkdir -p $dir/$dirname/JOB/ '&&' \
53
+ lattice-align-words-lexicon --output-error-lats=true --output-if-empty=true \
54
+ $lang/phones/align_lexicon.int $model "ark:gunzip -c $dir/lat.JOB.gz |" ark,t:- \| \
55
+ utils/int2sym.pl -f 3 $lang/words.txt \| \
56
+ utils/convert_slf.pl $word_to_node_arg - $dir/$dirname/JOB/ || exit 1
57
+
58
+ # make list of lattices
59
+ find -L $PWD/$dir/$dirname -name '*.lat.gz' > $dir/$dirname/lat_htk.scp || exit 1
60
+
61
+ # check number of lattices:
62
+ nseg=$(cat $data/segments | wc -l)
63
+ nlat_out=$(cat $dir/$dirname/lat_htk.scp | wc -l)
64
+ echo "segments $nseg, saved-lattices $nlat_out"
65
+ #
66
+ [ $nseg -ne $nlat_out ] && echo "WARNING: missing $((nseg-nlat_out)) lattices for some segments!" \
67
+ && exit 1
68
+
69
+ echo "success, converted lats to HTK : $PWD/$dir/$dirname/lat_htk.scp"
70
+ exit 0
71
+
UniAudio/UniAudio/egs/SE/utils/copy_data_dir.sh ADDED
@@ -0,0 +1,149 @@
1
+ #!/usr/bin/env bash
2
+
3
+ # Copyright 2013 Johns Hopkins University (author: Daniel Povey)
4
+ # Apache 2.0
5
+
6
+ # This script operates on a directory, such as in data/train/,
7
+ # that contains some subset of the following files:
8
+ # feats.scp
9
+ # wav.scp
10
+ # vad.scp
11
+ # spk2utt
12
+ # utt2spk
13
+ # text
14
+ #
15
+ # It copies to another directory, possibly adding a specified prefix or a suffix
16
+ # to the utterance and/or speaker names. Note, the recording-ids stay the same.
17
+ #
18
+
19
+
20
+ # begin configuration section
21
+ spk_prefix=
22
+ utt_prefix=
23
+ spk_suffix=
24
+ utt_suffix=
25
+ validate_opts= # should rarely be needed.
26
+ # end configuration section
27
+
28
+ . utils/parse_options.sh
29
+
30
+ if [ $# != 2 ]; then
31
+ echo "Usage: "
32
+ echo " $0 [options] <srcdir> <destdir>"
33
+ echo "e.g.:"
34
+ echo " $0 --spk-prefix=1- --utt-prefix=1- data/train data/train_1"
35
+ echo "Options"
36
+ echo " --spk-prefix=<prefix> # Prefix for speaker ids, default empty"
37
+ echo " --utt-prefix=<prefix> # Prefix for utterance ids, default empty"
38
+ echo " --spk-suffix=<suffix> # Suffix for speaker ids, default empty"
39
+ echo " --utt-suffix=<suffix> # Suffix for utterance ids, default empty"
40
+ exit 1;
41
+ fi
42
+
43
+
44
+ export LC_ALL=C
45
+
46
+ srcdir=$1
47
+ destdir=$2
48
+
49
+ if [ ! -f $srcdir/utt2spk ]; then
50
+ echo "copy_data_dir.sh: no such file $srcdir/utt2spk"
51
+ exit 1;
52
+ fi
53
+
54
+ if [ "$destdir" == "$srcdir" ]; then
55
+ echo "$0: this script requires <srcdir> and <destdir> to be different."
56
+ exit 1
57
+ fi
58
+
59
+ set -e;
60
+
61
+ mkdir -p $destdir
62
+
63
+ cat $srcdir/utt2spk | awk -v p=$utt_prefix -v s=$utt_suffix '{printf("%s %s%s%s\n", $1, p, $1, s);}' > $destdir/utt_map
64
+ cat $srcdir/spk2utt | awk -v p=$spk_prefix -v s=$spk_suffix '{printf("%s %s%s%s\n", $1, p, $1, s);}' > $destdir/spk_map
65
+
66
+ if [ ! -f $srcdir/utt2uniq ]; then
67
+ if [[ ! -z $utt_prefix || ! -z $utt_suffix ]]; then
68
+ cat $srcdir/utt2spk | awk -v p=$utt_prefix -v s=$utt_suffix '{printf("%s%s%s %s\n", p, $1, s, $1);}' > $destdir/utt2uniq
69
+ fi
70
+ else
71
+ cat $srcdir/utt2uniq | awk -v p=$utt_prefix -v s=$utt_suffix '{printf("%s%s%s %s\n", p, $1, s, $2);}' > $destdir/utt2uniq
72
+ fi
73
+
74
+ cat $srcdir/utt2spk | utils/apply_map.pl -f 1 $destdir/utt_map | \
75
+ utils/apply_map.pl -f 2 $destdir/spk_map >$destdir/utt2spk
76
+
77
+ utils/utt2spk_to_spk2utt.pl <$destdir/utt2spk >$destdir/spk2utt
78
+
79
+ if [ -f $srcdir/feats.scp ]; then
80
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/feats.scp >$destdir/feats.scp
81
+ fi
82
+
83
+ if [ -f $srcdir/vad.scp ]; then
84
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/vad.scp >$destdir/vad.scp
85
+ fi
86
+
87
+ if [ -f $srcdir/utt2lang ]; then
88
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/utt2lang >$destdir/utt2lang
89
+ fi
90
+
91
+ if [ -f $srcdir/segments ]; then
92
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/segments >$destdir/segments
93
+ cp $srcdir/wav.scp $destdir
94
+ else # no segments->wav indexed by utt.
95
+ if [ -f $srcdir/wav.scp ]; then
96
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/wav.scp >$destdir/wav.scp
97
+ fi
98
+ fi
99
+
100
+ if [ -f $srcdir/reco2file_and_channel ]; then
101
+ cp $srcdir/reco2file_and_channel $destdir/
102
+ fi
103
+
104
+ if [ -f $srcdir/text ]; then
105
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/text >$destdir/text
106
+ fi
107
+ if [ -f $srcdir/utt2dur ]; then
108
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/utt2dur >$destdir/utt2dur
109
+ fi
110
+ if [ -f $srcdir/utt2num_frames ]; then
111
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/utt2num_frames >$destdir/utt2num_frames
112
+ fi
113
+ if [ -f $srcdir/reco2dur ]; then
114
+ if [ -f $srcdir/segments ]; then
115
+ cp $srcdir/reco2dur $destdir/reco2dur
116
+ else
117
+ utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/reco2dur >$destdir/reco2dur
118
+ fi
119
+ fi
120
+ if [ -f $srcdir/spk2gender ]; then
121
+ utils/apply_map.pl -f 1 $destdir/spk_map <$srcdir/spk2gender >$destdir/spk2gender
122
+ fi
123
+ if [ -f $srcdir/cmvn.scp ]; then
124
+ utils/apply_map.pl -f 1 $destdir/spk_map <$srcdir/cmvn.scp >$destdir/cmvn.scp
125
+ fi
126
+ for f in frame_shift stm glm ctm; do
127
+ if [ -f $srcdir/$f ]; then
128
+ cp $srcdir/$f $destdir
129
+ fi
130
+ done
131
+
132
+ rm $destdir/spk_map $destdir/utt_map
133
+
134
+ echo "$0: copied data from $srcdir to $destdir"
135
+
136
+ for f in feats.scp cmvn.scp vad.scp utt2lang utt2uniq utt2dur utt2num_frames text wav.scp reco2file_and_channel frame_shift stm glm ctm; do
137
+ if [ -f $destdir/$f ] && [ ! -f $srcdir/$f ]; then
138
+ echo "$0: file $f exists in dest $destdir but not in src $srcdir. Moving it to"
139
+ echo " ... $destdir/.backup/$f"
140
+ mkdir -p $destdir/.backup
141
+ mv $destdir/$f $destdir/.backup/
142
+ fi
143
+ done
144
+
145
+
146
+ [ ! -f $srcdir/feats.scp ] && validate_opts="$validate_opts --no-feats"
147
+ [ ! -f $srcdir/text ] && validate_opts="$validate_opts --no-text"
148
+
149
+ utils/validate_data_dir.sh $validate_opts $destdir
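A common use is making an id-prefixed copy of a set, e.g. for a speed-perturbed variant (names are illustrative); recording ids in wav.scp stay unchanged when a segments file is present:

    utils/copy_data_dir.sh --spk-prefix "sp1.1-" --utt-prefix "sp1.1-" data/train data/train_sp1.1
    # utterance ids become sp1.1-<orig-utt>; an utt2uniq file is written so the
    # copies can be traced back to the original utterances.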
UniAudio/UniAudio/egs/SE/utils/create_data_link.pl ADDED
@@ -0,0 +1,132 @@
1
+ #!/usr/bin/env perl
2
+
3
+ # Copyright 2013 Guoguo Chen
4
+ # 2014 Johns Hopkins University (author: Daniel Povey)
5
+ # Apache 2.0.
6
+ #
7
+ # This script distributes data onto different file systems by making symbolic
8
+ # links. It is meant to be used together with utils/create_split_dir.pl, which
9
+ # creates a "storage" directory that links to different file systems.
10
+ #
11
+ # If a sub-directory egs/storage does not exist, it does nothing. If it exists,
12
+ # then it selects pseudo-randomly a number from those available in egs/storage/*
13
+ # and creates a link such as
14
+ #
15
+ # egs/egs.3.4.ark -> storage/4/egs.3.4.ark
16
+ #
17
+ use strict;
18
+ use warnings;
19
+ use File::Basename;
20
+ use File::Spec;
21
+ use Getopt::Long;
22
+
23
+ sub GetGCD {
24
+ my ($a, $b) = @_;
25
+ while ($a != $b) {
26
+ if ($a > $b) {
27
+ $a = $a - $b;
28
+ } else {
29
+ $b = $b - $a;
30
+ }
31
+ }
32
+ return $a;
33
+ }
34
+
35
+ my $Usage = <<EOU;
36
+ create_data_link.pl:
37
+ This script distributes data onto different file systems by making symbolic
38
+ links. It is meant to be used together with utils/create_split_dir.pl, which
39
+ creates a "storage" directory that links to different file systems.
40
+
41
+ If a sub-directory foo/storage does not exist, it does nothing. If it exists,
42
+ then it selects pseudo-randomly a number from those available in foo/storage/*
43
+ and creates a link such as
44
+
45
+ foo/egs.3.4.ark -> storage/4/egs.3.4.ark
46
+
47
+ Usage: utils/create_data_link.pl <data-archive1> [<data-archive2> ... ]
48
+ e.g.: utils/create_data_link.pl foo/bar/egs.3.4.ark foo/bar/egs.3.5.ark
49
+ (note: the dirname, e.g. foo/bar/, must be the same in all cases).
50
+
51
+ See also utils/remove_data_links.sh
52
+ EOU
53
+
54
+ GetOptions();
55
+
56
+ if (@ARGV == 0) {
57
+ die $Usage;
58
+ }
59
+
60
+ my $example_fullpath = $ARGV[0];
61
+
62
+ # Check if the storage has been created. If so, do nothing.
63
+ my $dirname = dirname($example_fullpath);
64
+ if (! -d "$dirname/storage") {
65
+ exit(0);
66
+ }
67
+
68
+ # Storage exists, create symbolic links in the next few steps.
69
+
70
+ # First, get a list of the available storage directories, and check if they are
71
+ # properly created.
72
+ opendir(my $dh, "$dirname/storage/") || die "$0: Fail to open $dirname/storage/\n";
73
+ my @storage_dirs = grep(/^[0-9]*$/, readdir($dh));
74
+ closedir($dh);
75
+ my $num_storage = scalar(@storage_dirs);
76
+ for (my $x = 1; $x <= $num_storage; $x++) {
77
+ (-d "$dirname/storage/$x") || die "$0: $dirname/storage/$x does not exist\n";
78
+ }
79
+
80
+ # Second, get the coprime list.
81
+ my @coprimes;
82
+ for (my $n = 1; $n <= $num_storage; $n++) {
83
+ if (GetGCD($n, $num_storage) == 1) {
84
+ push(@coprimes, $n);
85
+ }
86
+ }
87
+
88
+ my $ret = 0;
89
+
90
+ foreach my $fullpath (@ARGV) {
91
+ if ($dirname ne dirname($fullpath)) {
92
+ die "Mismatch in directory names of arguments: $example_fullpath versus $fullpath";
93
+ }
94
+
95
+ # Finally, work out the directory index where we should put the data to.
96
+ my $basename = basename($fullpath);
97
+ my $filename_numbers = $basename;
98
+ $filename_numbers =~ s/[^0-9]+/ /g;
99
+ my @filename_numbers = split(" ", $filename_numbers);
100
+ my $total = 0;
101
+ my $index = 0;
102
+ foreach my $x (@filename_numbers) {
103
+ if ($index >= scalar(@coprimes)) {
104
+ $index = 0;
105
+ }
106
+ $total += $x * $coprimes[$index];
107
+ $index++;
108
+ }
109
+ my $dir_index = $total % $num_storage + 1;
110
+
111
+ # Make the symbolic link.
112
+ if (-e $fullpath) {
113
+ unlink($fullpath);
114
+ }
115
+ if (symlink("storage/$dir_index/$basename", $fullpath) != 1) { # failure
116
+ $ret = 1; # will exit with error status.
117
+ }
118
+ }
119
+
120
+ exit($ret);
121
+
122
+ ## testing:
123
+ # rm -rf foo bar
124
+ # mkdir -p bar/{1,2,3,4}
125
+ # mkdir -p foo/storage
126
+ # for x in 1 2 3 4; do ln -s ../../bar/$x foo/storage/$x; done
127
+ # utils/create_data_link.pl utils/create_data_link.pl foo/1.3.ark foo/2.3.ark
128
+ # ls -l foo
129
+ # total 0
130
+ # lrwxrwxrwx 1 dpovey fax 17 Sep 2 17:41 1.3.ark -> storage/3/1.3.ark
131
+ # lrwxrwxrwx 1 dpovey fax 17 Sep 2 17:41 2.3.ark -> storage/4/2.3.ark
132
+ # drwxr-xr-x 2 dpovey fax 38 Sep 2 17:40 storage
UniAudio/UniAudio/egs/SE/utils/create_split_dir.pl ADDED
@@ -0,0 +1,92 @@
1
+ #!/usr/bin/env perl
2
+
3
+ # Copyright 2013 Guoguo Chen
4
+ # Apache 2.0.
5
+ #
6
+ # This script creates storage directories on different file systems, and creates
7
+ # symbolic links to those directories. For example, a command
8
+ #
9
+ # utils/create_split_dir.pl /export/gpu-0{3,4,5}/egs/storage egs/storage
10
+ #
11
+ # will mkdir -p all of those directories, and will create links
12
+ #
13
+ # egs/storage/1 -> /export/gpu-03/egs/storage
14
+ # egs/storage/2 -> /export/gpu-04/egs/storage
15
+ # ...
16
+ #
17
+ use strict;
18
+ use warnings;
19
+ use File::Spec;
20
+ use Getopt::Long;
21
+
22
+ my $Usage = <<EOU;
23
+ create_split_dir.pl:
24
+ This script creates storage directories on different file systems, and creates
25
+ symbolic links to those directories.
26
+
27
+ Usage: utils/create_split_dir.pl <actual_storage_dirs> <pseudo_storage_dir>
28
+ e.g.: utils/create_split_dir.pl /export/gpu-0{3,4,5}/egs/storage egs/storage
29
+
30
+ Allowed options:
31
+ --suffix : Common suffix to <actual_storage_dirs> (string, default = "")
32
+
33
+ See also create_data_link.pl, which is intended to work with the resulting
34
+ directory structure, and remove_data_links.sh
35
+ EOU
36
+
37
+ my $suffix="";
38
+ GetOptions('suffix=s' => \$suffix);
39
+
40
+ if (@ARGV < 2) {
41
+ die $Usage;
42
+ }
43
+
44
+ my $ans = 1;
45
+
46
+ my $dir = pop(@ARGV);
47
+ system("mkdir -p $dir 2>/dev/null");
48
+
49
+ my @all_actual_storage = ();
50
+ foreach my $file (@ARGV) {
51
+ push @all_actual_storage, File::Spec->rel2abs($file . "/" . $suffix);
52
+ }
53
+
54
+ my $index = 1;
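+
+ # For each actual storage directory, create a numbered symlink
+ # <pseudo_storage_dir>/<index> pointing at it, and drop a README.txt into the
+ # real directory so the striped data can be traced back and cleaned up later.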
+ foreach my $actual_storage (@all_actual_storage) {
+   my $pseudo_storage = "$dir/$index";
+
+   # If the symbolic link already exists, skip it rather than overwrite it.
+   if (-l $pseudo_storage) {
+     print STDERR "$0: link $pseudo_storage already exists, not overwriting.\n";
+     $index++;
+     next;
+   }
+
+   # Create the destination directory and make the link.
+   system("mkdir -p $actual_storage 2>/dev/null");
+   if ($? != 0) {
+     print STDERR "$0: error creating directory $actual_storage\n";
+     exit(1);
+   }
+   { # create a README file for easier deletion.
+     open(R, ">$actual_storage/README.txt");
+     my $storage_dir = File::Spec->rel2abs($dir);
+     print R "# This directory is linked from $storage_dir, as part of Kaldi striped data\n";
+     print R "# The full list of directories where this data resides is:\n";
+     foreach my $d (@all_actual_storage) {
+       print R "$d\n";
+     }
+     close(R);
+   }
+   my $ret = symlink($actual_storage, $pseudo_storage);
+
+   # Process the returned values
+   $ans = $ans && $ret;
+   if (! $ret) {
+     print STDERR "Error linking $actual_storage to $pseudo_storage\n";
+   }
+
+   $index++;
+ }
+
+ exit($ans == 1 ? 0 : 1);
UniAudio/UniAudio/egs/SE/utils/ctm/convert_ctm.pl ADDED
@@ -0,0 +1,96 @@
+ #!/usr/bin/env perl
+
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
+
+ # This takes as standard input a ctm file that's "relative to the utterance",
+ # i.e. times are measured relative to the beginning of the segments, and it
+ # uses a "segments" file (format:
+ # utterance-id recording-id start-time end-time
+ # ) and a "reco2file_and_channel" file (format:
+ # recording-id basename-of-file channel
+
+ $skip_unknown=undef;
+ if ( $ARGV[0] eq "--skip-unknown" ) {
+   $skip_unknown=1;
+   shift @ARGV;
+ }
+
+ if (@ARGV < 2 || @ARGV > 3) {
+   print STDERR "Usage: convert_ctm.pl <segments-file> <reco2file_and_channel-file> [<utterance-ctm>] > real-ctm\n";
+   exit(1);
+ }
+
+ $segments = shift @ARGV;
+ $reco2file_and_channel = shift @ARGV;
+
+ open(S, "<$segments") || die "opening segments file $segments";
+ while(<S>) {
+   @A = split(" ", $_);
+   @A == 4 || die "Bad line in segments file: $_";
+   ($utt, $recording_id, $begin_time, $end_time) = @A;
+   $utt2reco{$utt} = $recording_id;
+   $begin{$utt} = $begin_time;
+   $end{$utt} = $end_time;
+ }
+ close(S);
+ open(R, "<$reco2file_and_channel") || die "open reco2file_and_channel file $reco2file_and_channel";
+ while(<R>) {
+   @A = split(" ", $_);
+   @A == 3 || die "Bad line in reco2file_and_channel file: $_";
+   ($recording_id, $file, $channel) = @A;
+   $reco2file{$recording_id} = $file;
+   $reco2channel{$recording_id} = $channel;
+ }
+
+
+ # Now process the ctm file, which is either the standard input or the third
+ # command-line argument.
+ $num_done = 0;
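+
+ # Each utterance-relative start time is shifted by the utterance's start time
+ # within the recording (from the segments file), so the output CTM is indexed
+ # by recording/file and channel rather than by utterance.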
+ while(<>) {
+   @A = split(" ", $_);
+   ( @A == 5 || @A == 6 ) || die "Unexpected ctm format: $_";
+   # lines look like:
+   # <utterance-id> 1 <begin-time> <length> <word> [ confidence ]
+   ($utt, $one, $wbegin, $wlen, $w, $conf) = @A;
+   $reco = $utt2reco{$utt};
+   if (!defined $reco) {
+     next if defined $skip_unknown;
+     die "Utterance-id $utt not defined in segments file $segments";
+   }
+   $file = $reco2file{$reco};
+   $channel = $reco2channel{$reco};
+   if (!defined $file || !defined $channel) {
+     die "Recording-id $reco not defined in reco2file_and_channel file $reco2file_and_channel";
+   }
+   $b = $begin{$utt};
+   $e = $end{$utt};
+   $wbegin_r = $wbegin + $b; # Make it relative to beginning of the recording.
+   $wbegin_r = sprintf("%.2f", $wbegin_r);
+   $wlen = sprintf("%.2f", $wlen);
+   if (defined $conf) {
+     $line = "$file $channel $wbegin_r $wlen $w $conf\n";
+   } else {
+     $line = "$file $channel $wbegin_r $wlen $w\n";
+   }
+   if ($wbegin_r + $wlen > $e + 0.01) {
+     print STDERR "Warning: word appears to be past end of recording; line is $line";
+   }
+   print $line; # goes to stdout.
+   $num_done++;
+ }
+
+ if ($num_done == 0) { exit 1; } else { exit 0; }
+
+ __END__
+
+ # Test example [also test it without the 0.5's]
+ echo utt reco 10.0 20.0 > segments
+ echo reco file A > reco2file_and_channel
+ echo utt 1 8.0 1.0 word 0.5 > ctm_in
+ echo file A 18.00 1.00 word 0.5 > ctm_out
+ utils/convert_ctm.pl segments reco2file_and_channel ctm_in | cmp - ctm_out || echo error
+ rm segments reco2file_and_channel ctm_in ctm_out
+
+
+
+
UniAudio/UniAudio/egs/SE/utils/ctm/fix_ctm.sh ADDED
@@ -0,0 +1,32 @@
+ #! /bin/bash
+
+ stmfile=$1
+ ctmfile=$2
+
+ segments_stm=`cat $stmfile | cut -f 1 -d ' ' | sort -u`
+ segments_ctm=`cat $ctmfile | cut -f 1 -d ' ' | sort -u`
+
+ segments_stm_count=`echo "$segments_stm" | wc -l `
+ segments_ctm_count=`echo "$segments_ctm" | wc -l `
+
+ #echo $segments_stm_count
+ #echo $segments_ctm_count
+
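+ # If the CTM is missing some segments that the STM has, pad the CTM with dummy
+ # "EMPTY_RECOGNIZED_PHRASE" entries so downstream scoring does not fail; if
+ # the CTM has extra segments instead, there is no automatic fix.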
+ if [ "$segments_stm_count" -gt "$segments_ctm_count" ] ; then
+   pp=$( diff <(echo "$segments_stm") <(echo "$segments_ctm" ) | grep "^<" | sed "s/^< *//g")
+   (
+     for elem in $pp ; do
+       echo "$elem 1 0 0 EMPTY_RECOGNIZED_PHRASE"
+     done
+   ) >> $ctmfile
+   echo "FIXED CTM FILE"
+   exit 0
+ elif [ "$segments_stm_count" -lt "$segments_ctm_count" ] ; then
+   echo "Segment STM count: $segments_stm_count"
+   echo "Segment CTM count: $segments_ctm_count"
+   echo "FAILURE FIXING CTM FILE"
+   exit 1
+ else
+   exit 0
+ fi
+
UniAudio/UniAudio/egs/SE/utils/ctm/resolve_ctm_overlaps.py ADDED
@@ -0,0 +1,324 @@
+ #! /usr/bin/env python
+
+ # Copyright 2014 Johns Hopkins University (Authors: Daniel Povey)
+ # 2014 Vijayaditya Peddinti
+ # 2016 Vimal Manohar
+ # Apache 2.0.
+
+ """
+ Script to combine ctms with overlapping segments.
+ The current approach is very simple. It ignores the words,
+ which are hypothesized in the half of the overlapped region
+ that is closer to the utterance boundary.
+ So if there are two segments
+ in the region 0s to 30s and 25s to 55s, with overlap of 5s,
+ the last 2.5s of the first utterance i.e. from 27.5s to 30s is truncated
+ and the first 2.5s of the second utterance i.e. from 25s to 27.5s is truncated.
+ """
+
+ from __future__ import print_function
+ from __future__ import division
+ import argparse
+ import collections
+ import logging
+
+ from collections import defaultdict
+
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)
+ handler = logging.StreamHandler()
+ handler.setLevel(logging.INFO)
+ formatter = logging.Formatter(
+     '%(asctime)s [%(pathname)s:%(lineno)s - '
+     '%(funcName)s - %(levelname)s ] %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+
+ def get_args():
+     """gets command line arguments"""
+
+     usage = """ Python script to resolve overlaps in ctms. May be used with
+     utils/data/subsegment_data_dir.sh. """
+     parser = argparse.ArgumentParser(usage)
+     parser.add_argument('segments', type=argparse.FileType('r'),
+                         help='use segments to resolve overlaps')
+     parser.add_argument('ctm_in', type=argparse.FileType('r'),
+                         help='input_ctm_file')
+     parser.add_argument('ctm_out', type=argparse.FileType('w'),
+                         help='output_ctm_file')
+     parser.add_argument('--verbose', type=int, default=0,
+                         help="Higher value for more verbose logging.")
+     args = parser.parse_args()
+
+     if args.verbose > 2:
+         logger.setLevel(logging.DEBUG)
+         handler.setLevel(logging.DEBUG)
+
+     return args
+
+
+ def read_segments(segments_file):
+     """Read from segments and returns two dictionaries,
+     {utterance-id: (recording_id, start_time, end_time)}
+     {recording_id: list-of-utterances}
+     """
+     segments = {}
+     reco2utt = defaultdict(list)
+
+     num_lines = 0
+     for line in segments_file:
+         num_lines += 1
+         parts = line.strip().split()
+         assert len(parts) in [4, 5]
+         segments[parts[0]] = (parts[1], float(parts[2]), float(parts[3]))
+         reco2utt[parts[1]].append(parts[0])
+
+     logger.info("Read %d lines from segments file %s",
+                 num_lines, segments_file.name)
+     segments_file.close()
+
+     return segments, reco2utt
+
+
+ def read_ctm(ctm_file, segments):
+     """Read CTM from ctm_file into a dictionary of values indexed by the
+     recording.
+     It is assumed to be sorted by the recording-id and utterance-id.
+
+     Returns a dictionary {recording : ctm_lines}
+     where ctm_lines is a list of lines of CTM corresponding to the
+     utterances in the recording.
+     The format is as follows:
+     [[(utteranceA, channelA, start_time1, duration1, hyp_word1, conf1),
+       (utteranceA, channelA, start_time2, duration2, hyp_word2, conf2),
+       ...
+       (utteranceA, channelA, start_timeN, durationN, hyp_wordN, confN)],
+      [(utteranceB, channelB, start_time1, duration1, hyp_word1, conf1),
+       (utteranceB, channelB, start_time2, duration2, hyp_word2, conf2),
+       ...],
+      ...
+      [...
+       (utteranceZ, channelZ, start_timeN, durationN, hyp_wordN, confN)]
+     ]
+     """
+     ctms = {}
+
+     num_lines = 0
+     for line in ctm_file:
+         num_lines += 1
+         parts = line.split()
+
+         utt = parts[0]
+         reco = segments[utt][0]
+
+         if (reco, utt) not in ctms:
+             ctms[(reco, utt)] = []
+
+         ctms[(reco, utt)].append([parts[0], parts[1], float(parts[2]),
+                                   float(parts[3])] + parts[4:])
+
+     logger.info("Read %d lines from CTM %s", num_lines, ctm_file.name)
+
+     ctm_file.close()
+     return ctms
+
+
+ def resolve_overlaps(ctms, segments):
+     """Resolve overlaps within segments of the same recording.
+
+     Returns new lines of CTM for the recording.
+
+     Arguments:
+         ctms - The CTM lines for a single recording. This is one value stored
+                in the dictionary read by read_ctm(). Assumes that the lines
+                are sorted by the utterance-ids.
+                The format is the following:
+                [[(utteranceA, channelA, start_time1, duration1, hyp_word1, conf1),
+                  (utteranceA, channelA, start_time2, duration2, hyp_word2, conf2),
+                  ...
+                  (utteranceA, channelA, start_timeN, durationN, hyp_wordN, confN)
+                 ],
+                 [(utteranceB, channelB, start_time1, duration1, hyp_word1, conf1),
+                  (utteranceB, channelB, start_time2, duration2, hyp_word2, conf2),
+                  ...],
+                 ...
+                 [...
+                  (utteranceZ, channelZ, start_timeN, durationN, hyp_wordN, confN)]
+                ]
+         segments - Dictionary containing the output of read_segments()
+                    { utterance_id: (recording_id, start_time, end_time) }
+     """
+     total_ctm = []
+     if len(ctms) == 0:
+         raise RuntimeError('CTMs for recording is empty. '
+                            'Something wrong with the input ctms')
+
+     # First column of first line in CTM for first utterance
+     next_utt = ctms[0][0][0]
+     for utt_index, ctm_for_cur_utt in enumerate(ctms):
+         if utt_index == len(ctms) - 1:
+             break
+
+         if len(ctm_for_cur_utt) == 0:
+             next_utt = ctms[utt_index + 1][0][0]
+             continue
+
+         cur_utt = ctm_for_cur_utt[0][0]
+         if cur_utt != next_utt:
+             logger.error(
+                 "Current utterance %s is not the same as the next "
+                 "utterance %s in previous iteration.\n"
+                 "CTM is not sorted by utterance-id?",
+                 cur_utt, next_utt)
+             raise ValueError
+
+         # Assumption here is that the segments are written in
+         # consecutive order?
+         ctm_for_next_utt = ctms[utt_index + 1]
+         next_utt = ctm_for_next_utt[0][0]
+         if segments[next_utt][1] < segments[cur_utt][1]:
+             logger.error(
+                 "Next utterance %s <= Current utterance %s. "
+                 "CTM is not sorted by start-time of utterance-id.",
+                 next_utt, cur_utt)
+             raise ValueError
+
+         try:
+             # length of this utterance
+             window_length = segments[cur_utt][2] - segments[cur_utt][1]
+
+             # overlap of this segment with the next segment
+             # i.e. current_utterance_end_time - next_utterance_start_time
+             # Note: It is possible for this to be negative when there is
+             # actually no overlap between consecutive segments.
+             try:
+                 overlap = segments[cur_utt][2] - segments[next_utt][1]
+             except KeyError:
+                 logger.error("Could not find utterance %s in segments",
+                              next_utt)
+                 raise
+
+             if overlap > 0 and segments[next_utt][2] <= segments[cur_utt][2]:
+                 # Next utterance is entirely within this utterance.
+                 # So we leave this ctm as is and make the next one empty.
+                 total_ctm.extend(ctm_for_cur_utt)
+                 ctms[utt_index + 1] = []
+                 continue
+
+             # find a break point (a line in the CTM) for the current utterance
+             # i.e. the first line that has more than half of it outside
+             # the first half of the overlap region.
+             # Note: This line will not be included in the output CTM, which is
+             # only upto the line before this.
+             try:
+                 index = next(
+                     (i for i, line in enumerate(ctm_for_cur_utt)
+                      if (line[2] + line[3] / 2.0
+                          > window_length - overlap / 2.0)))
+             except StopIteration:
+                 # It is possible for such a word to not exist, e.g the last
+                 # word in the CTM is longer than overlap length and starts
+                 # before the beginning of the overlap.
+                 # or the last word ends before the middle of the overlap.
+                 index = len(ctm_for_cur_utt)
+
+             # Ignore the hypotheses beyond this midpoint. They will be
+             # considered as part of the next segment.
+             total_ctm.extend(ctm_for_cur_utt[:index])
+
+             # Find a break point (a line in the CTM) for the next utterance
+             # i.e. the first line that has more than half of it outside
+             # the first half of the overlap region.
+             try:
+                 index = next(
+                     (i for i, line in enumerate(ctm_for_next_utt)
+                      if line[2] + line[3] / 2.0 > overlap / 2.0))
+             except StopIteration:
+                 # This can happen if there is no word hypothesized after
+                 # half the overlap region.
+                 ctms[utt_index + 1] = []
+                 continue
+
+             if index > 0:
+                 # Update the ctm_for_next_utt to include only the lines
+                 # starting from index.
+                 ctms[utt_index + 1] = ctm_for_next_utt[index:]
+             # else leave the ctm as is.
+         except:
+             logger.error("Could not resolve overlaps between CTMs for "
+                          "%s and %s", cur_utt, next_utt)
+             logger.error("Current CTM:")
+             for line in ctm_for_cur_utt:
+                 logger.error(ctm_line_to_string(line))
+             logger.error("Next CTM:")
+             for line in ctm_for_next_utt:
+                 logger.error(ctm_line_to_string(line))
+             raise
+
+     # merge the last ctm entirely
+     total_ctm.extend(ctms[-1])
+
+     return total_ctm
+
+
+ def ctm_line_to_string(line):
+     """Converts a line of CTM to string."""
+     return "{0} {1} {2} {3} {4}".format(line[0], line[1], line[2], line[3],
+                                         " ".join(line[4:]))
+
+
+ def write_ctm(ctm_lines, out_file):
+     """Writes CTM lines stored in a list to file."""
+     for line in ctm_lines:
+         print(ctm_line_to_string(line), file=out_file)
+
+
+ def run(args):
+     """this method does everything in this script"""
+     segments, reco2utt = read_segments(args.segments)
+     ctms = read_ctm(args.ctm_in, segments)
+
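+     # Process each recording's utterances in order of start time; the overlap
+     # resolution above assumes consecutive utterances are time-ordered within
+     # the recording.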
+     for reco, utts in reco2utt.items():
+         ctms_for_reco = []
+         for utt in sorted(utts, key=lambda x: segments[x][1]):
+             if (reco, utt) in ctms:
+                 ctms_for_reco.append(ctms[(reco, utt)])
+         if len(ctms_for_reco) == 0:
+             logger.info("CTM for recording {0} was empty".format(reco))
+             continue
+         try:
+             # Process CTMs in the recordings
+             ctms_for_reco = resolve_overlaps(ctms_for_reco, segments)
+             write_ctm(ctms_for_reco, args.ctm_out)
+         except Exception:
+             logger.error("Failed to process CTM for recording %s",
+                          reco)
+             raise
+     args.ctm_out.close()
+     logger.info("Wrote CTM for %d recordings.", len(ctms))
+
+
+ def main():
+     """The main function which parses arguments and calls run()."""
+     args = get_args()
+     try:
+         run(args)
+     except:
+         logger.error("Failed to resolve overlaps", exc_info=True)
+         raise SystemExit(1)
+     finally:
+         try:
+             for f in [args.segments, args.ctm_in, args.ctm_out]:
+                 if f is not None:
+                     f.close()
+         except IOError:
+             logger.error("Could not close some files. "
+                          "Disk error or broken pipes?")
+             raise
+         except UnboundLocalError:
+             raise SystemExit(1)
+
+
+ if __name__ == "__main__":
+     main()
UniAudio/UniAudio/egs/SE/utils/data/combine_data.sh ADDED
@@ -0,0 +1,146 @@
+ #!/usr/bin/env bash
+ # Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
+ # 2014 David Snyder
+
+ # This script combines the data from multiple source directories into
+ # a single destination directory.
+
+ # See http://kaldi-asr.org/doc/data_prep.html#data_prep_data for information
+ # about what these directories contain.
+
+ # Begin configuration section.
+ extra_files= # specify additional files in 'src-data-dir' to merge, ex. "file1 file2 ..."
+ skip_fix=false # skip the fix_data_dir.sh in the end
+ # End configuration section.
+
+ echo "$0 $@" # Print the command line for logging
+
+ if [ -f path.sh ]; then . ./path.sh; fi
+ . parse_options.sh || exit 1;
+
+ if [ $# -lt 2 ]; then
+   echo "Usage: combine_data.sh [--extra-files 'file1 file2'] <dest-data-dir> <src-data-dir1> <src-data-dir2> ..."
+   echo "Note, files that don't appear in all source dirs will not be combined,"
+   echo "with the exception of utt2uniq and segments, which are created where necessary."
+   exit 1
+ fi
+
+ dest=$1;
+ shift;
+
+ first_src=$1;
+
+ rm -r $dest 2>/dev/null || true
+ mkdir -p $dest;
+
+ export LC_ALL=C
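+ # The C locale guarantees plain byte-order sorting, which Kaldi table readers
+ # rely on; sorting the merged files under any other locale can silently break
+ # downstream tools.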
+
+ for dir in $*; do
+   if [ ! -f $dir/utt2spk ]; then
+     echo "$0: no such file $dir/utt2spk"
+     exit 1;
+   fi
+ done
+
+ # Check that frame_shift files are compatible, where present together with features.
+ dir_with_frame_shift=
+ for dir in $*; do
+   if [[ -f $dir/feats.scp && -f $dir/frame_shift ]]; then
+     if [[ $dir_with_frame_shift ]] &&
+        ! cmp -s $dir_with_frame_shift/frame_shift $dir/frame_shift; then
+       echo "$0: error: different frame_shift in directories $dir and " \
+            "$dir_with_frame_shift. Cannot combine features."
+       exit 1;
+     fi
+     dir_with_frame_shift=$dir
+   fi
+ done
+
+ # W.r.t. the utt2uniq file, the script has different behavior compared to other files:
+ # it is not compulsory for it to exist in src directories, but if it exists in
+ # even one it should exist in all. We will create the files where necessary.
+ has_utt2uniq=false
+ for in_dir in $*; do
+   if [ -f $in_dir/utt2uniq ]; then
+     has_utt2uniq=true
+     break
+   fi
+ done
+
+ if $has_utt2uniq; then
+   # we are going to create an utt2uniq file in the destdir
+   for in_dir in $*; do
+     if [ ! -f $in_dir/utt2uniq ]; then
+       # we assume that utt2uniq is a one to one mapping
+       cat $in_dir/utt2spk | awk '{printf("%s %s\n", $1, $1);}'
+     else
+       cat $in_dir/utt2uniq
+     fi
+   done | sort -k1 > $dest/utt2uniq
+   echo "$0: combined utt2uniq"
+ else
+   echo "$0 [info]: not combining utt2uniq as it does not exist"
+ fi
+ # some of the old scripts might provide utt2uniq as an extra file, so just remove it
+ extra_files=$(echo "$extra_files"|sed -e "s/utt2uniq//g")
+
+ # segments are treated similarly to utt2uniq. If it exists in some, but not all
+ # src directories, then we generate segments where necessary.
+ has_segments=false
+ for in_dir in $*; do
+   if [ -f $in_dir/segments ]; then
+     has_segments=true
+     break
+   fi
+ done
+
+ if $has_segments; then
+   for in_dir in $*; do
+     if [ ! -f $in_dir/segments ]; then
+       echo "$0 [info]: will generate missing segments for $in_dir" 1>&2
+       utils/data/get_segments_for_data.sh $in_dir
+     else
+       cat $in_dir/segments
+     fi
+   done | sort -k1 > $dest/segments
+   echo "$0: combined segments"
+ else
+   echo "$0 [info]: not combining segments as it does not exist"
+ fi
+
+ for file in utt2spk utt2lang utt2dur utt2num_frames reco2dur feats.scp text cmvn.scp vad.scp reco2file_and_channel wav.scp spk2gender $extra_files; do
+   exists_somewhere=false
+   absent_somewhere=false
+   for d in $*; do
+     if [ -f $d/$file ]; then
+       exists_somewhere=true
+     else
+       absent_somewhere=true
+     fi
+   done
+
+   if ! $absent_somewhere; then
+     set -o pipefail
+     ( for f in $*; do cat $f/$file; done ) | sort -k1 > $dest/$file || exit 1;
+     set +o pipefail
+     echo "$0: combined $file"
+   else
+     if ! $exists_somewhere; then
+       echo "$0 [info]: not combining $file as it does not exist"
+     else
+       echo "$0 [info]: **not combining $file as it does not exist everywhere**"
+     fi
+   fi
+ done
+
+ utils/utt2spk_to_spk2utt.pl <$dest/utt2spk >$dest/spk2utt
+
+ if [[ $dir_with_frame_shift ]]; then
+   cp $dir_with_frame_shift/frame_shift $dest
+ fi
+
+ if ! $skip_fix ; then
+   utils/fix_data_dir.sh $dest || exit 1;
+ fi
+
+ exit 0
UniAudio/UniAudio/egs/SE/utils/data/combine_short_segments.sh ADDED
@@ -0,0 +1,187 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2013 Johns Hopkins University (author: Daniel Povey)
+ # Apache 2.0
+
+ # This script copies and modifies a data directory while combining
+ # segments whose duration is lower than a specified minimum segment
+ # length.
+ #
+ # Note: this does not work for the wav.scp, since there is no natural way to
+ # concatenate segments; you have to operate on directories that already have
+ # features extracted.
+
+ #
+
+
+ # begin configuration section
+ cleanup=true
+ speaker_only=false # If true, utterances are only combined from the same speaker.
+                    # It may be useful for the speaker recognition task.
+                    # If false, utterances are preferentially combined from the same speaker,
+                    # and then combined across different speakers.
+ # end configuration section
+
+
+ . utils/parse_options.sh
+
+ if [ $# != 3 ]; then
+   echo "Usage: "
+   echo " $0 [options] <srcdir> <min-segment-length-in-seconds> <dir>"
+   echo "e.g.:"
+   echo " $0 data/train 1.55 data/train_comb"
+   echo " Options:"
+   echo " --speaker-only <true|false> # options to internal/choose_utts_to_combine.py, default false."
+   exit 1;
+ fi
+
+
+ export LC_ALL=C
+
+ srcdir=$1
+ min_seg_len=$2
+ dir=$3
+
+ if [ "$dir" == "$srcdir" ]; then
+   echo "$0: this script requires <srcdir> and <dir> to be different."
+   exit 1
+ fi
+
+ for f in $srcdir/utt2spk $srcdir/feats.scp; do
+   [ ! -s $f ] && echo "$0: expected file $f to exist and be nonempty" && exit 1
+ done
+
+ if ! awk '{if (NF != 2) exit(1);}' <$srcdir/feats.scp; then
+   echo "$0: could not combine short segments because $srcdir/feats.scp has "
+   echo " entries with too many fields"
+ fi
+
+ if ! mkdir -p $dir; then
+   echo "$0: could not create directory $dir"
+   exit 1;
+ fi
+
+ if ! utils/validate_data_dir.sh --no-text $srcdir; then
+   echo "$0: failed to validate input directory $srcdir. If needed, run utils/fix_data_dir.sh $srcdir"
+   exit 1
+ fi
+
+ if ! python -c "x=float('$min_seg_len'); assert(x>0.0 and x<100.0);" 2>/dev/null; then
+   echo "$0: bad <min-segment-length-in-seconds>: got '$min_seg_len'"
+   exit 1
+ fi
+
+ set -e
+ set -o pipefail
+
+ # make sure $srcdir/utt2dur exists.
+ utils/data/get_utt2dur.sh $srcdir
+
+ utils/data/internal/choose_utts_to_combine.py --min-duration=$min_seg_len \
+   --merge-within-speakers-only=$speaker_only \
+   $srcdir/spk2utt $srcdir/utt2dur $dir/utt2utts $dir/utt2spk $dir/utt2dur
+
+ utils/utt2spk_to_spk2utt.pl < $dir/utt2spk > $dir/spk2utt
+
+ # create the feats.scp.
+ # if a line of utt2utts is like 'utt2-comb2 utt2 utt3', then
+ # the utils/apply_map.pl will create a line that looks like
+ # 'utt2-comb2 foo.ark:4315 foo.ark:431423'
+ # and the awk command creates suitable command lines like:
+ # 'utt2-comb2 concat-feats foo.ark:4315 foo.ark:431423 - |'
+ utils/apply_map.pl -f 2- $srcdir/feats.scp <$dir/utt2utts | \
+   awk '{if (NF<=2){print;} else { $1 = $1 " concat-feats --print-args=false"; $NF = $NF " - |"; print; }}' > $dir/feats.scp
+
+ # create $dir/text by concatenating the source 'text' entries for the original
+ # utts.
+ if [ -f $srcdir/text ]; then
+   utils/apply_map.pl -f 2- $srcdir/text <$dir/utt2utts > $dir/text
+ fi
+
+ if [ -f $srcdir/utt2uniq ]; then
+   # the utt2uniq file is such that if 2 utts were derived from the same original
+   # utt (e.g. by speed perturbing) they map to the same 'uniq' value. This is
+   # so that we can properly hold out validation data for neural net training and
+   # know that we're not training on perturbed versions of that utterance. We
+   # need to obtain the utt2uniq file so that if any 2 'new' utts contain any of
+   # the same 'old' utts, their 'uniq' values are the same [but otherwise, as far
+   # as possible, the 'uniq' values are different.]
+   #
+   # we'll do this by arranging the old 'uniq' values into groups as necessary to
+   # capture this property.
+
+   # The following command creates 'uniq_sets', each line of which contains
+   # a set of original 'uniq' values, and effectively we assert that they must
+   # be grouped together to the same 'uniq' value.
+   # the first awk command prints a group of the original utterance-ids that
+   # are combined together into a single new utterance, and the apply_map
+   # command converts those into a list of original 'uniq' values.
+   awk '{$1 = ""; print;}' < $dir/utt2utts | \
+     utils/apply_map.pl $srcdir/utt2uniq > $dir/uniq_sets
+
+   # The next command creates $dir/uniq_to_orig_uniq, which is a map from the
+   # original 'uniq' values to the 'merged' uniq values.
+   # for example, if $dir/uniq_sets were to contain
+   # a b
+   # b c
+   # d
+   # then we'd obtain a uniq_to_orig_uniq file that looks like:
+   # a a
+   # b a
+   # c a
+   # d d
+   # ... because a and b appear together, and b and c appear together,
+   # they have to be merged into the same set, and we name that set 'a'
+   # (in general, we take the lowest string in lexicographical order).
+
+   cat $dir/uniq_sets | LC_ALL=C python3 -c '
+ import sys;
+ from collections import defaultdict
+ uniq2orig_uniq = dict()
+ equal_pairs = set() # set of 2-tuples (a,b) which should have equal orig_uniq
+ while True:
+     line = sys.stdin.readline()
+     if line == "": break
+     split_line = line.split() # list of uniq strings that should map in same set
+     # initialize uniq2orig_uniq to the identity mapping
+     for uniq in split_line: uniq2orig_uniq[uniq] = uniq
+     for a in split_line[1:]: equal_pairs.add((split_line[0], a))
+
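+ # Iteratively propagate the smallest id across all "equal" pairs until nothing
+ # changes: a simple fixed-point computation of connected components (like
+ # union-find, without the usual path compression).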
+ changed = True
+ while changed:
+     changed = False
+     for a,b in equal_pairs:
+         min_orig_uniq = min(uniq2orig_uniq[a], uniq2orig_uniq[b])
+         for x in [a,b]:
+             if uniq2orig_uniq[x] != min_orig_uniq:
+                 uniq2orig_uniq[x] = min_orig_uniq
+                 changed = True
+
+ for uniq in sorted(uniq2orig_uniq.keys()):
+     print(uniq, uniq2orig_uniq[uniq])
+ ' > $dir/uniq_to_orig_uniq
+   rm $dir/uniq_sets
+
+
+   # In the following command, suppose we have a line like:
+   # utt1-comb2 utt1 utt2
+   # .. the first awk command retains only the first original utt, to give
+   # utt1-comb2 utt1
+   # [we can pick one arbitrarily since we know any of them would map to the same
+   # orig_uniq value.]
+   # the first apply_map.pl command maps the 'utt1' to the 'uniq' value it mapped to
+   # in $srcdir, and the second apply_map.pl command maps it to the grouped 'uniq'
+   # value obtained by the inline python script above.
+   awk '{print $1, $2}' < $dir/utt2utts | utils/apply_map.pl -f 2 $srcdir/utt2uniq | \
+     utils/apply_map.pl -f 2 $dir/uniq_to_orig_uniq > $dir/utt2uniq
+   rm $dir/uniq_to_orig_uniq
+ fi
+
+ # note: the user will have to recompute the cmvn, as the speakers may have changed.
+ rm $dir/cmvn.scp 2>/dev/null || true
+
+ utils/validate_data_dir.sh --no-text --no-wav $dir
+
+ if $cleanup; then
+   rm $dir/utt2utts
+ fi
UniAudio/UniAudio/egs/SE/utils/data/convert_data_dir_to_whole.sh ADDED
@@ -0,0 +1,62 @@
+ #! /bin/bash
+
+ # Copyright 2016-2018 Vimal Manohar
+ # Apache 2.0
+
+ # This script converts a data directory into a "whole" data directory
+ # by removing the segments and using the recordings themselves as
+ # utterances.
+
+ set -o pipefail
+
+ . ./path.sh
+
+ . utils/parse_options.sh
+
+ if [ $# -ne 2 ]; then
+   echo "Usage: convert_data_dir_to_whole.sh <in-data> <out-data>"
+   echo " e.g.: convert_data_dir_to_whole.sh data/dev data/dev_whole"
+   exit 1
+ fi
+
+ data=$1
+ dir=$2
+
+ if [ ! -f $data/segments ]; then
+   echo "$0: Data directory already does not contain segments. So just copying it."
+   utils/copy_data_dir.sh $data $dir
+   exit 0
+ fi
+
+ mkdir -p $dir
+ cp $data/wav.scp $dir
+ if [ -f $data/reco2file_and_channel ]; then
+   cp $data/reco2file_and_channel $dir;
+ fi
+
+ mkdir -p $dir/.backup
+ if [ -f $dir/feats.scp ]; then
+   mv $dir/feats.scp $dir/.backup
+ fi
+ if [ -f $dir/cmvn.scp ]; then
+   mv $dir/cmvn.scp $dir/.backup
+ fi
+ if [ -f $dir/utt2spk ]; then
+   mv $dir/utt2spk $dir/.backup
+ fi
+
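+ # Any feats.scp/cmvn.scp/utt2spk already in the output dir are per-segment
+ # files from a previous run; they become stale once utterances are whole
+ # recordings, so they are moved aside rather than deleted.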
+ [ -f $data/stm ] && cp $data/stm $dir
+ [ -f $data/glm ] && cp $data/glm $dir
+
+ utils/data/internal/combine_segments_to_recording.py \
+   --write-reco2utt=$dir/reco2sorted_utts $data/segments $dir/utt2spk || exit 1
+
+ if [ -f $data/text ]; then
+   utils/apply_map.pl -f 2- $data/text < $dir/reco2sorted_utts > $dir/text || exit 1
+ fi
+
+ rm $dir/reco2sorted_utts
+
+ utils/fix_data_dir.sh $dir || exit 1
+
+ exit 0
UniAudio/UniAudio/egs/SE/utils/data/copy_data_dir.sh ADDED
@@ -0,0 +1,149 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2013 Johns Hopkins University (author: Daniel Povey)
+ # Apache 2.0
+
+ # This script operates on a directory, such as in data/train/,
+ # that contains some subset of the following files:
+ # feats.scp
+ # wav.scp
+ # vad.scp
+ # spk2utt
+ # utt2spk
+ # text
+ #
+ # It copies to another directory, possibly adding a specified prefix or a suffix
+ # to the utterance and/or speaker names. Note, the recording-ids stay the same.
+ #
+
+
+ # begin configuration section
+ spk_prefix=
+ utt_prefix=
+ spk_suffix=
+ utt_suffix=
+ validate_opts= # should rarely be needed.
+ # end configuration section
+
+ . utils/parse_options.sh
+
+ if [ $# != 2 ]; then
+   echo "Usage: "
+   echo " $0 [options] <srcdir> <destdir>"
+   echo "e.g.:"
+   echo " $0 --spk-prefix=1- --utt-prefix=1- data/train data/train_1"
+   echo "Options"
+   echo " --spk-prefix=<prefix> # Prefix for speaker ids, default empty"
+   echo " --utt-prefix=<prefix> # Prefix for utterance ids, default empty"
+   echo " --spk-suffix=<suffix> # Suffix for speaker ids, default empty"
+   echo " --utt-suffix=<suffix> # Suffix for utterance ids, default empty"
+   exit 1;
+ fi
+
+
+ export LC_ALL=C
+
+ srcdir=$1
+ destdir=$2
+
+ if [ ! -f $srcdir/utt2spk ]; then
+   echo "copy_data_dir.sh: no such file $srcdir/utt2spk"
+   exit 1;
+ fi
+
+ if [ "$destdir" == "$srcdir" ]; then
+   echo "$0: this script requires <srcdir> and <destdir> to be different."
+   exit 1
+ fi
+
+ set -e;
+
+ mkdir -p $destdir
+
+ cat $srcdir/utt2spk | awk -v p=$utt_prefix -v s=$utt_suffix '{printf("%s %s%s%s\n", $1, p, $1, s);}' > $destdir/utt_map
+ cat $srcdir/spk2utt | awk -v p=$spk_prefix -v s=$spk_suffix '{printf("%s %s%s%s\n", $1, p, $1, s);}' > $destdir/spk_map
+
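+ # If utterances get renamed, keep (or create) an utt2uniq file mapping the new
+ # names back to the original ones, so that copies of the same utterance can
+ # still be identified, e.g. when holding out validation data.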
+ if [ ! -f $srcdir/utt2uniq ]; then
+   if [[ ! -z $utt_prefix || ! -z $utt_suffix ]]; then
+     cat $srcdir/utt2spk | awk -v p=$utt_prefix -v s=$utt_suffix '{printf("%s%s%s %s\n", p, $1, s, $1);}' > $destdir/utt2uniq
+   fi
+ else
+   cat $srcdir/utt2uniq | awk -v p=$utt_prefix -v s=$utt_suffix '{printf("%s%s%s %s\n", p, $1, s, $2);}' > $destdir/utt2uniq
+ fi
+
+ cat $srcdir/utt2spk | utils/apply_map.pl -f 1 $destdir/utt_map | \
+   utils/apply_map.pl -f 2 $destdir/spk_map >$destdir/utt2spk
+
+ utils/utt2spk_to_spk2utt.pl <$destdir/utt2spk >$destdir/spk2utt
+
+ if [ -f $srcdir/feats.scp ]; then
+   utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/feats.scp >$destdir/feats.scp
+ fi
+
+ if [ -f $srcdir/vad.scp ]; then
+   utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/vad.scp >$destdir/vad.scp
+ fi
+
+ if [ -f $srcdir/utt2lang ]; then
+   utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/utt2lang >$destdir/utt2lang
+ fi
+
+ if [ -f $srcdir/segments ]; then
+   utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/segments >$destdir/segments
+   cp $srcdir/wav.scp $destdir
+ else # no segments->wav indexed by utt.
+   if [ -f $srcdir/wav.scp ]; then
+     utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/wav.scp >$destdir/wav.scp
+   fi
+ fi
+
+ if [ -f $srcdir/reco2file_and_channel ]; then
+   cp $srcdir/reco2file_and_channel $destdir/
+ fi
+
+ if [ -f $srcdir/text ]; then
+   utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/text >$destdir/text
+ fi
+ if [ -f $srcdir/utt2dur ]; then
+   utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/utt2dur >$destdir/utt2dur
+ fi
+ if [ -f $srcdir/utt2num_frames ]; then
+   utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/utt2num_frames >$destdir/utt2num_frames
+ fi
+ if [ -f $srcdir/reco2dur ]; then
+   if [ -f $srcdir/segments ]; then
+     cp $srcdir/reco2dur $destdir/reco2dur
+   else
+     utils/apply_map.pl -f 1 $destdir/utt_map <$srcdir/reco2dur >$destdir/reco2dur
+   fi
+ fi
+ if [ -f $srcdir/spk2gender ]; then
+   utils/apply_map.pl -f 1 $destdir/spk_map <$srcdir/spk2gender >$destdir/spk2gender
+ fi
+ if [ -f $srcdir/cmvn.scp ]; then
+   utils/apply_map.pl -f 1 $destdir/spk_map <$srcdir/cmvn.scp >$destdir/cmvn.scp
+ fi
+ for f in frame_shift stm glm ctm; do
+   if [ -f $srcdir/$f ]; then
+     cp $srcdir/$f $destdir
+   fi
+ done
+
+ rm $destdir/spk_map $destdir/utt_map
+
+ echo "$0: copied data from $srcdir to $destdir"
+
+ for f in feats.scp cmvn.scp vad.scp utt2lang utt2uniq utt2dur utt2num_frames text wav.scp reco2file_and_channel frame_shift stm glm ctm; do
+   if [ -f $destdir/$f ] && [ ! -f $srcdir/$f ]; then
+     echo "$0: file $f exists in dest $destdir but not in src $srcdir. Moving it to"
+     echo " ... $destdir/.backup/$f"
+     mkdir -p $destdir/.backup
+     mv $destdir/$f $destdir/.backup/
+   fi
+ done
+
+
+ [ ! -f $srcdir/feats.scp ] && validate_opts="$validate_opts --no-feats"
+ [ ! -f $srcdir/text ] && validate_opts="$validate_opts --no-text"
+
+ utils/validate_data_dir.sh $validate_opts $destdir
UniAudio/UniAudio/egs/SE/utils/data/extend_segment_times.py ADDED
@@ -0,0 +1,118 @@
+ #!/usr/bin/env python
+
+ from __future__ import print_function
+ import sys
+ import argparse
+ from collections import defaultdict
+
+
+ parser = argparse.ArgumentParser(description="""
+ Usage: extend_segment_times.py [options] <input-segments >output-segments
+ This program pads the times in a 'segments' file (e.g. data/train/segments)
+ with specified left and right context (for cases where there was no
+ silence padding in the original segments file)""")
+
+ parser.add_argument("--start-padding", type=float, default=0.1,
+                     help="Amount of padding, in seconds, for the start time of "
+                     "each segment (start times <0 will be set to zero).")
+ parser.add_argument("--end-padding", type=float, default=0.1,
+                     help="Amount of padding, in seconds, for the end time of "
+                     "each segment.")
+ parser.add_argument("--last-segment-end-padding", type=float, default=0.1,
+                     help="Amount of padding, in seconds, for the end time of "
+                     "the last segment of each file (maximum allowed).")
+ parser.add_argument("--fix-overlapping-segments", type=str,
+                     default='true', choices=['true', 'false'],
+                     help="If true, prevent segments from overlapping as a result "
+                     "of the padding (or that were already overlapping)")
+ args = parser.parse_args()
+
+
+ # the input file will be a sequence of lines which are each of the form:
+ # <utterance-id> <recording-id> <start-time> <end-time>
+ # e.g.
+ # utt-1 recording-1 0.62 5.40
+ # The output will be in the same format and in the same
+ # order, except with modified times.
+
+ # This variable maps from a recording-id to a list of the utterance
+ # indexes (as integer indexes into 'entries')
+ # that are part of that recording.
+ recording_to_utt_indexes = defaultdict(list)
+
+ # This is an array of the entries in the segments file, in the format:
+ # (utterance-id as a string, recording-id as string,
+ #  start-time as float, end-time as float)
+ entries = []
+
+
+ while True:
+     line = sys.stdin.readline()
+     if line == '':
+         break
+     try:
+         [utt_id, recording_id, start_time, end_time] = line.split()
+         start_time = float(start_time)
+         end_time = float(end_time)
+     except:
+         sys.exit("extend_segment_times.py: could not interpret line: " + line)
+     if not end_time > start_time:
+         print("extend_segment_times.py: bad segment (ignoring): " + line,
+               file=sys.stderr)
+         continue  # actually skip the bad segment rather than padding it.
+     recording_to_utt_indexes[recording_id].append(len(entries))
+     entries.append([utt_id, recording_id, start_time, end_time])
+
+ num_times_fixed = 0
+
+ for recording, utt_indexes in recording_to_utt_indexes.items():
+     # this_entries is a list of lists, sorted on mid-time.
+     # Notice: because lists are objects, when we change 'this_entries'
+     # we change the underlying entries.
+     this_entries = sorted([entries[x] for x in utt_indexes],
+                           key=lambda x: 0.5 * (x[2] + x[3]))
+     min_time = 0
+     max_time = max([x[3] for x in this_entries]) + args.last_segment_end_padding
+     start_padding = args.start_padding
+     end_padding = args.end_padding
+     for n in range(len(this_entries)):
+         this_entries[n][2] = max(min_time, this_entries[n][2] - start_padding)
+         this_entries[n][3] = min(max_time, this_entries[n][3] + end_padding)
+
+     for n in range(len(this_entries) - 1):
+         this_end_time = this_entries[n][3]
+         next_start_time = this_entries[n+1][2]
+         if this_end_time > next_start_time and args.fix_overlapping_segments == 'true':
+             midpoint = 0.5 * (this_end_time + next_start_time)
+             this_entries[n][3] = midpoint
+             this_entries[n+1][2] = midpoint
+             num_times_fixed += 1
+
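+ # Overlap example: if padding makes one segment end at 10.2s and the next
+ # start at 9.8s, both boundaries are moved to the 10.0s midpoint above, so
+ # neither segment is shortened more than necessary.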
+
+ # this prints a number with a certain number of digits after
+ # the point, while removing trailing zeros.
+ def FloatToString(f):
+     num_digits = 6  # we want to print 6 digits after the point
+     g = f
+     while abs(g) > 1.0:
+         g *= 0.1
+         num_digits += 1
+     format_str = '%.{0}g'.format(num_digits)
+     return format_str % f
+
+ for entry in entries:
+     [utt_id, recording_id, start_time, end_time] = entry
+     if not start_time < end_time:
+         print("extend_segment_times.py: bad segment after processing (ignoring): " +
+               ' '.join(map(str, entry)), file=sys.stderr)
+         continue
+     print(utt_id, recording_id, FloatToString(start_time), FloatToString(end_time))
+
+
+ print("extend_segment_times.py: extended {0} segments; fixed {1} "
+       "overlapping segments".format(len(entries), num_times_fixed),
+       file=sys.stderr)
+
+ ## test:
+ # (echo utt1 reco1 0.2 6.2; echo utt2 reco1 6.3 9.8 )| extend_segment_times.py
+ # and also try the above with the options --last-segment-end-padding=0.0 --fix-overlapping-segments=false
+
UniAudio/UniAudio/egs/SE/utils/data/extract_wav_segments_data_dir.sh ADDED
@@ -0,0 +1,59 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2017 Hossein Hadian
+ # Apache 2.0
+
+ # This script copies a data directory (which has a 'segments' file), extracting
+ # wav segments (according to the 'segments' file)
+ # so that the resulting data directory does not have a 'segments' file anymore.
+
+ nj=4
+ cmd=run.pl
+
+ . ./utils/parse_options.sh
+ . ./path.sh
+
+ if [ $# != 2 ]; then
+   echo "Usage: $0 <srcdir> <destdir>"
+   echo " This script copies data directory <srcdir> to <destdir> and removes"
+   echo " the 'segments' file by extracting the wav segments."
+   echo "Options: "
+   echo " --nj <nj> # number of parallel jobs"
+   echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
+   exit 1;
+ fi
+
+
+ export LC_ALL=C
+
+ srcdir=$1
+ dir=$2
+ logdir=$dir/log
+
+ if ! mkdir -p $dir/data; then
+   echo "$0: failed to create directory $dir/data"
+   exit 1
+ fi
+ mkdir -p $logdir
+
+ set -eu -o pipefail
+ utils/copy_data_dir.sh $srcdir $dir
+
+ split_segments=""
+ for n in $(seq $nj); do
+   split_segments="$split_segments $logdir/segments.$n"
+ done
+
+ utils/split_scp.pl $srcdir/segments $split_segments
+
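+ # Run extract-segments in parallel: each job cuts its share of the segments
+ # out of the source wavs and writes them to per-job archives, which are then
+ # referenced via wav-copy pipes in the new wav.scp below.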
+ $cmd JOB=1:$nj $logdir/extract_wav_segments.JOB.log \
+   extract-segments scp,p:$srcdir/wav.scp $logdir/segments.JOB \
+   ark,scp:$dir/data/wav_segments.JOB.ark,$dir/data/wav_segments.JOB.scp
+
+ # concatenate the .scp files together.
+ for n in $(seq $nj); do
+   cat $dir/data/wav_segments.$n.scp
+ done > $dir/data/wav_segments.scp
+
+ cat $dir/data/wav_segments.scp | awk '{ print $1 " wav-copy " $2 " - |" }' >$dir/wav.scp
+ rm $dir/{segments,reco2file_and_channel} 2>/dev/null || true
UniAudio/UniAudio/egs/SE/utils/data/fix_data_dir.sh ADDED
@@ -0,0 +1,215 @@
+ #!/usr/bin/env bash
+
+ # This script makes sure that only the segments present in
+ # all of "feats.scp", "wav.scp" [if present], segments [if present]
+ # text, and utt2spk are present in any of them.
+ # It puts the original contents of data-dir into
+ # data-dir/.backup
+
+ cmd="$@"
+
+ utt_extra_files=
+ spk_extra_files=
+
+ . utils/parse_options.sh
+
+ if [ $# != 1 ]; then
+   echo "Usage: utils/data/fix_data_dir.sh <data-dir>"
+   echo "e.g.: utils/data/fix_data_dir.sh data/train"
+   echo "This script helps ensure that the various files in a data directory"
+   echo "are correctly sorted and filtered, for example removing utterances"
+   echo "that have no features (if feats.scp is present)"
+   exit 1
+ fi
+
+ data=$1
+
+ if [ -f $data/images.scp ]; then
+   image/fix_data_dir.sh $cmd
+   exit $?
+ fi
+
+ mkdir -p $data/.backup
+
+ [ ! -d $data ] && echo "$0: no such directory $data" && exit 1;
+
+ [ ! -f $data/utt2spk ] && echo "$0: no such file $data/utt2spk" && exit 1;
+
+ set -e -o pipefail -u
+
+ tmpdir=$(mktemp -d /tmp/kaldi.XXXX);
+ trap 'rm -rf "$tmpdir"' EXIT HUP INT PIPE TERM
+
+ export LC_ALL=C
+
+ function check_sorted {
+   file=$1
+   sort -k1,1 -u <$file >$file.tmp
+   if ! cmp -s $file $file.tmp; then
+     echo "$0: file $1 is not in sorted order or not unique, sorting it"
+     mv $file.tmp $file
+   else
+     rm $file.tmp
+   fi
+ }
+
+ for x in utt2spk spk2utt feats.scp text segments wav.scp cmvn.scp vad.scp \
+     reco2file_and_channel spk2gender utt2lang utt2uniq utt2dur reco2dur utt2num_frames; do
+   if [ -f $data/$x ]; then
+     cp $data/$x $data/.backup/$x
+     check_sorted $data/$x
+   fi
+ done
+
+
+ function filter_file {
+   filter=$1
+   file_to_filter=$2
+   cp $file_to_filter ${file_to_filter}.tmp
+   utils/filter_scp.pl $filter ${file_to_filter}.tmp > $file_to_filter
+   if ! cmp ${file_to_filter}.tmp $file_to_filter >&/dev/null; then
+     length1=$(cat ${file_to_filter}.tmp | wc -l)
+     length2=$(cat ${file_to_filter} | wc -l)
+     if [ $length1 -ne $length2 ]; then
+       echo "$0: filtered $file_to_filter from $length1 to $length2 lines based on filter $filter."
+     fi
+   fi
+   rm $file_to_filter.tmp
+ }
+
+ function filter_recordings {
+   # We call this once before the stage when we filter on utterance-id, and once
+   # after.
+
+   if [ -f $data/segments ]; then
+     # We have a segments file -> we need to filter this and the file wav.scp, and
+     # reco2file_and_channel, if it exists, to make sure they have the same list of
+     # recording-ids.
+
+     if [ ! -f $data/wav.scp ]; then
+       echo "$0: $data/segments exists but not $data/wav.scp"
+       exit 1;
+     fi
+     awk '{print $2}' < $data/segments | sort | uniq > $tmpdir/recordings
+     n1=$(cat $tmpdir/recordings | wc -l)
+     [ ! -s $tmpdir/recordings ] && \
+       echo "Empty list of recordings (bad file $data/segments)?" && exit 1;
+     utils/filter_scp.pl $data/wav.scp $tmpdir/recordings > $tmpdir/recordings.tmp
+     mv $tmpdir/recordings.tmp $tmpdir/recordings
+
+
+     cp $data/segments{,.tmp}; awk '{print $2, $1, $3, $4}' <$data/segments.tmp >$data/segments
+     filter_file $tmpdir/recordings $data/segments
+     cp $data/segments{,.tmp}; awk '{print $2, $1, $3, $4}' <$data/segments.tmp >$data/segments
+     rm $data/segments.tmp
+
+     filter_file $tmpdir/recordings $data/wav.scp
+     [ -f $data/reco2file_and_channel ] && filter_file $tmpdir/recordings $data/reco2file_and_channel
+     [ -f $data/reco2dur ] && filter_file $tmpdir/recordings $data/reco2dur
+     true
+   fi
+ }
+
+ function filter_speakers {
+   # throughout this program, we regard utt2spk as primary and spk2utt as derived, so...
+   utils/utt2spk_to_spk2utt.pl $data/utt2spk > $data/spk2utt
+
+   cat $data/spk2utt | awk '{print $1}' > $tmpdir/speakers
+   for s in cmvn.scp spk2gender; do
+     f=$data/$s
+     if [ -f $f ]; then
+       filter_file $f $tmpdir/speakers
+     fi
+   done
+
+   filter_file $tmpdir/speakers $data/spk2utt
+   utils/spk2utt_to_utt2spk.pl $data/spk2utt > $data/utt2spk
+
+   for s in cmvn.scp spk2gender $spk_extra_files; do
+     f=$data/$s
+     if [ -f $f ]; then
+       filter_file $tmpdir/speakers $f
+     fi
+   done
+ }
+
+ function filter_utts {
+   cat $data/utt2spk | awk '{print $1}' > $tmpdir/utts
+
+   ! cat $data/utt2spk | sort | cmp - $data/utt2spk && \
+     echo "utt2spk is not in sorted order (fix this yourself)" && exit 1;
+
+   ! cat $data/utt2spk | sort -k2 | cmp - $data/utt2spk && \
+     echo "utt2spk is not in sorted order when sorted first on speaker-id " && \
+     echo "(fix this by making speaker-ids prefixes of utt-ids)" && exit 1;
+
+   ! cat $data/spk2utt | sort | cmp - $data/spk2utt && \
+     echo "spk2utt is not in sorted order (fix this yourself)" && exit 1;
+
+   if [ -f $data/utt2uniq ]; then
+     ! cat $data/utt2uniq | sort | cmp - $data/utt2uniq && \
+       echo "utt2uniq is not in sorted order (fix this yourself)" && exit 1;
+   fi
+
+   maybe_wav=
+   maybe_reco2dur=
+   [ ! -f $data/segments ] && maybe_wav=wav.scp # wav indexed by utts only if segments does not exist.
+   [ -s $data/reco2dur ] && [ ! -f $data/segments ] && maybe_reco2dur=reco2dur # reco2dur indexed by utts
+
+   maybe_utt2dur=
+   if [ -f $data/utt2dur ]; then
+     cat $data/utt2dur | \
+       awk '{ if (NF == 2 && $2 > 0) { print }}' > $data/utt2dur.ok || exit 1
+     maybe_utt2dur=utt2dur.ok
+   fi
+
+   maybe_utt2num_frames=
+   if [ -f $data/utt2num_frames ]; then
+     cat $data/utt2num_frames | \
+       awk '{ if (NF == 2 && $2 > 0) { print }}' > $data/utt2num_frames.ok || exit 1
+     maybe_utt2num_frames=utt2num_frames.ok
+   fi
+
+   for x in feats.scp text segments utt2lang $maybe_wav $maybe_utt2dur $maybe_utt2num_frames; do
+     if [ -f $data/$x ]; then
+       utils/filter_scp.pl $data/$x $tmpdir/utts > $tmpdir/utts.tmp
+       mv $tmpdir/utts.tmp $tmpdir/utts
+     fi
+   done
+   rm $data/utt2dur.ok 2>/dev/null || true
+   rm $data/utt2num_frames.ok 2>/dev/null || true
+
+   [ ! -s $tmpdir/utts ] && echo "fix_data_dir.sh: no utterances remained: not proceeding further." && \
+     rm $tmpdir/utts && exit 1;
+
+
+   if [ -f $data/utt2spk ]; then
+     new_nutts=$(cat $tmpdir/utts | wc -l)
+     old_nutts=$(cat $data/utt2spk | wc -l)
+     if [ $new_nutts -ne $old_nutts ]; then
+       echo "fix_data_dir.sh: kept $new_nutts utterances out of $old_nutts"
+     else
+       echo "fix_data_dir.sh: kept all $old_nutts utterances."
+     fi
+   fi
+
+   for x in utt2spk utt2uniq feats.scp vad.scp text segments utt2lang utt2dur utt2num_frames $maybe_wav $maybe_reco2dur $utt_extra_files; do
+     if [ -f $data/$x ]; then
+       cp $data/$x $data/.backup/$x
+       if ! cmp -s $data/$x <( utils/filter_scp.pl $tmpdir/utts $data/$x ) ; then
+         utils/filter_scp.pl $tmpdir/utts $data/.backup/$x > $data/$x
+       fi
+     fi
+   done
+
+ }
+
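+ # Filtering one table can invalidate another (e.g. dropping utterances can
+ # empty out a speaker or a recording), so the passes are interleaved and the
+ # speaker/recording filters are re-run after the utterance filter.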
+ filter_recordings
+ filter_speakers
+ filter_utts
+ filter_speakers
+ filter_recordings
+
+ utils/utt2spk_to_spk2utt.pl $data/utt2spk > $data/spk2utt
+
+ echo "fix_data_dir.sh: old files are kept in $data/.backup"
UniAudio/UniAudio/egs/SE/utils/data/fix_subsegment_feats.pl ADDED
@@ -0,0 +1,106 @@
+ #!/usr/bin/env perl
+
+ # Copyright 2016 Vimal Manohar
+ # Apache 2.0.
+
+ use warnings;
+
+ # This script reads from stdin a feats.scp file that contains frame ranges and
+ # ensures that they don't exceed the maximum number of frames supplied in the
+ # <utt2max-frames> file.
+ # <utt2max-frames> is usually computed using get_utt2num_frames.sh on the
+ # original directory which will be segmented using
+ # utils/data/subsegment_data_dir.sh.
+ #
+ # e.g. feats.scp
+ # utt_foo-1 foo-bar.ark:514231[721:892]
+ #
+ # utt2max-frames
+ # utt_foo-1 891
+ #
+ # fixed_feats.scp
+ # utt_foo-1 foo-bar.ark:514231[721:890]
+ #
+ # Note: Here 891 is the number of frames in the archive foo-bar.ark
+ # The frame end for utt_foo-1, i.e. 892 (0-indexed) exceeds the archive size
+ # (891) by two frames. This script fixes that line by truncating the range
+ # to 890.
+
+ if (scalar @ARGV != 1) {
+   my $usage = <<END;
+ This script reads from stdin a feats.scp file that contains frame ranges and
+ ensures that they don't exceed the maximum number of frames supplied in the
+ <utt2max-frames> file.
+
+ Usage: $0 <utt2max-frames> < feats.scp > fixed_feats.scp
+END
+   die "$usage";
+ }
+
+ my $utt2max_frames_file = $ARGV[0];
+
+ open MAX_FRAMES, $utt2max_frames_file or die "$0: Could not open file $utt2max_frames_file";
+
+ my %utt2max_frames;
+
+ while (<MAX_FRAMES>) {
+   chomp;
+   my @F = split;
+
+   (scalar @F == 2) or die "$0: Invalid line $_ in $utt2max_frames_file";
+
+   $utt2max_frames{$F[0]} = $F[1];
+ }
+
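+ # Now filter the feats.scp from stdin: clamp each row range [start:end] to the
+ # archive length, dropping the utterance entirely if its start is already past
+ # the end of the archive.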
+ while (<STDIN>) {
+   my $line = $_;
+
+   #if (m/\[([^][]*)\]\[([^][]*)\]\s*$/) {
+   #  print STDERR ("fix_subsegment_feats.pl: this script only supports single indices");
+   #  exit(1);
+   #}
+
+   my $before_range = "";
+   my $range = "";
+
+   if (m/^(.*)\[([^][]*)\]\s*$/) {
+     $before_range = $1;
+     $range = $2;
+   } else {
+     print;
+     next;
+   }
+
+   my @F = split(/ /, $before_range);
+   my $utt = shift @F;
+   defined $utt2max_frames{$utt} or die "fix_subsegment_feats.pl: Could not find key $utt in $utt2max_frames_file.\nError with line $line";
+
+   if ($range !~ m/^(\d*):(\d*)([,]?.*)$/) {
+     print STDERR "fix_subsegment_feats.pl: could not make sense of input line $_";
+     exit(1);
+   }
+
+   my $row_start = $1;
+   my $row_end = $2;
+   my $col_range = $3;
+
+   if ($row_start >= $utt2max_frames{$utt}) {
+     print STDERR "Removing $utt because row_start $row_start >= file max length $utt2max_frames{$utt}\n";
+     next;
+   }
+   if ($row_end >= $utt2max_frames{$utt}) {
+     print STDERR "Fixed row_end for $utt from $row_end to $utt2max_frames{$utt}-1\n";
+     $row_end = $utt2max_frames{$utt} - 1;
+   }
+
+   if ($row_start ne "") {
+     $range = "$row_start:$row_end";
+   } else {
+     $range = "";
+   }
+
+   if ($col_range ne "") {
+     $range .= ",$col_range";
+   }
+   print ("$utt " . join(" ", @F) . "[" . $range . "]\n");
+ }
UniAudio/UniAudio/egs/SE/utils/data/get_allowed_durations.py ADDED
@@ -0,0 +1,219 @@
+ #!/usr/bin/env python3
+
+ # Copyright 2017 Hossein Hadian
+ # 2019 Facebook Inc. (Author: Vimal Manohar)
+ # Apache 2.0
+
+
+ """ This script generates a set of allowed lengths of utterances
+ spaced by a factor (like 10%). This is useful for generating
+ fixed-length chunks for chain training.
+ """
12
+
13
+ import argparse
14
+ import os
15
+ import sys
16
+ import copy
17
+ import math
18
+ import logging
19
+
20
+ sys.path.insert(0, 'steps')
21
+ import libs.common as common_lib
22
+
23
+ logger = logging.getLogger('libs')
24
+ logger.setLevel(logging.INFO)
25
+ handler = logging.StreamHandler()
26
+ handler.setLevel(logging.INFO)
27
+ formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s - "
28
+ "%(funcName)s - %(levelname)s ] %(message)s")
29
+ handler.setFormatter(formatter)
30
+ logger.addHandler(handler)
31
+
32
+ def get_args():
33
+ parser = argparse.ArgumentParser(description="""
34
+ This script creates a list of allowed durations of utterances for flatstart
35
+ LF-MMI training corresponding to input data directory 'data_dir' and writes
36
+ it in two files in output directory 'dir':
37
+ 1) allowed_durs.txt -- durations are in seconds
38
+ 2) allowed_lengths.txt -- lengths are in number of frames
39
+
40
+ Both the allowed_durs.txt and allowed_lengths.txt are formatted to
41
+ have one entry on each line. Examples are as follows:
42
+
43
+ $ echo data/train/allowed_lengths.txt
44
+ 414
45
+ 435
46
+ 468
47
+
48
+ $ echo data/train/allowed_durs.txt
49
+ 4.16
50
+ 4.37
51
+ 4.70
52
+
53
+ These files can then be used by a downstream script to perturb the
54
+ utterances to these lengths.
55
+ A perturbed data directory (created by a downstream script
56
+ similar to utils/data/perturb_speed_to_allowed_lengths.py)
57
+ that only contains utterances of these allowed durations,
58
+ along with the corresponding allowed_lengths.txt are
59
+ consumed by the e2e chain egs preparation script.
60
+ See steps/nnet3/chain/e2e/get_egs_e2e.sh for how these are used.
61
+
62
+ See also:
63
+ * egs/cifar/v1/image/get_allowed_lengths.py -- a similar script for OCR datasets
64
+ * utils/data/perturb_speed_to_allowed_lengths.py --
65
+ creates the allowed_lengths.txt AND perturbs the data directory
66
+ """)
67
+ parser.add_argument('factor', type=float, default=12,
68
+ help='Spacing (in percentage) between allowed lengths. '
69
+ 'Can be 0, which means all seen lengths that are a multiple of '
70
+ 'frame_subsampling_factor will be allowed.')
71
+ parser.add_argument('data_dir', type=str, help='path to data dir. Assumes that '
72
+ 'it contains the utt2dur file.')
73
+ parser.add_argument('dir', type=str, help='We write the output files '
74
+ 'allowed_lengths.txt and allowed_durs.txt to this directory.')
75
+ parser.add_argument('--coverage-factor', type=float, default=0.05,
76
+ help="""Percentage of durations not covered from each
77
+ side of duration histogram.""")
78
+ parser.add_argument('--frame-shift', type=int, default=10,
79
+ help="""Frame shift in milliseconds.""")
80
+ parser.add_argument('--frame-length', type=int, default=25,
81
+ help="""Frame length in milliseconds.""")
82
+ parser.add_argument('--frame-subsampling-factor', type=int, default=3,
83
+ help="""Chain frame subsampling factor.
84
+ See steps/nnet3/chain/train.py""")
85
+ args = parser.parse_args()
86
+ return args
87
+
88
+
89
+ def read_kaldi_mapfile(path):
90
+ """ Read any Kaldi mapping file - like text, .scp files, etc.
91
+ """
92
+
93
+ m = {}
94
+ with open(path, 'r', encoding='latin-1') as f:
95
+ for line in f:
96
+ line = line.strip(" \t\r\n")
97
+ sp_pos = line.find(' ')
98
+ key = line[:sp_pos]
99
+ val = line[sp_pos+1:]
100
+ m[key] = val
101
+ return m
102
+
103
+
104
+ def find_duration_range(utt2dur, coverage_factor):
105
+ """Given a list of utterance durations, find the start and end duration to cover
106
+
107
+ If we try to cover
108
+ all durations which occur in the training set, the number of
109
+ allowed lengths could become very large.
110
+
111
+ Returns
112
+ -------
113
+ start_dur: float
114
+ end_dur: float
115
+ """
116
+ durs = [float(val) for key, val in utt2dur.items()]
117
+ durs.sort()
118
+ to_ignore_dur = 0
119
+ tot_dur = sum(durs)
120
+ for d in durs:
121
+ to_ignore_dur += d
122
+ if to_ignore_dur * 100.0 / tot_dur > coverage_factor:
123
+ start_dur = d
124
+ break
125
+ to_ignore_dur = 0
126
+ for d in reversed(durs):
127
+ to_ignore_dur += d
128
+ if to_ignore_dur * 100.0 / tot_dur > coverage_factor:
129
+ end_dur = d
130
+ break
131
+ if start_dur < 0.3:
132
+ start_dur = 0.3 # a hard limit to avoid too many allowed lengths --not critical
133
+ return start_dur, end_dur
134
+
135
+
136
+ def get_allowed_durations(start_dur, end_dur, args):
137
+ """Given the start and end duration, find a set of
138
+ allowed durations spaced by args.factor%. Also write
139
+ out the list of allowed durations and the corresponding
140
+ allowed lengths (in frames) on disk.
141
+
142
+ Returns
143
+ -------
144
+ allowed_durations: list of allowed durations (in seconds)
145
+ """
146
+
147
+ allowed_durations = []
148
+ d = start_dur
149
+ with open(os.path.join(args.dir, 'allowed_durs.txt'), 'w', encoding='latin-1') as durs_fp, \
150
+ open(os.path.join(args.dir, 'allowed_lengths.txt'), 'w', encoding='latin-1') as lengths_fp:
151
+ while d < end_dur:
152
+ length = int(d * 1000 - args.frame_length) / args.frame_shift + 1
153
+ if length % args.frame_subsampling_factor != 0:
154
+ length = (args.frame_subsampling_factor *
155
+ (length // args.frame_subsampling_factor))
156
+ d = (args.frame_shift * (length - 1.0)
157
+ + args.frame_length + args.frame_shift / 2) / 1000.0
158
+ allowed_durations.append(d)
159
+ durs_fp.write("{}\n".format(d))
160
+ lengths_fp.write("{}\n".format(int(length)))
161
+ d *= args.factor
162
+ return allowed_durations
163
+
164
+
165
+ def get_trivial_allowed_durations(utt2dur, args):
166
+ lengths = list(set(
167
+ [int(float(d) * 1000 - args.frame_length) / args.frame_shift + 1
168
+ for key, d in utt2dur.items()]
169
+ ))
170
+ lengths.sort()
171
+
172
+ allowed_durations = []
173
+ with open(os.path.join(args.dir, 'allowed_durs.txt'), 'w', encoding='latin-1') as durs_fp, \
174
+ open(os.path.join(args.dir, 'allowed_lengths.txt'), 'w', encoding='latin-1') as lengths_fp:
175
+ for length in lengths:
176
+ if length % args.frame_subsampling_factor != 0:
177
+ length = (args.frame_subsampling_factor *
178
+ (length // args.frame_subsampling_factor))
179
+ d = (args.frame_shift * (length - 1.0)
180
+ + args.frame_length + args.frame_shift / 2) / 1000.0
181
+ allowed_durations.append(d)
182
+ durs_fp.write("{}\n".format(d))
183
+ lengths_fp.write("{}\n".format(int(length)))
184
+
185
+ assert len(allowed_durations) > 0
186
+ start_dur = allowed_durations[0]
187
+ end_dur = allowed_durations[-1]
188
+
189
+ logger.info("Durations in the range [{},{}] will be covered."
190
+ "".format(start_dur, end_dur))
191
+ logger.info("There will be {} unique allowed lengths "
192
+ "for the utterances.".format(len(allowed_durations)))
193
+
194
+ return allowed_durations
195
+
196
+
197
+ def main():
198
+ args = get_args()
199
+ utt2dur = read_kaldi_mapfile(os.path.join(args.data_dir, 'utt2dur'))
200
+
201
+ if args.factor == 0.0:
202
+ get_trivial_allowed_durations(utt2dur, args)
203
+ return
204
+
205
+ args.factor = 1.0 + args.factor / 100.0
206
+
207
+ start_dur, end_dur = find_duration_range(utt2dur, args.coverage_factor)
208
+ logger.info("Durations in the range [{},{}] will be covered. "
209
+ "Coverage rate: {}%".format(start_dur, end_dur,
210
+ 100.0 - args.coverage_factor * 2))
211
+ logger.info("There will be {} unique allowed lengths "
212
+ "for the utterances.".format(int(math.log(end_dur / start_dur)/
213
+ math.log(args.factor))))
214
+
215
+ get_allowed_durations(start_dur, end_dur, args)
216
+
217
+
218
+ if __name__ == '__main__':
219
+ main()
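The core arithmetic of the script is the duration-to-frame-count mapping and the rounding down to a multiple of the subsampling factor. A hedged Python sketch of just that step (the helper name dur_to_allowed is illustrative; defaults mirror the script's 10 ms shift / 25 ms window):

    def dur_to_allowed(d, frame_shift=10, frame_length=25, subsampling=3):
        # frames that fit in d seconds of audio
        length = int((d * 1000 - frame_length) / frame_shift) + 1
        if length % subsampling != 0:
            length = subsampling * (length // subsampling)  # round down to a multiple
        # snap the duration to the midpoint of the last frame
        d = (frame_shift * (length - 1.0) + frame_length + frame_shift / 2) / 1000.0
        return length, d

    print(dur_to_allowed(4.17))  # (414, 4.16), matching the example in the docstring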
UniAudio/UniAudio/egs/SE/utils/data/get_frame_shift.sh ADDED
@@ -0,0 +1,74 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2016 Johns Hopkins University (author: Daniel Povey)
+ # Apache 2.0
+
+ # This script takes as input a data directory, such as data/train/, preferably
+ # with the utt2dur file already existing (the utt2dur file will be created if
+ # not), and attempts to work out the approximate frame shift by comparing the
+ # utt2dur with the output of feat-to-len on the feats.scp. It prints it out.
+ # If the shift is very close to, but above, 0.01 (the normal frame shift), it
+ # rounds it down.
+
+ . utils/parse_options.sh
+ . ./path.sh
+
+ if [ $# != 1 ]; then
+   cat >&2 <<EOF
+ Usage: frame_shift=\$($0 <datadir>)
+ e.g.: frame_shift=\$($0 data/train)
+
+ This script prints the frame-shift in seconds (e.g. 0.01) to standard output.
+ Its output is intended to be captured in a shell variable.
+
+ If <datadir> does not contain the file utt2dur, this script may invoke
+ utils/data/get_utt2dur.sh, which will require write permission to <datadir>.
+ EOF
+   exit 1
+ fi
+
+ export LC_ALL=C
+
+ dir=$1
+
+ if [[ -s $dir/frame_shift ]]; then
+   cat $dir/frame_shift
+   exit
+ fi
+
+ if [ ! -f $dir/feats.scp ]; then
+   echo "$0: $dir/feats.scp does not exist" 1>&2
+   exit 1
+ fi
+
+ if [ ! -s $dir/utt2dur ]; then
+   if [ ! -e $dir/wav.scp ] && [ ! -s $dir/segments ]; then
+     echo "$0: neither $dir/wav.scp nor $dir/segments exist; assuming a frame shift of 0.01." 1>&2
+     echo 0.01
+     exit 0
+   fi
+   echo "$0: $dir/utt2dur does not exist: creating it" 1>&2
+   utils/data/get_utt2dur.sh 1>&2 $dir || exit 1
+ fi
+
+ temp=$(mktemp /tmp/tmp.XXXX) || exit 1
+
+ feat-to-len --print-args=false "scp:head -n 10 $dir/feats.scp|" ark,t:- > $temp
+
+ if [[ ! -s $temp ]]; then
+   rm $temp
+   echo "$0: error running feat-to-len" 1>&2
+   exit 1
+ fi
+
+ frame_shift=$(head -n 10 $dir/utt2dur | paste - $temp | awk '
+   { dur += $2; frames += $4; }
+   END { shift = dur / frames;
+         if (shift > 0.01 && shift < 0.0102) shift = 0.01;
+         print shift; }') || exit 1;
+
+ rm $temp
+
+ echo $frame_shift > $dir/frame_shift
+ echo $frame_shift
+ exit 0
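The estimate itself is just total duration over total frame count for the first ten utterances, with a snap-to-0.01 rule for values just above the nominal shift. A Python sketch of the awk logic above (values illustrative):

    def estimate_frame_shift(durations, frame_counts):
        shift = sum(durations) / sum(frame_counts)
        if 0.01 < shift < 0.0102:  # absorb the frames lost to the analysis window
            shift = 0.01
        return shift

    print(estimate_frame_shift([2.005, 3.125], [200, 312]))  # -> 0.01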
UniAudio/UniAudio/egs/SE/utils/data/get_num_frames.sh ADDED
@@ -0,0 +1,26 @@
+ #!/usr/bin/env bash
+
+ # This script works out the approximate number of frames in a training directory.
+ # This is sometimes needed by higher-level scripts.
+
+
+ if [ -f path.sh ]; then . ./path.sh; fi
+ . parse_options.sh || exit 1;
+
+ if [ $# -ne 1 ]; then
+   (
+     echo "Usage: $0 <data-dir>"
+     echo "Prints the number of frames of data in the data-dir"
+   ) 1>&2
+   exit 1
+ fi
+
+ data=$1
+
+ if [ ! -f $data/utt2dur ]; then
+   utils/data/get_utt2dur.sh $data 1>&2 || exit 1
+ fi
+
+ frame_shift=$(utils/data/get_frame_shift.sh $data) || exit 1
+
+ awk -v s=$frame_shift '{n += $2} END{printf("%.0f\n", (n / s))}' <$data/utt2dur
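In other words, the frame count is approximated as total duration divided by the frame shift. The same computation as the awk one-liner, sketched in Python (illustrative helper name):

    def num_frames(utt2dur, frame_shift=0.01):
        return round(sum(utt2dur.values()) / frame_shift)

    print(num_frames({"utt1": 2.5, "utt2": 3.5}))  # -> 600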
UniAudio/UniAudio/egs/SE/utils/data/get_reco2dur.sh ADDED
@@ -0,0 +1,143 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2016 Johns Hopkins University (author: Daniel Povey)
+ #           2018 Andrea Carmantini
+ # Apache 2.0
+
+ # This script operates on a data directory, such as in data/train/, and adds the
+ # reco2dur file if it does not already exist. The file 'reco2dur' maps from
+ # recording to the duration of the recording in seconds. This script works it
+ # out from the 'wav.scp' file, or, if utterance-ids are the same as recording-ids, from the
+ # utt2dur file (it first tries interrogating the headers, and if this fails, it reads the wave
+ # files in entirely.)
+ # We could use durations from the segments file, but that's not the duration of the recordings,
+ # just the sum of utterance lengths (silence in between could be excluded from segments).
+ # For the sum of utterance lengths:
+ # awk 'FNR==NR{uttdur[$1]=$2;next}
+ #      { for(i=2;i<=NF;i++){dur+=uttdur[$i];}
+ #        print $1 FS dur; dur=0 }' $data/utt2dur $data/reco2utt
+
+
+ frame_shift=0.01
+ cmd=run.pl
+ nj=4
+
+ . utils/parse_options.sh
+ . ./path.sh
+
+ if [ $# != 1 ]; then
+   echo "Usage: $0 [options] <datadir>"
+   echo "e.g.:"
+   echo " $0 data/train"
+   echo " Options:"
+   echo " --frame-shift      # frame shift in seconds. Only relevant when we are"
+   echo "                    # getting duration from feats.scp (default: 0.01)."
+   exit 1
+ fi
+
+ export LC_ALL=C
+
+ data=$1
+
+
+ if [ -s $data/reco2dur ] && \
+    [ $(wc -l < $data/wav.scp) -eq $(wc -l < $data/reco2dur) ]; then
+   echo "$0: $data/reco2dur already exists with the expected length. We won't recompute it."
+   exit 0;
+ fi
+
+ if [ -s $data/utt2dur ] && \
+    [ $(wc -l < $data/utt2spk) -eq $(wc -l < $data/utt2dur) ] && \
+    [ ! -s $data/segments ]; then
+
+   echo "$0: $data/wav.scp indexed by utt-id; copying utt2dur to reco2dur"
+   cp $data/utt2dur $data/reco2dur && exit 0;
+
+ elif [ -f $data/wav.scp ]; then
+   echo "$0: obtaining durations from recordings"
+
+   # if the wav.scp contains only lines of the form
+   # utt1 /foo/bar/sph2pipe -f wav /baz/foo.sph |
+   if cat $data/wav.scp | perl -e '
+     while (<>) { s/\|\s*$/ |/;  # make sure final | is preceded by space.
+       @A = split; if (!($#A == 5 && $A[1] =~ m/sph2pipe$/ &&
+                         $A[2] eq "-f" && $A[3] eq "wav" && $A[5] eq "|")) { exit(1); }
+       $reco = $A[0]; $sphere_file = $A[4];
+
+       if (!open(F, "<$sphere_file")) { die "Error opening sphere file $sphere_file"; }
+       $sample_rate = -1; $sample_count = -1;
+       for ($n = 0; $n <= 30; $n++) {
+         $line = <F>;
+         if ($line =~ m/sample_rate -i (\d+)/) { $sample_rate = $1; }
+         if ($line =~ m/sample_count -i (\d+)/) { $sample_count = $1; }
+         if ($line =~ m/end_head/) { last; }
+       }
+       close(F);
+       if ($sample_rate == -1 || $sample_count == -1) {
+         die "could not parse sphere header from $sphere_file";
+       }
+       $duration = $sample_count * 1.0 / $sample_rate;
+       print "$reco $duration\n";
+     } ' > $data/reco2dur; then
+     echo "$0: successfully obtained recording lengths from sphere-file headers"
+   else
+     echo "$0: could not get recording lengths from sphere-file headers, using wav-to-duration"
+     if ! command -v wav-to-duration >/dev/null; then
+       echo "$0: wav-to-duration is not on your path"
+       exit 1;
+     fi
+
+     read_entire_file=false
+     if grep -q 'sox.*speed' $data/wav.scp; then
+       read_entire_file=true
+       echo "$0: reading from the entire wav file to fix the problem caused by sox commands with speed perturbation. It is going to be slow."
+       echo "... It is much faster if you call get_reco2dur.sh *before* doing the speed perturbation via e.g. perturb_data_dir_speed.sh or "
+       echo "... perturb_data_dir_speed_3way.sh."
+     fi
+
+     num_recos=$(wc -l <$data/wav.scp)
+     if [ $nj -gt $num_recos ]; then
+       nj=$num_recos
+     fi
+
+     temp_data_dir=$data/wav${nj}split
+     wavscps=$(for n in `seq $nj`; do echo $temp_data_dir/$n/wav.scp; done)
+     subdirs=$(for n in `seq $nj`; do echo $temp_data_dir/$n; done)
+
+     if ! mkdir -p $subdirs >&/dev/null; then
+       for n in `seq $nj`; do
+         mkdir -p $temp_data_dir/$n
+       done
+     fi
+
+     utils/split_scp.pl $data/wav.scp $wavscps
+
+
+     $cmd JOB=1:$nj $data/log/get_reco_durations.JOB.log \
+       wav-to-duration --read-entire-file=$read_entire_file \
+       scp:$temp_data_dir/JOB/wav.scp ark,t:$temp_data_dir/JOB/reco2dur || \
+       { echo "$0: there was a problem getting the durations"; exit 1; }
+
+     for n in `seq $nj`; do
+       cat $temp_data_dir/$n/reco2dur
+     done > $data/reco2dur
+     rm -r $temp_data_dir  # only created on this fallback path
+   fi
+ else
+   echo "$0: Expected $data/wav.scp to exist"
+   exit 1
+ fi
+
+ len1=$(wc -l < $data/wav.scp)
+ len2=$(wc -l < $data/reco2dur)
+ if [ "$len1" != "$len2" ]; then
+   echo "$0: warning: length of reco2dur does not equal that of wav.scp, $len2 != $len1"
+   if [ $len1 -gt $((len2*2)) ]; then
+     echo "$0: less than half of recordings got a duration: failing."
+     exit 1
+   fi
+ fi
+
+ echo "$0: computed $data/reco2dur"
+
+ exit 0
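The fast path above reads the NIST SPHERE header directly: it scans the first lines for sample_rate and sample_count and divides. An equivalent sketch in Python (illustrative helper, mirroring the 30-line header scan of the embedded Perl):

    def sphere_duration(path, max_header_lines=30):
        sample_rate = sample_count = None
        with open(path, "rb") as f:
            for _ in range(max_header_lines + 1):
                line = f.readline().decode("ascii", errors="replace")
                if "sample_rate -i" in line:
                    sample_rate = int(line.split()[-1])
                elif "sample_count -i" in line:
                    sample_count = int(line.split()[-1])
                elif "end_head" in line:
                    break
        if sample_rate is None or sample_count is None:
            raise ValueError("could not parse sphere header from " + path)
        return sample_count / sample_rate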
UniAudio/UniAudio/egs/SE/utils/data/get_reco2utt_for_data.sh ADDED
@@ -0,0 +1,21 @@
+ #!/usr/bin/env bash
+
+ # Copyright 2016 Vimal Manohar
+ # Apache 2.0
+
+ if [ $# -ne 1 ]; then
+   echo "This script outputs a mapping from recording to a list of utterances "
+   echo "corresponding to the recording. It is analogous to the content of "
+   echo "a spk2utt file, but is indexed by recording instead of speaker."
+   echo "Usage: $0 <data>"
+   echo " e.g.: $0 data/train"
+   exit 1
+ fi
+
+ data=$1
+
+ if [ ! -s $data/segments ]; then
+   utils/data/get_segments_for_data.sh $data > $data/segments
+ fi
+
+ cut -d ' ' -f 1,2 $data/segments | utils/utt2spk_to_spk2utt.pl
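The final pipeline keeps only the first two columns of segments (utterance-id, recording-id) and inverts the mapping, just as utt2spk_to_spk2utt.pl does for speakers. A Python sketch of that inversion (illustrative only):

    from collections import defaultdict

    def reco2utt(segments_lines):
        mapping = defaultdict(list)
        for line in segments_lines:
            utt, reco = line.split()[:2]
            mapping[reco].append(utt)
        return dict(mapping)

    print(reco2utt(["utt1 rec1 0.0 2.5", "utt2 rec1 2.5 5.0"]))
    # {'rec1': ['utt1', 'utt2']}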
UniAudio/UniAudio/egs/SE/utils/data/get_segments_for_data.sh ADDED
@@ -0,0 +1,29 @@
+ #!/usr/bin/env bash
+
+ # This script operates on a data directory, such as in data/train/,
+ # and writes new segments to stdout. The file 'segments' maps from
+ # utterance to time offsets into a recording, with the format:
+ # <utterance-id> <recording-id> <segment-begin> <segment-end>
+ # This script assumes utterance and recording ids are the same (i.e., that
+ # wav.scp is indexed by utterance), and uses durations from 'utt2dur',
+ # created if necessary by get_utt2dur.sh.
+
+ . ./path.sh
+
+ if [ $# != 1 ]; then
+   echo "Usage: $0 [options] <datadir>"
+   echo "e.g.:"
+   echo " $0 data/train > data/train/segments"
+   exit 1
+ fi
+
+ data=$1
+
+ if [ ! -s $data/utt2dur ]; then
+   utils/data/get_utt2dur.sh $data 1>&2 || exit 1;
+ fi
+
+ # <utt-id> <utt-id> 0 <utt-dur>
+ awk '{ print $1, $1, 0, $2 }' $data/utt2dur
+
+ exit 0
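The awk line emits one default segment per utterance, spanning the whole recording. The same thing as a Python sketch (illustrative):

    def default_segments(utt2dur):
        # <utt-id> <utt-id> 0 <utt-dur>
        return ["%s %s 0 %s" % (utt, utt, dur) for utt, dur in utt2dur.items()]

    print(default_segments({"utt1": "2.5"}))  # ['utt1 utt1 0 2.5']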
UniAudio/UniAudio/egs/SE/utils/data/get_uniform_subsegments.py ADDED
@@ -0,0 +1,123 @@
+ #! /usr/bin/env python
+
+ # Copyright 2017  Vimal Manohar
+ #           2017  Matthew Maciejewski
+ # Apache 2.0.
+
+ from __future__ import print_function
+ import argparse
+ import logging
+ import sys
+ import textwrap
+
+ def get_args():
+     parser = argparse.ArgumentParser(
+         description=textwrap.dedent("""
+         Creates a subsegments file from an input segments file.
+
+         The output format is
+
+         <subsegment-id> <utterance-id> <start-time> <end-time>,
+
+         where the timings are relative to the start-time of the
+         <utterance-id> in the input segments file.
+         Reminder: the format of the input segments file is:
+
+         <utterance-id> <recording-id> <start-time> <end-time>
+
+         where the recording-id corresponds to a wav file (or a channel of
+         a wav file) from wav.scp. Note: you can use
+         utils/data/get_segments_for_data.sh to generate a 'default'
+         segments file for your data if one doesn't already exist.
+
+         e.g.: get_uniform_subsegments.py data/dev/segments > \\
+                 data/dev_uniform_segments/sub_segments
+
+         utils/data/subsegment_data_dir.sh data/dev \\
+             data/dev_uniform_segments/sub_segments data/dev_uniform_segments
+
+         The output is written to stdout. The resulting file can be
+         passed to utils/data/subsegment_data_dir.sh to sub-segment
+         the data directory."""),
+         formatter_class=argparse.RawDescriptionHelpFormatter)
+     parser.add_argument("--max-segment-duration", type=float,
+                         default=30, help="""Maximum duration of the
+                         subsegments (in seconds)""")
+     parser.add_argument("--overlap-duration", type=float,
+                         default=5, help="""Overlap between
+                         adjacent segments (in seconds)""")
+     parser.add_argument("--max-remaining-duration", type=float,
+                         default=10, help="""Segment is not split
+                         if the left-over duration is more than this
+                         many seconds""")
+     # Note: a plain type=bool would treat any non-empty string (even "false") as True.
+     parser.add_argument("--constant-duration",
+                         type=lambda s: s.lower() in ("true", "1", "yes"),
+                         default=False, help="""Final segment is given
+                         a start time max-segment-duration before the
+                         end to force a constant segment duration. This
+                         overrides the max-remaining-duration parameter""")
+     parser.add_argument("segments_file", type=argparse.FileType('r'),
+                         help="""Input kaldi segments file""")
+
+     args = parser.parse_args()
+     return args
+
+
+ def run(args):
+     if args.constant_duration:
+         dur_threshold = args.max_segment_duration
+     else:
+         dur_threshold = args.max_segment_duration + args.max_remaining_duration
+
+     for line in args.segments_file:
+         parts = line.strip().split()
+         utt_id = parts[0]
+         start_time = float(parts[2])
+         end_time = float(parts[3])
+
+         dur = end_time - start_time
+
+         start = start_time
+         while dur > dur_threshold:
+             end = start + args.max_segment_duration
+             start_relative = start - start_time
+             end_relative = end - start_time
+             new_utt = "{utt_id}-{s:08d}-{e:08d}".format(
+                 utt_id=utt_id, s=int(100 * start_relative),
+                 e=int(100 * end_relative))
+             print("{new_utt} {utt_id} {s:.3f} {e:.3f}".format(
+                 new_utt=new_utt, utt_id=utt_id, s=start_relative,
+                 e=end_relative))
+             start += args.max_segment_duration - args.overlap_duration
+             dur -= args.max_segment_duration - args.overlap_duration
+
+         if args.constant_duration:
+             if dur < 0:
+                 continue
+             if dur < args.max_remaining_duration:
+                 start = max(end_time - args.max_segment_duration, start_time)
+             end = min(start + args.max_segment_duration, end_time)
+         else:
+             end = end_time
+         new_utt = "{utt_id}-{s:08d}-{e:08d}".format(
+             utt_id=utt_id, s=int(round(100 * (start - start_time))),
+             e=int(round(100 * (end - start_time))))
+         print("{new_utt} {utt_id} {s:.3f} {e:.3f}".format(
+             new_utt=new_utt, utt_id=utt_id, s=start - start_time,
+             e=end - start_time))
+
+
+ def main():
+     args = get_args()
+     try:
+         run(args)
+     except Exception:
+         logging.error("Failed creating subsegments", exc_info=True)
+         raise SystemExit(1)
+     finally:
+         args.segments_file.close()
+
+
+ if __name__ == '__main__':
+     main()
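To see the default (non-constant-duration) windowing concretely: 30 s windows advance by 25 s (5 s overlap), and the tail is emitted whole once fewer than max-segment-duration + max-remaining-duration seconds remain. A standalone sketch of that loop (illustrative, times relative to the segment start):

    def uniform_windows(start, end, max_dur=30.0, overlap=5.0, max_remaining=10.0):
        windows, dur, cur = [], end - start, start
        while dur > max_dur + max_remaining:
            windows.append((cur - start, cur - start + max_dur))
            cur += max_dur - overlap
            dur -= max_dur - overlap
        windows.append((cur - start, end - start))  # the remaining tail
        return windows

    print(uniform_windows(0.0, 70.0))
    # [(0.0, 30.0), (25.0, 55.0), (50.0, 70.0)]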