Upload 7 files
- tools/utils/extract.py +128 -0
- tools/utils/istft_multi.m +59 -0
- tools/utils/json2mat.m +65 -0
- tools/utils/localize.m +158 -0
- tools/utils/mat2json.m +66 -0
- tools/utils/stft_multi.m +58 -0
- tools/utils/viterbi.m +45 -0
tools/utils/extract.py
ADDED
@@ -0,0 +1,128 @@
#!/usr/bin/env python
""" extract.py

Extract CHiME 3 audio segments from continuous audio

Usage:
  extract.py [-f] [-p pad] [-c channel] <segfilenm> <inwavroot> <outwavroot>
  extract.py --help

Options:
  <segfilenm>                  Name of the segmentation file
  <inwavroot>                  Name of the root dir for the input audio file
  <outwavroot>                 Name of the root dir for the output segments
  -p <pad>, --padding=<pad>    padding at start and end in seconds [default: 0]
  -f, --fullname               Use fullname for outfile
  -c <chan>, --channel=<chan>  Recording channel (defaults to all)
  --help                       print this help screen

"""

from __future__ import print_function
import json
import os
import subprocess
import argparse
import sys


def extract(segment, in_root, out_root, padding=0.0, channel=0, fullname=False):
    """use sox to extract segment from wav file

    in_root - root directory for unsegmented audio files
    out_root - root directory for output audio segments
    """
    infilenm = '{}/{}.CH{}.wav'.format(in_root, segment['wavfile'], channel)

    if fullname:
        outtemplate = '{}/{}.{}.{}.{}.{:02d}.{:03d}.ch{}.wav'
        outfilenm = outtemplate.format(out_root,
                                       segment['wavfile'],
                                       segment['wsj_name'],
                                       segment['environment'],
                                       segment['speaker'],
                                       segment['repeat'],
                                       segment['index'],
                                       channel)
    else:
        outfilenm = '{}/{}_{}_{}.CH{}.wav'.format(out_root,
                                                  segment['speaker'],
                                                  segment['wsj_name'],
                                                  segment['environment'],
                                                  channel)

    subprocess.call(['sox', infilenm, outfilenm,
                     'trim',
                     str(segment['start'] - padding),
                     '=' + str(segment['end'] + padding)])


def to_string(segment):
    return "{}:{}-{}:{:03d}({:03d})".format(segment['wavfile'],
                                            segment['start'],
                                            segment['end'],
                                            segment['index'],
                                            segment['repeat'])


def do_extract(seg_filenm, in_root, out_root,
               padding=0.0, channel=0, fullname=False):
    """
    Extract segments listed in seg file from recording channel, 'channel'
    """

    with open(seg_filenm, 'r') as infile:
        json_string = infile.read()
        segments = json.loads(json_string)

    if not os.path.isdir(out_root):
        os.makedirs(out_root)

    print('Extracting audio in channel {}...'.format(channel))

    for i, segment in enumerate(segments):
        sys.stdout.write(' Processing segment {: 5}/{: <5}\r'.format(i+1, len(segments)))
        sys.stdout.flush()

        extract(segment, in_root, out_root, padding=padding,
                channel=channel, fullname=fullname)
    sys.stdout.write('\n')
    sys.stdout.flush()


def main():
    """Main method called from commandline."""
    parser = argparse.ArgumentParser(description='Extract CHiME 3 audio segments from continuous audio.')
    parser.add_argument('segfilenm', metavar='<segfilenm>',
                        help='Name of the segmentation file', type=str)
    parser.add_argument('inwavroot', metavar='<inwavroot>',
                        help='Name of the root dir for the input audio file', type=str)
    parser.add_argument('outwavroot', metavar='<outwavroot>',
                        help='Name of the root dir for the output segments', type=str)
    parser.add_argument('-p', '--padding', metavar='pad',
                        help='Padding at start and end in seconds [default: 0]', type=float, default=0)
    parser.add_argument('-f', '--fullname',
                        help='Use fullname for outfile', action='store_true')
    parser.add_argument('-c', '--channel', metavar='channel',
                        help='Recording channel (defaults to all).', action='append', type=int, default=[])

    args = parser.parse_args()

    segfilenm = args.segfilenm
    in_root = args.inwavroot
    out_root = args.outwavroot
    padding = args.padding
    fullname = args.fullname
    channels = args.channel

    if len(channels) == 0:
        channels = [0, 1, 2, 3, 4, 5, 6]

    for channel in channels:
        do_extract(segfilenm, in_root, out_root, padding, channel, fullname)


if __name__ == '__main__':
    main()

# ./extract.py ../../data/annotations/utterance/LR_141103_01.json ../../data/16khz16bit xxx
tools/utils/istft_multi.m
ADDED
@@ -0,0 +1,59 @@
function x=istft_multi(X,nsampl)

% ISTFT_MULTI Multichannel inverse short-time Fourier transform (ISTFT)
% using half-overlapping sine windows.
%
% x=istft_multi(X,nsampl)
%
% Inputs:
% X: nbin x nfram x nsrc matrix containing STFT coefficients for nsrc
% sources with nbin frequency bins and nfram time frames or nbin x nfram x
% nsrc x nchan matrix containing the STFT coefficients of nsrc spatial
% source images over nchan channels
% nsampl: number of samples to which the corresponding time-domain signals
% are to be truncated
%
% Output:
% x: nsrc x nsampl matrix or nsrc x nsampl x nchan matrix containing the
% corresponding time-domain signals
% If x is a set of signals of length nsampl and X=stft_multi(x), then
% x=istft_multi(X,nsampl).
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright 2008 Emmanuel Vincent
% This software is distributed under the terms of the GNU Public License
% version 3 (http://www.gnu.org/licenses/gpl.txt)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


%%% Errors and warnings %%%
if nargin<2, error('Not enough input arguments.'); end
[nbin,nfram,nsrc,nchan]=size(X);
if nbin==2*floor(nbin/2), error('The number of frequency bins must be odd.'); end
wlen=2*(nbin-1);

%%% Computing inverse STFT signal %%%
% Defining sine window
win=sin((.5:wlen-.5)/wlen*pi);
% Pre-processing for edges
swin=zeros(1,(nfram+1)*wlen/2);
for t=0:nfram-1,
    swin(t*wlen/2+1:t*wlen/2+wlen)=swin(t*wlen/2+1:t*wlen/2+wlen)+win.^2;
end
swin=sqrt(swin/wlen);
x=zeros(nsrc,(nfram+1)*wlen/2,nchan);
for i=1:nchan,
    for j=1:nsrc,
        for t=0:nfram-1,
            % IFFT
            fframe=[X(:,t+1,j,i);conj(X(wlen/2:-1:2,t+1,j,i))];
            frame=real(ifft(fframe));
            % Overlap-add
            x(j,t*wlen/2+1:t*wlen/2+wlen,i)=x(j,t*wlen/2+1:t*wlen/2+wlen,i)+frame.'.*win./swin(t*wlen/2+1:t*wlen/2+wlen);
        end
    end
end
% Truncation
x=x(:,wlen/4+1:wlen/4+nsampl,:);

return;
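The header above notes that stft_multi and istft_multi form an exact analysis/resynthesis pair. A minimal round-trip sketch, not part of the committed file, assuming both functions are on the MATLAB path and using random noise as a stand-in signal:

% round-trip check: analyse and resynthesise 1 s of 6-channel noise
x = randn(6, 16000);             % nchan x nsampl at 16 kHz
X = stft_multi(x, 1024);         % nbin x nfram x nchan
y = istft_multi(X, size(x, 2));  % back to nchan x nsampl
max(abs(y(:) - x(:)))            % on the order of machine precision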
tools/utils/json2mat.m
ADDED
@@ -0,0 +1,65 @@
function mat=json2mat(filename)

% JSON2MAT Reads a JSON file
%
% mat=json2mat(filename)
%
% Input:
% filename: JSON filename (.json extension)
%
% Output:
% mat: Matlab cell array whose entries are Matlab structures containing the
% value for each JSON field
%
% Note: all numeric fields are rounded to double precision. Digits beyond
% double precision are lost.
%
% If you use this software in a publication, please cite:
%
% Jon Barker, Ricard Marxer, Emmanuel Vincent, and Shinji Watanabe, The
% third 'CHiME' Speech Separation and Recognition Challenge: Dataset,
% task and baselines, submitted to IEEE 2015 Automatic Speech Recognition
% and Understanding Workshop (ASRU), 2015.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright 2015 University of Sheffield (Jon Barker, Ricard Marxer)
% Inria (Emmanuel Vincent)
% Mitsubishi Electric Research Labs (Shinji Watanabe)
% This software is distributed under the terms of the GNU Public License
% version 3 (http://www.gnu.org/licenses/gpl.txt)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

fid=fopen(filename,'r');
fgetl(fid); % [
txt=fgetl(fid); % { or ]
txt=fgetl(fid); % first field
mat=cell(1);
ind=1; % entry index
while txt~=-1, % end of file
    if strcmp(txt,' }, ') || strcmp(txt,' }'), % next entry
        ind=ind+1;
        txt=fgetl(fid); % { or ]
    else
        try
            pos=strfind(txt,'"');
            field=txt(pos(1)+1:pos(2)-1);
        catch
            keyboard;
        end
        if ~strcmp(txt(end-1:end),', '), % last field
            txt=txt(pos(2)+3:end);
        else
            txt=txt(pos(2)+3:end-2);
        end
        if strcmp(txt(1),'"') && strcmp(txt(end),'"'), % text value
            value=txt(2:end-1);
        else % boolean or numerical value
            value=eval(txt);
        end
        mat{ind}.(field)=value;
    end
    txt=fgetl(fid); % next field
end
fclose(fid);

return
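A minimal usage sketch, not part of the committed file: reading an utterance annotation file like the one referenced in the comment at the bottom of extract.py above. The file path and the 'speaker' field are assumptions borrowed from that script, not guaranteed properties of every annotation file.

segs = json2mat('../../data/annotations/utterance/LR_141103_01.json');  % cell array, one struct per entry
fprintf('%d segments, first speaker: %s\n', length(segs), segs{1}.speaker);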
tools/utils/localize.m
ADDED
@@ -0,0 +1,158 @@
function [path,TDOA]=localize(X,chanlist)

% LOCALIZE Tracks the speaker spatial position over time and computes the
% corresponding TDOA using SRP-PHAT and the Viterbi algorithm
%
% [path,TDOA]=localize(X,chanlist)
%
% Inputs:
% X: nbin x nfram x nchan STFT of the input signal
% chanlist: list of input channels (from 1 to 6)
%
% Outputs:
% path: 3 x nfram position of the speaker over time in centimeters
% TDOA: nchan x nfram corresponding TDOAs between the speaker position and
% the microphone positions
%
% Note: for computational efficiency, the position on the z-axis is assumed
% to be constant over time.
%
% If you use this software in a publication, please cite:
%
% Jon Barker, Ricard Marxer, Emmanuel Vincent, and Shinji Watanabe, The
% third 'CHiME' Speech Separation and Recognition Challenge: Dataset,
% task and baselines, submitted to IEEE 2015 Automatic Speech Recognition
% and Understanding Workshop (ASRU), 2015.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright 2015-2016 University of Sheffield (Jon Barker, Ricard Marxer)
% Inria (Emmanuel Vincent)
% Mitsubishi Electric Research Labs (Shinji Watanabe)
% This software is distributed under the terms of the GNU Public License
% version 3 (http://www.gnu.org/licenses/gpl.txt)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

if nargin < 2,
    chanlist=[1 3:6];
end

% Define hyper-parameters
pow_thresh=-20; % threshold in dB below which a microphone is considered to fail
center_factor=0.05; % weight given to the prior that the speaker's horizontal position is close to the center
smoothing_factor=3; % weight given to the transition probabilities

% Remove zero frequency
X = X(2:end,:,:);
[nbin,nfram,nchan] = size(X);
wlen=2*nbin;
f=16000/wlen*(1:nbin).';

% Compute relative channel power
if length(chanlist) > 2,
    xpow=shiftdim(sum(sum(abs(X).^2,2),1));
    xpow=10*log10(xpow/max(xpow));
else
    xpow=zeros(1,2);
end

% Define microphone positions in centimeters
xmic=[-10 0 10 -10 0 10]; % left to right axis
ymic=[9.5 9.5 9.5 -9.5 -9.5 -9.5]; % bottom to top axis
zmic=[0 -2 0 0 0 0]; % back to front axis
xmic=xmic(chanlist);
ymic=ymic(chanlist);
zmic=zmic(chanlist);

% Define grid of possible speaker positions in centimeters
xres=46;
xpos=linspace(-45,45,xres);
yres=46;
ypos=linspace(-45,45,yres);
zres=4;
zpos=linspace(15,45,zres);
ngrid=xres*yres*zres;

% Compute horizontal distances between grid points
xvect=reshape(repmat(xpos.',[1 yres]),xres*yres,1);
yvect=reshape(repmat(ypos,[xres 1]),xres*yres,1);
pair_dist=sqrt((repmat(xvect,[1 xres*yres])-repmat(xvect.',[xres*yres 1])).^2+(repmat(yvect,[1 xres*yres])-repmat(yvect.',[xres*yres 1])).^2);

% Compute horizontal distances to the center
center_dist=sqrt((xvect-mean(xpos)).^2+(yvect-mean(ypos)).^2);

% Compute theoretical TDOAs between front pairs
d_grid=zeros(nchan,xres,yres,zres); % speaker-to-microphone distances
for c=1:nchan,
    d_grid(c,:,:,:)=sqrt(repmat((xpos.'-xmic(c)).^2,[1 yres zres])+repmat((ypos-ymic(c)).^2,[xres 1 zres])+repmat((permute(zpos,[3 1 2])-zmic(c)).^2,[xres yres 1]));
end
d_grid=reshape(d_grid,nchan,ngrid);
pairs=[];
for c=1:nchan,
    pairs=[pairs [c*ones(1,nchan-c); c+1:nchan]]; % microphone pairs
end
npairs=size(pairs,2);
tau_grid=zeros(npairs,ngrid); % TDOAs
for p=1:npairs,
    c1=pairs(1,p);
    c2=pairs(2,p);
    tau_grid(p,:)=(d_grid(c2,:)-d_grid(c1,:))/343/100;
end

% Compute the SRP-PHAT pseudo-spectrum
srp=zeros(nfram,ngrid);
for p=1:npairs, % Loop over front pairs
    c1=pairs(1,p);
    c2=pairs(2,p);
    d=sqrt((xmic(c1)-xmic(c2))^2+(ymic(c1)-ymic(c2))^2+(zmic(c1)-zmic(c2))^2);
    alpha=10*343/(d*16000);
    lin_grid=linspace(min(tau_grid(p,:)),max(tau_grid(p,:)),100);
    lin_spec=zeros(nbin,nfram,100); % GCC-PHAT pseudo-spectrum over a uniform interval
    if (xpow(c1)>pow_thresh) && (xpow(c2)>pow_thresh), % discard channels with low power (microphone failure)
        P=X(:,:,c1).*conj(X(:,:,c2));
        P=P./abs(P);
        for ind=1:100,
            EXP=repmat(exp(-2*1i*pi*lin_grid(ind)*f),1,nfram);
            lin_spec(:,:,ind)=ones(nbin,nfram)-tanh(alpha*real(sqrt(2-2*real(P.*EXP))));
        end
    end
    lin_spec=shiftdim(sum(lin_spec,1));
    tau_spec=zeros(nfram,ngrid); % GCC-PHAT pseudo-spectrum over the whole grid
    for t=1:nfram,
        tau_spec(t,:)=interp1(lin_grid,lin_spec(t,:),tau_grid(p,:));
    end
    srp=srp+tau_spec; % sum over the microphone pairs
end

% Loop over possible z-axis positions
path=zeros(zres,nfram);
logpost=zeros(zres,1);
xpath=zeros(zres,nfram);
ypath=zeros(zres,nfram);
zpath=zeros(zres,nfram);
srp=reshape(srp,nfram,xres*yres,zres);
for zind=1:zres,

    % Weight by distance to the center
    weighted_srp=srp(:,:,zind)-center_factor*repmat(center_dist.',[nfram 1]);

    % Track the source position over time
    [path(zind,:),logpost(zind)]=viterbi(weighted_srp.',zeros(xres*yres,1),zeros(xres*yres,1),-smoothing_factor*pair_dist);
    for t=1:nfram,
        [xpath(zind,t),ypath(zind,t)]=ind2sub([xres yres],path(zind,t));
        zpath(zind,t)=zind;
    end
end

% Select the best z-axis position
[~,zind]=max(logpost);
path=(zind-1)*xres*yres+path(zind,:);
xpath=xpos(xpath(zind,:));
ypath=ypos(ypath(zind,:));
zpath=zpos(zpath(zind,:));

% Derive TDOA
d_path=d_grid(:,path);
TDOA=d_path/343/100;
path=[xpath; ypath; zpath];

return
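A minimal usage sketch, not part of the committed file. It assumes the STFT passed in is restricted to the same channels listed in chanlist (so that the channel dimension matches the selected microphone positions); random noise stands in for real audio here, so only the output shapes are meaningful.

x = randn(6, 16000);                       % placeholder 6-channel signal, 1 s at 16 kHz
X = stft_multi(x, 1024);                   % nbin x nfram x nchan STFT
chanlist = [1 3:6];                        % default channel set: skip the backward-facing channel 2
[path, TDOA] = localize(X(:,:,chanlist), chanlist);
size(path)                                 % 3 x nfram speaker positions in cm
size(TDOA)                                 % 5 x nfram TDOAs, one row per entry of chanlist
% note: the Viterbi tracking runs over a 46 x 46 x 4 position grid, so this can take a while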
tools/utils/mat2json.m
ADDED
@@ -0,0 +1,66 @@
function mat2json(mat,filename)

% MAT2JSON Writes a JSON file
%
% mat2json(mat,filename)
%
% Inputs:
% mat: Matlab cell array whose entries are Matlab structures containing the
% value for each JSON field
% filename: JSON filename (.json extension)
%
% Note: using JSON2MAT followed by MAT2JSON will generally not lead back to
% the original JSON file due to the loss of digits beyond double precision
% and to the handling of trailing zeros.
%
% If you use this software in a publication, please cite:
%
% Jon Barker, Ricard Marxer, Emmanuel Vincent, and Shinji Watanabe, The
% third 'CHiME' Speech Separation and Recognition Challenge: Dataset,
% task and baselines, submitted to IEEE 2015 Automatic Speech Recognition
% and Understanding Workshop (ASRU), 2015.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright 2015 University of Sheffield (Jon Barker, Ricard Marxer)
% Inria (Emmanuel Vincent)
% Mitsubishi Electric Research Labs (Shinji Watanabe)
% This software is distributed under the terms of the GNU Public License
% version 3 (http://www.gnu.org/licenses/gpl.txt)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

fid=fopen(filename,'w');
fprintf(fid,'%s\n','[');
for ind=1:length(mat), % loop over entries
    fprintf(fid,' %s\n','{'); % entry delimiter
    fields=fieldnames(mat{ind});
    for f=1:length(fields), % loop over fields
        field=fields{f};
        value=mat{ind}.(field);
        if ischar(value), % text field
            fprintf(fid,' "%s": "%s"',field,value);
        elseif islogical(value), % boolean field
            if value,
                fprintf(fid,' "%s": true',field);
            else
                fprintf(fid,' "%s": false',field);
            end
        elseif value==floor(value), % integer field
            fprintf(fid,' "%s": %d',field,value);
        else % double field
            fprintf(fid,' "%s": %17.*f',field,15-max(0,floor(log10(value))),value);
        end
        if f~=length(fields), % field delimiter
            fprintf(fid,', ');
        end
        fprintf(fid,'\n');
    end
    fprintf(fid,' }'); % entry delimiter
    if ind~=length(mat),
        fprintf(fid,', ');
    end
    fprintf(fid,'\n');
end
fprintf(fid,']');
fclose(fid);

return
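A minimal write/read round-trip sketch, not part of the committed file; field names, values and the output file name are made up. It covers the three value types handled above: text, numeric and boolean.

mat = cell(1, 2);
mat{1}.speaker = 'M03'; mat{1}.start = 1.25; mat{1}.real = true;
mat{2}.speaker = 'F01'; mat{2}.start = 7.5;  mat{2}.real = false;
mat2json(mat, 'example.json');
check = json2mat('example.json');
isequal(check{2}.speaker, 'F01')   % expected: true (numeric fields come back up to double precision)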
tools/utils/stft_multi.m
ADDED
@@ -0,0 +1,58 @@
function X=stft_multi(x,wlen)

% STFT_MULTI Multichannel short-time Fourier transform (STFT) using
% half-overlapping sine windows.
%
% X=stft_multi(x)
% X=stft_multi(x,wlen)
%
% Inputs:
% x: nchan x nsampl matrix containing nchan time-domain mixture signals
% with nsampl samples
% wlen: window length (default: 1024 samples or 64ms at 16 kHz, which is
% optimal for speech source separation via binary time-frequency masking)
%
% Output:
% X: nbin x nfram x nchan matrix containing the STFT coefficients with nbin
% frequency bins and nfram time frames
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright 2008 Emmanuel Vincent
% This software is distributed under the terms of the GNU Public License
% version 3 (http://www.gnu.org/licenses/gpl.txt)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


%%% Errors and warnings %%%
if nargin<1, error('Not enough input arguments.'); end
if nargin<2, wlen=1024; end
[nchan,nsampl]=size(x);
if nchan>nsampl, error('The signals must be within rows.'); end
if wlen~=4*floor(wlen/4), error('The window length must be a multiple of 4.'); end

%%% Computing STFT coefficients %%%
% Defining sine window
win=sin((.5:wlen-.5)/wlen*pi).';
% Zero-padding
nfram=ceil(nsampl/wlen*2);
x=[x,zeros(nchan,nfram*wlen/2-nsampl)];
% Pre-processing for edges
x=[zeros(nchan,wlen/4),x,zeros(nchan,wlen/4)];
swin=zeros((nfram+1)*wlen/2,1);
for t=0:nfram-1,
    swin(t*wlen/2+1:t*wlen/2+wlen)=swin(t*wlen/2+1:t*wlen/2+wlen)+win.^2;
end
swin=sqrt(wlen*swin);
nbin=wlen/2+1;
X=zeros(nbin,nfram,nchan);
for i=1:nchan,
    for t=0:nfram-1,
        % Framing
        frame=x(i,t*wlen/2+1:t*wlen/2+wlen).'.*win./swin(t*wlen/2+1:t*wlen/2+wlen);
        % FFT
        fframe=fft(frame);
        X(:,t+1,i)=fframe(1:nbin);
    end
end

return;
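A minimal sketch, not part of the committed file, of the output dimensions for 2 s of 6-channel noise at 16 kHz with the default 1024-sample window:

x = randn(6, 32000);       % nchan x nsampl
X = stft_multi(x, 1024);   % wlen = 1024 samples (64 ms at 16 kHz)
size(X)                    % 513 x 63 x 6, i.e. nbin x nfram x nchan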
tools/utils/viterbi.m
ADDED
@@ -0,0 +1,45 @@
function [path,logpost]=viterbi(loglik,loginitp,logfinalp,logtransp)

% VITERBI Viterbi algorithm
%
% [path,logpost]=viterbi(loglik,loginitp,logfinalp,logtransp)
%
% Inputs:
% loglik: nstates x nfram matrix of log-likelihood values
% loginitp: nstates x 1 vector of initial log-probability values
% logfinalp: nstates x 1 vector of final log-probability values
% logtransp: nstates x nstates matrix of transition log-probabilities
%
% Outputs:
% path: 1 x nfram best state sequence
% logpost: log-posterior probability of the best state sequence
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright 2015 University of Sheffield (Jon Barker, Ricard Marxer)
% Inria (Emmanuel Vincent)
% Mitsubishi Electric Research Labs (Shinji Watanabe)
% This software is distributed under the terms of the GNU Public License
% version 3 (http://www.gnu.org/licenses/gpl.txt)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

[nstates,nfram]=size(loglik);

% Forward pass
logalpha=loglik(:,1)+loginitp;
prev=zeros(nstates,nfram-1);
for t=2:nfram,
    logalphaprev=logalpha;
    for n=1:nstates,
        [logalpha(n),prev(n,t-1)]=max(logalphaprev+logtransp(:,n));
    end
    logalpha=logalpha+loglik(:,t);
end

% Backward pass
path=zeros(1,nfram);
[logpost,path(nfram)]=max(logalpha+logfinalp);
for t=nfram-1:-1:1,
    path(t)=prev(path(t+1),t);
end

return
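A minimal sketch, not part of the committed file: decoding a three-frame sequence for a two-state toy chain with made-up log-probabilities.

loglik    = log([0.9 0.2 0.1; 0.1 0.8 0.9]);   % nstates x nfram observation log-likelihoods
loginitp  = log([0.5; 0.5]);                   % uniform initial log-probabilities
logfinalp = log([0.5; 0.5]);                   % uniform final log-probabilities
logtransp = log([0.7 0.3; 0.3 0.7]);           % logtransp(i,j) = log P(next state j | current state i)
[path, logpost] = viterbi(loglik, loginitp, logfinalp, logtransp);
path                                           % [1 2 2]: start in state 1, then switch to state 2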