Danh Tran
committed on
Upload demo_libf0.ipynb

demo_libf0.ipynb ADDED (+334 -0)
@@ -0,0 +1,334 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "feeb7b4a",
"metadata": {},
"source": [
"# libf0 - A Python Library for F0-Estimation in Music Recordings"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8a81c6dc",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import librosa\n",
"from scipy.interpolate import interp1d\n",
"\n",
"import IPython.display as ipd\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import libf0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82a6a26e-db62-41f0-9e8c-c718b1359752",
"metadata": {},
"outputs": [],
"source": [
"# Plot function\n",
"def plot_f0_trajectory(Y_LF, t, f, f0, t_f0, figsize=(8.5, 3.4), xlim=(0, 11.5), ylim=(2000, 6000)):\n",
"    \"\"\"\n",
"    Plot a calculated f0 trajectory on the corresponding spectrogram\n",
"\n",
"    Parameters\n",
"    ----------\n",
"    Y_LF : np.ndarray\n",
"        log-frequency spectrogram\n",
"    t : np.ndarray\n",
"        time axis of the spectrogram\n",
"    f : np.ndarray\n",
"        log-frequency axis of the spectrogram in cents\n",
"    f0 : np.ndarray\n",
"        f0 trajectory in cents\n",
"    t_f0 : np.ndarray\n",
"        time points of the f0 trajectory frames\n",
"    figsize : tuple\n",
"        figure size\n",
"    xlim : tuple\n",
"        x-limits\n",
"    ylim : tuple\n",
"        y-limits\n",
"    \"\"\"\n",
"    plt.figure(figsize=figsize)\n",
"\n",
"    plt.imshow(Y_LF, cmap='gray_r', aspect='auto', origin='lower', extent=[t[0], t[-1], f[0], f[-1]])\n",
"    plt.plot(t_f0, f0, linestyle='', marker='.', markersize=5, color=[192/256, 0, 0])\n",
"\n",
"    plt.xlim(xlim)\n",
"    plt.ylim(ylim)\n",
"\n",
"    plt.gca().tick_params(axis='both', which='major', labelsize=10)\n",
"    plt.gca().tick_params(axis='both', which='minor', labelsize=10)\n",
"\n",
"    plt.xlabel(\"Time (seconds)\", fontsize=12)\n",
"    plt.ylabel(\"Log-Frequency (cents)\", fontsize=12)\n",
"\n",
"    cbar = plt.colorbar()\n",
"    cbar.ax.get_yaxis().labelpad = 15\n",
"    cbar.ax.set_ylabel('Log-Magnitude', rotation=270)\n",
"\n",
"    plt.tight_layout()\n",
"    plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8f646465",
"metadata": {},
"outputs": [],
"source": [
"# load demo audio (a throat microphone recording of a soprano singer)\n",
"fn_wav = \"./data/DCS_LI_QuartetB_Take03_S1_LRX_excerpt.wav\"\n",
"x, Fs = librosa.load(fn_wav, sr=22050)\n",
"ipd.display(ipd.Audio(x, rate=Fs, normalize=True)) # audio playback\n",
"\n",
"# shared parameters\n",
"N = 2048 # window size in samples\n",
"H = 256 # hop size in samples\n",
"zero_pad = 2048 # zero-padding for STFT (only for visualization)\n",
"F_min = 55.0 # minimum frequency of interest in Hz\n",
"F_max = 1760.0 # maximum frequency of interest in Hz\n",
"R = 10 # resolution of F0-estimations in cents\n",
"\n",
"# calculate magnitude spectrogram of input signal for visualization\n",
"X = librosa.stft(x, n_fft=N+zero_pad, hop_length=H, win_length=N, window='hann', pad_mode='constant', center=True)\n",
"Y = np.abs(X)\n",
"F_coef_lin = librosa.fft_frequencies(sr=Fs, n_fft=N+zero_pad)\n",
"T_coef = librosa.frames_to_time(np.arange(X.shape[1]), sr=Fs, hop_length=H)\n",
"\n",
"# interpolate magnitude spectrogram to a logarithmic frequency axis\n",
"B = np.floor((1200 / R) * np.log2(F_max / F_min) + 0.5)\n",
"F_coef_log_cents = np.arange(0, B) * R\n",
"F_coef_log_hz = 2 ** (F_coef_log_cents / 1200) * F_min\n",
"compute_Y_interpol = interp1d(F_coef_lin, Y, kind='cubic', axis=0)\n",
"Y_LF = compute_Y_interpol(F_coef_log_hz)\n",
"Y_LF[Y_LF < 0] = 0 # discard negative values after interpolation\n",
"\n",
"# use log-magnitude for visualizations\n",
"Y_LF = np.log(1 + Y_LF)\n",
"\n",
"plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, [], [])"
]
},
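{
"cell_type": "markdown",
"id": "cents-conversion-note",
"metadata": {},
"source": [
"*A note on the cent scale:* the log-frequency axis above and the `libf0.hz_to_cents(f0, F_min)` conversions used below express a frequency $f$ in cents relative to the reference frequency $F_\\mathrm{min}$,\n",
"\n",
"$$c(f) = 1200 \\cdot \\log_2\\left(\\frac{f}{F_\\mathrm{min}}\\right), \\qquad f(c) = F_\\mathrm{min} \\cdot 2^{c/1200}.$$\n",
"\n",
"With a resolution of `R = 10` cents per bin, one equal-tempered semitone (100 cents) spans 10 bins, and `F_coef_log_hz = 2 ** (F_coef_log_cents / 1200) * F_min` in the cell above is exactly the inverse mapping $f(c)$."
]
},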
{
"cell_type": "markdown",
"id": "a2d8f9fe",
"metadata": {},
"source": [
"### YIN\n",
"\n",
"For algorithmic details, see:\n",
"\n",
"Alain de Cheveigné and Hideki Kawahara. YIN, a fundamental frequency estimator for speech and music. Journal of the Acoustical Society of America (JASA), 111(4):1917–1930, 2002."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a9a165be",
"metadata": {},
"outputs": [],
"source": [
"# YIN parameters\n",
"threshold = 0.15 # threshold applied to the normalized difference function\n",
"\n",
"# run YIN algorithm\n",
"f0_yin, t_yin, ap_yin = libf0.yin(x, Fs=Fs, N=N, H=H, F_min=F_min, F_max=F_max, threshold=threshold, verbose=True)\n",
"\n",
"# convert trajectory to cent scale\n",
"f0_yin_cents = libf0.hz_to_cents(f0_yin, F_min)\n",
"\n",
"# plot the result\n",
"plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_yin_cents, t_yin)\n",
"\n",
"# sonify the result (left: sonification, right: original audio)\n",
"x_son_yin = libf0.sonify_trajectory_with_sinusoid(f0_yin, t_yin, len(x), Fs=Fs)\n",
"ipd.display(ipd.Audio(np.vstack((x_son_yin.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))"
]
},
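{
"cell_type": "markdown",
"id": "yin-aperiodicity-note",
"metadata": {},
"source": [
"Besides the trajectory, `libf0.yin` also returns a per-frame aperiodicity value (`ap_yin`); lower values indicate a more clearly periodic frame and hence a more reliable estimate. As a rough optional sketch (the cutoff value below is an arbitrary illustrative choice, not a libf0 parameter), such a measure can be used to hide unreliable frames in the plot:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "yin-aperiodicity-sketch",
"metadata": {},
"outputs": [],
"source": [
"# rough sketch: hide frames with high aperiodicity (low reliability) in the plot\n",
"ap_cutoff = 0.2  # arbitrary illustrative cutoff, not a libf0 default\n",
"f0_yin_cents_masked = np.where(ap_yin <= ap_cutoff, f0_yin_cents, np.nan)  # NaN points are not drawn\n",
"plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_yin_cents_masked, t_yin)"
]
},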
{
"cell_type": "markdown",
"id": "f27baf76",
"metadata": {},
"source": [
"### pYIN\n",
"\n",
"For algorithmic details, see:\n",
"\n",
"Matthias Mauch and Simon Dixon. pYIN: A fundamental frequency estimator using probabilistic threshold distributions. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 659–663, Florence, Italy, 2014."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9986dfe1",
"metadata": {},
"outputs": [],
"source": [
"# set parameters\n",
"thresholds = np.arange(0.01, 1, 0.01) # candidate YIN thresholds evaluated by pYIN\n",
"R = 10 # bin resolution in cents\n",
"\n",
"# run pYIN algorithm\n",
"f0_pyin, t_pyin, conf_pyin = libf0.pyin(x, Fs=Fs, N=N, H=H, F_min=F_min, F_max=F_max, R=R, thresholds=thresholds)\n",
"\n",
"# convert trajectory to cent scale\n",
"f0_pyin_cents = libf0.hz_to_cents(f0_pyin, F_min)\n",
"\n",
"# plot the result\n",
"plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_pyin_cents, t_pyin)\n",
"\n",
"# sonify the result (left: sonification, right: original audio)\n",
"x_son_pyin = libf0.sonify_trajectory_with_sinusoid(f0_pyin, t_pyin, len(x), Fs=Fs)\n",
"ipd.display(ipd.Audio(np.vstack((x_son_pyin.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))"
]
},
{
"cell_type": "markdown",
"id": "5218adb5-862b-4315-a85d-375dc69cc0e1",
"metadata": {},
"source": [
"### Salience Algorithm\n",
"\n",
"For algorithmic details, see:\n",
"\n",
"Justin Salamon and Emilia Gómez. Melody extraction from polyphonic music signals using pitch contour characteristics. IEEE Transactions on Audio, Speech, and Language Processing, 20(6): 1759–1770, 2012.\n",
"\n",
"Meinard Müller. Fundamentals of Music Processing – Using Python and Jupyter Notebooks. Springer Verlag, 2nd edition, 2021. ISBN 978-3-030-69807-2. doi: 10.1007/978-3-030-69808-9."
]
},
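{
"cell_type": "markdown",
"id": "salience-harmonic-summation-note",
"metadata": {},
"source": [
"Roughly speaking (weighting, binning, and refinement details are omitted in this sketch), the salience representation scores each log-frequency bin $b$ of a frame $n$ by summing spectrogram energy at its first $H$ harmonics,\n",
"\n",
"$$Z(n, b) \\approx \\sum_{h=1}^{H} Y_\\mathrm{LF}\\left(n, b + \\frac{1200 \\log_2 h}{R}\\right),$$\n",
"\n",
"since the $h$-th harmonic lies $1200 \\log_2 h$ cents above the fundamental; bins whose frequency has strong harmonic support therefore stand out. In the cell below, `num_harm` sets the number of harmonics $H$ in this sum and `freq_smooth_len` the length of a smoothing filter along the frequency axis."
]
},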
{
"cell_type": "code",
"execution_count": null,
"id": "b0f0edcb-1e8a-47e1-a40d-9a28dae69879",
"metadata": {},
"outputs": [],
"source": [
"# set parameters\n",
"num_harm = 10 # number of harmonics for the summation\n",
"freq_smoothing = 11 # length of the smoothing filter\n",
"\n",
"# run the salience algorithm\n",
"f0_sal, t_sal, conf_sal = libf0.salience(x, Fs=Fs, N=N, H=H, F_min=F_min, F_max=F_max, R=R, num_harm=num_harm, freq_smooth_len=freq_smoothing)\n",
"\n",
"# convert trajectory to cent scale\n",
"f0_sal_cents = libf0.hz_to_cents(f0_sal, F_min)\n",
"\n",
"# plot the result\n",
"plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_sal_cents, t_sal)\n",
"\n",
"# sonify the result (left: sonification, right: original audio)\n",
"x_son_sal = libf0.sonify_trajectory_with_sinusoid(f0_sal, t_sal, len(x), Fs=Fs)\n",
"ipd.display(ipd.Audio(np.vstack((x_son_sal.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))"
]
},
{
"cell_type": "markdown",
"id": "0eabab3f",
"metadata": {},
"source": [
"### SWIPE\n",
"\n",
"For algorithmic details, see:\n",
"\n",
"Arturo Camacho and John G. Harris. A sawtooth waveform inspired pitch estimator for speech and music. The Journal of the Acoustical Society of America, 124(3):1638–1652, 2008."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee7a8ff4",
"metadata": {},
"outputs": [],
"source": [
"# set parameters\n",
"threshold = 0.5 # confidence threshold between 0 and 1\n",
"\n",
"# run the SWIPE algorithm\n",
"f0_swipe, t_swipe, conf_swipe = libf0.swipe(x, Fs, H, F_min, F_max, strength_threshold=threshold)\n",
"\n",
"# convert trajectory to cent scale\n",
"f0_swipe_cents = libf0.hz_to_cents(f0_swipe, F_min)\n",
"\n",
"# plot the result\n",
"plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_swipe_cents, t_swipe)\n",
"\n",
"# sonify the result (left: sonification, right: original audio)\n",
"x_son_swipe = libf0.sonify_trajectory_with_sinusoid(f0_swipe, t_swipe, len(x), Fs=Fs)\n",
"ipd.display(ipd.Audio(np.vstack((x_son_swipe.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))"
]
},
{
"cell_type": "markdown",
"id": "08b2121c",
"metadata": {},
"source": [
"### SWIPE (slim)\n",
"\n",
"A more efficient and didactic implementation of the SWIPE algorithm."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bee19066",
"metadata": {},
"outputs": [],
"source": [
"# set parameters\n",
"threshold = 0.5 # confidence threshold between 0 and 1\n",
"\n",
"# run the slim SWIPE implementation\n",
"f0_swipes, t_swipes, conf_swipes = libf0.swipe_slim(x, Fs, H, F_min, F_max, strength_threshold=threshold) # a simplified implementation\n",
"\n",
"# convert trajectory to cent scale\n",
"f0_swipes_cents = libf0.hz_to_cents(f0_swipes, F_min)\n",
"\n",
"# plot the result\n",
"plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_swipes_cents, t_swipes)\n",
"\n",
"# sonify the result (left: sonification, right: original audio)\n",
"x_son_swipes = libf0.sonify_trajectory_with_sinusoid(f0_swipes, t_swipes, len(x), Fs=Fs)\n",
"ipd.display(ipd.Audio(np.vstack((x_son_swipes.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b271aa6d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}