{ "cells": [ { "cell_type": "markdown", "id": "feeb7b4a", "metadata": {}, "source": [ "# libf0 - A Python Library for F0-Estimation in Music Recordings" ] }, { "cell_type": "code", "execution_count": null, "id": "8a81c6dc", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import librosa\n", "from scipy.interpolate import interp1d\n", "\n", "import IPython.display as ipd\n", "import matplotlib.pyplot as plt\n", "\n", "import libf0" ] }, { "cell_type": "code", "execution_count": null, "id": "82a6a26e-db62-41f0-9e8c-c718b1359752", "metadata": {}, "outputs": [], "source": [ "# Plot function\n", "def plot_f0_trajectory(Y_LF, t, f, f0, t_f0, figsize=(8.5, 3.4), xlim=(0, 11.5), ylim=(2000, 6000)):\n", " \"\"\"\n", " Plot a calculated f0 trajectory on the corresponding spectrogram\n", " \n", " Parameters\n", " ----------\n", " Y_LF : np.ndarray\n", " log-frequency spectrogram\n", " t : np.ndarray\n", " time axis of the spectrogram\n", " f : np.ndarray\n", " log-frequency axis of the spectrogram in cents\n", " f0 : np.ndarray\n", " f0 trajectory in cents\n", " t_f0 : np.ndarray\n", " time points of the f0 trajectory frames\n", " figsize : tuple\n", " figure size\n", " xlim : tuple\n", " x-limits\n", " ylim : tuple\n", " y-limits\n", " \"\"\"\n", " plt.figure(figsize=figsize)\n", "\n", " plt.imshow(Y_LF, cmap='gray_r', aspect='auto', origin='lower', extent=[t[0], t[-1], f[0], f[-1]])\n", " plt.plot(t_f0, f0, linestyle='', marker='.', markersize=5, color=[192/256, 0, 0])\n", "\n", " plt.xlim(xlim)\n", " plt.ylim(ylim)\n", "\n", " plt.gca().tick_params(axis='both', which='major', labelsize=10)\n", " plt.gca().tick_params(axis='both', which='minor', labelsize=10)\n", " \n", " plt.xlabel(\"Time (seconds)\", fontsize=12)\n", " plt.ylabel(\"Log-Frequency (cents)\", fontsize=12)\n", " \n", " cbar = plt.colorbar()\n", " cbar.ax.get_yaxis().labelpad = 15\n", " cbar.ax.set_ylabel('Log-Magnitude', rotation=270)\n", "\n", " plt.tight_layout()\n", " plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "8f646465", "metadata": {}, "outputs": [], "source": [ "# load demo audio (a throat microphone recording of a soprano singer)\n", "fn_wav = \"./data/DCS_LI_QuartetB_Take03_S1_LRX_excerpt.wav\"\n", "x, Fs = librosa.load(fn_wav, sr=22050)\n", "ipd.display(ipd.Audio(x, rate=Fs, normalize=True)) # audio playback\n", "\n", "# shared parameters\n", "N = 2048 # window size in samples\n", "H = 256 # hop size in samples\n", "zero_pad = 2048 # zero-padding for STFT (only for visualization)\n", "F_min = 55.0 # minimum frequency of interest in Hz\n", "F_max = 1760.0 # maximum frequency of interest in Hz\n", "R = 10 # resolution of F0-estimations in cents\n", "\n", "# calculate magnitude spectrogram of input signal for visualization\n", "X = librosa.stft(x, n_fft=N+zero_pad, hop_length=H, win_length=N, window='hann', pad_mode='constant', center=True)\n", "Y = np.abs(X)\n", "F_coef_lin = librosa.fft_frequencies(sr=Fs, n_fft=N+zero_pad)\n", "T_coef = librosa.frames_to_time(np.arange(X.shape[1]), sr=Fs, hop_length=H)\n", "\n", "# interpolate magnitude spectrogram to a logarithmic frequency axis \n", "B = np.floor((1200 / R) * np.log2(F_max / F_min) + 0.5)\n", "F_coef_log_cents = np.arange(0, B) * R \n", "F_coef_log_hz = 2 ** (F_coef_log_cents / 1200) * F_min\n", "compute_Y_interpol = interp1d(F_coef_lin, Y, kind='cubic', axis=0)\n", "Y_LF = compute_Y_interpol(F_coef_log_hz)\n", "Y_LF[Y_LF < 0] = 0 # discard negative values after interpolation\n", "\n", "# use log-magnitude for 
visualizations\n", "Y_LF = np.log(1 + Y_LF)\n", "\n", "plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, [], [])" ] }, { "cell_type": "markdown", "id": "a2d8f9fe", "metadata": {}, "source": [ "### YIN\n", "\n", "For algorithmic details, see:\n", "\n", "Alain de Cheveigné and Hideki Kawahara. YIN, a fundamental frequency estimator for speech and music. Journal of the Acoustical Society of America (JASA), 111(4):1917–1930, 2002." ] }, { "cell_type": "code", "execution_count": null, "id": "a9a165be", "metadata": {}, "outputs": [], "source": [ "# YIN parameters\n", "threshold = 0.15\n", "\n", "# run YIN algorithm\n", "f0_yin, t_yin, ap_yin = libf0.yin(x, Fs=Fs, N=N, H=H, F_min=F_min, F_max=F_max, threshold=threshold, verbose=True)\n", "\n", "# convert trajectory to cent scale\n", "f0_yin_cents = libf0.hz_to_cents(f0_yin, F_min)\n", "\n", "# plot the filtered result\n", "plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_yin_cents, t_yin)\n", "\n", "# sonify the filtered result (left: sonification, right: original audio)\n", "x_son_yin = libf0.sonify_trajectory_with_sinusoid(f0_yin, t_yin, len(x), Fs=Fs)\n", "ipd.display(ipd.Audio(np.vstack((x_son_yin.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))" ] }, { "cell_type": "markdown", "id": "f27baf76", "metadata": {}, "source": [ "### pYIN\n", "\n", "For algorithmic details, see:\n", "\n", "Matthias Mauch and Simon Dixon. pYIN: A fundamental frequency estimator using probabilistic threshold distributions. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 659–663, Florence, Italy, 2014." ] }, { "cell_type": "code", "execution_count": null, "id": "9986dfe1", "metadata": {}, "outputs": [], "source": [ "# set parameters\n", "thresholds = np.arange(0.01, 1, 0.01)\n", "R = 10 # bin resolution in cents\n", "\n", "# run pYIN algorithm\n", "f0_pyin, t_pyin, conf_pyin = libf0.pyin(x, Fs=Fs, N=N, H=H, F_min=F_min, F_max=F_max, R=R, thresholds=thresholds)\n", "\n", "# convert trajectory to cent scale\n", "f0_pyin_cents = libf0.hz_to_cents(f0_pyin, F_min)\n", "\n", "# plot the filtered result\n", "plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_pyin_cents, t_pyin)\n", "\n", "# sonify the filtered result (left: sonification, right: original audio)\n", "x_son_pyin = libf0.sonify_trajectory_with_sinusoid(f0_pyin, t_pyin, len(x), Fs=Fs)\n", "ipd.display(ipd.Audio(np.vstack((x_son_pyin.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))" ] }, { "cell_type": "markdown", "id": "5218adb5-862b-4315-a85d-375dc69cc0e1", "metadata": {}, "source": [ "### Salience Algorithm\n", "\n", "For algorithmic details, see:\n", "\n", "Justin Salamon and Emilia Gómez. Melody extraction from polyphonic music signals using pitch contour characteristics. IEEE Transactions on Audio, Speech, and Language Processing, 20(6): 1759–1770, 2012.\n", "\n", "Meinard Müller. Fundamentals of Music Processing – Using Python and Jupyter Notebooks. Springer\n", "Verlag, 2nd edition, 2021. ISBN 978-3-030-69807-2. doi: 10.1007/978-3-030-69808-9." 
] }, { "cell_type": "code", "execution_count": null, "id": "b0f0edcb-1e8a-47e1-a40d-9a28dae69879", "metadata": {}, "outputs": [], "source": [ "# set parameters\n", "num_harm = 10 # number of harmonics for the summation\n", "freq_smoothing = 11 # length of the smoothing filter\n", "\n", "# run the salience algorithm\n", "f0_sal, t_sal, conf_sal = libf0.salience(x, Fs=Fs, N=N, H=H, F_min=F_min, F_max=F_max, R=R, num_harm=num_harm, freq_smooth_len=freq_smoothing)\n", "\n", "# convert trajectory to cent scale\n", "f0_sal_cents = libf0.hz_to_cents(f0_sal, F_min)\n", "\n", "# plot the result\n", "plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_sal_cents, t_sal)\n", "\n", "# sonify the result (left: sonification, right: original audio)\n", "x_son_sal = libf0.sonify_trajectory_with_sinusoid(f0_sal, t_sal, len(x), Fs=Fs)\n", "ipd.display(ipd.Audio(np.vstack((x_son_sal.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))" ] }, { "cell_type": "markdown", "id": "0eabab3f", "metadata": {}, "source": [ "### SWIPE\n", "\n", "For algorithmic details, see:\n", "\n", "Arturo Camacho and John G. Harris. A sawtooth waveform inspired pitch estimator for speech and music. The Journal of the Acoustical Society of America, 124(3):1638–1652, 2008." ] }, { "cell_type": "code", "execution_count": null, "id": "ee7a8ff4", "metadata": {}, "outputs": [], "source": [ "# set parameters\n", "threshold = 0.5 # confidence threshold between 0 and 1\n", "\n", "# run the SWIPE algorithm\n", "f0_swipe, t_swipe, conf_swipe = libf0.swipe(x, Fs, H, F_min, F_max, strength_threshold=threshold)\n", "\n", "# convert trajectory to cent scale\n", "f0_swipe_cents = libf0.hz_to_cents(f0_swipe, F_min)\n", "\n", "# plot the result\n", "plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_swipe_cents, t_swipe)\n", "\n", "# sonify the result (left: sonification, right: original audio)\n", "x_son_swipe = libf0.sonify_trajectory_with_sinusoid(f0_swipe, t_swipe, len(x), Fs=Fs)\n", "ipd.display(ipd.Audio(np.vstack((x_son_swipe.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))" ] }, { "cell_type": "markdown", "id": "08b2121c", "metadata": {}, "source": [ "### SWIPE (slim)\n", "\n", "A more efficient and didactic implementation of the SWIPE algorithm." 
] }, { "cell_type": "code", "execution_count": null, "id": "bee19066", "metadata": {}, "outputs": [], "source": [ "# set parameters\n", "threshold = 0.5 # confidence threshold between 0 and 1\n", "\n", "# run the SWIPE algorithm\n", "f0_swipes, t_swipes, conf_swipes = libf0.swipe_slim(x, Fs, H, F_min, F_max, strength_threshold=threshold) # a simplified implementation\n", "\n", "# convert trajectory to cent scale\n", "f0_swipes_cents = libf0.hz_to_cents(f0_swipes, F_min)\n", "\n", "# plot the result\n", "plot_f0_trajectory(Y_LF, T_coef, F_coef_log_cents, f0_swipes_cents, t_swipe)\n", "\n", "# sonify the result (left: sonification, right: original audio)\n", "x_son_swipes = libf0.sonify_trajectory_with_sinusoid(f0_swipes, t_swipes, len(x), Fs=Fs)\n", "ipd.display(ipd.Audio(np.vstack((x_son_swipes.reshape(1, -1), x.reshape(1, -1))), rate=Fs, normalize=True))" ] }, { "cell_type": "code", "execution_count": null, "id": "b271aa6d", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 5 }