Update README.md
README.md
CHANGED
@@ -161,3 +161,79 @@ This dataset is a structured extraction of the [Million Song Subset](http://mill
For more details, visit the [Million Song Dataset website](http://millionsongdataset.com).

## Appendix: Processing Code

The dataset was converted using the following snippet:

```python
import os
import unibox as ub
import pandas as pd
import numpy as np
import h5py
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor

# https://github.com/tbertinmahieux/MSongsDB/blob/0c276e289606d5bd6f3991f713e7e9b1d4384e44/PythonSrc/hdf5_getters.py
import hdf5_getters

# Define dataset path
dataset_path = "/lv0/yada/dataproc5/data/MillionSongSubset"

# Function to extract all available fields from an HDF5 file
def extract_song_data(file_path):
    """Extracts all available fields from an HDF5 song file using hdf5_getters."""
    song_data = {}

    try:
        with hdf5_getters.open_h5_file_read(file_path) as h5:
            # Get all getter functions from hdf5_getters
            getters = [func for func in dir(hdf5_getters) if func.startswith("get_")]

            for getter in getters:
                try:
                    # Dynamically call each getter function
                    value = getattr(hdf5_getters, getter)(h5)

                    # Convert numpy arrays and bytes to plain Python types
                    if isinstance(value, np.ndarray):
                        value = value.tolist()
                    elif isinstance(value, bytes):
                        value = value.decode()

                    # Store in the dictionary with the "get_" prefix stripped from the key
                    song_data[getter[4:]] = value

                except Exception:
                    continue  # Skip fields that fail to read

    except Exception as e:
        print(f"Error processing {file_path}: {e}")

    return song_data

# Function to process multiple files in parallel
def process_files_in_parallel(h5_files, num_workers=8):
    """Processes multiple .h5 files in parallel."""
    all_songs = []

    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for song_data in tqdm(executor.map(extract_song_data, h5_files), total=len(h5_files)):
            if song_data:
                all_songs.append(song_data)

    return all_songs

# Find all .h5 files under the dataset directory
h5_files = [
    os.path.join(root, file)
    for root, _, files in os.walk(dataset_path)
    for file in files
    if file.endswith(".h5")
]

# Process files in parallel
all_songs = process_files_in_parallel(h5_files, num_workers=24)

# Convert to a Pandas DataFrame
df = pd.DataFrame(all_songs)

# Upload the DataFrame to the Hugging Face Hub via unibox
ub.saves(df, "hf://trojblue/million-song-subset", private=False)
```
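
As a quick sanity check before launching the full parallel pass, the extractor can be run on a single file. The index `0` below is only illustrative, and the `title` / `artist_name` keys follow from stripping the `get_` prefix off the corresponding `hdf5_getters` functions:

```python
# Spot check: extract one song and inspect a few recovered fields.
# `h5_files` and `extract_song_data` are defined in the snippet above.
sample = extract_song_data(h5_files[0])
print(f"{len(sample)} fields extracted")
print(sample.get("title"), "-", sample.get("artist_name"))
```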
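
Since `ub.saves` writes the DataFrame to the `trojblue/million-song-subset` repository on the Hugging Face Hub, the published data can be read back for analysis. A minimal sketch, assuming the upload is exposed in a format the `datasets` library can resolve (e.g. Parquet) with a single default split:

```python
from datasets import load_dataset

# Assumption: the uploaded DataFrame is available as a standard Hub dataset
# with one "train" split; adjust the split name if the repo is laid out differently.
ds = load_dataset("trojblue/million-song-subset", split="train")
print(ds[0]["title"], "-", ds[0]["artist_name"])
```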