Datasets:
Add scripts from jeli-asr repo
Browse files
scripts/clean_tsv.py
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Copyright 2024 RobotsMali AI4D Lab.
|
3 |
+
|
4 |
+
Licensed under the Creative Commons Attribution 4.0 International License (the "License");
|
5 |
+
you may not use this file except in compliance with the License.
|
6 |
+
You may obtain a copy of the License at
|
7 |
+
|
8 |
+
https://creativecommons.org/licenses/by/4.0/
|
9 |
+
|
10 |
+
Unless required by applicable law or agreed to in writing, software
|
11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
See the License for the specific language governing permissions and
|
14 |
+
limitations under the License.
|
15 |
+
"""
|
16 |
+
import csv
|
17 |
+
import re
|
18 |
+
import os
|
19 |
+
import sys
|
20 |
+
import glob
|
21 |
+
|
22 |
+
def clean_tsv(input_file: str, output_file: str, revision_file: str) -> None:
    """Clean a tab-separated transcription file and split off bad rows.

    Each row is scrubbed of unwanted characters and consecutive tabs are
    collapsed. Rows that end up with exactly 4 fields and integer
    timestamps are kept; rows with more fields are repaired when a tab was
    typed instead of a space after a comma; everything else goes to a
    separate revision file for manual review.

    Args:
        input_file (str): The path to the tsv file to clean.
        output_file (str): The path to the file to save the cleaned rows in
            (typically the same file).
        revision_file (str): The path to the file to save still inconsistent
            rows in. Only written when at least one row needs revision.
    """
    cleaned_rows = []
    revision_rows = []

    # Helper function to clean one field
    def clean_line(line):
        # Remove unwanted characters
        line = re.sub(r'[<>"]', '', line)
        # Replace consecutive tabs with a single tab
        line = re.sub(r'\t+', '\t', line)
        return line

    # Open the input file and process each row
    with open(input_file, 'r', encoding='utf-8') as infile:
        reader = csv.reader(infile, delimiter='\t')

        for row in reader:
            row = [clean_line(item) for item in row]

            if len(row) == 4:
                try:
                    # Format check only: the parsed values are never used;
                    # a ValueError means the timestamps are not integers.
                    int(row[0])
                    int(row[1])
                    cleaned_rows.append(row)
                except ValueError:
                    # Timestamps are not valid integers: move to revision
                    revision_rows.append(row)
                    print("One problematic row has been added to revision")
            elif len(row) > 4:
                # A tab may have been typed instead of a space after a
                # comma; undo that and re-check the field count.
                row_str = "\t".join(row)
                if ',' in row_str:
                    row_str = re.sub(r',\t', ', ', row_str)
                row_fixed = row_str.split('\t')
                if len(row_fixed) == 4:
                    cleaned_rows.append(row_fixed)
                else:
                    revision_rows.append(row_fixed)
                    print("One problematic row has been added to revision")
            else:
                # Too few fields: needs manual review
                revision_rows.append(row)
                print("One problematic row has been added to revision")

    # Write the cleaned rows to the output file
    with open(output_file, 'w', encoding='utf-8', newline='') as outfile:
        writer = csv.writer(outfile, delimiter='\t')
        writer.writerows(cleaned_rows)
        print(f"**** New cleaned tsv file saved at {output_file} ****")

    if revision_rows:
        # Write the revision rows to the revision file if there are rows to review
        with open(revision_file, 'w', encoding='utf-8', newline='') as revfile:
            writer = csv.writer(revfile, delimiter='\t')
            writer.writerows(revision_rows)
            print(f"**** New revision file saved at {revision_file} ****")
|
97 |
+
|
98 |
+
if __name__ == "__main__":
    transcriptions_path = sys.argv[1]

    # Make sure the revision directory exists before writing into it
    revisions_path = f'{transcriptions_path}/revisions'
    os.makedirs(revisions_path, exist_ok=True)

    # Clean every tsv transcription file in the directory, in place
    for current_tsv in glob.glob(transcriptions_path + "/*.tsv"):
        revision_target = revisions_path + "/" + current_tsv.split("/")[-1][:-4] + "-rev.tsv"
        clean_tsv(input_file=current_tsv, output_file=current_tsv, revision_file=revision_target)
|
scripts/convert_to_mono_channel.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Copyright 2024 RobotsMali AI4D Lab.
|
3 |
+
|
4 |
+
Licensed under the Creative Commons Attribution 4.0 International License (the "License");
|
5 |
+
you may not use this file except in compliance with the License.
|
6 |
+
You may obtain a copy of the License at
|
7 |
+
|
8 |
+
https://creativecommons.org/licenses/by/4.0/
|
9 |
+
|
10 |
+
Unless required by applicable law or agreed to in writing, software
|
11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
See the License for the specific language governing permissions and
|
14 |
+
limitations under the License.
|
15 |
+
"""
|
16 |
+
from pydub import AudioSegment
|
17 |
+
import sys
|
18 |
+
import json
|
19 |
+
|
20 |
+
def convert_to_mono(file_path):
    """Rewrite an audio file in place as mono wav when it has multiple channels."""
    segment = AudioSegment.from_file(file_path)
    if segment.channels <= 1:
        return
    # Down-mix and overwrite the original file
    segment.set_channels(1).export(file_path, format="wav")
    print(f"Converted {file_path} to mono.")
|
27 |
+
|
28 |
+
def check_and_convert_audio_channels(manifest_path):
    """Scan a NeMo manifest and force every referenced audio file to mono."""
    with open(manifest_path, 'r') as f:
        entries = f.readlines()

    for entry in entries:
        path = json.loads(entry)['audio_filepath']
        # Probe the channel count before deciding whether to convert
        channels = AudioSegment.from_file(path).channels
        if channels > 1:
            print(f"{path} is not mono. Converting...")
            convert_to_mono(path)
        else:
            print(f"{path} is already mono.")
|
41 |
+
|
42 |
+
if __name__ == "__main__":
    # Exactly one positional argument is expected: the manifest path
    if len(sys.argv) != 2:
        print("Usage: python script.py <manifest_path>")
        sys.exit(1)

    check_and_convert_audio_channels(manifest_path=sys.argv[1])
|
scripts/create_data_manifest.py
ADDED
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Copyright 2024 RobotsMali AI4D Lab.
|
3 |
+
|
4 |
+
Licensed under the Creative Commons Attribution 4.0 International License (the "License");
|
5 |
+
you may not use this file except in compliance with the License.
|
6 |
+
You may obtain a copy of the License at
|
7 |
+
|
8 |
+
https://creativecommons.org/licenses/by/4.0/
|
9 |
+
|
10 |
+
Unless required by applicable law or agreed to in writing, software
|
11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
See the License for the specific language governing permissions and
|
14 |
+
limitations under the License.
|
15 |
+
"""
|
16 |
+
## Imports
|
17 |
+
import glob
|
18 |
+
import os
|
19 |
+
import csv
|
20 |
+
import random
|
21 |
+
import json
|
22 |
+
import shutil
|
23 |
+
import sys
|
24 |
+
from pydub import AudioSegment
|
25 |
+
|
26 |
+
# Key callable to sort wav files paths
|
27 |
+
def key_sort_paths(path: str) -> int:
    """Serve as key function to sort the wav files paths.

    Args:
        path (str): An individual path whose file stem ends with the split
            number (e.g. ".../griots_r17_3.wav").

    Returns:
        int: The number of the split (between 1 and 6 in the original data,
            but any trailing integer is supported).

    Raises:
        ValueError: If the file stem does not end with a digit.
    """
    # Collect every trailing digit of the stem instead of hard-coding
    # path[-5], so multi-digit split numbers also sort correctly.
    stem = os.path.splitext(path)[0]
    digits = ""
    while stem and stem[-1].isdigit():
        digits = stem[-1] + digits
        stem = stem[:-1]
    return int(digits)
|
37 |
+
|
38 |
+
# Function to read and combine the audios
|
39 |
+
def read_audios(glob_paths: list[str]) -> AudioSegment:
    """Read the six 10 mns audio as AudioSegments and return the combined 1 hr audio.

    Args:
        glob_paths (list[str]): list of the paths of the 6 .wav files

    Returns:
        AudioSegment: The combined audio, concatenated in split order
    """
    # Load the segments in split-number order, then concatenate them
    ordered_paths = sorted(glob_paths, key=key_sort_paths)
    segments = [AudioSegment.from_file(file=p, format="wav") for p in ordered_paths]
    combined = segments[0]
    for segment in segments[1:]:
        combined = combined + segment
    return combined
|
53 |
+
|
54 |
+
# A function that reads and return the utterances from .tsv files
|
55 |
+
def read_tsv(tsv_file_path: str) -> list[list[int | str]]:
|
56 |
+
"""Read a .tsv file and return the utterances in it
|
57 |
+
|
58 |
+
Args:
|
59 |
+
tsv_file_path (str): The path to the tsv file
|
60 |
+
|
61 |
+
Returns:
|
62 |
+
list[list[int | str]]: The returned utterances with the timestamps coverted to int
|
63 |
+
"""
|
64 |
+
with open(tsv_file_path,"r", encoding='utf-8') as recording_transcript:
|
65 |
+
tsv_file_rows = csv.reader(recording_transcript, delimiter="\t")
|
66 |
+
utterances = [[int(start), int(end), bam, french] for start, end, bam, french in tsv_file_rows]
|
67 |
+
return utterances
|
68 |
+
|
69 |
+
# Function to subdivide the audio (transcript) into multiple variable length slices
|
70 |
+
def create_var_length_samples(utterances: list[list[int | str]], min_duration: int = 1000,
|
71 |
+
max_duration: int = 120000) -> list[list[list[int | str]]]:
|
72 |
+
"""Create variable length combination of utterances to make samples which duration vary between 1s and 2mns
|
73 |
+
|
74 |
+
Args:
|
75 |
+
utterances (list[list[int | str]]): The read tsv file containing the transcriptions of the audio
|
76 |
+
min_duration (int, optional): min duration of a sample in milliseconds. Defaults to 1000.
|
77 |
+
max_duration (int, optional): max duration of a sample in milliseconds. Defaults to 120000.
|
78 |
+
|
79 |
+
Returns:
|
80 |
+
list[list[list[int | str]]]: The list of created samples
|
81 |
+
"""
|
82 |
+
samples = []
|
83 |
+
current_slice = []
|
84 |
+
current_duration = 0
|
85 |
+
|
86 |
+
i = 0
|
87 |
+
while i < len(utterances):
|
88 |
+
utterance_start, utterance_end = utterances[i][:2]
|
89 |
+
utterance_duration = utterance_end - utterance_start
|
90 |
+
|
91 |
+
# If current slice duration is less than max duration, add the utterance to this sample
|
92 |
+
if current_duration + utterance_duration <= max_duration:
|
93 |
+
current_slice.append(utterances[i])
|
94 |
+
current_duration += utterance_duration
|
95 |
+
i += 1
|
96 |
+
else:
|
97 |
+
# Save the current sample and reset for a new one
|
98 |
+
samples.append(current_slice)
|
99 |
+
current_slice = []
|
100 |
+
current_duration = 0
|
101 |
+
|
102 |
+
# Randomly decide whether to end the current sample based on time or number of utterances
|
103 |
+
if current_duration >= min_duration:
|
104 |
+
if random.choice([True, False, False]) or len(current_slice) >= random.randint(1, 20):
|
105 |
+
samples.append(current_slice)
|
106 |
+
current_slice = []
|
107 |
+
current_duration = 0
|
108 |
+
|
109 |
+
# Add the final slice if it exists
|
110 |
+
if current_slice: # equivalent to if current_slice is empty
|
111 |
+
samples.append(current_slice)
|
112 |
+
|
113 |
+
return samples
|
114 |
+
|
115 |
+
# Function to create and save the audio samples for a specific list of samples
|
116 |
+
def slice_and_save_audios(samples: list[list[list[int | str]]], griot_id: str,
                          data_dir: str, audio_dir_path: str) -> list[list[float | str]]:
    """Slice and save the audio samples created for a specific 1hr recording.

    Args:
        samples (list[list[list[int | str]]]): The samples created with function "create_var_length_samples"
        griot_id (str): The ID of the griot in the recording (eg: griots_r17)
        data_dir (str): The directory containing all the data.
        audio_dir_path (str): The directory to save the sliced audios in.

    Returns:
        list[list[float | str]]: One manifest row per sample:
            [audiofile_path, duration, bambara, translation]
    """
    recording = read_audios(glob_paths=glob.glob(f'{data_dir}/{griot_id}/*.wav'))
    # Rows holding only the data needed to build the manifests later
    manifest_rows = []

    for sample in samples:
        start, end = sample[0][0], sample[-1][1]
        duration = (end - start) / 1000  # in seconds
        # Flag audios with more than 100 seconds
        long_flag = " ###" if duration >= 100 else ""

        # Join the per-utterance transcriptions and translations
        transcription = " ".join(utt[2] for utt in sample)
        translation = " ".join(utt[3] for utt in sample)

        # Export the slice as its own wav file
        sample_path = f"{audio_dir_path}/{griot_id}-{start}-{end}.wav"
        recording[start:end].export(out_f=sample_path, format="wav")
        print(f"Sample {griot_id}-{start}-{end} saved in {sample_path}{long_flag}")

        manifest_rows.append([sample_path, duration, transcription, translation])
    return manifest_rows
|
154 |
+
|
155 |
+
# A function to shuffle and split samples
|
156 |
+
def shuffle_and_split(dataset: list[list[float | str]],
|
157 |
+
test: int | float = 0.15) -> tuple[list[list[float | str]]]:
|
158 |
+
"""Shuffle and split the whole dataset
|
159 |
+
|
160 |
+
Args:
|
161 |
+
dataset (list[list[int | str]]): The combined list of all list manifest returned by "slice_and_save_audios"
|
162 |
+
test (int | float, optional): The number of sample to include that make the test set or and percentage of the whole dataset to use as the test set. Defaults to 0.15.
|
163 |
+
|
164 |
+
Returns:
|
165 |
+
tuple[list[list[list[int | str]]]]: The train and test sets samples returned separately
|
166 |
+
"""
|
167 |
+
random.shuffle(dataset)
|
168 |
+
if isinstance(test, float):
|
169 |
+
test = int(test * len(dataset))
|
170 |
+
test_set_samples = dataset[0:test]
|
171 |
+
train_set_samples = dataset[test:]
|
172 |
+
return train_set_samples, test_set_samples
|
173 |
+
|
174 |
+
# A function to create audio sample files and manifests
|
175 |
+
def create_manifest(dataset_split: list[list[float | str]], split_name: str,
                    dir_path: str) -> None:
    """Create manifest files for one dataset split.

    Args:
        dataset_split (list[list[float | str]]): Split of the dataset to create manifest for
        split_name (str): Name of the split
        dir_path (str): The directory to save the new data manifest in
    """
    audio_dir_path = f'{dir_path}/audios/{split_name}'
    bam_manifest_path = f'{dir_path}/manifests/{split_name}_manifest.json'
    fr_manifest_path = f'{dir_path}/french-manifests/{split_name}_french_manifest.json'

    # Make sure the manifest and audio directories exist
    for needed_dir in (f'{dir_path}/manifests', f'{dir_path}/french-manifests', audio_dir_path):
        os.makedirs(needed_dir, exist_ok=True)

    with open(bam_manifest_path, 'w', encoding="utf-8") as bam_file, open(fr_manifest_path, 'w', encoding="utf-8") as fr_file:
        for audio_path, duration, bambara, french in dataset_split:
            # Move the audio sample file into the corresponding split directory
            moved_path = f'{audio_dir_path}/{audio_path.split("/")[-1]}'
            shutil.move(src=audio_path, dst=moved_path)

            rel_path = os.path.relpath(moved_path)
            # Bambara transcription goes to the "text" field of the main manifest
            bam_file.write(json.dumps({
                "audio_filepath": rel_path,
                "duration": duration,
                "text": bambara
            }) + '\n')
            # French translation goes to the parallel french manifest
            fr_file.write(json.dumps({
                "audio_filepath": rel_path,
                "duration": duration,
                "text": french
            }) + '\n')
    print(f"{split_name} manifests files have been created successfully!\nCorresponding audios files have been moved to {audio_dir_path}")
|
217 |
+
|
218 |
+
if __name__ == "__main__":
    data_path = sys.argv[1]
    manifest_dir = sys.argv[2]

    # Get all the revised transcription files in .tsv format
    tsv_paths = glob.glob(f'{data_path}/aligned-transcriptions/*.tsv')

    # One flat list of manifest rows across all griots
    final_list_manifest = []
    for tsv_file in tsv_paths:
        id_griot = tsv_file.split("/")[-1][:-4]
        # Samples can be made of one or more utterances
        griot_samples = create_var_length_samples(utterances=read_tsv(tsv_file_path=tsv_file))
        final_list_manifest.extend(
            slice_and_save_audios(samples=griot_samples, griot_id=id_griot,
                                  data_dir=data_path, audio_dir_path=f'{manifest_dir}/audios')
        )

    # Shuffle and split: use 15% of the dataset for test
    train_set, test_set = shuffle_and_split(dataset=final_list_manifest, test=0.15)
    print(f'len(train_set) == {len(train_set)} and len(test_set) == {len(test_set)}')

    create_manifest(dataset_split=train_set, split_name="train", dir_path=manifest_dir)
    create_manifest(dataset_split=test_set, split_name="test", dir_path=manifest_dir)
|
scripts/create_manifest_oza_bam_asr.py
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Copyright 2024 RobotsMali AI4D Lab.
|
3 |
+
|
4 |
+
Licensed under the Creative Commons Attribution 4.0 International License (the "License");
|
5 |
+
you may not use this file except in compliance with the License.
|
6 |
+
You may obtain a copy of the License at
|
7 |
+
|
8 |
+
https://creativecommons.org/licenses/by/4.0/
|
9 |
+
|
10 |
+
Unless required by applicable law or agreed to in writing, software
|
11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
See the License for the specific language governing permissions and
|
14 |
+
limitations under the License.
|
15 |
+
"""
|
16 |
+
from datasets import load_dataset
|
17 |
+
from huggingface_hub import login
|
18 |
+
import os
|
19 |
+
import json
|
20 |
+
import soundfile as sf
|
21 |
+
import numpy as np
|
22 |
+
|
23 |
+
# Log in to Hugging Face
# NOTE(review): these statements run at import time — importing this module
# triggers an interactive login and a full dataset download as side effects.
login()

# Load the clean subset of the dataset
oza_bam_asr_clean = load_dataset("oza75/bambara-asr", name="clean")

# Define root directory for the Nemo version
# (audio exports and both manifest flavours are laid out underneath it)
root_dir = "oza-bam-asr-clean"
os.makedirs(root_dir, exist_ok=True)
os.makedirs(f"{root_dir}/audios", exist_ok=True)
os.makedirs(f"{root_dir}/manifests", exist_ok=True)
os.makedirs(f"{root_dir}/french-manifests", exist_ok=True)
|
35 |
+
|
36 |
+
def save_audio_and_create_manifest(datasets, manifest_path, french_manifest_path):
    """Export every example's audio to wav and write NeMo-style manifests.

    Args:
        datasets: Iterable of dataset splits to process sequentially.
        manifest_path: Output path for the Bambara manifest (jsonl).
        french_manifest_path: Output path for the French manifest (jsonl).

    Returns:
        Cumulative duration of all exported audio, in seconds.
    """
    total_duration = 0
    idx = 0
    with open(manifest_path, "w", encoding="utf-8") as manifest_file, open(french_manifest_path, "w", encoding="utf-8") as french_file:
        for dataset in datasets:
            for example in dataset:
                # Dump the raw samples as a .wav file
                audio_path = f"{root_dir}/audios/oza75-bam-asr-{idx}.wav"
                sf.write(audio_path, np.array(example['audio']['array']), example['audio']['sampling_rate'])

                # Accumulate the running total of audio duration
                duration = example['duration']
                total_duration += duration

                rel_path = os.path.relpath(audio_path)
                # One jsonl line per example in each manifest flavour
                manifest_file.write(json.dumps(
                    {"audio_filepath": rel_path, "duration": duration, "text": example['bambara'].lower()},
                    ensure_ascii=False) + "\n")
                french_file.write(json.dumps(
                    {"audio_filepath": rel_path, "duration": duration, "text": example['french'].lower()},
                    ensure_ascii=False) + "\n")

                idx += 1

    return total_duration
|
69 |
+
|
70 |
+
if __name__ == "__main__":
    # Merge the train and test splits into a single training manifest
    total_seconds = save_audio_and_create_manifest(
        [oza_bam_asr_clean['train'], oza_bam_asr_clean['test']],
        f"{root_dir}/manifests/train_manifest.json",
        f"{root_dir}/french-manifests/train_french_manifest.json"
    )

    # Report the total amount of audio, converted from seconds to hours
    print(f"Created Nemo manifest for 'oza75/bambara-asr' totalling {total_seconds / 3600:.2f} hours of audio.")
|
scripts/filter_silent_and_inaudible.py
ADDED
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Copyright 2024 RobotsMali AI4D Lab.
|
3 |
+
|
4 |
+
Licensed under the Creative Commons Attribution 4.0 International License (the "License");
|
5 |
+
you may not use this file except in compliance with the License.
|
6 |
+
You may obtain a copy of the License at
|
7 |
+
|
8 |
+
https://creativecommons.org/licenses/by/4.0/
|
9 |
+
|
10 |
+
Unless required by applicable law or agreed to in writing, software
|
11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
See the License for the specific language governing permissions and
|
14 |
+
limitations under the License.
|
15 |
+
"""
|
16 |
+
import librosa
|
17 |
+
import json
|
18 |
+
import numpy as np
|
19 |
+
|
20 |
+
# Get the mean mel energy for of a silent audio to better tune the params in flag_silent_audio_mel
|
21 |
+
def get_mel_energy(audio_path, n_mels=80, frame_duration=0.025):
    """
    Computes the average normalized Mel-frequency energy of an audio file.

    Used to tune the thresholds of `flag_silent_audio_mel`.

    Args:
        audio_path (str): Path to the audio file.
        n_mels (int): Number of Mel bands for the spectrogram.
        frame_duration (float): Duration of each frame in seconds.

    Returns:
        float: Average Mel-frequency energy, normalized to [0, 1].
    """
    # Load audio at its native sampling rate
    y, sr = librosa.load(audio_path, sr=None)

    # Compute frame length in samples
    frame_length = int(frame_duration * sr)
    hop_length = frame_length  # No overlap for simplicity

    # Compute Mel spectrogram
    mel_spectrogram = librosa.feature.melspectrogram(
        y=y, sr=sr, n_mels=n_mels, hop_length=hop_length, power=2.0
    )

    # Compute average energy for each frame
    mel_energies = np.mean(mel_spectrogram, axis=0)

    # Normalize to [0, 1]; guard against all-zero (perfectly silent) audio,
    # which previously divided by zero and returned NaN.
    peak = np.max(mel_energies)
    if peak > 0:
        mel_energies /= peak

    return np.mean(mel_energies)
|
50 |
+
|
51 |
+
def flag_silent_audio_mel(manifest_path, mel_energy_threshold=0.001, speech_fraction=0.2, n_mels=80, frame_duration=0.025):
    """
    Flags audio files with little or no speech based on Mel-frequency energy.

    Args:
        manifest_path (str): Path to the manifest file.
        mel_energy_threshold (float): Threshold for Mel-frequency energy to consider a frame as containing speech.
        speech_fraction (float): Minimum fraction of frames required to classify as speech.
        n_mels (int): Number of Mel bands to use for the Mel spectrogram.
        frame_duration (float): Duration of each frame in seconds.

    Returns:
        list: A list of audio filepaths flagged as silent or low-energy.
    """
    silent_audio_files = []

    with open(manifest_path, 'r', encoding='utf-8') as f:
        manifest = [json.loads(line.strip()) for line in f]

    for entry in manifest:
        audio_path = entry['audio_filepath']
        try:
            # Load audio at its native sampling rate
            y, sr = librosa.load(audio_path, sr=None)

            # Compute frame length in samples
            frame_length = int(frame_duration * sr)
            hop_length = frame_length  # No overlap for simplicity

            # Compute Mel spectrogram
            mel_spectrogram = librosa.feature.melspectrogram(
                y=y, sr=sr, n_mels=n_mels, hop_length=hop_length, power=2.0
            )

            # Compute average energy for each frame
            mel_energies = np.mean(mel_spectrogram, axis=0)

            # Guard against empty or all-zero audio: without this check the
            # normalization below divides by zero (NaNs) and the speech
            # fraction divides by zero on zero-length audio. Such files are
            # silent by definition, so flag them directly.
            total_frames = len(mel_energies)
            peak = np.max(mel_energies) if total_frames else 0.0
            if total_frames == 0 or peak == 0:
                print(f"Audio file {audio_path} is classified as silent.")
                silent_audio_files.append(audio_path)
                continue

            # Normalize energy to [0, 1]
            mel_energies /= peak

            # Count frames above the threshold
            speech_frames = np.sum(mel_energies > mel_energy_threshold)

            # Classify as silent if speech fraction is below threshold
            if speech_frames / total_frames < speech_fraction:
                print(f"Audio file {audio_path} is classified as silent.")
                silent_audio_files.append(audio_path)
        except Exception as e:
            print(f"Error processing file {audio_path}: {e}")

    return silent_audio_files
|
103 |
+
|
104 |
+
def remove_silent_audio_from_manifest(manifest_path, silent_audio_files, output_manifest_path):
    """
    Removes entries of flagged audio files from the manifest.

    Args:
        manifest_path (str): Path to the input manifest file.
        silent_audio_files (list): List of audio filepaths flagged as silent or low-energy.
        output_manifest_path (str): Path to save the filtered manifest.

    Returns:
        None
    """
    with open(manifest_path, 'r', encoding='utf-8') as f:
        manifest = [json.loads(line.strip()) for line in f]

    # Use a set for O(1) membership tests instead of scanning the flagged
    # list once per manifest entry (O(n*m) before).
    flagged = set(silent_audio_files)
    filtered_manifest = [entry for entry in manifest if entry['audio_filepath'] not in flagged]

    # Write the filtered manifest to the output file
    with open(output_manifest_path, 'w', encoding='utf-8') as f:
        for entry in filtered_manifest:
            f.write(json.dumps(entry) + '\n')

    print(f"Filtered manifest saved to {output_manifest_path}")
|
128 |
+
|
129 |
+
if __name__ == "__main__":
    import sys

    # Exactly one positional argument is expected: the manifest path
    if len(sys.argv) != 2:
        print("Usage: python script.py <manifest_path>")
        sys.exit(1)

    manifest_path = sys.argv[1]
    flagged_files = flag_silent_audio_mel(manifest_path)
    remove_silent_audio_from_manifest(
        manifest_path, flagged_files, manifest_path.replace(".json", "_filtered.json")
    )
|
140 |
+
|
scripts/lower_transcriptions_in_manifests.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Copyright 2024 RobotsMali AI4D Lab.
|
3 |
+
|
4 |
+
Licensed under the Creative Commons Attribution 4.0 International License (the "License");
|
5 |
+
you may not use this file except in compliance with the License.
|
6 |
+
You may obtain a copy of the License at
|
7 |
+
|
8 |
+
https://creativecommons.org/licenses/by/4.0/
|
9 |
+
|
10 |
+
Unless required by applicable law or agreed to in writing, software
|
11 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
See the License for the specific language governing permissions and
|
14 |
+
limitations under the License.
|
15 |
+
"""
|
16 |
+
import json
|
17 |
+
import os
|
18 |
+
|
19 |
+
# Hardcoded paths for the manifest files
# (keys are split/language labels; values are jsonl manifests to process)
manifest_paths = {
    "train_bambara": "jeli-data-manifest/manifests/train_manifest.json",
    "test_bambara": "jeli-data-manifest/manifests/test_manifest.json",
    "train_french": "jeli-data-manifest/french-manifests/train_french_manifest.json",
    "test_french": "jeli-data-manifest/french-manifests/test_french_manifest.json",
}

# Directory to save the lowercase transcriptions
# NOTE(review): created at import time relative to the current working
# directory — run this script from the repository root.
output_dir = "lower-case-transcriptions"
os.makedirs(output_dir, exist_ok=True)
|
30 |
+
|
31 |
+
# Function to process a manifest file
|
32 |
+
def lowercase_manifest(input_path, output_path):
    """Convert all transcriptions in a manifest file to lowercase."""
    with open(input_path, "r", encoding="utf-8") as infile, open(output_path, "w", encoding="utf-8") as outfile:
        # Rewrite each jsonl record with its "text" field lower-cased
        for raw_line in infile:
            record = json.loads(raw_line)
            record["text"] = record["text"].lower()
            outfile.write(json.dumps(record, ensure_ascii=False) + "\n")
|
39 |
+
|
40 |
+
# Lower-case every configured manifest into the output directory
for label, source_path in manifest_paths.items():
    target_path = os.path.join(output_dir, os.path.basename(source_path))
    lowercase_manifest(source_path, target_path)
    print(f"Processed {source_path} and saved to {target_path}")

print("All manifest files have been processed.")
|