---
dataset_info:
  features:
  - name: audio
    dtype: audio
  - name: sentence
    dtype: string
  - name: length
    dtype: float64
  splits:
  - name: train
    num_bytes: 6455680072.888
    num_examples: 181408
  download_size: 6403081821
  dataset_size: 6455680072.888
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
license: artistic-2.0
task_categories:
- automatic-speech-recognition
- translation
- text-to-speech
- text-to-audio
language:
- ja
tags:
- Japanese
- good dataset
pretty_name: 'Japanese audio and text from games, edited for NLP and ASR training'
size_categories:
- 100K<n<1M
---
Hello :) I removed all samples that contained Roman letters, along with a number of other cleanups; see the code below for details. The data is intended for training language models.

Something like this:

```python
import os
import re

import soundfile as sf
from datasets import load_dataset
from tqdm import tqdm

max_len = 25.0  # keep clips shorter than 25 seconds
min_len = 2.0   # keep clips longer than 2 seconds

dataset = load_dataset("Sin2pi/JA_audio_JA_text_180k_samples", "default", split="train", trust_remote_code=True, streaming=True, token="")

name = "gv"
output_dir = "./datasets/"
output_file = "metadata.csv"

folder_path = os.path.join(output_dir, name)  # folder to store the audio and transcription files
os.makedirs(folder_path, exist_ok=True)

latin_chars = r'[ 0-9A-Za-z]'  # reject samples containing Roman letters, digits, or ASCII spaces
special_characters = r'[♬「」?!“%‘”~♪…?!゛#$%&()*+:;〈=〉@^_{|}~"█♩♫』『.;:<>_()*&^$#@`, ]'

for i, sample in tqdm(enumerate(dataset)):  # process each sample in the streamed dataset
    audio_sample = name + f"_{i}.mp3"  # or .wav
    audio_path = os.path.join(folder_path, audio_sample)
    transcription_path = os.path.join(folder_path, output_file)  # path of the transcription file
    sample["audio_length"] = len(sample["audio"]["array"]) / sample["audio"]["sampling_rate"]  # length in seconds; remove if not needed
    sample["sentence"] = re.sub(special_characters, "", sample["sentence"])
    if (not os.path.exists(audio_path)
            and sample["sentence"]
            and min_len < sample["audio_length"] < max_len
            and not re.search(latin_chars, sample["sentence"])):
        sf.write(audio_path, sample["audio"]["array"], sample["audio"]["sampling_rate"])
        with open(transcription_path, "a", encoding="utf-8") as transcription_file:
            transcription_file.write(audio_sample + ",")                  # file name
            transcription_file.write(sample["sentence"])                  # cleaned transcription
            transcription_file.write("," + str(sample["audio_length"]))   # audio length; remove if not needed
            transcription_file.write("\n")
```
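Once the export has run, a quick sanity check of the generated `metadata.csv` can catch filtering mistakes. A minimal sketch with pandas (the column names below are my own labels, since the script writes no header row):

```python
import pandas as pd

# The export script writes headerless rows of: file name, cleaned sentence, length in seconds.
meta = pd.read_csv("./datasets/gv/metadata.csv", header=None,
                   names=["file_name", "sentence", "length"])

print(len(meta), "clips exported")
print(meta["length"].describe())  # every value should fall inside the 2-25 second window
assert meta["length"].between(2.0, 25.0).all()
```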
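If you want to pull the exported folder straight back into `datasets`, the `audiofolder` builder should be able to read it, but it expects `metadata.csv` to carry a header row whose first column is named `file_name`. A sketch of one way to do that, assuming the layout produced by the script above and a reasonably recent `datasets` release:

```python
import pandas as pd
from datasets import load_dataset

folder = "./datasets/gv"

# Rewrite the headerless CSV with the header the audiofolder builder expects.
meta = pd.read_csv(f"{folder}/metadata.csv", header=None,
                   names=["file_name", "sentence", "length"])
meta.to_csv(f"{folder}/metadata.csv", index=False)

ds = load_dataset("audiofolder", data_dir=folder, split="train")
print(ds[0]["audio"]["sampling_rate"], ds[0]["sentence"], ds[0]["length"])
```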