language:
- en
- de
- es
- fr
- ja
- zh
license:
- cc-by-nc-sa-3.0
multilinguality:
- multilingual
task_categories:
- automatic-speech-recognition
- text-to-speech
- text-to-audio
pretty_name: talkbank_4_stt
dataset_info:
- config_name: de
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcript
dtype: string
- name: language_code
dtype: string
- name: subset
dtype: string
- name: full_language
dtype: string
- name: switch_id
dtype: string
- name: segment_id
dtype: string
- name: transcript_filename
dtype: string
- name: audio_len_sec
dtype: string
- name: orig_file_start
dtype: string
- name: orig_file_end
dtype: string
- name: channel
dtype: string
- name: speaker_id
dtype: string
splits:
- name: segment
num_bytes: 132776951
num_examples: 12944
- name: switch
num_bytes: 415139252
num_examples: 701
download_size: 542393172
dataset_size: 547916203
- config_name: en
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcript
dtype: string
- name: language_code
dtype: string
- name: subset
dtype: string
- name: full_language
dtype: string
- name: switch_id
dtype: string
- name: segment_id
dtype: string
- name: transcript_filename
dtype: string
- name: audio_len_sec
dtype: string
- name: orig_file_start
dtype: string
- name: orig_file_end
dtype: string
- name: channel
dtype: string
- name: speaker_id
dtype: string
splits:
- name: segment
num_bytes: 429002045
num_examples: 32092
- name: switch
num_bytes: 835236989
num_examples: 908
download_size: 1252758071
dataset_size: 1264239034
- config_name: es
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcript
dtype: string
- name: language_code
dtype: string
- name: subset
dtype: string
- name: full_language
dtype: string
- name: switch_id
dtype: string
- name: segment_id
dtype: string
- name: transcript_filename
dtype: string
- name: audio_len_sec
dtype: string
- name: orig_file_start
dtype: string
- name: orig_file_end
dtype: string
- name: channel
dtype: string
- name: speaker_id
dtype: string
splits:
- name: segment
num_bytes: 709904162
num_examples: 57150
- name: switch
num_bytes: 1540874873
num_examples: 2130
download_size: 2231409742
dataset_size: 2250779035
- config_name: fr
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcript
dtype: string
- name: language_code
dtype: string
- name: subset
dtype: string
- name: full_language
dtype: string
- name: switch_id
dtype: string
- name: segment_id
dtype: string
- name: transcript_filename
dtype: string
- name: audio_len_sec
dtype: string
- name: orig_file_start
dtype: string
- name: orig_file_end
dtype: string
- name: channel
dtype: string
- name: speaker_id
dtype: string
splits:
- name: segment
num_bytes: 289155390
num_examples: 14796
- name: switch
num_bytes: 481521011
num_examples: 281
download_size: 767082881
dataset_size: 770676401
- config_name: jp
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcript
dtype: string
- name: language_code
dtype: string
- name: subset
dtype: string
- name: full_language
dtype: string
- name: switch_id
dtype: string
- name: segment_id
dtype: string
- name: transcript_filename
dtype: string
- name: audio_len_sec
dtype: string
- name: orig_file_start
dtype: string
- name: orig_file_end
dtype: string
- name: channel
dtype: string
- name: speaker_id
dtype: string
splits:
- name: segment
num_bytes: 223881799
num_examples: 23272
- name: switch
num_bytes: 437307151
num_examples: 318
download_size: 653773923
dataset_size: 661188950
- config_name: zh
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcript
dtype: string
- name: language_code
dtype: string
- name: subset
dtype: string
- name: full_language
dtype: string
- name: switch_id
dtype: string
- name: segment_id
dtype: string
- name: transcript_filename
dtype: string
- name: audio_len_sec
dtype: string
- name: orig_file_start
dtype: string
- name: orig_file_end
dtype: string
- name: channel
dtype: string
- name: speaker_id
dtype: string
splits:
- name: segment
num_bytes: 221370323
num_examples: 23364
- name: switch
num_bytes: 456634693
num_examples: 1691
download_size: 667819759
dataset_size: 678005016
configs:
- config_name: de
data_files:
- split: segment
path: de/segment-*
- split: switch
path: de/switch-*
- config_name: en
data_files:
- split: segment
path: en/segment-*
- split: switch
path: en/switch-*
- config_name: es
data_files:
- split: segment
path: es/segment-*
- split: switch
path: es/switch-*
- config_name: fr
data_files:
- split: segment
path: fr/segment-*
- split: switch
path: fr/switch-*
- config_name: jp
data_files:
- split: segment
path: jp/segment-*
- split: switch
path: jp/switch-*
- config_name: zh
data_files:
- split: segment
path: zh/segment-*
- split: switch
path: zh/switch-*
# Dataset Card

## Dataset Description
This dataset is a benchmark based on the TalkBank [1] corpus, a large multilingual repository of conversational speech that captures real-world, unstructured interactions. We use CA-Bank [2], which focuses on phone conversations between adults and includes natural speech phenomena such as laughter, pauses, and interjections. To make the dataset accurate and suitable for benchmarking conversational ASR systems, we apply an extensive set of preprocessing steps.
## Preprocessing Steps
We apply the following preprocessing steps to ensure the dataset’s quality:
- Manual filtering of conversations
- Speaker-channel alignment
- Timestamp alignment using voice activity detection (VAD)
- Discarding segments based on Word Error Rate (WER) thresholds (an illustrative sketch of this step follows the list)
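
The exact pipeline, thresholds, and models are described in the paper; the sketch below only illustrates the WER-based filtering step, assuming the `jiwer` package for WER computation and a hypothetical `transcribe` function standing in for a reference ASR system.

```python
from jiwer import wer

# Hypothetical cut-off, not necessarily the value used for this dataset.
WER_THRESHOLD = 0.5


def transcribe(audio_array, sampling_rate):
    """Placeholder for a reference ASR system (e.g. a pretrained model)."""
    raise NotImplementedError


def keep_segment(segment):
    """Keep a segment only if the ASR hypothesis is close to the human transcript."""
    hypothesis = transcribe(segment["audio"]["array"], segment["audio"]["sampling_rate"])
    return wer(segment["transcript"], hypothesis) <= WER_THRESHOLD
```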
## Paper and Code Repository
For a comprehensive explanation of the preprocessing pipeline and dataset details, refer to our paper *ASR Benchmarking: Need for a More Representative Conversational Dataset*, and explore our GitHub repository for code and additional resources.
## Segmentation Types: Speaker Switch vs. Annotation
We offer two types of segmentation for this dataset (a loading example for both follows the list):
- Annotation-based Segmentation (the `segment` split): Segments are derived directly from the annotations provided in the original TalkBank corpus.
- Speaker Switch Segmentation (the `switch` split): Consecutive segments from the same speaker are consolidated into a single, larger audio segment, providing an alternative structure for analysis.
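
As a minimal sketch, the two segmentations can be loaded as the `segment` and `switch` splits of a language config with the Hugging Face `datasets` library; the repository id below is a placeholder for wherever this dataset is hosted.

```python
from datasets import load_dataset

# Placeholder repository id; replace with the actual Hub path of this dataset.
REPO_ID = "<namespace>/talkbank_4_stt"

# Annotation-based segmentation (German config)
segments = load_dataset(REPO_ID, "de", split="segment")

# Speaker-switch segmentation (German config)
switches = load_dataset(REPO_ID, "de", split="switch")

example = segments[0]
print(example["transcript"])
print(example["audio"]["sampling_rate"])  # 16000
print(float(example["audio_len_sec"]))    # durations are stored as strings
```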
## Citations
If you use this dataset, please cite:
@article{maheshwari2024asr,
title={ASR Benchmarking: Need for a More Representative Conversational Dataset},
author={Maheshwari, Gaurav and Ivanov, Dmitry and Johannet, Th{\'e}o and Haddad, Kevin El},
journal={arXiv preprint arXiv:2409.12042},
year={2024}
}
In addition, please acknowledge the TalkBank dataset:
@article{macwhinney2010transcribing,
title={Transcribing, searching and data sharing: The CLAN software and the TalkBank data repository},
author={MacWhinney, Brian and Wagner, Johannes},
journal={Gespr{\"a}chsforschung: Online-Zeitschrift zur verbalen Interaktion},
volume={11},
pages={154},
year={2010},
publisher={NIH Public Access}
}
## Licensing Information
This dataset is released under the CC BY-NC-SA 3.0 license.
## References
- [1] MacWhinney, Brian. "TalkBank: Building an open unified multimodal database of communicative interaction." (2004).
- [2] MacWhinney, Brian, and Johannes Wagner. "Transcribing, searching and data sharing: The CLAN software and the TalkBank data repository." Gesprächsforschung: Online-Zeitschrift zur verbalen Interaktion 11 (2010): 154.