---
dataset_info:
  features:
  - name: context
    dtype: audio
  - name: instruction
    dtype: string
  - name: answer
    dtype: string
  splits:
  - name: test
    num_bytes: 280587307.0
    num_examples: 2610
  download_size: 278679268
  dataset_size: 280587307.0
configs:
- config_name: default
  data_files:
  - split: test
    path: data/test-*
---

```bibtex
|
@article{poria2018meld,
  author        = {Poria, Soujanya and Hazarika, Devamanyu and Majumder, Navonil and Naik, Gautam and Cambria, Erik and Mihalcea, Rada},
  title         = {{MELD}: A Multimodal Multi-party Dataset for Emotion Recognition in Conversations},
  journal       = {arXiv preprint arXiv:1810.02508},
  eprint        = {1810.02508},
  archiveprefix = {arXiv},
  year          = {2018},
}
|
```

```bibtex
|
@inproceedings{wang2024audiobench,
  author    = {Wang, Bin and Zou, Xunlong and Lin, Geyu and Sun, Shuo and Liu, Zhuohan and Zhang, Wenyu and Liu, Zhengyuan and Aw, AiTi and Chen, Nancy F.},
  title     = {{AudioBench}: A Universal Benchmark for Audio Large Language Models},
  booktitle = {Proceedings of the 2025 Conference of the North American Chapter of the Association for Computational Linguistics ({NAACL})},
  year      = {2025},
}

```