---
license: apache-2.0
---
# Emotion2Vec-S

C<sup>2</sup>SER: [Paper](https://arxiv.org/abs/2502.18186) | [Code](https://github.com/zxzhao0/C2SER) | [HuggingFace](https://huggingface.co/collections/ASLP-lab/c2ser-67bc735d820403e7969fe8a0)

## Introduction

This repository contains the implementation of Emotion2Vec-S, a self-supervised learning (SSL) model for speech emotion recognition, as presented in our paper "Steering Language Model to Stable Speech Emotion Recognition via Contextual Perception and Chain of Thought".

## Requirements and Installation

This project follows the fairseq installation process.

### Requirements

- PyTorch version >= 1.10.0
- Python version >= 3.8

### Installation

To install fairseq and develop locally:

```bash
git clone https://github.com/pytorch/fairseq
cd fairseq
pip install --editable ./
```
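
Note that the feature-extraction snippet below also imports `torchaudio`, `numpy`, and `tqdm`. fairseq's install usually brings in `numpy` and `tqdm`, but `torchaudio` may need to be added separately; a suggested command (choose the build that matches your installed PyTorch):

```bash
# torchaudio is needed by the feature-extraction snippet below;
# install the version that matches your PyTorch build
pip install torchaudio
```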

### Feature Extraction

You can download the pre-trained [Emotion2vec-S model](https://drive.google.com/drive/folders/1LWWi6bahzn7fJP4fCgPleOyQ30sD_BWO?usp=drive_link) and put it in the `./Emotion2Vec-S/ckpt` folder.
Meanwhile, we provide the pretrained checkpoints on the Hugging Face model hub; you can also download the checkpoint file from [here](https://huggingface.co/ASLP-lab/Emotion2Vec-S). We also provide [here](https://drive.google.com/drive/folders/12AOVJT7I9GSLJnjHa-Elc-UKgog-mZR2) the feature files for the Emo-Emilia dataset extracted using Emotion2Vec-S.

If you want to extract features using Emotion2Vec-S, you will also need to provide a `wav.scp` file and place it in the `./Emotion2Vec-S` directory. Here is an example of the `wav.scp` file:
```text
audio_name1 /path/to/audio_name1.wav
audio_name2 /path/to/audio_name2.wav
audio_name3 /path/to/audio_name3.wav
```
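
Each line pairs an utterance ID with the path to its audio file. If your audio files live in a single directory, one possible way to generate `wav.scp` is a small shell loop like the following (`/path/to/wavs` is a placeholder):

```bash
# Write "<utterance_id> <absolute path>" for every .wav in the directory
for f in /path/to/wavs/*.wav; do
    echo "$(basename "$f" .wav) $(realpath "$f")"
done > ./Emotion2Vec-S/wav.scp
```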

Next, you can directly run the following code to extract features:
```python
import torch
import os
import sys
import json
import numpy as np
import argparse
from tqdm import tqdm
import torchaudio
import torch.nn.functional as F
import fairseq
from dataclasses import dataclass

SAMPLING_RATE = 16000

@dataclass
class UserDirModule:
    user_dir: str

def extract_fairseq_feature(wav_path, model, device):
    try:
        wav, sr = torchaudio.load(wav_path)
        # Merge multi-channel audio into a single channel (by averaging)
        if wav.size(0) > 1:
            wav = torch.mean(wav, dim=0, keepdim=True)
        if sr != SAMPLING_RATE:
            wav = torchaudio.functional.resample(wav, sr, SAMPLING_RATE)
        wav = wav[0, :].view(1, -1)
        wav = wav.to(device)
        out = model.extract_features(wav)
        return out
    except Exception as e:
        print(f"Error processing audio file {wav_path}: {e}")
        return None

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default="./Emotion2Vec-S/ckpt/checkpoint.pt")
    parser.add_argument('--model_dir', type=str, default="./Emotion2Vec-S/examples/data2vec/")
    parser.add_argument('--dump_dir', type=str, default="./Emotion2Vec-S/features_frm")
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--data', type=str, default="./Emotion2Vec-S/wav.scp")
    parser.add_argument('--level', type=str, default="frame", help="frame or utterance")
    args = parser.parse_args()

    # Read wav.scp: each line is "<segment_id> <path/to/audio.wav>"
    data = {}
    with open(args.data, 'r') as f:
        for line in f:
            seg_id, wav_path = line.strip().split(maxsplit=1)
            data[seg_id] = wav_path

    os.makedirs(args.dump_dir, exist_ok=True)

    seg_ids = data.keys()
    print(f'Loaded {len(seg_ids)} audio entries')

    # Load the pre-trained model
    my_model_path = UserDirModule(args.model_dir)
    fairseq.utils.import_user_module(my_model_path)
    model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([args.model_path])
    model = model[0].to(args.device)

    for seg_id in tqdm(seg_ids):
        wav_path = data[seg_id]
        if not os.path.exists(wav_path):
            print(f"WARNING: {wav_path} does not exist")
            continue
        try:
            torchaudio.load(wav_path)
        except Exception:
            print(f'ERROR: Failed to load {wav_path}')
            continue

        feat = extract_fairseq_feature(wav_path, model, args.device)

        if feat is not None:
            if args.level == 'frame':
                feat = feat['x'].cpu().detach().numpy()[0]
            elif args.level == 'utterance':
                feat = feat['utt_x'].cpu().detach().numpy()[0]
            else:
                raise ValueError("Unknown level: {}".format(args.level))

            save_path = os.path.join(args.dump_dir, f"{seg_id}.npy")
            os.makedirs(os.path.dirname(save_path), exist_ok=True)
            np.save(save_path, feat)
            print(f"Processed: {seg_id} | Shape: {feat.shape} | Saved to: {save_path}")
        else:
            print(f"Skipped problematic file: {seg_id}")

```
Alternatively, you can adjust the code according to your needs; the code path is `./Emotion2Vec-S/speech_feature_extraction.py`. You can also use the `./Emotion2Vec-S/extract_feature.sh` script to batch-process features for multiple datasets. The script supports parallel processing and takes the following parameters (an example single-run invocation is shown after the list):

- `--model_path`: Path to the checkpoint file
- `--model_dir`: Path to the model code directory (fairseq user dir)
- `--dump_dir`: Directory to save extracted features
- `--device`: Device to run the model on (e.g., 'cuda:0')
- `--data`: Path to the dataset scp file
- `--level`: Feature level (frame-level or utterance-level)
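
For a single dataset, the extraction code can also be invoked directly; a sketch, assuming `speech_feature_extraction.py` exposes the same arguments as the snippet above and the default paths shown earlier:

```bash
python3 ./Emotion2Vec-S/speech_feature_extraction.py \
    --model_path ./Emotion2Vec-S/ckpt/checkpoint.pt \
    --model_dir ./Emotion2Vec-S/examples/data2vec/ \
    --data ./Emotion2Vec-S/wav.scp \
    --dump_dir ./Emotion2Vec-S/features_frm \
    --device cuda \
    --level frame
```

Each utterance is then saved as `<segment_id>.npy` under `--dump_dir`.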

## Training and testing on EmoBox using extracted features

If you want to test our model on other datasets using [EmoBox](https://github.com/emo-box/EmoBox/tree/main), an example is provided below that you can modify to suit your needs.

Use k-fold cross-validation with learning rates (1e-3, 1e-4) and hidden sizes (128, 256):

```bash
cd examples/sb
data=/path/to/your/data_files
lrs=(1e-3 1e-4)          # Learning rate list
hidden_sizes=(128 256)   # Hidden size list
gpus=(0 1 2 3)           # GPU list
task_id=0
declare -A dataset_folds=(
    ["mesd"]=1
)
declare -A dataset_classes=(
    ["mesd"]=6
)
datasets=("mesd")

for dataset in "${datasets[@]}"; do
    folds=${dataset_folds[$dataset]}
    n_classes=${dataset_classes[$dataset]}

    for lr in "${lrs[@]}"; do
        for hidden_size in "${hidden_sizes[@]}"; do
            gpu=${gpus[$task_id % ${#gpus[@]}]}
            export CUDA_VISIBLE_DEVICES=$gpu
            task_number=$((task_id + 1))
            for fold in $(seq 1 $folds); do
                echo "Training fold $fold with lr=$lr, hidden_size=$hidden_size on GPU $gpu, task_number=$task_number, dataset=$dataset..."
                python3 train.py \
                    hparams/data2vec2-large_freeze.yaml \
                    --output_folder /path/to/your/${dataset}-S/fold${fold}_lr${lr}_hidden${hidden_size} \
                    --seed 1234 \
                    --batch_size 32 \
                    --lr $lr \
                    --train_annotation ${data}/${dataset}/fold_${fold}/${dataset}_train_fold_${fold}.json \
                    --test_annotation ${data}/${dataset}/fold_${fold}/${dataset}_test_fold_${fold}.json \
                    --number_of_epochs 100 \
                    --feat_dir /path/to/your/dump_${dataset}-S \
                    --label_map ${data}/${dataset}/label_map.json \
                    --device cuda \
                    --out_n_neurons ${n_classes} \
                    --hidden_size $hidden_size &
            done
            task_id=$((task_id + 1))
        done
    done
done

wait
echo "All training tasks completed."
```