# HuBERT fine-tuned on the DUSHA dataset for speech emotion recognition in Russian

The pre-trained model is [facebook/hubert-large-ls960-ft](https://huggingface.co/facebook/hubert-large-ls960-ft).

The DUSHA dataset used for fine-tuning can be found [here](https://github.com/salute-developers/golos/tree/master/dusha#dataset-structure).
# Fine-tuning

Fine-tuned in Google Colab using a Pro account with an A100 GPU.

Froze all layers except the projector, the classifier, and all 24 `HubertEncoderLayerStableLayerNorm` layers (see the sketch below).

Used half of the training set.
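A minimal sketch of this freezing scheme, assuming the standard `transformers` module layout for `HubertForSequenceClassification` (the exact training script was not published):

```python
from transformers import HubertForSequenceClassification

model = HubertForSequenceClassification.from_pretrained(
    "facebook/hubert-large-ls960-ft", num_labels=5
)

# Freeze every parameter first...
for param in model.parameters():
    param.requires_grad = False

# ...then unfreeze the 24 encoder layers plus the projector and classifier heads
for module in [*model.hubert.encoder.layers, model.projector, model.classifier]:
    for param in module.parameters():
        param.requires_grad = True
```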
# Training parameters

2 epochs \
train batch size = 8 \
eval batch size = 8 \
gradient accumulation steps = 4 \
learning rate = 5e-5, with no warm-up and no decay
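For reference, a hypothetical `TrainingArguments` configuration matching the values above; the actual training script was not published, so the output path is illustrative:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="hubert-dusha-finetuned",  # hypothetical output path
    num_train_epochs=2,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,        # effective train batch size of 32
    learning_rate=5e-5,
    lr_scheduler_type="constant",         # no decay
    warmup_steps=0,                       # no warm-up
)
```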
# Metrics

Achieved on the test set: \
accuracy = 0.86 \
balanced accuracy = 0.76 \
macro F1 score = 0.81

This improves on the accuracy and macro F1 score of the dataset baseline.
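These three metrics can be computed with scikit-learn; a small self-contained sketch, where `y_true` and `y_pred` stand in for the real test-set labels and model predictions:

```python
from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score

# Placeholder label ids (0-4, per the num2emotion mapping below), not real results
y_true = [0, 1, 2, 3, 4, 0, 0, 1]
y_pred = [0, 1, 2, 3, 0, 0, 0, 1]

print("accuracy =", accuracy_score(y_true, y_pred))
print("balanced accuracy =", balanced_accuracy_score(y_true, y_pred))
print("macro f1 =", f1_score(y_true, y_pred, average="macro"))
```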
# Usage

```python
from transformers import HubertForSequenceClassification, Wav2Vec2FeatureExtractor
import torchaudio
import torch

# Feature extractor from the base model, fine-tuned weights from this checkpoint
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/hubert-large-ls960-ft")
model = HubertForSequenceClassification.from_pretrained("xbgoose/hubert-dusha-finetuned")
num2emotion = {0: 'neutral', 1: 'angry', 2: 'positive', 3: 'sad', 4: 'other'}

filepath = "path/to/audio.wav"

# Load the audio and resample it to the 16 kHz the model expects
waveform, sample_rate = torchaudio.load(filepath, normalize=True)
transform = torchaudio.transforms.Resample(sample_rate, 16000)
waveform = transform(waveform)

inputs = feature_extractor(
    waveform,
    sampling_rate=feature_extractor.sampling_rate,
    return_tensors="pt",
    padding=True,
    max_length=16000 * 10,  # truncate clips longer than 10 seconds
    truncation=True
)

# waveform is (channels, samples), so indexing with [0] recovers a
# (channels, samples) batch; mono audio is assumed here
with torch.no_grad():
    logits = model(inputs['input_values'][0]).logits
predictions = torch.argmax(logits, dim=-1)
predicted_emotion = num2emotion[predictions.numpy()[0]]
print(predicted_emotion)
```