How to accelerate emotion extraction speed?

#10
by kdrkdrkdr

Is there a way to run it on GPU with float16, bfloat16, or a lower bit width?

audEERING GmbH org

You can do this using this code:

import time

import numpy as np
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from transformers import Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import (
    Wav2Vec2Model,
    Wav2Vec2PreTrainedModel,
)


class RegressionHead(nn.Module):
    r"""Classification head."""

    def __init__(self, config):

        super().__init__()

        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.final_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):

        x = features
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)

        return x


class EmotionModel(Wav2Vec2PreTrainedModel):
    r"""Speech emotion classifier."""

    def __init__(self, config):

        super().__init__(config)

        self.config = config
        self.wav2vec2 = Wav2Vec2Model(config)
        self.classifier = RegressionHead(config)
        self.init_weights()

    def forward(
            self,
            input_values,
    ):

        outputs = self.wav2vec2(input_values)
        hidden_states = outputs[0]
        hidden_states = torch.mean(hidden_states, dim=1)
        logits = self.classifier(hidden_states)

        return hidden_states, logits



# load model from hub
device = 'cuda'
torch_type = torch.float16
model_name = 'audeering/wav2vec2-large-robust-12-ft-emotion-msp-dim'
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = EmotionModel.from_pretrained(model_name).to(device)

# dummy signal
sampling_rate = 16000
signal = np.zeros((1, sampling_rate), dtype=np.float32)


def process_func(
    x: np.ndarray,
    sampling_rate: int,
    embeddings: bool = False,
) -> np.ndarray:
    r"""Predict emotions or extract embeddings from raw audio signal."""

    # run through processor to normalize signal
    # always returns a batch, so we just get the first entry
    # then we put it on the device
    y = processor(x, sampling_rate=sampling_rate)
    y = y['input_values'][0]
    y = y.reshape(1, -1)
    y = torch.from_numpy(y).to(device)

    # run through model
    with autocast(dtype=torch_type), torch.no_grad():
        start = time.perf_counter()
        y = model(y)[0 if embeddings else 1]
        print(f'Inference time: {time.perf_counter() - start} s')

    # convert to numpy
    y = y.detach().cpu().numpy()

    return y


print(process_func(signal, sampling_rate))
#  arousal    dominance  valence
# [[0.5460754  0.6062266  0.40431657]]

print(process_func(signal, sampling_rate, embeddings=True))
# Pooled hidden states of last transformer layer
# [[-0.00752167  0.0065819  -0.00746342 ...  0.00663632  0.00848748
#    0.00599211]]

You will see that, with device = 'cuda', the inference time drops after running inference a few times, since the first call includes one-time CUDA initialization and warm-up overhead (see the warm-up sketch after the timings below).

  • device = 'cpu':
Inference time: 0.2972284574061632 s
[[0.5460751  0.60622627 0.40431607]]
Inference time: 0.07324816845357418 s
[[-0.00752167  0.0065819  -0.00746342 ...  0.00663632  0.00848748
   0.00599211]]
  • device = 'cuda':
Inference time: 0.2644754536449909 s
[[0.5464 0.6064 0.4043]]
Inference time: 0.010410545393824577 s
[[-0.00752179  0.00658289 -0.0074669  ...  0.00663554  0.00848903
   0.00599307]]
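
To benchmark the steady-state speed, it therefore makes sense to discard the first few calls. A minimal sketch, reusing the process_func and signal defined above:

# warm-up: the first CUDA calls pay one-time initialization costs,
# so run a few inferences before reading the timings
for _ in range(3):
    process_func(signal, sampling_rate)

# this call now reflects steady-state inference speed
print(process_func(signal, sampling_rate))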

The dtype can also be specified; bfloat16 is possible as well, but it is slightly more complicated, since it requires a small workaround to convert the result back to numpy, and in practice it will not be much faster than float16.
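
For reference, a minimal sketch of that workaround (not part of the code above): inside process_func, cast the output to float32 before the numpy conversion, since numpy has no bfloat16 dtype:

torch_type = torch.bfloat16

# inside process_func, after the model call:
# numpy has no bfloat16 dtype, so cast to float32 first
y = y.detach().to(torch.float32).cpu().numpy()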

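The question also mentions lower bit widths. That is not covered above, but one option would be PyTorch's dynamic int8 quantization, which replaces the float weights of the Linear layers with int8 and runs on CPU. A hedged sketch; the actual speed-up and the effect on the emotion predictions would need to be verified for this model:

import torch
from torch.ao.quantization import quantize_dynamic

# dynamic quantization is CPU-only: move the model to CPU first,
# then quantize all nn.Linear modules to int8
quantized_model = quantize_dynamic(
    model.cpu(),
    {torch.nn.Linear},
    dtype=torch.qint8,
)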