# Evaluate with Seed-TTS testset

import sys, os
sys.path.append(os.getcwd())

import multiprocessing as mp
import numpy as np

from model.utils import (
    get_seed_tts_test,
    run_asr_wer,
    run_sim,
)
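# Helpers from model/utils: get_seed_tts_test shards the testset across GPU ranks;
# run_asr_wer and run_sim each take a single packed argument tuple, matching the
# pool.map calls below.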


eval_task = "wer"  # sim | wer
lang = "zh"        # zh | en
metalst = f"data/seedtts_testset/{lang}/meta.lst"  # seed-tts testset
# gen_wav_dir = f"data/seedtts_testset/{lang}/wavs"  # ground truth wavs
gen_wav_dir = "PATH_TO_GENERATED"  # generated wavs


# NOTE: paraformer-zh results can differ slightly with the number of GPUs,
#       since the batch size per worker changes; e.g. the zh WER of 1.254 appears to come from a 4-worker run
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
test_set = get_seed_tts_test(metalst, gen_wav_dir, gpus)
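# test_set: list of (rank, sub_test_set) shards, one per GPU (unpacked into the pool.map args below)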

local = False
if local:  # use local custom checkpoint dir
    if lang == "zh":
        asr_ckpt_dir = "../checkpoints/funasr" # paraformer-zh dir under funasr
    elif lang == "en":
        asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3"
else:
    asr_ckpt_dir = ""  # auto download to cache dir

wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth"
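# WavLM-large checkpoint from the UniSpeech release, consumed by run_sim for speaker-similarity scoring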


# --------------------------- WER ---------------------------

if eval_task == "wer":
    wers = []

    with mp.Pool(processes=len(gpus)) as pool:
        args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set]
        results = pool.map(run_asr_wer, args)
        for wers_ in results:
            wers.extend(wers_)

    wer = round(np.mean(wers) * 100, 3)
    print(f"\nTotal {len(wers)} samples")
    print(f"WER      : {wer}%")


# --------------------------- SIM ---------------------------

if eval_task == "sim":
    sim_list = []

    with mp.Pool(processes=len(gpus)) as pool:
        args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set]
        results = pool.map(run_sim, args)
        for sim_ in results:
            sim_list.extend(sim_)

    sim = round(sum(sim_list) / len(sim_list), 3)
    print(f"\nTotal {len(sim_list)} samples")
    print(f"SIM      : {sim}")