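"""Streamlit app for the 🤗 Speech Bench: a leaderboard of speech recognition
models on the Hugging Face Hub, ranked per language and dataset by WER
(or CER for some languages).

Run locally with `streamlit run app.py` (the filename here is assumed).
"""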
import requests
import json
import pandas as pd
from tqdm.auto import tqdm
import streamlit as st
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load

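# Languages for which CER (character error rate) is reported instead of WER.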
cer_langs = ["ja", "zh-CN", "zh-HK", "zh-TW"]
with open("languages.json") as f:
    lang2name = json.load(f)
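# Datasets to pin to the top of the sidebar dataset list.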
suggested_datasets = [
    "librispeech_asr",
    "mozilla-foundation/common_voice_8_0",
    "mozilla-foundation/common_voice_7_0",
    "speech-recognition-community-v2/eval_data",
]


def make_clickable(model_name):
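    """Render a model id as an HTML link to its Hugging Face model page."""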
    link = "https://huggingface.co/" + model_name
    return f'<a target="_blank" href="{link}">{model_name}</a>'


def get_model_ids():
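    """List the ids of all Hub models tagged `hf-asr-leaderboard`."""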
    api = HfApi()
    models = api.list_models(filter="hf-asr-leaderboard")
    model_ids = [x.modelId for x in models]
    return model_ids


def get_metadata(model_id):
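    """Download a model's README.md and load its YAML metadata; return None if the card is missing (404)."""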
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        return metadata_load(readme_path)
    except requests.exceptions.HTTPError:
        # 404 README.md not found
        return None


def parse_metric_value(value):
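    """Normalize a raw metric value from model-index metadata to a rounded float.

    Percent strings like "12.3%" are parsed, fractions below 1.1 are scaled
    to percentages, and for lists the first element is used; returns None
    when the value cannot be parsed.
    """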
    if isinstance(value, str):
        value = value.replace("%", "")  # strip a trailing percent sign, e.g. "12.3%"
        try:
            value = float(value)
        except ValueError:
            value = None
    elif isinstance(value, float) and value < 1.1:
        # assuming that WER is given in 0.xx format
        value = 100 * value
    elif isinstance(value, list):
        if len(value) > 0:
            value = value[0]
        else:
            value = None
    value = round(value, 2) if value is not None else None
    return value


def parse_metrics_rows(meta):
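    """Yield one {dataset, lang, wer/cer} row per evaluation result in the metadata."""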
    if "model-index" not in meta or "language" not in meta:
        return None
    lang = meta["language"]
    lang = lang[0] if isinstance(lang, list) else lang
    for result in meta["model-index"][0]["results"]:
        if "dataset" not in result or "metrics" not in result:
            continue
        dataset = result["dataset"]["type"]
        if "args" not in result["dataset"]:
            continue
        dataset_config = result["dataset"]["args"]
        row = {"dataset": dataset, "lang": lang}
        for metric in result["metrics"]:
            metric_type = metric["type"].lower().strip()
            if metric_type not in ["wer", "cer"]:
                continue
            value = parse_metric_value(metric["value"])
            if value is None:
                continue
            if metric_type not in row or value < row[metric_type]:
                # overwrite the metric if the new value is lower (e.g. with LM)
                row[metric_type] = value
        if "wer" in row or "cer" in row:
            yield row


@st.cache(ttl=600)
def get_data():
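    """Gather metric rows from every leaderboard model into a DataFrame (cached for 10 minutes)."""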
    data = []
    model_ids = get_model_ids()
    for model_id in tqdm(model_ids):
        meta = get_metadata(model_id)
        if meta is None:
            continue
        for row in parse_metrics_rows(meta):
            if row is None:
                continue
            row["model_id"] = model_id
            data.append(row)
    return pd.DataFrame.from_records(data)


def sort_datasets(datasets):
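    """Sort dataset ids alphabetically, then pin the suggested datasets to the top."""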
    # 1. sort by name
    datasets = sorted(datasets)
    # 2. bring the suggested datasets to the top and append the rest
    datasets = sorted(
        datasets,
        key=lambda dataset_id: suggested_datasets.index(dataset_id)
        if dataset_id in suggested_datasets
        else len(suggested_datasets),
    )
    return datasets


@st.cache(ttl=600)
def generate_dataset_info(datasets):
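    """Build the sidebar markdown listing every dataset available for the selected language."""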
    msg = """
    The models have been trained and/or evaluated on the following datasets:
    """
    for dataset_id in datasets:
        if dataset_id in suggested_datasets:
            msg += f"* [{dataset_id}](https://hf.co/datasets/{dataset_id}) *(recommended)*\n"
        else:
            msg += f"* [{dataset_id}](https://hf.co/datasets/{dataset_id})\n"
    msg += """
    Choose the dataset that is most relevant to your task and select it from the dropdown below.
    """

    msg = "\n".join([line.strip() for line in msg.split("\n")])
    return msg


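# Fetch the leaderboard data and build the Streamlit UI.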
dataframe = get_data()
dataframe = dataframe.fillna("")

st.sidebar.image("logo.png", width=200)

st.markdown("# The 🤗 Speech Bench")

st.markdown(
    "This is a leaderboard of all speech recognition models and datasets on the Hugging Face Hub.\n\n"
    "⬅ Please select the language you want to find a model for from the dropdown on the left."
)

lang = st.sidebar.selectbox(
    "Language",
    sorted(dataframe["lang"].unique(), key=lambda key: lang2name.get(key, key)),
    format_func=lambda key: lang2name.get(key, key),
    index=0,
)
lang_df = dataframe[dataframe.lang == lang]

sorted_datasets = sort_datasets(lang_df["dataset"].unique())

text = generate_dataset_info(sorted_datasets)
st.sidebar.markdown(text)

lang_name = lang2name.get(lang, "")
num_models = len(lang_df["model_id"].unique())
num_datasets = len(lang_df["dataset"].unique())
text = f"""
For the `{lang}` ({lang_name}) language, there are currently `{num_models}` model(s) 
trained on `{num_datasets}` dataset(s) available for `automatic-speech-recognition`.
"""
st.markdown(text)

dataset = st.sidebar.selectbox(
    "Dataset",
    sorted_datasets,
    index=0,
)
dataset_df = lang_df[lang_df.dataset == dataset]

# sort by WER or CER depending on the language
if lang in cer_langs:
    dataset_df = dataset_df[["model_id", "cer"]].sort_values("cer")
else:
    dataset_df = dataset_df[["model_id", "wer"]].sort_values("wer")
dataset_df.rename(
    columns={
        "model_id": "Model",
        "wer": "WER (lower is better)",
        "cer": "CER (lower is better)",
    },
    inplace=True,
)

st.markdown(
    "Please click on a model's name to be redirected to its model card, which includes documentation and examples of how to use it."
)

# display the model ranks
dataset_df = dataset_df.reset_index(drop=True)
dataset_df.index += 1

# turn the model ids into clickable links
dataset_df["Model"] = dataset_df["Model"].apply(make_clickable)

table_html = dataset_df.to_html(escape=False)
table_html = table_html.replace("<th>", '<th align="left">')  # left-align the headers
st.write(table_html, unsafe_allow_html=True)

if lang in cer_langs:
    st.markdown(
        "---\n\\* **CER** is [Char Error Rate](https://huggingface.co/metrics/cer)"
    )
else:
    st.markdown(
        "---\n\\* **WER** is [Word Error Rate](https://huggingface.co/metrics/wer)"
    )