#!/usr/bin/env python3
from dataclasses import dataclass
from pathlib import Path
from typing import List

BASE_URL = "https://huggingface.co/csukuangfj/sherpa-onnx-apk/resolve/main/"


@dataclass
class APK:
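    """Fields parsed from an APK file name; instances are used only as sort keys."""
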
    major: int
    minor: int
    patch: int
    arch: str
    lang: str
    short_name: str

    def __init__(self, s):
        # Example inputs (paths produced by Path("vad-asr").glob):
        #   vad-asr/sherpa-onnx-1.9.23-arm64-v8a-vad_asr-en-whisper_tiny.apk
        #   vad-asr/sherpa-onnx-1.9.23-x86-vad_asr-en-whisper_tiny.apk
        s = str(s)[len("vad-asr/") :]  # drop the leading "vad-asr/" directory
        split = s.split("-")
        self.major, self.minor, self.patch = list(map(int, split[2].split(".")))
        self.arch = split[3]
        self.lang = split[5]
        self.short_name = split[6]
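        # The arm ABIs ("arm64-v8a", "armeabi-v7a") contain an extra hyphen,
        # so every field after the arch is shifted by one position.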
        if "arm" in s:
            self.arch += "-" + split[4]
            self.lang = split[6]
            self.short_name = split[7]

        if "armeabi" in self.arch:
            self.arch = "y" + self.arch

        if "arm64" in self.arch:
            self.arch = "z" + self.arch

        if "small" in self.short_name:
            self.short_name = "zzz" + self.short_name


def sort_by_apk(x):
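    """Sort key for an APK path: (version, arch, lang, model name).

    For example, the key for
    "vad-asr/sherpa-onnx-1.9.23-arm64-v8a-vad_asr-en-whisper_tiny.apk"
    is (1, 9, 23, "zarm64-v8a", "en", "whisper_tiny.apk").
    """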
    x = APK(x)
    return (x.major, x.minor, x.patch, x.arch, x.lang, x.short_name)


def generate_url(files: List[str]) -> List[str]:
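    """Prepend BASE_URL to each of the given relative file paths."""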
    ans = []
    base = BASE_URL
    for f in files:
        ans.append(base + str(f))
    return ans


def get_all_files(d: str, suffix: str) -> List[str]:
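    """Return download URLs for every file in `d` matching `suffix`, newest version first."""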
    ans = sorted(Path(d).glob(suffix), key=sort_by_apk, reverse=True)
    return list(map(lambda x: BASE_URL + str(x), ans))


def to_file(filename: str, files: List[str]):
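    """Write an HTML page: a fixed header and table, followed by one download link per file."""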
    content = r"""
<h1> APKs for VAD + non-streaming speech recognition </h1>
This page lists the <strong>VAD + non-streaming speech recognition</strong> APKs for <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a>,
one of the deployment frameworks of <a href="https://github.com/k2-fsa">the Next-gen Kaldi project</a>.
<br/>
The name of an APK follows this pattern:
<ul>
 <li> sherpa-onnx-{version}-{arch}-vad_asr-{lang}-{model}.apk
</ul>
where
<ul>
 <li> version: The sherpa-onnx version, e.g., 1.9.23
 <li> arch: The architecture targeted by the APK, e.g., arm64-v8a, armeabi-v7a, x86_64, x86
 <li> lang: The language of the model used in the APK, e.g., en for English, zh for Chinese
 <li> model: The name of the model used in the APK
</ul>
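For example, sherpa-onnx-1.9.23-arm64-v8a-vad_asr-en-whisper_tiny.apk is built from sherpa-onnx 1.9.23, targets arm64-v8a devices, and bundles an English whisper_tiny model.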

<br/>

You can download all supported models from
<a href="https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models">https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models</a>

<br/>
<br/>

<!--
see https://www.tablesgenerator.com/html_tables#
-->

<style type="text/css">
.tg  {border-collapse:collapse;border-spacing:0;}
.tg td{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
  overflow:hidden;padding:10px 5px;word-break:normal;}
.tg th{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
  font-weight:normal;overflow:hidden;padding:10px 5px;word-break:normal;}
.tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:top}
.tg .tg-0lax{text-align:left;vertical-align:top}
</style>
<table class="tg">
<thead>
  <tr>
    <th class="tg-0pky">APK</th>
    <th class="tg-0lax">Comment</th>
    <th class="tg-0pky">VAD model</th>
    <th class="tg-0pky">Non-streaming ASR model</th>
  </tr>
</thead>
<tbody>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-zh-telespeech.apk</td>
    <td class="tg-0lax">支持非常多种中文方言. It is converted from <a href="https://github.com/Tele-AI/TeleSpeech-ASR">https://github.com/Tele-AI/TeleSpeech-ASR</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2">sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2</a></td>
  </tr>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-be_de_en_es_fr_hr_it_pl_ru_uk-fast_conformer_ctc_20k.apk</td>
    <td class="tg-0lax">It supports <span style="color:red;">10 languages</span>: Belarusian, German, English, Spanish, French, Croatian, Italian, Polish, Russian, and Ukrainian. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_multilingual_fastconformer_hybrid_large_pc">STT Multilingual FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on ~20000 hours of data.</td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2</a></td>
  </tr>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-en_des_es_fr-fast_conformer_ctc_14288.apk</td>
    <td class="tg-0lax">It supports <span style="color:red;">4 languages</span>:  German, English, Spanish, and French . It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_multilingual_fastconformer_hybrid_large_pc_blend_eu">STT European FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 14288 hours of data.</td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-en-de-es-fr-14288.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-en-de-es-fr-14288.tar.bz2</a></td>
  </tr>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-es-fast_conformer_ctc_1424.apk</td>
    <td class="tg-0lax">It supports only Spanish. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_es_fastconformer_hybrid_large_pc">STT Es FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 1424 hours of data.</td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-es-1424.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-es-1424.tar.bz2</a></td>
  </tr>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-en-fast_conformer_ctc_24500.apk</td>
    <td class="tg-0lax">It supports only English. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_fastconformer_hybrid_large_pc">STT En FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 8500 hours of data.</td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-en-24500.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-en-24500.tar.bz2</a></td>
  </tr>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-zh-zipformer.apk</td>
    <td class="tg-0lax">It supports only Chinese.</td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/icefall-asr-zipformer-wenetspeech-20230615.tar.bz2">icefall-asr-zipformer-wenetspeech-20230615</a></td>
  </tr>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-zh-paraformer.apk</td>
    <td class="tg-0lax"><span style="font-weight:400;font-style:normal">It supports both Chinese and English.</span></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2">sherpa-onnx-paraformer-zh-2023-03-28</a></td>
  </tr>
  <tr>
    <td class="tg-0pky">sherpa-onnx-x.y.z-arm64-v8a-vad_asr-en-whisper_tiny.apk</td>
    <td class="tg-0lax">It supports only English.</td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
    <td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2">sherpa-onnx-whisper-tiny.en</a></td>
  </tr>
</tbody>
</table>

<br/>
<br/>

<div/>
    """
    if "-cn" not in filename:
        content += """
        For Chinese users, please <a href="./apk-vad-asr-cn.html">visit this page</a>,
        which replaces <a href="https://huggingface.co">huggingface.co</a> with <a href="https://hf-mirror.com">hf-mirror.com</a>.
        <br/>
        <br/>
        中国用户, 请访问<a href="./apk-vad-asr-cn.html">这个地址</a>
        <br/>
        <br/>
        """

    with open(filename, "w") as f:
        print(content, file=f)
        for x in files:
            name = x.rsplit("/", maxsplit=1)[-1]
            print(f'<a href="{x}">{name}</a><br/>', file=f)


def main():
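    """Generate apk-vad-asr.html and an hf-mirror.com variant for users in China."""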
    apk = get_all_files("vad-asr", suffix="*.apk")
    to_file("./apk-vad-asr.html", apk)

    # for Chinese users
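    # The mirror pages point at hf-mirror.com instead of huggingface.co.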
    apk2 = []
    for a in apk:
        a = a.replace("huggingface.co", "hf-mirror.com")
        a = a.replace("resolve", "blob")
        apk2.append(a)

    to_file("./apk-vad-asr-cn.html", apk2)


if __name__ == "__main__":
    main()