csukuangfj
committed on
Commit
•
91a6485
1
Parent(s):
22ffb52
generate htmls
Browse files- .gitignore +1 -0
- generate-vad-asr.py +257 -0
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
*.html
|
generate-vad-asr.py
ADDED
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
import os
|
3 |
+
import re
|
4 |
+
from pathlib import Path
|
5 |
+
from typing import List
|
6 |
+
|
7 |
+
BASE_URL = "https://huggingface.co/csukuangfj/sherpa-onnx-harmony-os/resolve/main/"
|
8 |
+
|
9 |
+
from dataclasses import dataclass
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
class HAP:
    """Metadata parsed from a HAP filename.

    Example input: sherpa-onnx-1.10.32-vad_asr-ru-zipformer.hap
    """

    major: int
    minor: int
    patch: int
    short_name: str
    lang: str

    def __init__(self, s):
        # Keep only the basename, e.g. "a/b/c.hap" -> "c.hap",
        # then split on "-" to get the individual fields.
        parts = str(s).split("/")[-1].split("-")
        version = parts[2].split(".")
        self.major = int(version[0])
        self.minor = int(version[1])
        self.patch = int(version[2])
        self.lang = parts[4]
        name = parts[5]
        # Prefix "zzz" so that "small" model variants sort after the
        # regular ones when used as a sort key.
        self.short_name = "zzz" + name if "small" in name else name
|
30 |
+
|
31 |
+
|
32 |
+
def sort_by_hap(x):
    """Sort key for HAP paths: (version triple, language, model short name)."""
    h = HAP(x)
    return h.major, h.minor, h.patch, h.lang, h.short_name
|
35 |
+
|
36 |
+
|
37 |
+
def get_all_files(d_list: List[str], suffix: str) -> List[str]:
    """Collect download URLs for all matching files under the given directories.

    Args:
      d_list: A directory, or a list of directories, searched recursively.
      suffix: Keep only files ending with this suffix, e.g. ".hap".

    Returns:
      Download URLs (BASE_URL + relative path), sorted by version,
      language, and model name in descending order.
    """
    if isinstance(d_list, str):
        d_list = [d_list]

    # Skip releases older than this version.
    min_version = (1, 9, 10)

    ss = []
    for d in d_list:
        for root, _, files in os.walk(d):
            for f in files:
                if not f.endswith(suffix):
                    continue
                # Filename looks like sherpa-onnx-<major>.<minor>.<patch>-...
                version = tuple(map(int, f.split("-")[2].split(".")))
                # Compare the full version triple lexicographically.
                # The previous per-component check (major >= 1 and
                # minor >= 9 and patch >= 10) wrongly rejected valid
                # newer versions such as 1.10.5 or 2.0.0.
                if version >= min_version:
                    ss.append(os.path.join(root, f))

    ans = sorted(ss, key=sort_by_hap, reverse=True)

    return [BASE_URL + str(x) for x in ans]
|
56 |
+
|
57 |
+
|
58 |
+
def to_file(filename: str, files: List[str]):
    """Write an HTML page listing the given HAP download URLs.

    Args:
      filename: Output HTML path. If it does not contain "-cn", a note
        pointing Chinese users at the mirror page is appended.
      files: Full download URLs; one link per URL is written after the
        static description/table content.
    """
    content = r"""
<h1> HAPs for VAD + non-streaming speech recognition (HarmonyOS) </h1>
This page lists the <strong>VAD + non-streaming speech recognition</strong> HAPs for <a href="http://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a>,
one of the deployment frameworks of <a href="https://github.com/k2-fsa">the Next-gen Kaldi project</a>.
<br/>
The name of an HAP has the following rule:
<ul>
<li> sherpa-onnx-{version}-vad_asr-{lang}-{model}.hap
</ul>
where
<ul>
<li> version: It specifies the current version, e.g., 1.9.23
<li> lang: The lang of the model used in the HAP, e.g., en for English, zh for Chinese
<li> model: The name of the model used in the HAP
</ul>

<br/>

You can download all supported models from
<a href="https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models">https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models</a>

<br/>
<br/>

<strong>Note about the license</strong> The code of Next-gen Kaldi is using
<a href="https://www.apache.org/licenses/LICENSE-2.0">Apache-2.0 license</a>. However,
we support models from different frameworks. Please check the license of your selected model.

<br/>
<br/>

<!--
see https://www.tablesgenerator.com/html_tables#
-->

<style type="text/css">
.tg {border-collapse:collapse;border-spacing:0;}
.tg td{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
overflow:hidden;padding:10px 5px;word-break:normal;}
.tg th{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
font-weight:normal;overflow:hidden;padding:10px 5px;word-break:normal;}
.tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:top}
.tg .tg-0lax{text-align:left;vertical-align:top}
</style>
<table class="tg">
<thead>
<tr>
<th class="tg-0pky">HAP</th>
<th class="tg-0lax">Comment</th>
<th class="tg-0pky">VAD model</th>
<th class="tg-0pky">Non-streaming ASR model</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-ja-zipformer_reazonspeech.hap</td>
<td class="tg-0lax">It supports only Japanese. It is from <a href="https://github.com/reazon-research/ReazonSpeech">https://github.com/reazon-research/ReazonSpeech</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01.tar.bz2">sherpa-onnx-zipformer-ja-reazonspeech-2024-08-01.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-zh_en_ko_ja_yue-sense_voice.hap</td>
<td class="tg-0lax">It supports Chinese, Cantonese, English, Korean, and Japanese (中、英、粤、日、韩5种语音). It is converted from <a href="https://github.com/FunAudioLLM/SenseVoice">https://github.com/FunAudioLLM/SenseVoice</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2">sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-zh-telespeech.hap</td>
<td class="tg-0lax">支持非常多种中文方言. It is converted from <a href="https://github.com/Tele-AI/TeleSpeech-ASR">https://github.com/Tele-AI/TeleSpeech-ASR</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2">sherpa-onnx-telespeech-ctc-int8-zh-2024-06-04.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-th-zipformer.hap</td>
<td class="tg-0lax">It supports only Thai. It is converted from <a href="https://huggingface.co/yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20/tree/main">https://huggingface.co/yfyeung/icefall-asr-gigaspeech2-th-zipformer-2024-06-20/tree/main</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-thai-2024-06-20.tar.bz2">sherpa-onnx-zipformer-thai-2024-06-20.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-ko-zipformer.hap</td>
<td class="tg-0lax">It supports only Korean. It is converted from <a href="https://huggingface.co/johnBamma/icefall-asr-ksponspeech-zipformer-2024-06-24">https://huggingface.co/johnBamma/icefall-asr-ksponspeech-zipformer-2024-06-24</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-korean-2024-06-24.tar.bz2">sherpa-onnx-zipformer-korean-2024-06-24.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-be_de_en_es_fr_hr_it_pl_ru_uk-fast_conformer_ctc_20k.hap</td>
<td class="tg-0lax">It supports <span style="color:red;">10 languages</span>: Belarusian, German, English, Spanish, French, Croatian, Italian, Polish, Russian, and Ukrainian. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_multilingual_fastconformer_hybrid_large_pc">STT Multilingual FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on ~20000 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-en_des_es_fr-fast_conformer_ctc_14288.hap</td>
<td class="tg-0lax">It supports <span style="color:red;">4 languages</span>: German, English, Spanish, and French . It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_multilingual_fastconformer_hybrid_large_pc_blend_eu">STT European FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 14288 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-en-de-es-fr-14288.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-en-de-es-fr-14288.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-es-fast_conformer_ctc_1424.hap</td>
<td class="tg-0lax">It supports only Spanish. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_es_fastconformer_hybrid_large_pc">STT Es FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 1424 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-es-1424.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-es-1424.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-en-fast_conformer_ctc_24500.hap</td>
<td class="tg-0lax">It supports only English. It is converted from <a href="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_fastconformer_hybrid_large_pc">STT En FastConformer Hybrid Transducer-CTC Large P&C</a> from <a href="https://github.com/NVIDIA/NeMo/">NVIDIA/NeMo</a>. Note that only the CTC branch is used. It is trained on 8500 hours of data.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-transducer-en-24500.tar.bz2">sherpa-onnx-nemo-fast-conformer-transducer-en-24500.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-zh-zipformer.hap</td>
<td class="tg-0lax">It supports only Chinese.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/icefall-asr-zipformer-wenetspeech-20230615.tar.bz2">icefall-asr-zipformer-wenetspeech-20230615</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-zh-paraformer.hap</td>
<td class="tg-0lax"><span style="font-weight:400;font-style:normal">It supports both Chinese and English.</span></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2">sherpa-onnx-paraformer-zh-2023-03-28</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-en-whisper_tiny.hap</td>
<td class="tg-0lax">It supports only English.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2">sherpa-onnx-whisper-tiny.en</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-en-moonshine_tiny_int8.hap</td>
<td class="tg-0lax">It supports only English.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-moonshine-tiny-en-int8.tar.bz2">sherpa-onnx-moonshine-tiny-en-int8</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-ru-nemo_transducer_giga_am.hap</td>
<td class="tg-0lax">It supports only Russian.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24.tar.bz2">sherpa-onnx-nemo-transducer-giga-am-russian-2024-10-24.tar.bz2</a> <br/>Please see also <a href="https://github.com/salute-developers/GigaAM">https://github.com/salute-developers/GigaAM</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-ru-nemo_ctc_giga_am.hap</td>
<td class="tg-0lax">It supports only Russian.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-ctc-giga-am-russian-2024-10-24.tar.bz2">sherpa-onnx-nemo-ctc-giga-am-russian-2024-10-24.tar.bz2</a> <br/>Please see also <a href="https://github.com/salute-developers/GigaAM">https://github.com/salute-developers/GigaAM</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-ru-small_zipformer.hap</td>
<td class="tg-0lax">It supports only Russian.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-small-zipformer-ru-2024-09-18.tar.bz2">sherpa-onnx-small-zipformer-ru-2024-09-18.tar.bz2</a></td>
</tr>
<tr>
<td class="tg-0pky">sherpa-onnx-x.y.z-vad_asr-ru-zipformer.hap</td>
<td class="tg-0lax">It supports only Russian.</td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx">silero_vad.onnx</a></td>
<td class="tg-0pky"><a href="https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-ru-2024-09-18.tar.bz2">sherpa-onnx-zipformer-ru-2024-09-18.tar.bz2</a></td>
</tr>
</tbody>
</table>

<br/>
<br/>

<div/>
"""
    # Only the non-mirror page carries a pointer to the Chinese mirror page.
    if "-cn" not in filename:
        content += """
For Chinese users, please <a href="./hap-vad-asr-cn.html">visit this address</a>,
which replaces <a href="huggingface.co">huggingface.co</a> with <a href="hf-mirror.com">hf-mirror.com</a>
<br/>
<br/>
中国用户, 请访问<a href="./hap-vad-asr-cn.html">这个地址</a>
<br/>
<br/>
"""

    with open(filename, "w") as f:
        print(content, file=f)
        for x in files:
            name = x.rsplit("/", maxsplit=1)[-1]
            # Fixed: the previous '<a href="{x}" />{name}<br/>' used a
            # self-closing anchor, leaving the file name outside the link
            # (not clickable). Also fixed upstream in the static table: a
            # stray newline inside the moonshine download URL and mojibake
            # (replacement characters) in the TeleSpeech comment.
            print(f'<a href="{x}">{name}</a><br/>', file=f)
|
240 |
+
|
241 |
+
|
242 |
+
def main():
    """Generate the HAP download pages (primary and China-mirror variants)."""
    haps = get_all_files("hap", suffix=".hap")
    to_file("./hap-vad-asr.html", haps)

    # For users in China: rewrite the links to use the hf-mirror.com mirror.
    mirrored = [
        a.replace("huggingface.co", "hf-mirror.com").replace("resolve", "blob")
        for a in haps
    ]
    to_file("./hap-vad-asr-cn.html", mirrored)
|
254 |
+
|
255 |
+
|
256 |
+
# Entry point when the file is executed as a script.
if __name__ == "__main__":
    main()
|