Update README.md
README.md
CHANGED
@@ -16,16 +16,11 @@ It is trained on 180k hours of public audio data for multilingual speech recognition
 
 This model is initialized with [OWSM-CTC v3.1](https://huggingface.co/pyf98/owsm_ctc_v3.1_1B) and then fine-tuned on [v3.2 data](https://arxiv.org/abs/2406.09282) for 225k steps.
 
-
-- PR in ESPnet: https://github.com/espnet/espnet/pull/5933
-- Code in my repo: https://github.com/pyf98/espnet/tree/owsm-ctc
-- Current model on HF: https://huggingface.co/pyf98/owsm_ctc_v3.2_ft_1B
-
-To use the pre-trained model, you need to install `espnet` and `espnet_model_zoo`. The requirements are:
+To use the pre-trained model, please install `espnet` and `espnet_model_zoo`. The requirements are:
 ```
 librosa
 torch
-espnet
+espnet
 espnet_model_zoo
 ```
 
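The four requirements above can be installed in one step; a minimal sketch assuming a standard pip environment (package names taken from the list above, versions unpinned):

```
pip install librosa torch espnet espnet_model_zoo
```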
@@ -34,100 +29,4 @@ We use FlashAttention during training, but we do not need it during inference. Please install it as follows:
 pip install flash-attn --no-build-isolation
 ```
 
-
-
-```python
-import soundfile as sf
-import numpy as np
-import librosa
-import kaldiio
-from espnet2.bin.s2t_inference_ctc import Speech2TextGreedySearch
-
-
-s2t = Speech2TextGreedySearch.from_pretrained(
-    "pyf98/owsm_ctc_v3.2_ft_1B",
-    device="cuda",
-    generate_interctc_outputs=False,
-    lang_sym='<eng>',
-    task_sym='<asr>',
-)
-
-speech, rate = sf.read(
-    "xxx.wav"
-)
-speech = librosa.util.fix_length(speech, size=(16000 * 30))
-
-res = s2t(speech)[0]
-print(res)
-```
-
-### Example script for long-form ASR/ST
-
-```python
-import soundfile as sf
-import torch
-from espnet2.bin.s2t_inference_ctc import Speech2TextGreedySearch
-
-
-if __name__ == "__main__":
-    context_len_in_secs = 4  # left and right context when doing buffered inference
-    batch_size = 32  # depends on the GPU memory
-    s2t = Speech2TextGreedySearch.from_pretrained(
-        "pyf98/owsm_ctc_v3.2_ft_1B",
-        device='cuda' if torch.cuda.is_available() else 'cpu',
-        generate_interctc_outputs=False,
-        lang_sym='<eng>',
-        task_sym='<asr>',
-    )
-
-    speech, rate = sf.read(
-        "xxx.wav"
-    )
-
-    text = s2t.decode_long_batched_buffered(
-        speech,
-        batch_size=batch_size,
-        context_len_in_secs=context_len_in_secs,
-        frames_per_sec=12.5,  # 80ms shift, model-dependent, don't change
-    )
-    print(text)
-```
-
-### Example for CTC forced alignment using `ctc-segmentation`
-
-It can be efficiently applied to audio of an arbitrary length.
-For model downloading, please refer to https://github.com/espnet/espnet?tab=readme-ov-file#ctc-segmentation-demo
-
-```python
-import soundfile as sf
-from espnet2.bin.s2t_ctc_align import CTCSegmentation
-
-
-if __name__ == "__main__":
-    ## Please download model first
-    aligner = CTCSegmentation(
-        s2t_model_file="exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/valid.total_count.ave_5best.till45epoch.pth",
-        fs=16000,
-        ngpu=1,
-        batch_size=16,  # batched parallel decoding; reduce it if your GPU memory is smaller
-        kaldi_style_text=True,
-        time_stamps="fixed",
-        samples_to_frames_ratio=1280,  # 80ms time shift; don't change as it depends on the pre-trained model
-        lang_sym="<eng>",
-        task_sym="<asr>",
-        context_len_in_secs=2,  # left and right context in buffered decoding
-        frames_per_sec=12.5,  # 80ms time shift; don't change as it depends on the pre-trained model
-    )
-
-    speech, rate = sf.read(
-        "example.wav"
-    )
-    print(f"speech duration: {len(speech) / rate : .2f} seconds")
-    text = '''
-utt1 hello there
-utt2 welcome to this repo
-'''
-
-    segments = aligner(speech, text)
-    print(segments)
-```
+
+**Example usage can be found in ESPnet:** https://github.com/espnet/espnet/tree/master/egs2/owsm_ctc_v3.1/s2t1