import wave
from functools import lru_cache
from typing import Tuple

import numpy as np
import sherpa_onnx
from huggingface_hub import hf_hub_download
from iso639 import Lang

print(dir(sherpa_onnx))
print(sherpa_onnx.__version__)

sample_rate = 16000

def read_wave(wave_filename: str) -> Tuple[np.ndarray, int]:
    """
    Args:
      wave_filename:
        Path to a wave file. It should be single channel and each sample
        should be 16-bit. Its sample rate does not need to be 16 kHz.
    Returns:
      Return a tuple containing:
       - A 1-D array of dtype np.float32 containing the samples, which are
         normalized to the range [-1, 1].
       - Sample rate of the wave file.
    """
    with wave.open(wave_filename) as f:
        assert f.getnchannels() == 1, f.getnchannels()
        assert f.getsampwidth() == 2, f.getsampwidth()
        num_samples = f.getnframes()
        samples = f.readframes(num_samples)
        samples_int16 = np.frombuffer(samples, dtype=np.int16)
        samples_float32 = samples_int16.astype(np.float32)

        samples_float32 = samples_float32 / 32768
        return samples_float32, f.getframerate()

def decode(
    slid: sherpa_onnx.SpokenLanguageIdentification,
    filename: str,
) -> str:
    s = slid.create_stream()
    samples, sample_rate = read_wave(filename)
    s.accept_waveform(sample_rate, samples)
    lang = slid.compute(s)
    if lang == "":
        return "Unknown"

    try:
        # Map the ISO 639 code returned by the model, e.g. "en",
        # to a human-readable name, e.g. "English".
        return Lang(lang).name
    except Exception:
        # Fall back to the raw code if it is not a valid ISO 639 code.
        return lang

def _get_nn_model_filename(
    repo_id: str,
    filename: str,
    subfolder: str = ".",
) -> str:
    """Download a model file from the given Hugging Face repo and return its local path."""
    nn_model_filename = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
    return nn_model_filename

@lru_cache(maxsize=8)
def get_pretrained_model(name: str) -> sherpa_onnx.SpokenLanguageIdentification:
    assert name in (
        "tiny",
        "base",
        "small",
        "medium",
    ), name
    full_repo_id = "csukuangfj/sherpa-onnx-whisper-" + name

    # Download the int8-quantized Whisper encoder and decoder for the
    # requested model size.
    encoder = _get_nn_model_filename(
        repo_id=full_repo_id,
        filename=f"{name}-encoder.int8.onnx",
    )

    decoder = _get_nn_model_filename(
        repo_id=full_repo_id,
        filename=f"{name}-decoder.int8.onnx",
    )

    config = sherpa_onnx.SpokenLanguageIdentificationConfig(
        whisper=sherpa_onnx.SpokenLanguageIdentificationWhisperConfig(
            encoder=encoder,
            decoder=decoder,
        ),
        num_threads=2,
        debug=1,
        provider="cpu",
    )

    return sherpa_onnx.SpokenLanguageIdentification(config)

# Map each supported Whisper size to the (cached) factory defined above.
whisper_models = {
    "tiny": get_pretrained_model,
    "base": get_pretrained_model,
    "small": get_pretrained_model,
    "medium": get_pretrained_model,
}
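
# A minimal usage sketch, not part of the original module: "test.wav" is a
# hypothetical single-channel, 16-bit WAV file, and the first call will
# download the selected Whisper model from Hugging Face.
if __name__ == "__main__":
    slid = whisper_models["base"]("base")  # equivalent to get_pretrained_model("base")
    print(decode(slid, "test.wav"))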