import gradio as gr
import numpy as np
import spaces
import torch
import torchaudio
from funasr import AutoModel

model_name = "FunAudioLLM/SenseVoiceSmall"

# SenseVoice with an FSMN VAD front end: long recordings are split into
# segments of at most 30 s before recognition. device="cuda" assumes a
# GPU runtime.
model = AutoModel(
    model=model_name,
    vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    vad_kwargs={"max_single_segment_time": 30000},
    hub="hf",
    device="cuda",
)

emo_dict = {
    "<|HAPPY|>": "😊",
    "<|SAD|>": "😔",
    "<|ANGRY|>": "😡",
    "<|NEUTRAL|>": "",
    "<|FEARFUL|>": "😰",
    "<|DISGUSTED|>": "🤢",
    "<|SURPRISED|>": "😮",
}

event_dict = {
    "<|BGM|>": "🎼",
    "<|Speech|>": "",
    "<|Applause|>": "👏",
    "<|Laughter|>": "😀",
    "<|Cry|>": "😭",
    "<|Sneeze|>": "🤧",
    "<|Breath|>": "",
    "<|Cough|>": "🤧",
}

emoji_dict = {
    "<|nospeech|><|Event_UNK|>": "❓",
    "<|zh|>": "",
    "<|en|>": "",
    "<|yue|>": "",
    "<|ja|>": "",
    "<|ko|>": "",
    "<|nospeech|>": "",
    "<|HAPPY|>": "😊",
    "<|SAD|>": "😔",
    "<|ANGRY|>": "😡",
    "<|NEUTRAL|>": "",
    "<|BGM|>": "🎼",
    "<|Speech|>": "",
    "<|Applause|>": "👏",
    "<|Laughter|>": "😀",
    "<|FEARFUL|>": "😰",
    "<|DISGUSTED|>": "🤢",
    "<|SURPRISED|>": "😮",
    "<|Cry|>": "😭",
    "<|EMO_UNKNOWN|>": "",
    "<|Sneeze|>": "🤧",
    "<|Breath|>": "",
    "<|Cough|>": "😷",
    "<|Sing|>": "",
    "<|Speech_Noise|>": "",
    "<|withitn|>": "",
    "<|woitn|>": "",
    "<|GBG|>": "",
    "<|Event_UNK|>": "",
}

lang_dict = {
    "<|zh|>": "<|lang|>",
    "<|en|>": "<|lang|>",
    "<|yue|>": "<|lang|>",
    "<|ja|>": "<|lang|>",
    "<|ko|>": "<|lang|>",
    "<|nospeech|>": "<|lang|>",
}

emo_set = {"😊", "😔", "😡", "😰", "🤢", "😮"}
event_set = {"🎼", "👏", "😀", "😭", "🤧", "😷"}

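# SenseVoice interleaves rich-transcription tags with the text it
# recognizes: language (<|zh|>, <|en|>, ...), emotion (<|HAPPY|>, ...) and
# audio events (<|BGM|>, <|Laughter|>, ...). The dictionaries above map
# each tag to an emoji (or to nothing), and emo_set / event_set let the
# formatters below recognize those emoji after substitution.
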
def format_str(s):
    # Replace every special token with its emoji (or drop it).
    for sptk in emoji_dict:
        s = s.replace(sptk, emoji_dict[sptk])
    return s


def format_str_v2(s):
    # Count each special token, then strip them all from the text.
    sptk_dict = {}
    for sptk in emoji_dict:
        sptk_dict[sptk] = s.count(sptk)
        s = s.replace(sptk, "")

    # Keep only the dominant emotion; prefix every detected event.
    emo = "<|NEUTRAL|>"
    for e in emo_dict:
        if sptk_dict[e] > sptk_dict[emo]:
            emo = e
    for e in event_dict:
        if sptk_dict[e] > 0:
            s = event_dict[e] + s
    s = s + emo_dict[emo]

    # Remove stray spaces around emoji.
    for emoji in emo_set.union(event_set):
        s = s.replace(" " + emoji, emoji)
        s = s.replace(emoji + " ", emoji)
    return s.strip()


def format_str_v3(s):
    def get_emo(s):
        return s[-1] if s and s[-1] in emo_set else None

    def get_event(s):
        return s[0] if s and s[0] in event_set else None

    s = s.replace("<|nospeech|><|Event_UNK|>", "❓")
    for lang in lang_dict:
        s = s.replace(lang, "<|lang|>")
    s_list = [format_str_v2(s_i).strip(" ") for s_i in s.split("<|lang|>")]
    new_s = " " + s_list[0]
    cur_ent_event = get_event(new_s)
    for i in range(1, len(s_list)):
        if len(s_list[i]) == 0:
            continue
        # Drop a leading event emoji that repeats the previous segment's.
        if get_event(s_list[i]) == cur_ent_event and get_event(s_list[i]) is not None:
            s_list[i] = s_list[i][1:]
        cur_ent_event = get_event(s_list[i])
        # Drop a trailing emotion emoji if the next segment ends the same way.
        if get_emo(s_list[i]) is not None and get_emo(s_list[i]) == get_emo(new_s):
            new_s = new_s[:-1]
        new_s += s_list[i].strip()
    # Strip a spurious "The." artifact the model occasionally emits.
    new_s = new_s.replace("The.", " ")
    return new_s.strip()

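# Quick sanity check of the tag-to-emoji pipeline (hand-written tags for
# illustration; real model output interleaves them with transcription):
#   format_str_v3("<|en|><|HAPPY|><|Speech|>hello world")  ->  "hello world😊"
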
@spaces.GPU
def model_inference(input_wav, language, fs=16000):
    # Map UI labels to the language tags SenseVoice expects.
    language_abbr = {
        "auto": "auto",
        "zh": "zh",
        "en": "en",
        "yue": "yue",
        "ja": "ja",
        "ko": "ko",
        "nospeech": "nospeech",
    }

    language = language if language else "auto"
    selected_language = language_abbr[language]

    if input_wav is None:
        return "Please record or upload some audio first."

    # gr.Audio delivers (sample_rate, int16 samples); convert to mono
    # float32 in [-1, 1] and resample to the model's 16 kHz if needed.
    if isinstance(input_wav, tuple):
        fs, input_wav = input_wav
        input_wav = input_wav.astype(np.float32) / np.iinfo(np.int16).max
        if input_wav.ndim > 1:
            input_wav = input_wav.mean(-1)
        if fs != 16000:
            print(f"audio_fs: {fs}")
            resampler = torchaudio.transforms.Resample(fs, 16000)
            input_wav_t = torch.from_numpy(input_wav).to(torch.float32)
            input_wav = resampler(input_wav_t[None, :])[0, :].numpy()

    merge_vad = True
    print(f"language: {selected_language}, merge_vad: {merge_vad}")
    text = model.generate(
        input=input_wav,
        cache={},
        language=selected_language,
        use_itn=True,
        batch_size_s=500,
        merge_vad=merge_vad,
    )

    print(text)
    text = text[0]["text"]
    text = format_str_v3(text)
    print(text)

    return text

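# model_inference also accepts the raw (sample_rate, samples) tuple that
# gr.Audio produces, so it can be exercised without the UI. A minimal
# smoke test with synthetic silence (values are illustrative only):
#   sr = 48000
#   wav = np.zeros(sr, dtype=np.int16)  # one second of silence
#   print(model_inference((sr, wav), "auto"))
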
audio_examples = [
    ["example/zh.mp3", "zh"],
    ["example/yue.mp3", "yue"],
    ["example/en.mp3", "en"],
    ["example/ja.mp3", "ja"],
    ["example/ko.mp3", "ko"],
    ["example/emo_1.wav", "auto"],
    ["example/emo_2.wav", "auto"],
    ["example/emo_3.wav", "auto"],
    ["example/rich_1.wav", "auto"],
    ["example/rich_2.wav", "auto"],
    ["example/longwav_1.wav", "auto"],
    ["example/longwav_2.wav", "auto"],
    ["example/longwav_3.wav", "auto"],
]

def launch():
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        with gr.Row():
            with gr.Column():
                audio_inputs = gr.Audio(label="Upload audio or use the microphone")

                with gr.Accordion("Configuration"):
                    language_inputs = gr.Dropdown(
                        choices=["auto", "zh", "en", "yue", "ja", "ko", "nospeech"],
                        value="auto",
                        label="Language",
                    )
                fn_button = gr.Button("Start", variant="primary")
                text_outputs = gr.Textbox(label="Results")
                gr.Examples(
                    examples=audio_examples,
                    inputs=[audio_inputs, language_inputs],
                    examples_per_page=20,
                )

        fn_button.click(
            model_inference,
            inputs=[audio_inputs, language_inputs],
            outputs=text_outputs,
        )

    demo.launch()


if __name__ == "__main__":
    launch()