#!/usr/local/bin/python3
#-*- coding:utf-8 -*-
import gradio as gr
import librosa
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
checkpoint = "openai/whisper-base"
processor = WhisperProcessor.from_pretrained(checkpoint)  # load the processor (feature extractor + tokenizer)
model = WhisperForConditionalGeneration.from_pretrained(checkpoint)  # load the model
def process_audio(sampling_rate, waveform):
    # convert from int16 to floating point
    waveform = waveform / 32768.0  # 2^15
    # convert to mono if stereo
    if len(waveform.shape) > 1:
        waveform = librosa.to_mono(waveform.T)
    # resample to 16 kHz if necessary
    if sampling_rate != 16000:
        waveform = librosa.resample(waveform, orig_sr=sampling_rate, target_sr=16000)
    # limit to 30 seconds (Whisper's input window)
    waveform = waveform[:16000*30]
    # make PyTorch tensor
    waveform = torch.tensor(waveform)
    return waveform
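
# A minimal sanity check for process_audio, wrapped in a function so nothing
# extra runs at import time; the synthetic int16 stereo buffer below is
# made-up test data, not part of the original app.
def _demo_process_audio():
    import numpy as np
    sr = 44100
    stereo = (np.random.randn(sr * 2, 2) * 1000).astype(np.int16)  # 2 s of stereo noise
    out = process_audio(sr, stereo)
    assert out.ndim == 1          # mono after librosa.to_mono
    assert out.shape[0] == 32000  # 2 s resampled to 16 kHz
    return out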
def predict(language, audio, mic_audio=None):
    # prefer the microphone recording when both inputs are present
    if mic_audio is not None:
        sampling_rate, waveform = mic_audio
    elif audio is not None:
        sampling_rate, waveform = audio
    else:
        return "(please provide audio)"
    # prompt the decoder with the target language and the transcribe task
    forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task="transcribe")
    # preprocess the audio
    waveform = process_audio(sampling_rate, waveform)
    # extract log-Mel input features as PyTorch tensors
    inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
    predicted_ids = model.generate(**inputs, max_length=400, forced_decoder_ids=forced_decoder_ids)
    # decode token ids to text
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]
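
# Hypothetical direct call that bypasses the Gradio UI, left commented out so
# the script still only launches the interface; assumes soundfile is installed
# and that examples/zhiqi.wav from the examples list below exists:
#   import soundfile as sf
#   data, sr = sf.read("examples/zhiqi.wav", dtype="int16")
#   print(predict("chinese", (sr, data)))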
supportLangs = ['english', 'chinese', 'german', 'spanish', 'russian', 'korean', 'french', 'japanese', 'portuguese']
title = "OpenAI Whisper Base (original: https://huggingface.co/spaces/innev/whisper-Base by Inne Villumsen; annotated and copied here by 馮建凱 for personal study only)"
description = """
This demo showcases the speech recognition (ASR) capability of the <b>openai/whisper-base</b> model. It uses the original model without any fine-tuning. The default output language here is Chinese, and Whisper produces Traditional Chinese.
Whisper comes in several sizes: in principle, larger models are more accurate and smaller models are faster.
<b>How to use:</b> upload an audio file or record audio directly on the page. The audio is converted to mono and resampled to 16 kHz before being sent to the model.
"""
article = """
## Audio examples:
- "春日陽光普照大地,正是踏春好時節" ("Spring sunshine bathes the earth; a perfect season for an outing.") source: 知琪 (Zhiqi)
- "這是一年中最美味的團聚,也注定是一頓百感交集的晚餐。" ("This is the most delicious reunion of the year, and it is bound to be a dinner of mixed feelings.") source: 知廚 (Zhichu)
- "Hmm, I don't know" source: [InspectorJ](https://freesound.org/people/InspectorJ/sounds/519189) (CC BY 4.0 license)
- "Henry V" excerpt, source: [acclivity](https://freesound.org/people/acclivity/sounds/24096) (CC BY-NC 4.0 license)
- "You can see it in the eyes" source: [JoyOhJoy](https://freesound.org/people/JoyOhJoy/sounds/165348) (CC0 license)
- "We yearn for time" source: [Sample_Me](https://freesound.org/people/Sample_Me/sounds/610529) (CC0 license)
## References
- [OpenAI Whisper Base](https://huggingface.co/openai/whisper-base)
- [Innev GitHub](https://github.com/innev)
## Supported languages
english, chinese, german, spanish, russian, korean, french, japanese, portuguese, turkish, polish, catalan, dutch, arabic, swedish, italian, indonesian, hindi, finnish, vietnamese, hebrew, ukrainian, greek, malay, czech, romanian, danish, hungarian, tamil, norwegian, thai, urdu, croatian, bulgarian, lithuanian, latin, maori, malayalam, welsh, slovak, telugu, persian, latvian, bengali, serbian, azerbaijani, slovenian, kannada, estonian, macedonian, breton, basque, icelandic, armenian, nepali, mongolian, bosnian, kazakh, albanian, swahili, galician, marathi, punjabi, sinhala, khmer, shona, yoruba, somali, afrikaans, occitan, georgian, belarusian, tajik, sindhi, gujarati, amharic, yiddish, lao, uzbek, faroese, haitian creole, pashto, turkmen, nynorsk, maltese, sanskrit, luxembourgish, myanmar, tibetan, tagalog, malagasy, assamese, tatar, hawaiian, lingala, hausa, bashkir, javanese, sundanese, burmese, valencian, flemish, haitian, letzeburgesch, pushto, panjabi, moldavian, moldovan, sinhalese, castilian
## Model versions
| Model | Parameters | English-only | Multilingual |
|----------|------------|------------------------------------------------------|-----------------------------------------------------|
| tiny | 39 M | [✓](https://huggingface.co/openai/whisper-tiny.en) | [✓](https://huggingface.co/openai/whisper-tiny) |
| base | 74 M | [✓](https://huggingface.co/openai/whisper-base.en) | [✓](https://huggingface.co/openai/whisper-base) |
| small | 244 M | [✓](https://huggingface.co/openai/whisper-small.en) | [✓](https://huggingface.co/openai/whisper-small) |
| medium | 769 M | [✓](https://huggingface.co/openai/whisper-medium.en) | [✓](https://huggingface.co/openai/whisper-medium) |
| large | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large) |
| large-v2 | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large-v2) |
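
To try a different size, only the `checkpoint` value at the top of this script needs to change (a sketch, assuming every size works with the same processor/model API):

    checkpoint = "openai/whisper-small"
    processor = WhisperProcessor.from_pretrained(checkpoint)
    model = WhisperForConditionalGeneration.from_pretrained(checkpoint)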
"""
examples = [
    [None, "examples/zhiqi.wav", None],
    [None, "examples/zhichu.wav", None],
    [None, "examples/hmm_i_dont_know.wav", None],
    [None, "examples/henry5.mp3", None],
    [None, "examples/yearn_for_time.mp3", None],
    [None, "examples/see_in_eyes.wav", None],
]
gr.Interface(
    fn=predict,
    inputs=[
        gr.Radio(label="Target language", choices=supportLangs, value="chinese"),
        gr.Audio(label="Upload audio", source="upload", type="numpy"),
        gr.Audio(label="Record audio", source="microphone", type="numpy"),
    ],
    outputs=[
        gr.Text(label="Transcription"),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()