# fn.py — faster-whisper transcription helpers (aka7774, commit 82d8c86, verified)
from faster_whisper import WhisperModel
# Lazily-initialized faster-whisper model instance; created by load_model().
model = None
# Size/name of the currently loaded model (e.g. "large-v3"); None until first load.
model_size = None
# Optional prompt passed to the transcriber to bias decoding; set via set_prompt().
initial_prompt = None
# Language code used for transcription; defaults to Japanese.
language = 'ja'
# Extra keyword arguments forwarded to model.transcribe(); set via set_transcribe_kwargs().
transcribe_kwargs = {}
def load_model(_model_size):
    """Load (or reload) the Whisper model when the requested size changes.

    A falsy ``_model_size`` or one equal to the currently loaded size is a
    no-op. Tries CUDA with float16 first and falls back to CPU with int8
    quantization if GPU initialization fails.

    Args:
        _model_size: Model size/name understood by faster-whisper
            (e.g. "base", "large-v3"), or None to keep the current model.
    """
    global model_size, model
    if _model_size and model_size != _model_size:
        model_size = _model_size
        try:
            model = WhisperModel(model_size, device="cuda", compute_type="float16")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any model/CUDA failure falls back to CPU.
            model = WhisperModel(model_size, device="cpu", compute_type="int8")
def set_prompt(prompt, _language = None):
global initial_prompt, language
initial_prompt = prompt
if _language:
language = _language
def set_transcribe_kwargs(args):
    """Replace the extra keyword arguments forwarded to model.transcribe().

    A shallow copy is stored so later caller-side mutation of *args* cannot
    silently change transcription behavior (the original kept a live
    reference). A falsy value (None, {}) clears the extras.

    Args:
        args: Mapping of keyword arguments for WhisperModel.transcribe, or None.
    """
    global transcribe_kwargs
    transcribe_kwargs = dict(args or {})
def speech_to_text(audio_file, _model_size = None):
    """Transcribe an audio file with the module-level Whisper model.

    Args:
        audio_file: Path (or file-like object) accepted by
            WhisperModel.transcribe.
        _model_size: Optional model size; triggers a (re)load via load_model
            when it differs from the currently loaded model.

    Returns:
        A tuple ``(text_only, text_with_timestamps)`` — the second string has
        one ``start<TAB>end<TAB>text`` line per segment.

    Raises:
        RuntimeError: If no model has been loaded yet (load_model is a no-op
            for a falsy size, so the model can still be None here).
    """
    load_model(_model_size)
    if model is None:
        # Previously this surfaced as an opaque AttributeError on NoneType.
        raise RuntimeError("No Whisper model loaded; pass _model_size on the first call.")
    segments, info = model.transcribe(
        audio_file,
        initial_prompt=initial_prompt,
        language=language,
        beam_size=5,
        vad_filter=True,
        without_timestamps=False,
        **transcribe_kwargs,
    )
    # Accumulate lines in lists and join once — avoids quadratic `+=`
    # string concatenation over many segments.
    plain_lines = []
    timed_lines = []
    for segment in segments:
        plain_lines.append(f"{segment.text}\n")
        timed_lines.append(f"{segment.start:.2f}\t{segment.end:.2f}\t{segment.text}\n")
    return ''.join(plain_lines), ''.join(timed_lines)