Example of using this model with faster-whisper
import io
import json
import logging
import sys
import time
from datetime import datetime
from faster_whisper import WhisperModel
from pydub import AudioSegment
# Log to both a file and stdout with timestamps.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('faster-whisper.log'),
        logging.StreamHandler(sys.stdout)
    ]
)

# Load the fine-tuned Russian Whisper model; second positional arg selects CPU inference.
model = WhisperModel("/path/to/dvislobokov/faster-whisper-large-v3-turbo-russian", "cpu")

# Split the audio into 30-second pieces so each transcribe call stays small.
audio = AudioSegment.from_wav("ezyZip.wav")
chunk_length = 30 * 1000  # chunk size in milliseconds
chunks = [audio[i:i + chunk_length] for i in range(0, len(audio), chunk_length)]

logging.info(f'Start transcribe at {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}')
start = time.time()
text = []
for chunk in chunks:
    # Export the chunk to an in-memory WAV file.
    buffer = io.BytesIO()
    chunk.export(buffer, format="wav")
    # BUGFIX: export() leaves the stream cursor at the end of the buffer;
    # transcribe() reads from the current position, so without rewinding
    # the model would see an empty stream. Rewind before handing it over.
    buffer.seek(0)
    segments, _info = model.transcribe(buffer, language="ru")
    # segments is a lazy generator; joining forces full decoding of this chunk.
    text.append("".join(segment.text for segment in segments))
end = time.time()

logging.info(f'Finish transcribe at {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}')
logging.info(f'Total time: {end - start}')
logging.info(f'Text: {text}')
- Downloads last month
- 101
Inference Providers
NEW
This model is not currently available via any of the supported Inference Providers.
The model cannot be deployed to the HF Inference API:
The model has no library tag.
Model tree for dvislobokov/faster-whisper-large-v3-turbo-russian
Base model
openai/whisper-large-v3
Finetuned
openai/whisper-large-v3-turbo