from abc import ABC, abstractmethod
from collections import Counter, deque
import time
from typing import Any, Deque, Iterator, List, Dict
from pprint import pprint
from src.hooks.progressListener import ProgressListener
from src.hooks.subTaskProgressListener import SubTaskProgressListener
from src.hooks.whisperProgressHook import create_progress_listener_handle
from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache
from src.segments import merge_timestamps
from src.whisper.abstractWhisperContainer import AbstractWhisperCallback
# Workaround for https://github.com/tensorflow/tensorflow/issues/48797
try:
import tensorflow as tf
except ModuleNotFoundError:
# Error handling
pass
import torch
import ffmpeg
import numpy as np
from src.utils import format_timestamp
from enum import Enum
class NonSpeechStrategy(Enum):
    """
    Strategy for handling the non-speech segments between speech segments.
    """
    SKIP = 1
    """
    Ignore non-speech segments entirely.
    """
    CREATE_SEGMENT = 2
    """
    Treat non-speech segments as speech, creating separate segments for them.
    """
    EXPAND_SEGMENT = 3
    """
    Expand speech segments into subsequent non-speech segments.
    """
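# Illustrative example (not part of the original code): given speech at
# [0, 10] and [20, 30] in a 40-second file, and assuming merging leaves the
# segments as-is, SKIP transcribes only those two ranges, CREATE_SEGMENT adds
# separate 'gap' segments for [10, 20] and [30, 40], and EXPAND_SEGMENT
# stretches [0, 10] to [0, 20] and [20, 30] to [20, 40] instead.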
# Defaults for Silero
SPEECH_THRESHOLD = 0.3
# Minimum size of segments to process
MIN_SEGMENT_DURATION = 1
# The maximum time for texts from old segments to be used in the next segment
MAX_PROMPT_WINDOW = 0 # seconds (0 = disabled)
PROMPT_NO_SPEECH_PROB = 0.1 # Do not pass the text from segments with a no speech probability higher than this
VAD_MAX_PROCESSING_CHUNK = 60 * 60 # 60 minutes of audio
class TranscriptionConfig(ABC):
    def __init__(self, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP,
                 segment_padding_left: float = None, segment_padding_right: float = None, max_silent_period: float = None,
                 max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index: int = -1):
self.non_speech_strategy = non_speech_strategy
self.segment_padding_left = segment_padding_left
self.segment_padding_right = segment_padding_right
self.max_silent_period = max_silent_period
self.max_merge_size = max_merge_size
self.max_prompt_window = max_prompt_window
self.initial_segment_index = initial_segment_index
class PeriodicTranscriptionConfig(TranscriptionConfig):
    def __init__(self, periodic_duration: float, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP,
                 segment_padding_left: float = None, segment_padding_right: float = None, max_silent_period: float = None,
                 max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index: int = -1):
super().__init__(non_speech_strategy, segment_padding_left, segment_padding_right, max_silent_period, max_merge_size, max_prompt_window, initial_segment_index)
self.periodic_duration = periodic_duration
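# Hypothetical usage sketch: transcribe in fixed 30-second windows, merging
# adjacent windows up to 60 seconds.
#   config = PeriodicTranscriptionConfig(periodic_duration=30, max_merge_size=60)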
class AbstractTranscription(ABC):
def __init__(self, sampling_rate: int = 16000):
self.sampling_rate = sampling_rate
    def get_audio_segment(self, audio: str, start_time: str = None, duration: str = None):
        return load_audio(audio, self.sampling_rate, start_time, duration)
def is_transcribe_timestamps_fast(self):
"""
Determine if get_transcribe_timestamps is fast enough to not need parallelization.
"""
return False
@abstractmethod
def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float):
"""
Get the start and end timestamps of the sections that should be transcribed by this VAD method.
Parameters
----------
audio: str
The audio file.
        config: TranscriptionConfig
            The transcription configuration.
        start_time: float
            The start of the audio range to process, in seconds.
        end_time: float
            The end of the audio range to process, in seconds.
Returns
-------
A list of start and end timestamps, in fractional seconds.
"""
return
def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: TranscriptionConfig, total_duration: float):
"""
Get the start and end timestamps of the sections that should be transcribed by this VAD method,
after merging the given segments using the specified configuration.
Parameters
----------
        timestamps: List[Dict[str, Any]]
            The start and end timestamps of the segments to merge.
        config: TranscriptionConfig
            The transcription configuration.
        total_duration: float
            The total duration of the audio, in seconds.
Returns
-------
A list of start and end timestamps, in fractional seconds.
"""
merged = merge_timestamps(timestamps, config.max_silent_period, config.max_merge_size,
config.segment_padding_left, config.segment_padding_right)
if config.non_speech_strategy != NonSpeechStrategy.SKIP:
# Expand segments to include the gaps between them
if (config.non_speech_strategy == NonSpeechStrategy.CREATE_SEGMENT):
                # When we have a prompt window, we create speech segments between each segment if we exceed the merge size
merged = self.fill_gaps(merged, total_duration=total_duration, max_expand_size=config.max_merge_size)
elif config.non_speech_strategy == NonSpeechStrategy.EXPAND_SEGMENT:
# With no prompt window, it is better to just expand the segments (this effectively passes the prompt to the next segment)
merged = self.expand_gaps(merged, total_duration=total_duration)
else:
raise Exception("Unknown non-speech strategy: " + str(config.non_speech_strategy))
print("Transcribing non-speech:")
pprint(merged)
return merged
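    # Illustrative sketch of get_merged_timestamps above: with
    # max_silent_period=1, the raw timestamps {'start': 0, 'end': 4.5} and
    # {'start': 5.0, 'end': 9.0} would typically be merged into a single
    # segment {'start': 0, 'end': 9.0} before the non-speech strategy is applied.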
def transcribe(self, audio: str, whisperCallable: AbstractWhisperCallback, config: TranscriptionConfig,
progressListener: ProgressListener = None):
"""
Transcribe the given audo file.
Parameters
----------
audio: str
The audio file.
whisperCallable: WhisperCallback
A callback object to call to transcribe each segment.
Returns
-------
A list of start and end timestamps, in fractional seconds.
"""
try:
max_audio_duration = self.get_audio_duration(audio, config)
timestamp_segments = self.get_transcribe_timestamps(audio, config, 0, max_audio_duration)
# Get speech timestamps from full audio file
merged = self.get_merged_timestamps(timestamp_segments, config, max_audio_duration)
# A deque of transcribed segments that is passed to the next segment as a prompt
prompt_window = deque()
print("Processing timestamps:")
pprint(merged)
result = {
'text': "",
'segments': [],
'language': ""
}
languageCounter = Counter()
detected_language = None
segment_index = config.initial_segment_index
# Calculate progress
progress_start_offset = merged[0]['start'] if len(merged) > 0 else 0
progress_total_duration = sum([segment['end'] - segment['start'] for segment in merged])
# For each time segment, run whisper
for segment in merged:
segment_index += 1
segment_start = segment['start']
segment_end = segment['end']
segment_expand_amount = segment.get('expand_amount', 0)
segment_gap = segment.get('gap', False)
segment_duration = segment_end - segment_start
if segment_duration < MIN_SEGMENT_DURATION:
continue
# Audio to run on Whisper
segment_audio = self.get_audio_segment(audio, start_time = str(segment_start), duration = str(segment_duration))
# Previous segments to use as a prompt
                segment_prompt = ' '.join([prompt_segment['text'] for prompt_segment in prompt_window]) if len(prompt_window) > 0 else None
# Detected language
detected_language = languageCounter.most_common(1)[0][0] if len(languageCounter) > 0 else None
print("Running whisper from ", format_timestamp(segment_start), " to ", format_timestamp(segment_end), ", duration: ",
segment_duration, "expanded: ", segment_expand_amount, "prompt: ", segment_prompt, "language: ", detected_language)
perf_start_time = time.perf_counter()
scaled_progress_listener = SubTaskProgressListener(progressListener, base_task_total=progress_total_duration,
sub_task_start=segment_start - progress_start_offset, sub_task_total=segment_duration)
segment_result = whisperCallable.invoke(segment_audio, segment_index, segment_prompt, detected_language, progress_listener=scaled_progress_listener)
perf_end_time = time.perf_counter()
print("Whisper took {} seconds".format(perf_end_time - perf_start_time))
adjusted_segments = self.adjust_timestamp(segment_result["segments"], adjust_seconds=segment_start, max_source_time=segment_duration)
# Propagate expand amount to the segments
if (segment_expand_amount > 0):
segment_without_expansion = segment_duration - segment_expand_amount
for adjusted_segment in adjusted_segments:
adjusted_segment_end = adjusted_segment['end']
# Add expand amount if the segment got expanded
if (adjusted_segment_end > segment_without_expansion):
adjusted_segment["expand_amount"] = adjusted_segment_end - segment_without_expansion
# Append to output
result['text'] += segment_result['text']
result['segments'].extend(adjusted_segments)
# Increment detected language
if not segment_gap:
languageCounter[segment_result['language']] += 1
# Update prompt window
self.__update_prompt_window(prompt_window, adjusted_segments, segment_end, segment_gap, config)
if detected_language is not None:
result['language'] = detected_language
finally:
# Notify progress listener that we are done
if progressListener is not None:
progressListener.on_finished()
return result
def get_audio_duration(self, audio: str, config: TranscriptionConfig):
return get_audio_duration(audio)
def __update_prompt_window(self, prompt_window: Deque, adjusted_segments: List, segment_end: float, segment_gap: bool, config: TranscriptionConfig):
if (config.max_prompt_window is not None and config.max_prompt_window > 0):
# Add segments to the current prompt window (unless it is a speech gap)
if not segment_gap:
for segment in adjusted_segments:
if segment.get('no_speech_prob', 0) <= PROMPT_NO_SPEECH_PROB:
prompt_window.append(segment)
while (len(prompt_window) > 0):
first_end_time = prompt_window[0].get('end', 0)
# Time expanded in the segments should be discounted from the prompt window
first_expand_time = prompt_window[0].get('expand_amount', 0)
if (first_end_time - first_expand_time < segment_end - config.max_prompt_window):
prompt_window.popleft()
else:
break
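    # Sketch of __update_prompt_window above: with max_prompt_window=5 and
    # segment_end=60, any entry whose (end - expand_amount) falls before the
    # 55-second mark is evicted from the left of the deque, so only recently
    # transcribed text is passed on as the next segment's prompt.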
def include_gaps(self, segments: Iterator[dict], min_gap_length: float, total_duration: float):
result = []
last_end_time = 0
for segment in segments:
segment_start = float(segment['start'])
segment_end = float(segment['end'])
if (last_end_time != segment_start):
delta = segment_start - last_end_time
if (min_gap_length is None or delta >= min_gap_length):
result.append( { 'start': last_end_time, 'end': segment_start, 'gap': True } )
last_end_time = segment_end
result.append(segment)
# Also include total duration if specified
if (total_duration is not None and last_end_time < total_duration):
            delta = total_duration - last_end_time
if (min_gap_length is None or delta >= min_gap_length):
result.append( { 'start': last_end_time, 'end': total_duration, 'gap': True } )
return result
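    # Sketch of include_gaps above: include_gaps([{'start': 2, 'end': 4}],
    # min_gap_length=None, total_duration=10) returns
    # [{'start': 0, 'end': 2, 'gap': True}, {'start': 2, 'end': 4},
    #  {'start': 4, 'end': 10, 'gap': True}].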
# Expand the end time of each segment to the start of the next segment
def expand_gaps(self, segments: List[Dict[str, Any]], total_duration: float):
result = []
if len(segments) == 0:
return result
# Add gap at the beginning if needed
if (segments[0]['start'] > 0):
result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } )
for i in range(len(segments) - 1):
current_segment = segments[i]
next_segment = segments[i + 1]
delta = next_segment['start'] - current_segment['end']
# Expand if the gap actually exists
if (delta >= 0):
current_segment = current_segment.copy()
current_segment['expand_amount'] = delta
current_segment['end'] = next_segment['start']
result.append(current_segment)
# Add last segment
last_segment = segments[-1]
result.append(last_segment)
# Also include total duration if specified
if (total_duration is not None):
last_segment = result[-1]
if (last_segment['end'] < total_duration):
last_segment = last_segment.copy()
last_segment['end'] = total_duration
result[-1] = last_segment
return result
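    # Sketch of expand_gaps above:
    # expand_gaps([{'start': 0, 'end': 3}, {'start': 5, 'end': 8}], 10) returns
    # [{'start': 0, 'end': 5, 'expand_amount': 2}, {'start': 5, 'end': 10}].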
def fill_gaps(self, segments: List[Dict[str, Any]], total_duration: float, max_expand_size: float = None):
result = []
if len(segments) == 0:
return result
# Add gap at the beginning if needed
if (segments[0]['start'] > 0):
result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } )
for i in range(len(segments) - 1):
expanded = False
current_segment = segments[i]
next_segment = segments[i + 1]
delta = next_segment['start'] - current_segment['end']
if (max_expand_size is not None and delta <= max_expand_size):
# Just expand the current segment
current_segment = current_segment.copy()
current_segment['expand_amount'] = delta
current_segment['end'] = next_segment['start']
expanded = True
result.append(current_segment)
# Add a gap to the next segment if needed
if (delta >= 0 and not expanded):
result.append({ 'start': current_segment['end'], 'end': next_segment['start'], 'gap': True } )
# Add last segment
last_segment = segments[-1]
result.append(last_segment)
# Also include total duration if specified
if (total_duration is not None):
last_segment = result[-1]
delta = total_duration - last_segment['end']
if (delta > 0):
if (max_expand_size is not None and delta <= max_expand_size):
# Expand the last segment
last_segment = last_segment.copy()
last_segment['expand_amount'] = delta
last_segment['end'] = total_duration
result[-1] = last_segment
else:
result.append({ 'start': last_segment['end'], 'end': total_duration, 'gap': True } )
return result
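    # Sketch of fill_gaps above: with max_expand_size=1,
    # fill_gaps([{'start': 0, 'end': 3}, {'start': 7, 'end': 9}], 10, 1) keeps
    # {'start': 0, 'end': 3}, inserts {'start': 3, 'end': 7, 'gap': True}, and
    # expands the last segment to {'start': 7, 'end': 10, 'expand_amount': 1}.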
def adjust_timestamp(self, segments: Iterator[dict], adjust_seconds: float, max_source_time: float = None):
result = []
for segment in segments:
segment_start = float(segment['start'])
segment_end = float(segment['end'])
            # Skip segments that start beyond the source audio, and clamp the end time
if (max_source_time is not None):
if (segment_start > max_source_time):
continue
segment_end = min(max_source_time, segment_end)
new_segment = segment.copy()
# Add to start and end
new_segment['start'] = segment_start + adjust_seconds
new_segment['end'] = segment_end + adjust_seconds
# Handle words
if ('words' in new_segment):
for word in new_segment['words']:
# Adjust start and end
word['start'] = word['start'] + adjust_seconds
word['end'] = word['end'] + adjust_seconds
result.append(new_segment)
return result
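    # Sketch of adjust_timestamp above:
    # adjust_timestamp([{'start': 1.0, 'end': 5.0}], adjust_seconds=30, max_source_time=4.0)
    # clamps the end to 4.0 and shifts both boundaries, returning
    # [{'start': 31.0, 'end': 34.0}].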
def multiply_timestamps(self, timestamps: List[Dict[str, Any]], factor: float):
result = []
for entry in timestamps:
start = entry['start']
end = entry['end']
result.append({
'start': start * factor,
'end': end * factor
})
return result
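    # Sketch of multiply_timestamps above: multiplying by 1 / sampling_rate
    # converts sample indices to seconds, e.g. at 16 kHz the entry
    # {'start': 16000, 'end': 48000} becomes {'start': 1.0, 'end': 3.0}.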
class VadSileroTranscription(AbstractTranscription):
def __init__(self, sampling_rate: int = 16000, cache: ModelCache = None):
super().__init__(sampling_rate=sampling_rate)
self.model = None
self.cache = cache
self._initialize_model()
def _initialize_model(self):
if (self.cache is not None):
model_key = "VadSileroTranscription"
self.model, self.get_speech_timestamps = self.cache.get(model_key, self._create_model)
print("Loaded Silerio model from cache.")
else:
self.model, self.get_speech_timestamps = self._create_model()
print("Created Silerio model")
def _create_model(self):
model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad')
# Silero does not benefit from multi-threading
torch.set_num_threads(1) # JIT
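        # Note (based on the silero-vad release this code targets): utils
        # unpacks as (get_speech_timestamps, save_audio, read_audio,
        # VADIterator, collect_chunks); only get_speech_timestamps is used here.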
(get_speech_timestamps, _, _, _, _) = utils
return model, get_speech_timestamps
def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float):
result = []
print("Getting timestamps from audio file: {}, start: {}, duration: {}".format(audio, start_time, end_time))
perf_start_time = time.perf_counter()
        # Divide processing of audio into chunks
chunk_start = start_time
while (chunk_start < end_time):
chunk_duration = min(end_time - chunk_start, VAD_MAX_PROCESSING_CHUNK)
print("Processing VAD in chunk from {} to {}".format(format_timestamp(chunk_start), format_timestamp(chunk_start + chunk_duration)))
wav = self.get_audio_segment(audio, str(chunk_start), str(chunk_duration))
            sample_timestamps = self.get_speech_timestamps(wav, self.model, sampling_rate=self.sampling_rate, threshold=SPEECH_THRESHOLD)
seconds_timestamps = self.multiply_timestamps(sample_timestamps, factor=1 / self.sampling_rate)
adjusted = self.adjust_timestamp(seconds_timestamps, adjust_seconds=chunk_start, max_source_time=chunk_start + chunk_duration)
#pprint(adjusted)
result.extend(adjusted)
chunk_start += chunk_duration
perf_end_time = time.perf_counter()
print("VAD processing took {} seconds".format(perf_end_time - perf_start_time))
return result
def __getstate__(self):
# We only need the sampling rate
return { 'sampling_rate': self.sampling_rate }
def __setstate__(self, state):
self.sampling_rate = state['sampling_rate']
self.model = None
# Use the global cache
self.cache = GLOBAL_MODEL_CACHE
self._initialize_model()
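# Hypothetical usage sketch (whisperCallable would be a concrete AbstractWhisperCallback):
#   vad = VadSileroTranscription()
#   config = TranscriptionConfig(max_silent_period=10, max_merge_size=30)
#   result = vad.transcribe("audio.mp3", whisperCallable, config)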
# A very simple VAD that just marks every N seconds as speech
class VadPeriodicTranscription(AbstractTranscription):
def __init__(self, sampling_rate: int = 16000):
super().__init__(sampling_rate=sampling_rate)
def is_transcribe_timestamps_fast(self):
# This is a very fast VAD - no need to parallelize it
return True
def get_transcribe_timestamps(self, audio: str, config: PeriodicTranscriptionConfig, start_time: float, end_time: float):
result = []
# Generate a timestamp every N seconds
start_timestamp = start_time
while (start_timestamp < end_time):
end_timestamp = min(start_timestamp + config.periodic_duration, end_time)
segment_duration = end_timestamp - start_timestamp
# Minimum duration is 1 second
if (segment_duration >= 1):
result.append( { 'start': start_timestamp, 'end': end_timestamp } )
start_timestamp = end_timestamp
return result
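# Hypothetical usage sketch: mark the whole file as speech in 5-minute windows.
#   vad = VadPeriodicTranscription()
#   config = PeriodicTranscriptionConfig(periodic_duration=5 * 60)
#   timestamps = vad.get_transcribe_timestamps("audio.mp3", config, 0, get_audio_duration("audio.mp3"))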
def get_audio_duration(file: str):
return float(ffmpeg.probe(file)["format"]["duration"])
def load_audio(file: str, sample_rate: int = 16000,
start_time: str = None, duration: str = None):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
    sample_rate: int
        The sample rate to resample the audio to, if necessary
start_time: str
The start time, using the standard FFMPEG time duration syntax, or None to disable.
duration: str
The duration, using the standard FFMPEG time duration syntax, or None to disable.
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
inputArgs = {'threads': 0}
if (start_time is not None):
inputArgs['ss'] = start_time
if (duration is not None):
inputArgs['t'] = duration
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, **inputArgs)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sample_rate)
.run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}")
    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
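# Hypothetical usage sketch: load the first 30 seconds of a file at 16 kHz.
#   waveform = load_audio("audio.mp3", sample_rate=16000, start_time="0", duration="30")
#   print(waveform.shape, waveform.dtype)  # one-dimensional float32 array in [-1, 1]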