#!/usr/bin/env python
# coding: utf-8

# In[3]:


# Earlier draft of apply_vad, kept commented out:

# import webrtcvad
# import numpy as np
# import librosa

# def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
#     '''
#     Voice Activity Detection (VAD): a technique used to determine whether a
#     segment of audio contains speech. This is useful in noisy environments
#     where you want to filter out the non-speech parts of the audio.
#     webrtcvad: a Python package based on the VAD from the WebRTC
#     (Web Real-Time Communication) project. It detects speech in small
#     chunks of audio.
#     '''
#     vad = webrtcvad.Vad()
#     audio_int16 = np.int16(audio * 32767)
#     frame_size = int(sr * frame_duration / 1000)
#     frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
#     voiced_audio = np.concatenate([frame for frame in frames if vad.is_speech(frame.tobytes(), sample_rate=sr)])
#     voiced_audio = np.float32(voiced_audio) / 32767
#     return voiced_audio


# In[1]:


# Second draft, also kept commented out:

# import webrtcvad
# import librosa
# import numpy as np

# def apply_vad(audio, sr, frame_duration_ms=30):
#     # Initialize WebRTC VAD
#     vad = webrtcvad.Vad()
#     vad.set_mode(1)  # Set aggressiveness mode (0-3)

#     # Resample to 16 kHz if not already
#     if sr != 16000:
#         audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
#         sr = 16000

#     # Convert to 16-bit PCM
#     audio = (audio * 32768).astype(np.int16)

#     frame_length = int(sr * (frame_duration_ms / 1000.0))  # Frame length in samples
#     bytes_per_frame = frame_length * 2  # 16-bit audio has 2 bytes per sample

#     # Apply VAD frame by frame
#     voiced_frames = []
#     for i in range(0, len(audio), frame_length):
#         frame = audio[i:i + frame_length].tobytes()
#         if len(frame) == bytes_per_frame and vad.is_speech(frame, sr):
#             voiced_frames.extend(audio[i:i + frame_length])

#     # Return the VAD-filtered audio
#     return np.array(voiced_frames)


# In[4]:


import webrtcvad
import numpy as np
import librosa

def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
    '''
    Voice Activity Detection (VAD): detects speech in audio and returns
    only the voiced frames, filtering out non-speech segments.
    '''
    vad = webrtcvad.Vad(aggressiveness)

    # Resample to 16000 Hz if not already (webrtcvad supports 8/16/32/48 kHz)
    if sr != 16000:
        audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
        sr = 16000

    # Convert to the 16-bit PCM format expected by webrtcvad
    audio_int16 = np.int16(audio * 32767)

    # webrtcvad only accepts frames of exactly 10, 20, or 30 ms
    if frame_duration not in (10, 20, 30):
        raise ValueError("frame_duration must be 10, 20, or 30 ms")
    frame_size = int(sr * frame_duration / 1000)

    frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]

    # Keep only the frames classified as speech; the trailing (shorter)
    # frame is skipped because webrtcvad requires full-length frames
    voiced_frames = []
    for frame in frames:
        if len(frame) == frame_size and vad.is_speech(frame.tobytes(), sample_rate=sr):
            voiced_frames.append(frame)

    # Guard against recordings with no detected speech, where
    # np.concatenate would raise on an empty list
    if not voiced_frames:
        return np.array([], dtype=np.float32)

    # Concatenate the voiced frames and convert back to float32 in [-1, 1]
    voiced_audio = np.concatenate(voiced_frames)
    voiced_audio = np.float32(voiced_audio) / 32767
    return voiced_audio
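# In[5]:


# Example usage: a minimal sketch of running apply_vad on a file.
# 'speech_sample.wav' is a hypothetical path, and the soundfile package
# is assumed to be available for writing the result; neither appears in
# the cells above.

import soundfile as sf

# Load at the native sampling rate; apply_vad resamples to 16 kHz itself
audio, sr = librosa.load('speech_sample.wav', sr=None)

voiced = apply_vad(audio, sr, frame_duration=30, aggressiveness=3)
print(f"original: {len(audio) / sr:.2f}s, voiced: {len(voiced) / 16000:.2f}s")

# The output is always at 16 kHz because of the resampling inside apply_vad
sf.write('speech_sample_voiced.wav', voiced, 16000)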