#!/usr/bin/env python
# coding: utf-8

# In[3]:


# import webrtcvad
# import numpy as np
# import librosa
# def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
#     '''
#     Voice Activity Detection (VAD) determines whether a segment of audio
#     contains speech. This is useful in noisy environments where you want
#     to filter out the non-speech parts of a recording.
#     webrtcvad is a Python package wrapping the VAD from the WebRTC
#     (Web Real-Time Communication) project; it detects speech in short
#     frames of audio.
#     '''
#     vad = webrtcvad.Vad(aggressiveness)
#     audio_int16 = np.int16(audio * 32767)
#     frame_size = int(sr * frame_duration / 1000)
#     frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
#     voiced_audio = np.concatenate([frame for frame in frames if vad.is_speech(frame.tobytes(), sample_rate=sr)])
#     voiced_audio = np.float32(voiced_audio) / 32767
#     return voiced_audio


# In[1]:


# import webrtcvad
# import librosa
# import numpy as np
# def apply_vad(audio, sr, frame_duration_ms=30):
#     # Initialize WebRTC VAD
#     vad = webrtcvad.Vad()
#     vad.set_mode(1)  # Set aggressiveness mode (0-3)

#     # Convert to 16kHz if not already
#     if sr != 16000:
#         audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
#         sr = 16000

#     # Convert to 16-bit PCM
#     audio = (audio * 32768).astype(np.int16)

#     frame_length = int(sr * (frame_duration_ms / 1000.0))  # Frame length in samples
#     bytes_per_frame = frame_length * 2  # 16-bit audio has 2 bytes per sample

#     # Apply VAD to the audio
#     voiced_frames = []
#     for i in range(0, len(audio), frame_length):
#         frame = audio[i:i + frame_length].tobytes()
#         if len(frame) == bytes_per_frame and vad.is_speech(frame, sr):
#             voiced_frames.extend(audio[i:i + frame_length])

#     # Return the VAD-filtered audio
#     return np.array(voiced_frames)


# In[4]:


import webrtcvad
import numpy as np
import librosa

def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
    '''
    Voice Activity Detection (VAD): keep only the frames that WebRTC's VAD
    classifies as speech and return them as one concatenated float32 signal.
    '''
    vad = webrtcvad.Vad(aggressiveness)
    
    # Resample to 16000 Hz if not already (recommended for better compatibility)
    if sr != 16000:
        audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
        sr = 16000
    
    # Convert to 16-bit PCM format expected by webrtcvad
    audio_int16 = np.int16(audio * 32767)
    
    # webrtcvad only accepts frame durations of 10, 20, or 30 ms
    frame_size = int(sr * frame_duration / 1000)
    if frame_size % 2 != 0:
        frame_size -= 1  # Keep the sample count even so frames align cleanly
    
    frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
    
    # Filter out non-speech frames
    voiced_frames = []
    for frame in frames:
        if len(frame) == frame_size and vad.is_speech(frame.tobytes(), sample_rate=sr):
            voiced_frames.append(frame)
    
    # Concatenate the voiced frames; return an empty signal if nothing was voiced
    # (np.concatenate raises a ValueError on an empty list)
    if not voiced_frames:
        return np.array([], dtype=np.float32)
    voiced_audio = np.concatenate(voiced_frames)
    voiced_audio = np.float32(voiced_audio) / 32767
    
    return voiced_audio


# In[ ]:
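
# A minimal usage sketch (the file name 'speech.wav' is a placeholder, not part
# of the notebook above): librosa.load returns mono float32 audio, which
# apply_vad resamples to 16 kHz internally before running the VAD.
audio, sr = librosa.load('speech.wav', sr=None)
voiced = apply_vad(audio, sr, frame_duration=30, aggressiveness=3)
print(f"Kept {len(voiced)} of {len(audio)} samples ({len(voiced) / len(audio):.0%} voiced)")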