cdactvm committed
Commit f656d13
1 Parent(s): bfde6e2

Update applyVad.py

Files changed (1): applyVad.py (+85 -105)
applyVad.py CHANGED
@@ -1,105 +1,85 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# In[3]:
-
-
-# import webrtcvad
-# import numpy as np
-# import librosa
-# def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
-#     '''
-#     Voice Activity Detection (VAD): It is a technique used to determine whether a segment of audio contains speech.
-#     This is useful in noisy environments where you want to filter out non-speech parts of the audio.
-#     webrtcvad: This is a Python package based on the VAD from the WebRTC (Web Real-Time Communication) project.
-#     It helps detect speech in small chunks of audio.
-#     '''
-#     vad = webrtcvad.Vad()
-#     audio_int16 = np.int16(audio * 32767)
-#     frame_size = int(sr * frame_duration / 1000)
-#     frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
-#     voiced_audio = np.concatenate([frame for frame in frames if vad.is_speech(frame.tobytes(), sample_rate=sr)])
-#     voiced_audio = np.float32(voiced_audio) / 32767
-#     return voiced_audio
-
-
-# In[1]:
-
-
-# import webrtcvad
-# import librosa
-# import numpy as np
-# def apply_vad(audio, sr, frame_duration_ms=30):
-#     # Initialize WebRTC VAD
-#     vad = webrtcvad.Vad()
-#     vad.set_mode(1) # Set aggressiveness mode (0-3)
-
-#     # Convert to 16kHz if not already
-#     if sr != 16000:
-#         audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
-#         sr = 16000
-
-#     # Convert to 16-bit PCM
-#     audio = (audio * 32768).astype(np.int16)
-
-#     frame_length = int(sr * (frame_duration_ms / 1000.0)) # Calculate frame length in samples
-#     bytes_per_frame = frame_length * 2 # 16-bit audio has 2 bytes per sample
-
-#     # Apply VAD to the audio
-#     voiced_frames = []
-#     for i in range(0, len(audio), frame_length):
-#         frame = audio[i:i + frame_length].tobytes()
-#         if len(frame) == bytes_per_frame and vad.is_speech(frame, sr):
-#             voiced_frames.extend(audio[i:i + frame_length])
-
-#     # Return the VAD-filtered audio
-#     return np.array(voiced_frames)
-
-
-# In[4]:
-
-
-import webrtcvad
-import numpy as np
-import librosa
-
-def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
-    '''
-    Voice Activity Detection (VAD): Detects speech in audio.
-    '''
-    vad = webrtcvad.Vad(aggressiveness)
-
-    # Resample to 16000 Hz if not already (recommended for better compatibility)
-    if sr != 16000:
-        audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
-        sr = 16000
-
-    # Convert to 16-bit PCM format expected by webrtcvad
-    audio_int16 = np.int16(audio * 32767)
-
-    # Ensure frame size matches WebRTC's expected lengths
-    frame_size = int(sr * frame_duration / 1000)
-    if frame_size % 2 != 0:
-        frame_size -= 1 # Make sure it's even to avoid processing issues
-
-    frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
-
-    # Filter out non-speech frames
-    voiced_frames = []
-    for frame in frames:
-        if len(frame) == frame_size and vad.is_speech(frame.tobytes(), sample_rate=sr):
-            voiced_frames.append(frame)
-
-    # Concatenate the voiced frames
-    voiced_audio = np.concatenate(voiced_frames)
-    voiced_audio = np.float32(voiced_audio) / 32767
-
-    return voiced_audio
-
-
-# In[ ]:
-
-
-
-
+
+import webrtcvad
+import numpy as np
+import librosa
+def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
+    '''
+    Voice Activity Detection (VAD): It is a technique used to determine whether a segment of audio contains speech.
+    This is useful in noisy environments where you want to filter out non-speech parts of the audio.
+    webrtcvad: This is a Python package based on the VAD from the WebRTC (Web Real-Time Communication) project.
+    It helps detect speech in small chunks of audio.
+    '''
+    vad = webrtcvad.Vad()
+    audio_int16 = np.int16(audio * 32767)
+    frame_size = int(sr * frame_duration / 1000)
+    frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
+    voiced_audio = np.concatenate([frame for frame in frames if vad.is_speech(frame.tobytes(), sample_rate=sr)])
+    voiced_audio = np.float32(voiced_audio) / 32767
+    return voiced_audio
+
+# import webrtcvad
+# import librosa
+# import numpy as np
+# def apply_vad(audio, sr, frame_duration_ms=30):
+#     # Initialize WebRTC VAD
+#     vad = webrtcvad.Vad()
+#     vad.set_mode(1) # Set aggressiveness mode (0-3)
+
+#     # Convert to 16kHz if not already
+#     if sr != 16000:
+#         audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
+#         sr = 16000
+
+#     # Convert to 16-bit PCM
+#     audio = (audio * 32768).astype(np.int16)
+
+#     frame_length = int(sr * (frame_duration_ms / 1000.0)) # Calculate frame length in samples
+#     bytes_per_frame = frame_length * 2 # 16-bit audio has 2 bytes per sample
+
+#     # Apply VAD to the audio
+#     voiced_frames = []
+#     for i in range(0, len(audio), frame_length):
+#         frame = audio[i:i + frame_length].tobytes()
+#         if len(frame) == bytes_per_frame and vad.is_speech(frame, sr):
+#             voiced_frames.extend(audio[i:i + frame_length])
+
+#     # Return the VAD-filtered audio
+#     return np.array(voiced_frames)
+
+# import webrtcvad
+# import numpy as np
+# import librosa
+
+# def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
+#     '''
+#     Voice Activity Detection (VAD): Detects speech in audio.
+#     '''
+#     vad = webrtcvad.Vad(aggressiveness)
+
+#     # Resample to 16000 Hz if not already (recommended for better compatibility)
+#     if sr != 16000:
+#         audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
+#         sr = 16000
+
+#     # Convert to 16-bit PCM format expected by webrtcvad
+#     audio_int16 = np.int16(audio * 32767)
+
+#     # Ensure frame size matches WebRTC's expected lengths
+#     frame_size = int(sr * frame_duration / 1000)
+#     if frame_size % 2 != 0:
+#         frame_size -= 1 # Make sure it's even to avoid processing issues
+
+#     frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
+
+#     # Filter out non-speech frames
+#     voiced_frames = []
+#     for frame in frames:
+#         if len(frame) == frame_size and vad.is_speech(frame.tobytes(), sample_rate=sr):
+#             voiced_frames.append(frame)
+
+#     # Concatenate the voiced frames
+#     voiced_audio = np.concatenate(voiced_frames)
+#     voiced_audio = np.float32(voiced_audio) / 32767
+
+#     return voiced_audio
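
Review note: the version this commit makes active accepts an aggressiveness parameter but constructs webrtcvad.Vad() without it, passes the caller's sample rate straight through even though webrtcvad only accepts 8000, 16000, 32000, or 48000 Hz audio in 10/20/30 ms frames, hands the trailing partial frame to is_speech() (which rejects invalid frame lengths), and calls np.concatenate on an empty list when no frame is voiced. Below is a minimal hardened sketch of the same function, assuming mono float input in [-1, 1]; it is not part of the commit, the clipping and empty-result guard are my additions, and speech.wav in the usage stub is a placeholder path.

import webrtcvad
import numpy as np
import librosa

def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
    '''
    Keep only the voiced frames of a mono float signal in [-1, 1].
    webrtcvad expects 16-bit mono PCM at 8/16/32/48 kHz in 10/20/30 ms frames.
    '''
    vad = webrtcvad.Vad(aggressiveness)  # actually use the requested mode

    # Resample to a rate webrtcvad supports; 16 kHz is a common default.
    if sr not in (8000, 16000, 32000, 48000):
        audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
        sr = 16000

    # Convert float samples to the 16-bit PCM that webrtcvad expects.
    audio_int16 = np.clip(audio * 32767, -32768, 32767).astype(np.int16)

    frame_size = int(sr * frame_duration / 1000)
    voiced = []
    # Iterate over full frames only; is_speech() raises on short frames.
    for i in range(0, len(audio_int16) - frame_size + 1, frame_size):
        frame = audio_int16[i:i + frame_size]
        if vad.is_speech(frame.tobytes(), sample_rate=sr):
            voiced.append(frame)

    # Guard against np.concatenate([]) when the whole clip is silence.
    if not voiced:
        return np.zeros(0, dtype=np.float32)
    return np.concatenate(voiced).astype(np.float32) / 32767

if __name__ == '__main__':
    # 'speech.wav' is a placeholder; sr=None keeps the file's native rate.
    audio, sr = librosa.load('speech.wav', sr=None, mono=True)
    voiced = apply_vad(audio, sr)
    print(f'kept {len(voiced)} of {len(audio)} samples')

Resampling everything nonstandard to 16 kHz is one reasonable choice; inputs already at 8, 32, or 48 kHz are passed through untouched.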