Upload 12 files
Browse files
- NoiseReductionInASR.py +351 -0
- Text2List.py +66 -0
- applyVad.py +105 -0
- convert2list.py +15 -3
- highPassFilter.py +35 -0
- main.py +51 -0
- numberMapping.py +135 -0
- processDoubles.py +54 -24
- replaceWords.py +157 -144
- wienerFilter.py +22 -0
NoiseReductionInASR.py
ADDED
@@ -0,0 +1,351 @@
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


get_ipython().system('pip install webrtcvad')


# In[ ]:


# import librosa
# import numpy as np
# import scipy.signal
# import webrtcvad
# import soundfile as sf  # New library for saving audio
# import matplotlib.pyplot as plt

# # Function to apply a high-pass filter
# def high_pass_filter(audio, sr, cutoff=100):
#     # Design a high-pass Butterworth filter
#     sos = scipy.signal.butter(10, cutoff, btype='highpass', fs=sr, output='sos')
#     filtered_audio = scipy.signal.sosfilt(sos, audio)
#     return filtered_audio

# # Function to apply Wiener filter for noise reduction
# def wiener_filter(audio):
#     return scipy.signal.wiener(audio)

# # Voice Activity Detection using WebRTC VAD
# def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
#     vad = webrtcvad.Vad(aggressiveness)  # aggressiveness: 0 (least aggressive) to 3 (most aggressive)
#
#     # Convert audio to 16-bit PCM (required by webrtcvad)
#     audio_int16 = np.int16(audio * 32767)  # assuming `audio` is in range [-1, 1]
#
#     frame_size = int(sr * frame_duration / 1000)  # frame size in samples
#     frames = [audio_int16[i:i+frame_size] for i in range(0, len(audio_int16), frame_size)]
#
#     voiced_audio = np.concatenate([frame for frame in frames if vad.is_speech(frame.tobytes(), sample_rate=sr)])
#
#     # Convert back to float32
#     voiced_audio = np.float32(voiced_audio) / 32767
#     return voiced_audio

# # Load the audio file
# def load_audio(filepath):
#     # Load with librosa
#     audio, sr = librosa.load(filepath, sr=None)
#     return audio, sr

# # Save the audio file using soundfile
# def save_audio(filepath, audio, sr):
#     # Use soundfile.write to save the audio
#     sf.write(filepath, audio, sr)

# # Full noise reduction pipeline
# def noise_reduction_pipeline(filepath):
#     # Step 1: Load audio
#     audio, sr = load_audio(filepath)
#     print(f"Loaded audio with sample rate: {sr}, duration: {len(audio) / sr:.2f} seconds")
#
#     # Step 2: Apply high-pass filter
#     audio_hp = high_pass_filter(audio, sr, cutoff=100)  # Remove low-frequency noise below 100 Hz
#
#     # Step 3: Apply Wiener filter (for noise reduction)
#     audio_wiener = wiener_filter(audio_hp)
#
#     # Step 4: Apply Voice Activity Detection (VAD)
#     audio_vad = apply_vad(audio_wiener, sr)
#
#     # Step 5: Save processed audio
#     output_filepath = "processed_output.wav"
#     save_audio(output_filepath, audio_vad, sr)
#
#     print(f"Processed audio saved to {output_filepath}")
#     return output_filepath

# # Optional: Plot the original and processed audio signals
# def plot_signals(original, processed, sr):
#     plt.figure(figsize=(14, 6))
#     plt.subplot(2, 1, 1)
#     librosa.display.waveshow(original, sr=sr)
#     plt.title("Original Audio")
#
#     plt.subplot(2, 1, 2)
#     librosa.display.waveshow(processed, sr=sr)
#     plt.title("Processed Audio")
#
#     plt.tight_layout()
#     plt.show()

# # Example usage:
# if __name__ == "__main__":
#     # Replace 'input.wav' with your audio file path
#     input_filepath = 'C:/Users/WCHL/Desktop/hindi_dataset/train/hp_sounds/crm/hi/1728268478957.wav'  # input file to process
#     processed_filepath = noise_reduction_pipeline(input_filepath)
#     # processed_filepath=
#     # Load original and processed audio for visualization
#     original_audio, sr = load_audio(input_filepath)
#     processed_audio, _ = load_audio(processed_filepath)
#
#     # Plot the original and processed signals
#     plot_signals(original_audio, processed_audio, sr)


# In[ ]:


# !pip install speechbrain


# ##########################
#

# In[1]:


# Load the Hugging Face ASR pipelines
from transformers import pipeline
hindi_pipe = pipeline("automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_new")
whisper_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3")
eng_pipe = pipeline(task="automatic-speech-recognition", model="C:/Users/WCHL/Desktop/huggingface_english/hf_eng")


# In[12]:


import os
import re
import librosa
import nbimporter
import torchaudio
import numpy as np
import scipy.signal
import webrtcvad
import soundfile as sf
import warnings
warnings.filterwarnings("ignore")

from transformers import pipeline
from text2int import text_to_int
from isNumber import is_number
from Text2List import text_to_list
from convert2list import convert_to_list
from processDoubles import process_doubles
from replaceWords import replace_words
from applyVad import apply_vad
from wienerFilter import wiener_filter
from highPassFilter import high_pass_filter

def noise_reduction_pipeline(filepath):
    audio, sr = librosa.load(filepath, sr=None)
    print(sr)
    audio_hp = high_pass_filter(audio, sr, cutoff=100, order=5)
    audio_wiener = wiener_filter(audio_hp)
    audio_vad = apply_vad(audio_wiener, sr)
    output_filepath = "processed_output.wav"
    sf.write(output_filepath, audio_vad, sr)
    return output_filepath

# Hugging Face ASR pipeline integration
def transcribe_with_huggingface(filepath):
    asr_pipeline = pipeline("automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_new")
    result = asr_pipeline(filepath)
    text_value = result['text']
    cleaned_text = text_value.replace("<s>", "")
    converted_to_list = convert_to_list(cleaned_text, text_to_list())
    processed_doubles = process_doubles(converted_to_list)
    replaced_words = replace_words(processed_doubles)
    converted_text = text_to_int(replaced_words)
    print("Transcription: ", converted_text)
    return converted_text

if __name__ == "__main__":
    # Step 1: Input file path
    input_filepath = 'C:/Users/WCHL/Desktop/hp_sounds/101003/crm/hi/1728685442307.wav'
    # input_file="enhanced.wav"

    # Step 2: Preprocess (Noise Reduction)
    processed_filepath = noise_reduction_pipeline(input_filepath)

    # Step 3: ASR (Automatic Speech Recognition) with Hugging Face pipeline
    transcription = transcribe_with_huggingface(processed_filepath)


# In[ ]:


# result = eng_pipe(filepath)
result = hindi_pipe("C:/Users/WCHL/Desktop/hp_sounds/101003/crm/hi/1728685502007.wav")
# result = hindi_pipe("./enhanced/1728268841215.wav")
# result = whisper_pipe(filepath)
text_value = result['text']
cleaned_text = text_value.replace("<s>", "")
converted_to_list = convert_to_list(cleaned_text, text_to_list())
processed_doubles = process_doubles(converted_to_list)
replaced_words = replace_words(processed_doubles)
converted_text = text_to_int(replaced_words)
# Output the transcription
print("Transcription: ", converted_text)

# Sample transcriptions produced by the different models (kept as comments so the file remains valid Python):
# नमस्का जी 1 मन 2 पुलिस हेलप्लेन से बात कर रहे बताइए आपकी ाएमर्जेंसी है
# नमिश्का जी 1 मन 2 पुलिस हेलप्लेन से बात कर रह बताइए आपकी क्या एमर्जेंसी है
# नमस्का जी 1 मन 2 पुलिस हेलप्लेन से बात कर रह बताइए आपके क्या एमर्जेंसी हैवेल्कम 2 एमर्जनसी
# वेल्कम 2 एमर्जनसी
# वेलकम 2 एमर्जेंसी
# और 9 र मलीख वेल्कम 2 एमर्जंसीनमस्कार जी 1 ्स 2 बारा पुलस हल्प्लाइन में आपका स्वागत ह बताइए आपकी के एमर्जेंसी है
# नमस्कार जी 1 ्स दौबारा पुलिस हेल्प्लाइ में आपका स्वागत है बताइए आपकी के एमर्जेंसी है
# नमस्कार जी 1 2 बारा पुलिस हल्प्लाइन में आपका स्वागत है बताइए आपकी क् एमर्जेंसी हैमस्कार जी 1 ्स 2 12 पुलस हल्प्लाइन में आपका स्वागत ह बताइए आपकी के एमर्जेंसी है
# नमस्कार जी 1 ्स दौबारा पुलिस हेल्प्लाइ में आपका स्वागत है बताइए आपकी के एमर्जेंसी है
# नमस्कार जी 1 2 12 पुलिस हल्प्लाइन में आपका स्वागत है बताइए आपकी क् एमर्जेंसी हैनमस्कार जी इक्सुबारा में आपका स्वागत हैइनम
# नमस्कार जी इक्सुबारा में आपका स्वागत है कि इनमें
# नमस्कार जी 1 ्सुबारा में आपका स्वागत हैइन

# In[ ]:


import os
import numpy as np
import scipy.signal
import webrtcvad
import soundfile as sf
import librosa
import logging
from transformers import pipeline
from text2int import text_to_int
from isNumber import is_number
from Text2List import text_to_list
from convert2list import convert_to_list
from processDoubles import process_doubles
from replaceWords import replace_words

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Noise reduction functions
def high_pass_filter(audio, sr, cutoff=100, order=5):
    try:
        sos = scipy.signal.butter(order, cutoff, btype='highpass', fs=sr, output='sos')
        filtered_audio = scipy.signal.sosfilt(sos, audio)
        return filtered_audio
    except Exception as e:
        logging.error(f"High-pass filter failed: {e}")
        return audio

def wiener_filter(audio):
    try:
        return scipy.signal.wiener(audio)
    except Exception as e:
        logging.error(f"Wiener filter failed: {e}")
        return audio

def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
    try:
        vad = webrtcvad.Vad(aggressiveness)
        audio_int16 = np.int16(audio * 32767)
        frame_size = int(sr * frame_duration / 1000)
        frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
        voiced_audio = np.concatenate([frame for frame in frames if vad.is_speech(frame.tobytes(), sample_rate=sr)])
        voiced_audio = np.float32(voiced_audio) / 32767
        return voiced_audio
    except Exception as e:
        logging.error(f"VAD processing failed: {e}")
        return audio

def load_audio(filepath):
    try:
        audio, sr = librosa.load(filepath, sr=None)
        return audio, sr
    except Exception as e:
        logging.error(f"Failed to load audio: {e}")
        return None, None

def save_audio(filepath, audio, sr):
    try:
        sf.write(filepath, audio, sr)
        logging.info(f"Audio saved at {filepath}")
    except Exception as e:
        logging.error(f"Failed to save audio: {e}")

def noise_reduction_pipeline(filepath):
    # Step 1: Load audio
    audio, sr = load_audio(filepath)
    if audio is None:
        return None

    # Step 2: Apply high-pass filter
    audio_hp = high_pass_filter(audio, sr)

    # Step 3: Apply Wiener filter
    audio_wiener = wiener_filter(audio_hp)

    # Step 4: Apply VAD
    audio_vad = apply_vad(audio_wiener, sr)

    # Step 5: Save cleaned audio
    output_filepath = "processed_output.wav"
    save_audio(output_filepath, audio_vad, sr)

    return output_filepath

# Hugging Face ASR pipeline integration
def transcribe_with_huggingface(filepath, model_name="cdactvm/w2v-bert-2.0-hindi_new"):
    try:
        # Load ASR model
        logging.info("Loading ASR model...")
        asr_pipeline = pipeline("automatic-speech-recognition", model=model_name)

        # Run the ASR pipeline on the processed audio
        result = asr_pipeline(filepath)
        text_value = result.get('text', '')

        # Clean and process transcription
        cleaned_text = text_value.replace("<s>", "")
        converted_to_list = convert_to_list(cleaned_text, text_to_list())
        processed_doubles = process_doubles(converted_to_list)
        replaced_words = replace_words(processed_doubles)
        converted_text = text_to_int(replaced_words)

        logging.info("Transcription completed.")
        return converted_text

    except Exception as e:
        logging.error(f"ASR transcription failed: {e}")
        return ""

if __name__ == "__main__":
    # Input file path
    input_filepath = 'C:/Users/WCHL/Desktop/hp_sounds/101005/crm/hi/1728268817091.wav'

    # Step 1: Preprocess (Noise Reduction)
    processed_filepath = noise_reduction_pipeline(input_filepath)

    # Step 2: Check if noise reduction succeeded
    if processed_filepath:
        # Step 3: ASR (Automatic Speech Recognition) with Hugging Face pipeline
        transcription = transcribe_with_huggingface(processed_filepath)
        if transcription:
            print("Transcription:", transcription)
        else:
            logging.warning("No transcription could be generated.")
    else:
        logging.warning("Noise reduction failed; skipping ASR transcription.")


# In[ ]:
Text2List.py
ADDED
@@ -0,0 +1,66 @@
#!/usr/bin/env python
# coding: utf-8

# In[4]:


def text_to_list():
    text_list = [
        # Hindi script for English numbers (11-19)
        'इलेवन', 'ट्वेल्व', 'थर्टीन', 'फोर्टीन', 'फिफ्टीन', 'सिक्स्टीन', 'सेवन्टीन', 'एटीन', 'नाइन्टीन',
        # Hindi numbers (11-19)
        'ग्यारह', 'बारह', 'तेरह', 'तेरा', 'चौदह', 'पंद्रह', 'सोलह', 'सोल्ला', 'सत्रह', 'सतरा', 'अठारा', 'उनाइस', 'अठारह', 'उन्नीस',
        # Hindi script for English multiples of ten (20, 30, ..., 90)
        'ट्वेंटी', 'थर्टी', 'फोर्टी', 'फिफ्टी', 'सिक्स्टी', 'सेवेन्टी', 'सेवंटी', 'सत्तर', 'सेवनटी', 'सेवेनटी', 'सेवांटी', 'एटी', 'नाइंटी',
        # Hindi multiples of ten (20, 30, ..., 90)
        'बीस', 'तीस', 'चालीस', 'पचास', 'साठ', 'सत्तर', 'अस्सी', 'नब्बे',
        # Hindi script for English combinations of 21-29
        'ट्वेंटी वन', 'ट्वेंटी टू', 'ट्वेंटी थ्री', 'ट्वेंटी फोर', 'ट्वेंटी फाइव', 'ट्वेंटी सिक्स', 'ट्वेंटी सेवन', 'ट्वेंटी एट', 'ट्वेंटी नाइन',
        # Hindi combinations of 21-29
        'इक्कीस', 'बाईस', 'तेईस', 'चौबीस', 'पच्चीस', 'छब्बीस', 'सत्ताईस', 'अट्ठाईस', 'उनतीस',
        # Hindi script for English combinations of 31-39
        'थर्टी वन', 'थर्टी टू', 'थर्टी थ्री', 'थर्टी फोर', 'थर्टी फाइव', 'थर्टी सिक्स', 'थर्टी सेवन', 'थर्टी एट', 'थर्टी नाइन',
        # Hindi combinations of 31-39
        'इकतीस', 'बत्तीस', 'तेतीस', 'चौंतीस', 'पैंतीस', 'छत्तीस', 'सैंतीस', 'अड़तीस', 'उनतालीस',
        # Hindi script for English combinations of 41-49
        'फोर्टी वन', 'फोर्टी टू', 'फोर्टी थ्री', 'फोर्टी फोर', 'फोर्टी फाइव', 'फोर्टी सिक्स', 'फोर्टी सेवन', 'फोर्टी एट', 'फोर्टी नाइन',
        # Hindi combinations of 41-49
        'इकतालीस', 'बयालीस', 'तैंतालीस', 'चौंतालीस', 'पैंतालीस', 'छयालिस', 'सैंतालीस', 'अड़तालीस', 'उनचास',
        # Hindi script for English combinations of 51-59
        'फिफ्टी वन', 'फिफ्टी टू', 'फिफ्टी थ्री', 'फिफ्टी फोर', 'फिफ्टी फाइव', 'फिफ्टी सिक्स', 'फिफ्टी सेवन', 'फिफ्टी एट', 'फिफ्टी नाइन',
        # Hindi combinations of 51-59
        'इक्यावन', 'बावन', 'तिरेपन', 'चौवन', 'पचपन', 'छप्पन', 'सत्तावन', 'संतावन', 'अट्ठावन', 'उनसठ', 'अंठावन', 'उंसट',
        # Hindi script for English combinations of 61-69
        'सिक्स्टी वन', 'सिक्स्टी टू', 'सिक्स्टी थ्री', 'सिक्स्टी फोर', 'सिक्स्टी फाइव', 'सिक्स्टी सिक्स', 'सिक्स्टी सेवन', 'सिक्स्टी एट', 'सिक्स्टी नाइन',
        # Hindi combinations of 61-69
        'इकसठ', 'बासठ', 'तिरसठ', 'चौंसठ', 'पैंसठ', 'छियासठ', 'सड़सठ', 'अड़सठ', 'उनहत्तर',
        # Hindi script for English combinations of 71-79
        'सेवेन्टी वन', 'सेवेन्टी टू', 'सेवेन्टी थ्री', 'सेवेन्टी फोर', 'सेवेन्टी फाइव', 'सेवेन्टी सिक्स', 'सेवेन्टी सेवन', 'सेवेन्टी एट', 'सेवेन्टी नाइन',
        # Hindi combinations of 71-79
        'इकहत्तर', 'बहत्तर', 'तिहत्तर', 'तियत्तर', 'तीहत्तर', 'पचत्तर', 'चिहत्तर', 'अटत्तर', 'उनासी', 'चौहत्तर', 'पचहत्तर', 'छिहत्तर', 'सतहत्तर', 'अठहत्तर', 'उन्यासी', 'उनासी', 'अठत्तर',
        # Hindi script for English combinations of 81-89
        'एटी वन', 'एटी टू', 'एटी थ्री', 'एटी फोर', 'एटी फाइव', 'एटी सिक्स', 'एटी सेवन', 'एटी एट', 'एटी नाइन',
        # Hindi combinations of 81-89
        'इक्यासी', 'बयासी', 'तिरासी', 'चौरासी', 'पचासी', 'छियासी', 'सतासी', 'अठासी', 'नवासी',
        # Hindi script for English combinations of 91-99
        'नाइंटी वन', 'नाइंटी टू', 'नाइंटी थ्री', 'नाइंटी फोर', 'नाइंटी फाइव', 'नाइंटी सिक्स', 'नाइंटी सेवन', 'नाइंटी एट', 'नाइंटी नाइन',
        # Hindi combinations of 91-99
        'इक्यानवे', 'बानवे', 'तिरानवे', 'चौरानवे', 'पचानवे', 'छियानवे', 'सतानवे', 'अठानवे', 'निन्यानवे',
        # Hindi script for English numbers (0-10)
        'ज़ीरो', 'वन', 'टू', 'थ्री', 'फोर', 'फाइव', 'सिक्स', 'सेवन', 'एट', 'नाइन', 'टेन',
        # Hindi numbers (0-10)
        'जीरो', 'एक', 'दो', 'तीन', 'चार', 'पांच', 'छह', 'सात', 'आठ', 'नौ', 'दस',
        # Hindi script for 100
        'हंड्रेड',
        # Hindi for 100
        'सौ',
    ]

    return text_list


# In[ ]:
applyVad.py
ADDED
@@ -0,0 +1,105 @@
#!/usr/bin/env python
# coding: utf-8

# In[3]:


# import webrtcvad
# import numpy as np
# import librosa
# def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
#     '''
#     Voice Activity Detection (VAD): It is a technique used to determine whether a segment of audio contains speech.
#     This is useful in noisy environments where you want to filter out non-speech parts of the audio.
#     webrtcvad: This is a Python package based on the VAD from the WebRTC (Web Real-Time Communication) project.
#     It helps detect speech in small chunks of audio.
#     '''
#     vad = webrtcvad.Vad()
#     audio_int16 = np.int16(audio * 32767)
#     frame_size = int(sr * frame_duration / 1000)
#     frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]
#     voiced_audio = np.concatenate([frame for frame in frames if vad.is_speech(frame.tobytes(), sample_rate=sr)])
#     voiced_audio = np.float32(voiced_audio) / 32767
#     return voiced_audio


# In[1]:


# import webrtcvad
# import librosa
# import numpy as np
# def apply_vad(audio, sr, frame_duration_ms=30):
#     # Initialize WebRTC VAD
#     vad = webrtcvad.Vad()
#     vad.set_mode(1)  # Set aggressiveness mode (0-3)
#
#     # Convert to 16kHz if not already
#     if sr != 16000:
#         audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
#         sr = 16000
#
#     # Convert to 16-bit PCM
#     audio = (audio * 32768).astype(np.int16)
#
#     frame_length = int(sr * (frame_duration_ms / 1000.0))  # Calculate frame length in samples
#     bytes_per_frame = frame_length * 2  # 16-bit audio has 2 bytes per sample
#
#     # Apply VAD to the audio
#     voiced_frames = []
#     for i in range(0, len(audio), frame_length):
#         frame = audio[i:i + frame_length].tobytes()
#         if len(frame) == bytes_per_frame and vad.is_speech(frame, sr):
#             voiced_frames.extend(audio[i:i + frame_length])
#
#     # Return the VAD-filtered audio
#     return np.array(voiced_frames)


# In[4]:


import webrtcvad
import numpy as np
import librosa

def apply_vad(audio, sr, frame_duration=30, aggressiveness=3):
    '''
    Voice Activity Detection (VAD): Detects speech in audio.
    '''
    vad = webrtcvad.Vad(aggressiveness)

    # Resample to 16000 Hz if not already (recommended for better compatibility)
    if sr != 16000:
        audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
        sr = 16000

    # Convert to 16-bit PCM format expected by webrtcvad
    audio_int16 = np.int16(audio * 32767)

    # Ensure frame size matches WebRTC's expected lengths
    frame_size = int(sr * frame_duration / 1000)
    if frame_size % 2 != 0:
        frame_size -= 1  # Make sure it's even to avoid processing issues

    frames = [audio_int16[i:i + frame_size] for i in range(0, len(audio_int16), frame_size)]

    # Filter out non-speech frames
    voiced_frames = []
    for frame in frames:
        if len(frame) == frame_size and vad.is_speech(frame.tobytes(), sample_rate=sr):
            voiced_frames.append(frame)

    # Concatenate the voiced frames
    voiced_audio = np.concatenate(voiced_frames)
    voiced_audio = np.float32(voiced_audio) / 32767

    return voiced_audio


# In[ ]:
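
A minimal usage sketch (not in the commit; the file path is a placeholder): webrtcvad only accepts 16-bit mono PCM at 8/16/32/48 kHz in 10/20/30 ms frames, which is why `apply_vad` resamples to 16 kHz before framing. When the input is not already 16 kHz, the returned audio is therefore at 16 kHz and should be saved with that rate.

import librosa
from applyVad import apply_vad

audio, sr = librosa.load("example.wav", sr=None)  # placeholder path
voiced = apply_vad(audio, sr, frame_duration=30, aggressiveness=3)
print(f"kept {len(voiced)} of {len(audio)} samples after VAD")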
convert2list.py
CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # coding: utf-8
 
-# In[
+# In[ ]:
 
 
 # import nbimporter
@@ -41,14 +41,14 @@ def convert_to_list(text, text_list):
     result = ' '.join(matched_words)
     return result
 
-text = "जीरोएकदोतीनचारपांचछहसातआठनौदसजीरोएकदोतीनचारपांच"
+# text = "जीरोएकदोतीनचारपांचछहसातआठनौदसजीरोएकदोतीनचारपांच"
 
 if __name__=="__main__":
     converted=convert_to_list(text, text_to_list())
     print(converted)
 
 
-# In[
+# In[ ]:
 
 
 # # import nbimporter
@@ -94,3 +94,15 @@
 # converted=convert_to_list(text, text_to_list())
 # print(converted)
 
+
+# In[ ]:
+
+
+get_ipython().system('git clone https://huggingface.co/StephennFernandes/wav2vec2-XLS-R-300m-konkani')
+
+
+# In[ ]:
+
+
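
The hunks above only show the edges of `convert_to_list`, so the following is an illustrative sketch of the idea it appears to implement (greedy longest-match segmentation of a concatenated transcript against `text_to_list()`), not the repository's actual function; the helper name and the handling of unmatched characters are assumptions.

def split_concatenated_numbers(text, text_list):
    # Try the longest vocabulary entries first so longer number words win over their prefixes.
    candidates = sorted(set(text_list), key=len, reverse=True)
    matched, i = [], 0
    while i < len(text):
        for word in candidates:
            if text.startswith(word, i):
                matched.append(word)
                i += len(word)
                break
        else:
            i += 1  # skip a character that matches nothing in the list
    return ' '.join(matched)

# e.g. split_concatenated_numbers("जीरोएकदो", text_to_list()) -> "जीरो एक दो"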
highPassFilter.py
ADDED
@@ -0,0 +1,35 @@
#!/usr/bin/env python
# coding: utf-8

# In[2]:


import scipy.signal

def high_pass_filter(audio, sr, cutoff=100, order=5):
    """
    Applies a high-pass filter to an audio signal.

    Parameters:
    audio (numpy array): The input audio signal.
    sr (int): The sample rate of the audio signal.
    cutoff (float): The cutoff frequency in Hz. Default is 100 Hz.
    order (int): The order of the filter. Default is 5.

    Returns:
    numpy array: The filtered audio signal.
    """
    # Design the high-pass filter using a Butterworth filter design
    sos = scipy.signal.butter(order, cutoff, btype='highpass', fs=sr, output='sos')

    # Apply the filter using sosfilt (second-order sections filter)
    filtered_audio = scipy.signal.sosfilt(sos, audio)

    return filtered_audio


# In[ ]:
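
A quick sanity check on synthetic data (illustrative only; the 50 Hz hum and 440 Hz tone are arbitrary choices): content below the 100 Hz cutoff is attenuated while the in-band tone passes through.

import numpy as np
from highPassFilter import high_pass_filter

sr = 16000
t = np.arange(sr) / sr
hum = np.sin(2 * np.pi * 50 * t)          # low-frequency noise, below the cutoff
tone = 0.5 * np.sin(2 * np.pi * 440 * t)  # speech-band content, above the cutoff
filtered = high_pass_filter(hum + tone, sr, cutoff=100, order=5)
print(np.std(hum + tone), np.std(filtered))  # the filtered signal retains mostly the 440 Hz tone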
main.py
ADDED
@@ -0,0 +1,51 @@
#!/usr/bin/env python
# coding: utf-8

# In[1]:


get_ipython().system('pip install nbimporter')


# In[2]:


# Import necessary libraries and filter warnings
import warnings
warnings.filterwarnings("ignore")
import nbimporter
import os
import re
import torchaudio
from transformers import pipeline
from text2int import text_to_int
from isNumber import is_number
from Text2List import text_to_list
from convert2list import convert_to_list
from processDoubles import process_doubles
from replaceWords import replace_words
pipe = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_v1")


# In[4]:


# Process the audio file
transcript = pipe("C:/Users/WCHL/Desktop/hindi_dataset/train/hindi_numbers_test/hindi7.mp3")
text_value = transcript['text']
processed_doubles = process_doubles(text_value)
# converted_to_list = convert_to_list(processed_doubles, text_to_list())
replaced_words = replace_words(processed_doubles)
converted_text = text_to_int(replaced_words)
print(f"generated text : {text_value}")
print(f"processed doubles : {processed_doubles}")
# print(f"converted to list : {converted_to_list}")
print(f"replaced words : {replaced_words}")
print(f"final text : {converted_text}")


# In[ ]:
numberMapping.py
ADDED
@@ -0,0 +1,135 @@
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


replacement_map = {
    'zero': ['शून्य', 'जेरो', 'शुन्ना', 'जीरो'],
    'one': ['वन', 'एंक', 'इक', 'एक'],
    'two': ['टू', 'दौ', 'दो'],
    'three': ['थ्री', 'तीना', 'तीन', 'त्री'],
    'four': ['फोर', 'फॉर', 'च्यार', 'चार'],
    'five': ['फाइव', 'पाँच', 'पांच'],
    'six': ['सिक्स', 'चह', 'छौ', 'छै', 'छह'],
    'seven': ['सेवन', 'सात'],
    'eight': ['एट', 'अट', 'आठ'],
    'nine': ['नाइन', 'नौ'],
    'ten': ['टेन', 'दस'],

    # Numbers from 11 to 19
    'eleven': ['इलेवन', 'ग्यारह'],
    'twelve': ['ट्वेल्व', 'बारह'],
    'thirteen': ['थर्टीन', 'तेरह'],
    'fourteen': ['फोर्टीन', 'चौदह'],
    'fifteen': ['फिफ्टीन', 'पंद्रह'],
    'sixteen': ['सिक्स्टीन', 'सोलह'],
    'seventeen': ['सेवंटीन', 'सत्रह'],
    'eighteen': ['एटीन', 'अठारह'],
    'nineteen': ['नाइनटीन', 'उन्नीस'],

    # Multiples of ten
    'twenty': ['ट्वेंटी', 'बीस'],
    'thirty': ['थर्टी', 'तीस'],
    'forty': ['फोर्टी', 'चालीस'],
    'fifty': ['फिफ्टी', 'पचास'],
    'sixty': ['सिक्स्टी', 'साठ'],
    'seventy': ['सेवंटी', 'सत्तर'],
    'eighty': ['एटी', 'अस्सी'],
    'ninety': ['नाइंटी', 'नब्बे'],

    # Numbers from 21 to 29
    'twenty one': ['ट्वेंटी वन', 'इक्कीस'],
    'twenty two': ['ट्वेंटी टू', 'बाईस'],
    'twenty three': ['ट्वेंटी थ्री', 'तेईस'],
    'twenty four': ['ट्वेंटी फोर', 'चौबीस'],
    'twenty five': ['ट्वेंटी फाइव', 'पच्चीस'],
    'twenty six': ['ट्वेंटी सिक्स', 'छब्बीस'],
    'twenty seven': ['ट्वेंटी सेवन', 'सत्ताईस'],
    'twenty eight': ['ट्वेंटी एट', 'अट्ठाईस'],
    'twenty nine': ['ट्वेंटी नाइन', 'उनतीस'],

    # Numbers from 31 to 39
    'thirty one': ['थर्टी वन', 'इकतीस'],
    'thirty two': ['थर्टी टू', 'बत्तीस'],
    'thirty three': ['थर्टी थ्री', 'तेतीस'],
    'thirty four': ['थर्टी फोर', 'चौंतीस'],
    'thirty five': ['थर्टी फाइव', 'पैंतीस'],
    'thirty six': ['थर्टी सिक्स', 'छत्तीस'],
    'thirty seven': ['थर्टी सेवन', 'सैंतीस'],
    'thirty eight': ['थर्टी एट', 'अड़तीस'],
    'thirty nine': ['थर्टी नाइन', 'उनतालीस'],

    # Numbers from 41 to 49
    'forty one': ['फोर्टी वन', 'इकतालीस'],
    'forty two': ['फोर्टी टू', 'बयालीस'],
    'forty three': ['फोर्टी थ्री', 'तैंतालीस'],
    'forty four': ['फोर्टी फोर', 'चौंतालीस'],
    'forty five': ['फोर्टी फाइव', 'पैंतालीस'],
    'forty six': ['फोर्टी सिक्स', 'छयालिस'],
    'forty seven': ['फोर्टी सेवन', 'सैंतालीस'],
    'forty eight': ['फोर्टी एट', 'अड़तालीस'],
    'forty nine': ['फोर्टी नाइन', 'उनचास'],

    # Numbers from 51 to 59
    'fifty one': ['फिफ्टी वन', 'इक्यावन'],
    'fifty two': ['फिफ्टी टू', 'बावन'],
    'fifty three': ['फिफ्टी थ्री', 'तिरेपन'],
    'fifty four': ['फिफ्टी फोर', 'चौवन'],
    'fifty five': ['फिफ्टी फाइव', 'पचपन'],
    'fifty six': ['फिफ्टी सिक्स', 'छप्पन'],
    'fifty seven': ['फिफ्टी सेवन', 'सत्तावन'],
    'fifty eight': ['फिफ्टी एट', 'अट्ठावन'],
    'fifty nine': ['फिफ्टी नाइन', 'उनसठ'],

    # Numbers from 61 to 69
    'sixty one': ['सिक्स्टी वन', 'इकसठ'],
    'sixty two': ['सिक्स्टी टू', 'बासठ'],
    'sixty three': ['सिक्स्टी थ्री', 'तिरसठ'],
    'sixty four': ['सिक्स्टी फोर', 'चौंसठ'],
    'sixty five': ['सिक्स्टी फाइव', 'पैंसठ'],
    'sixty six': ['सिक्स्टी सिक्स', 'छियासठ'],
    'sixty seven': ['सिक्स्टी सेवन', 'सड़सठ'],
    'sixty eight': ['सिक्स्टी एट', 'अड़सठ'],
    'sixty nine': ['सिक्स्टी नाइन', 'उनहत्तर'],

    # Numbers from 71 to 79
    'seventy one': ['सेवंटी वन', 'इकहत्तर'],
    'seventy two': ['सेवंटी टू', 'बहत्तर'],
    'seventy three': ['सेवंटी थ्री', 'तिहत्तर'],
    'seventy four': ['सेवंटी फोर', 'चौहत्तर'],
    'seventy five': ['सेवंटी फाइव', 'पचहत्तर'],
    'seventy six': ['सेवंटी सिक्स', 'छिहत्तर'],
    'seventy seven': ['सेवंटी सेवन', 'सतहत्तर'],
    'seventy eight': ['सेवंटी एट', 'अठहत्तर'],
    'seventy nine': ['सेवंटी नाइन', 'उन्यासी'],

    # Numbers from 81 to 89
    'eighty one': ['एटी वन', 'इक्यासी'],
    'eighty two': ['एटी टू', 'बयासी'],
    'eighty three': ['एटी थ्री', 'तिरासी'],
    'eighty four': ['एटी फोर', 'चौरासी'],
    'eighty five': ['एटी फाइव', 'पचासी'],
    'eighty six': ['एटी सिक्स', 'छियासी'],
    'eighty seven': ['एटी सेवन', 'सतासी'],
    'eighty eight': ['एटी एट', 'अठासी'],
    'eighty nine': ['एटी नाइन', 'नवासी'],

    # Numbers from 91 to 99
    'ninety one': ['नाइंटी वन', 'इक्यानवे'],
    'ninety two': ['नाइंटी टू', 'बानवे'],
    'ninety three': ['नाइंटी थ्री', 'तिरानवे'],
    'ninety four': ['नाइंटी फोर', 'चौरानवे'],
    'ninety five': ['नाइंटी फाइव', 'पचानवे'],
    'ninety six': ['नाइंटी सिक्स', 'छियानवे'],
    'ninety seven': ['नाइंटी सेवन', 'सतानवे'],
    'ninety eight': ['नाइंटी एट', 'अठानवे'],
    'ninety nine': ['नाइंटी नाइन', 'निन्यानवे'],

    # Hundred
    'hundred': ['हंड्रेड', 'सौ'],

    # Special for double digits
    'डबल': ['दबल', 'डबल', 'दुबाल'],
}
processDoubles.py
CHANGED
@@ -1,24 +1,54 @@
#!/usr/bin/env python
# coding: utf-8

# In[2]:


# # Function to process "double" followed by a number
# def process_doubles(sentence):
#     tokens = sentence.split()
#     result = []
#     i = 0
#     while i < len(tokens):
#         if tokens[i] == "डबल":
#             if i + 1 < len(tokens):
#                 result.append(tokens[i + 1])
#                 result.append(tokens[i + 1])
#                 i += 2
#             else:
#                 result.append(tokens[i])
#                 i += 1
#         else:
#             result.append(tokens[i])
#             i += 1
#     return ' '.join(result)


# In[ ]:


import re

def process_doubles(sentence):
    # Use regex to split 'डबल' followed by numbers/words without space (e.g., "डबलवन" -> "डबल वन")
    sentence = re.sub(r'(डबल)(\S+)', r'\1 \2', sentence)

    tokens = sentence.split()
    result = []
    i = 0

    while i < len(tokens):
        if tokens[i] == "डबल":
            if i + 1 < len(tokens):
                result.append(tokens[i + 1])  # Append the next word/number
                result.append(tokens[i + 1])  # Append the next word/number again to duplicate
                i += 2  # Skip over the next word since it's already processed
            else:
                result.append(tokens[i])
                i += 1
        else:
            result.append(tokens[i])
            i += 1

    return ' '.join(result)
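
For reference, two hypothetical inputs and what `process_doubles` returns for them: the regex first splits a fused "डबल<word>" token, then the loop replaces "डबल X" with "X X".

from processDoubles import process_doubles

print(process_doubles("नौ डबल दो पांच"))  # -> "नौ दो दो पांच"
print(process_doubles("डबलचार एक"))       # -> "चार चार एक"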
replaceWords.py
CHANGED
@@ -1,144 +1,157 @@
#!/usr/bin/env python
# coding: utf-8

# In[1]:


import re

def replace_words(sentence):
    # Define a dictionary mapping a single word to a list of words or phrases
    replacement_map = {
        # Multiples of ten
        'twenty': ['ट्वेंटी', 'बीस'],
        'thirty': ['थर्टी', 'तीस'],
        'forty': ['फोर्टी', 'चालीस'],
        'fifty': ['फिफ्टी', 'पचास'],
        'sixty': ['सिक्स्टी', 'साठ'],
        'seventy': ['सेवंटी', 'सत्तर', 'सेवनटी', 'सेवेनटी', 'सेवांटी'],
        'eighty': ['एटी', 'अस्सी'],
        'ninety': ['नाइंटी', 'नब्बे'],

        # Numbers from 11 to 19
        'eleven': ['इलेवन', 'ग्यारह', 'इगारा'],
        'twelve': ['ट्वेल्व', 'बारह', 'बारा', 'मंटों', 'सौबारह', 'शौबारह'],
        'thirteen': ['थर्टीन', 'तेरह', 'तेरा'],
        'fourteen': ['फोर्टीन', 'चौदह'],
        'fifteen': ['फिफ्टीन', 'पंद्रह', 'पंद्रा'],
        'sixteen': ['सिक्स्टीन', 'सोलह', 'सोल्ला'],
        'seventeen': ['सेवंटीन', 'सत्रह', 'सतरा'],
        'eighteen': ['एटीन', 'अठारह', 'अठारा'],
        'nineteen': ['नाइनटीन', 'उन्नीस', 'उन्नईस', 'उनाइस'],

        # Numbers from 21 to 29
        'twenty one': ['ट्वेंटी वन', 'इक्कीस'],
        'twenty two': ['ट्वेंटी टू', 'बाईस'],
        'twenty three': ['ट्वेंटी थ्री', 'तेईस'],
        'twenty four': ['ट्वेंटी फोर', 'चौबीस'],
        'twenty five': ['ट्वेंटी फाइव', 'पच्चीस'],
        'twenty six': ['ट्वेंटी सिक्स', 'छब्बीस'],
        'twenty seven': ['ट्वेंटी सेवन', 'सत्ताईस', 'सताईस'],
        'twenty eight': ['ट्वेंटी एट', 'अट्ठाईस', 'अठ्ठाइस', 'अठ्ठाईस'],
        'twenty nine': ['ट्वेंटी नाइन', 'उनतीस'],

        # Numbers from 31 to 39
        'thirty one': ['थर्टी वन', 'इकतीस'],
        'thirty two': ['थर्टी टू', 'बत्तीस'],
        'thirty three': ['थर्टी थ्री', 'तेतीस'],
        'thirty four': ['थर्टी फोर', 'चौंतीस'],
        'thirty five': ['थर्टी फाइव', 'पैंतीस'],
        'thirty six': ['थर्टी सिक्स', 'छत्तीस'],
        'thirty seven': ['थर्टी सेवन', 'सैंतीस'],
        'thirty eight': ['थर्टी एट', 'अड़तीस'],
        'thirty nine': ['थर्टी नाइन', 'उनतालीस'],

        # Numbers from 41 to 49
        'forty one': ['फोर्टी वन', 'इकतालीस'],
        'forty two': ['फोर्टी टू', 'बयालीस'],
        'forty three': ['फोर्टी थ्री', 'तैंतालीस'],
        'forty four': ['फोर्टी फोर', 'चौंतालीस'],
        'forty five': ['फोर्टी फाइव', 'पैंतालीस'],
        'forty six': ['फोर्टी सिक्स', 'छयालिस'],
        'forty seven': ['फोर्टी सेवन', 'सैंतालीस'],
        'forty eight': ['फोर्टी एट', 'अड़तालीस'],
        'forty nine': ['फोर्टी नाइन', 'उनचास'],

        # Numbers from 51 to 59
        'fifty one': ['फिफ्टी वन', 'इक्यावन'],
        'fifty two': ['फिफ्टी टू', 'बावन'],
        'fifty three': ['फिफ्टी थ्री', 'तिरेपन', 'तिरपन', 'तीरपन'],
        'fifty four': ['फिफ्टी फोर', 'चौवन'],
        'fifty five': ['फिफ्टी फाइव', 'पचपन'],
        'fifty six': ['फिफ्टी सिक्स', 'छप्पन', 'छपपन'],
        'fifty seven': ['फिफ्टी सेवन', 'सत्तावन', 'संताबन', 'संतावन'],
        'fifty eight': ['फिफ्टी एट', 'अट्ठावन', 'अंठावन'],
        'fifty nine': ['फिफ्टी नाइन', 'उनसठ', 'उंसट', 'उंसठ'],

        # Numbers from 61 to 69
        'sixty one': ['सिक्स्टी वन', 'इकसठ'],
        'sixty two': ['सिक्स्टी टू', 'बासठ'],
        'sixty three': ['सिक्स्टी थ्री', 'तिरसठ'],
        'sixty four': ['सिक्स्टी फोर', 'चौंसठ'],
        'sixty five': ['सिक्स्टी फाइव', 'पैंसठ'],
        'sixty six': ['सिक्स्टी सिक्स', 'छियासठ'],
        'sixty seven': ['सिक्स्टी सेवन', 'सड़सठ'],
        'sixty eight': ['सिक्स्टी एट', 'अड़सठ'],
        'sixty nine': ['सिक्स्टी नाइन', 'उनहत्तर'],

        # Numbers from 71 to 79
        'seventy one': ['सेवंटी वन', 'इकहत्तर', 'इखत्तर', 'इकत्तर'],
        'seventy two': ['सेवंटी टू', 'बहत्तर'],
        'seventy three': ['सेवंटी थ्री', 'तिहत्तर', 'तियत्र', 'तियत्तर', 'तीहत्तर', 'तिहत्थर'],
        'seventy four': ['सेवंटी फोर', 'चौहत्तर'],
        'seventy five': ['सेवंटी फाइव', 'पचहत्तर', 'पछत्तर', 'पिछत्तर', 'पचत्तर'],
        'seventy six': ['सेवंटी सिक्स', 'छिहत्तर', 'छीहत्तर'],
        'seventy seven': ['सेवंटी सेवन', 'सतहत्तर', 'सतात्तर', 'सतत्तर', 'सतहत्थर'],
        'seventy eight': ['सेवंटी एट', 'अठहत्तर', 'अठत्तर'],
        'seventy nine': ['सेवंटी नाइन', 'उन्यासी', 'उनासी'],

        # Numbers from 81 to 89
        'eighty one': ['एटी वन', 'इक्यासी'],
        'eighty two': ['एटी टू', 'बयासी'],
        'eighty three': ['एटी थ्री', 'तिरासी'],
        'eighty four': ['एटी फोर', 'चौरासी'],
        'eighty five': ['एटी फाइव', 'पचासी', 'पिचासी'],
        'eighty six': ['एटी सिक्स', 'छियासी'],
        'eighty seven': ['एटी सेवन', 'सतासी'],
        'eighty eight': ['एटी एट', 'अठासी'],
        'eighty nine': ['एटी नाइन', 'नवासी'],

        # Numbers from 91 to 99
        'ninety one': ['नाइंटी वन', 'इक्यानवे'],
        'ninety two': ['नाइंटी टू', 'बानवे', 'बानबे'],
        'ninety three': ['नाइंटी थ्री', 'तिरानवे'],
        'ninety four': ['नाइंटी फोर', 'चौरानवे'],
        'ninety five': ['नाइंटी फाइव', 'पचानवे'],
        'ninety six': ['नाइंटी सिक्स', 'छियानवे'],
        'ninety seven': ['नाइंटी सेवन', 'सतानवे'],
        'ninety eight': ['नाइंटी एट', 'अठानवे'],
        'ninety nine': ['नाइंटी नाइन', 'निन्यानवे'],

        # Numbers from one to ten
        'seven': ['सेवन', 'सात'],
        'zero': ['शून्य', 'जेरो', 'शुन्ना', 'जीरो'],
        'one': ['वन', 'एंक', 'इक', 'एक'],
        'two': ['टू', 'दो'],
        'three': ['थ्री', 'तीना', 'तीन', 'त्री'],
        'four': ['फोर', 'फ़ोर', 'फॉर', 'च्यार', 'चार'],
        'five': ['फाइव', 'पाँच', 'पांच'],
        'six': ['सिक्स', 'चह', 'छौ', 'छै', 'छह', 'छे'],
        'eight': ['एट', 'अट', 'आठ'],
        'nine': ['नाइन', 'नौ'],
        'ten': ['टेन', 'दस'],
        # Hundred
        'hundred': ['हंड्रेड', 'सौ', 'सो', 'साव'],
        # Thousand
        'thousand': ['हजार', 'थौजनड', 'थाउजंड', 'हज़ार'],
        # Lakhs
        'lac': ['लाख'],
        'one hundred twelve': ['इक्सुबारा', 'वनमंटों', 'वनवंतु'],
    }

    words = sentence.split()  # Split the sentence by spaces

    # Replace words using the mapping
    for i, word in enumerate(words):
        for replacement, patterns in replacement_map.items():
            if word in patterns:
                words[i] = replacement  # Replace the word if it's fully matched

    # Join the processed words back into a sentence
    return ' '.join(words)


# In[ ]:
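
A hypothetical round trip through `replace_words` (the input strings are made up): each whitespace-separated token that appears in any variant list is swapped for its canonical English key, and everything else is left untouched. Variants that themselves contain a space, such as 'ट्वेंटी वन', can never match here because matching is done token by token.

from replaceWords import replace_words

print(replace_words("डबल सात इक्कीस"))  # -> "डबल seven twenty one"
print(replace_words("तेरा सौ"))          # -> "thirteen hundred"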
wienerFilter.py
ADDED
@@ -0,0 +1,22 @@
#!/usr/bin/env python
# coding: utf-8

# In[2]:


import scipy.signal

def wiener_filter(audio):
    '''
    The Wiener filter is designed to minimize the impact of noise by applying an adaptive filtering process.
    It tries to estimate the original, clean signal by taking into account both the noisy signal and the statistical properties of the noise.
    The Wiener filter is particularly useful when dealing with stationary noise (constant background noise, like white noise).
    '''
    return scipy.signal.wiener(audio)


# In[ ]:
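
A small demonstration of the effect on a synthetic noisy signal (illustrative; the noise level and seed are arbitrary): the Wiener filter should reduce the mean squared error against the clean reference.

import numpy as np
from wienerFilter import wiener_filter

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 8000)
clean = np.sin(2 * np.pi * 5 * t)
noisy = clean + 0.3 * rng.standard_normal(t.size)
denoised = wiener_filter(noisy)
print(np.mean((noisy - clean) ** 2), np.mean((denoised - clean) ** 2))  # error should drop after filtering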