import warnings
import os
import re

import gradio as gr
from transformers import pipeline, AutoProcessor, Wav2Vec2ProcessorWithLM
from pyctcdecode import build_ctcdecoder
# import torchaudio

# Initialize the speech recognition pipelines and transliterator
p1 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-odia_v1")
p2 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_v1")
# p3 = pipeline(task="automatic-speech-recognition", model="cdactvm/kannada_w2v-bert_model")
# p4 = pipeline(task="automatic-speech-recognition", model="cdactvm/telugu_w2v-bert_model")
# p5 = pipeline(task="automatic-speech-recognition", model="Sajjo/w2v-bert-2.0-bangala-gpu-CV16.0_v2")
# p6 = pipeline(task="automatic-speech-recognition", model="cdactvm/hf-open-assames")
p7 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-assames")

# Build a CTC decoder backed by a KenLM language model for the Assamese model
processor = AutoProcessor.from_pretrained("cdactvm/w2v-assames")
vocab_dict = processor.tokenizer.get_vocab()
sorted_vocab_dict = {k.lower(): v for k, v in sorted(vocab_dict.items(), key=lambda item: item[1])}
decoder = build_ctcdecoder(
    labels=list(sorted_vocab_dict.keys()),
    kenlm_model_path="lm.binary",
)
processor_with_lm = Wav2Vec2ProcessorWithLM(
    feature_extractor=processor.feature_extractor,
    tokenizer=processor.tokenizer,
    decoder=decoder,
)
processor.feature_extractor._processor_class = "Wav2Vec2ProcessorWithLM"
p8 = pipeline(
    "automatic-speech-recognition",
    model="cdactvm/w2v-assames",
    tokenizer=processor_with_lm,
    feature_extractor=processor_with_lm.feature_extractor,
    decoder=processor_with_lm.decoder,
)

# Install the indic-trans transliteration library before importing it
os.system('git clone https://github.com/irshadbhat/indic-trans.git')
os.system('pip install ./indic-trans/.')

# HF_TOKEN = os.getenv('HF_TOKEN')
# hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "asr_demo")

from indictrans import Transliterator

trn = Transliterator(source='ori', target='eng', build_lookup=True)


def transcribe_odiya(speech):
    text = p1(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    return text


# Remove HTML-like tags from decoded text
def cleanhtml(raw_html):
    cleantext = re.sub(r'<.*?>', '', raw_html)
    return cleantext


def transcribe_hindi(speech):
    text = p2(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    return text


def transcribe_kannada(speech):
    text = p3(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    return text


def transcribe_telugu(speech):
    text = p4(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    return text


def transcribe_bangala(speech):
    text = p5(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    return text


def transcribe_assamese_LM(speech):
    text = p8(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    return cleanhtml(text)


def transcribe_assamese_model2(speech):
    text = p7(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    return cleanhtml(text)


def transcribe_odiya_eng(speech):
    trn = Transliterator(source='ori', target='eng', build_lookup=True)
    text = p1(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    sentence = trn.transform(text)
    if sentence is None:
        return "Error: Transliteration returned None"
    replaced_words = replace_words(sentence)
    processed_sentence = process_doubles(replaced_words)
    return process_transcription(processed_sentence)
def transcribe_ban_eng(speech):
    trn = Transliterator(source='ben', target='eng', build_lookup=True)
    text = p5(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    sentence = trn.transform(text)
    if sentence is None:
        return "Error: Transliteration returned None"
    replaced_words = replace_words(sentence)
    processed_sentence = process_doubles(replaced_words)
    return process_transcription(processed_sentence)


def transcribe_hin_eng(speech):
    trn = Transliterator(source='hin', target='eng', build_lookup=True)
    text = p2(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    sentence = trn.transform(text)
    if sentence is None:
        return "Error: Transliteration returned None"
    replaced_words = replace_words(sentence)
    processed_sentence = process_doubles(replaced_words)
    return process_transcription(processed_sentence)


def transcribe_kan_eng(speech):
    trn = Transliterator(source='kan', target='eng', build_lookup=True)
    text = p3(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    sentence = trn.transform(text)
    if sentence is None:
        return "Error: Transliteration returned None"
    replaced_words = replace_words(sentence)
    processed_sentence = process_doubles(replaced_words)
    return process_transcription(processed_sentence)


def transcribe_tel_eng(speech):
    trn = Transliterator(source='tel', target='eng', build_lookup=True)
    text = p4(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    sentence = trn.transform(text)
    if sentence is None:
        return "Error: Transliteration returned None"
    replaced_words = replace_words(sentence)
    processed_sentence = process_doubles(replaced_words)
    return process_transcription(processed_sentence)


def process_transcription(input_sentence):
    word_to_code_map = {}
    code_to_word_map = {}
    transcript_1 = sentence_to_transcript(input_sentence, word_to_code_map)
    if transcript_1 is None:
        return "Error: Transcript conversion returned None"
    numbers = text2int(transcript_1)
    if numbers is None:
        return "Error: Text to number conversion returned None"
    code_to_word_map = {v: k for k, v in word_to_code_map.items()}
    text = transcript_to_sentence(numbers, code_to_word_map)
    return text


def sel_lng(lng, mic=None, file=None):
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        return "You must either provide a mic recording or a file"

    if lng == "Odiya":
        return transcribe_odiya(audio)
    elif lng == "Odiya-trans":
        return transcribe_odiya_eng(audio)
    elif lng == "Hindi-trans":
        return transcribe_hin_eng(audio)
    elif lng == "Hindi":
        return transcribe_hindi(audio)
    elif lng == "Kannada-trans":
        return transcribe_kan_eng(audio)
    elif lng == "Kannada":
        return transcribe_kannada(audio)
    elif lng == "Telugu-trans":
        return transcribe_tel_eng(audio)
    elif lng == "Telugu":
        return transcribe_telugu(audio)
    elif lng == "Bangala-trans":
        return transcribe_ban_eng(audio)
    elif lng == "Bangala":
        return transcribe_bangala(audio)
    elif lng == "Assamese-LM":
        return transcribe_assamese_LM(audio)
    elif lng == "Assamese-Model2":
        return transcribe_assamese_model2(audio)


# Function to replace incorrectly spelled words
def replace_words(sentence):
    replacements = [
        (r'\bjiro\b', 'zero'), (r'\bjero\b', 'zero'),
        (r'\bnn\b', 'one'), (r'\bn\b', 'one'), (r'\bna\b', 'one'),
        (r'\btu\b', 'two'), (r'\btoo\b', 'two'),
        (r'\bthiri\b', 'three'),
        (r'\bfor\b', 'four'), (r'\bfore\b', 'four'),
        (r'\bfib\b', 'five'),
        (r'\bdublseven\b', 'double seven'), (r'\bdubalathri\b', 'double three'),
        (r'\bnineeit\b', 'nine eight'), (r'\bfipeit\b', 'five eight'),
        (r'\bdubal\b', 'double'), (r'\bsevenatu\b', 'seven two'),
    ]
    for pattern, replacement in replacements:
        sentence = re.sub(pattern, replacement, sentence)
    return sentence
# Function to process "double" followed by a number
def process_doubles(sentence):
    tokens = sentence.split()
    result = []
    i = 0
    while i < len(tokens):
        if tokens[i] in ("double", "dubal"):
            if i + 1 < len(tokens):
                result.append(tokens[i + 1])
                result.append(tokens[i + 1])
                i += 2
            else:
                result.append(tokens[i])
                i += 1
        else:
            result.append(tokens[i])
            i += 1
    return ' '.join(result)


# Function to generate Soundex code for a word
def soundex(word):
    word = word.upper()
    word = ''.join(filter(str.isalpha, word))
    if not word:
        return None
    soundex_mapping = {
        'B': '1', 'F': '1', 'P': '1', 'V': '1',
        'C': '2', 'G': '2', 'J': '2', 'K': '2', 'Q': '2', 'S': '2', 'X': '2', 'Z': '2',
        'D': '3', 'T': '3',
        'L': '4',
        'M': '5', 'N': '5',
        'R': '6',
    }
    soundex_code = word[0]
    for char in word[1:]:
        if char not in ('H', 'W'):
            soundex_code += soundex_mapping.get(char, '0')
    # Collapse consecutive duplicates, drop the zeros (vowels/unmapped letters), pad to four characters
    soundex_code = soundex_code[0] + ''.join(
        c for i, c in enumerate(soundex_code[1:]) if c != soundex_code[i]
    )
    soundex_code = soundex_code.replace('0', '') + '000'
    return soundex_code[:4]


# Helper: check whether a token is already numeric
def is_number(x):
    if isinstance(x, str):
        x = x.replace(',', '')
    try:
        float(x)
    except (TypeError, ValueError):
        return False
    return True


# Function to convert number words (given as Soundex codes) to a numerical representation
def text2int(textnum, numwords={}):
    # Soundex codes of the number words "zero".."nineteen", the tens, and the scale words
    units = ['Z600', 'O500', 'T000', 'T600', 'F600', 'F100', 'S220', 'S150', 'E300', 'N500',
             'T500', 'E415', 'T410', 'T635', 'F635', 'F135', 'S235', 'S153', 'E235', 'N535']
    tens = ['', '', 'T537', 'T637', 'F637', 'F137', 'S230', 'S153', 'E230', 'N530']
    scales = ['H536', 'T253', 'M450', 'C600']
    ordinal_words = {'oh': 'Z600', 'first': 'O500', 'second': 'T000', 'third': 'T600',
                     'fourth': 'F600', 'fifth': 'F100', 'sixth': 'S200', 'seventh': 'S150',
                     'eighth': 'E230', 'ninth': 'N500', 'twelfth': 'T410'}
    ordinal_endings = [('ieth', 'y'), ('th', '')]

    if not numwords:
        numwords['and'] = (1, 0)
        for idx, word in enumerate(units):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            numwords[word] = (10 ** (idx * 3 or 2), 0)

    textnum = textnum.replace('-', ' ')

    current = result = 0
    curstring = ''
    onnumber = False
    lastunit = False
    lastscale = False

    def is_numword(x):
        if is_number(x):
            return True
        if x in numwords:
            return True
        return False

    def from_numword(x):
        if is_number(x):
            scale = 0
            increment = int(x.replace(',', ''))
            return scale, increment
        return numwords[x]

    for word in textnum.split():
        if word in ordinal_words:
            scale, increment = (1, ordinal_words[word])
            current = current * scale + increment
            if scale > 100:
                result += current
                current = 0
            onnumber = True
            lastunit = False
            lastscale = False
        else:
            for ending, replacement in ordinal_endings:
                if word.endswith(ending):
                    word = "%s%s" % (word[:-len(ending)], replacement)

            if (not is_numword(word)) or (word == 'and' and not lastscale):
                if onnumber:
                    # Flush the number built so far before emitting the non-number word
                    curstring += repr(result + current) + " "
                curstring += word + " "
                result = current = 0
                onnumber = False
                lastunit = False
                lastscale = False
            else:
                scale, increment = from_numword(word)
                onnumber = True
                if lastunit and (word not in scales):
                    # Two unit words in a row: emit the previous digit and start a new one
                    curstring += repr(result + current)
                    result = current = 0
                if scale > 1:
                    current = max(1, current)
                current = current * scale + increment
                if scale > 100:
                    result += current
                    current = 0
                lastscale = False
                lastunit = False
                if word in scales:
                    lastscale = True
                elif word in units:
                    lastunit = True

    if onnumber:
        curstring += repr(result + current)

    return curstring
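
# Illustrative walk-through (hypothetical ASR output, not part of the original script):
# the Soundex codes in text2int's `units` table line up with the English digit words,
# e.g. soundex("zero") -> 'Z600', soundex("two") -> 'T000', soundex("seven") -> 'S150'.
# A raw transcript is therefore normalised roughly like this:
#   replace_words("dubal seven tu jiro")           -> "double seven two zero"
#   process_doubles("double seven two zero")       -> "seven seven two zero"
#   process_transcription("seven seven two zero")  -> "7720"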
# Convert a sentence to a transcript of Soundex codes
def sentence_to_transcript(sentence, word_to_code_map):
    words = sentence.split()
    transcript_codes = []
    for word in words:
        if word not in word_to_code_map:
            word_to_code_map[word] = soundex(word)
        transcript_codes.append(word_to_code_map[word])
    transcript = ' '.join(transcript_codes)
    return transcript


# Convert a transcript of Soundex codes back to a sentence using the mapping
def transcript_to_sentence(transcript, code_to_word_map):
    codes = transcript.split()
    sentence_words = []
    for code in codes:
        sentence_words.append(code_to_word_map.get(code, code))
    sentence = ' '.join(sentence_words)
    return sentence


# # Process the audio file
# transcript = pipe("./odia_recorded/AUD-20240614-WA0004.wav")
# text_value = transcript['text']
# sentence = trn.transform(text_value)
# replaced_words = replace_words(sentence)
# processed_sentence = process_doubles(replaced_words)
# input_sentence_1 = processed_sentence

# Create empty mappings
word_to_code_map = {}
code_to_word_map = {}

# Convert sentence to transcript
# transcript_1 = sentence_to_transcript(input_sentence_1, word_to_code_map)

# Convert transcript to numerical representation
# numbers = text2int(transcript_1)

# Create reverse mapping
code_to_word_map = {v: k for k, v in word_to_code_map.items()}

# Convert transcript back to sentence
# reconstructed_sentence_1 = transcript_to_sentence(numbers, code_to_word_map)

# demo = gr.Interface(
#     fn=sel_lng,
#     inputs=[
#         gr.Dropdown(["Hindi", "Hindi-trans", "Odiya", "Odiya-trans"], value="Hindi", label="Select Language"),
#         gr.Audio(source="microphone", type="filepath"),
#         gr.Audio(source="upload", type="filepath"),
#         # gr.Audio(sources="upload", type="filepath"),
#         # "state"
#     ],
#     outputs=[
#         "textbox"
#         # "state"
#     ],
#     title="Automatic Speech Recognition",
#     description="Demo for Automatic Speech Recognition. Use the microphone to record speech and press the Record button. The models may take some time to load initially. The recognized text will appear in the output textbox.",
# ).launch()

######################################################
demo = gr.Interface(
    fn=sel_lng,
    inputs=[
        # gr.Dropdown(["Hindi", "Hindi-trans", "Odiya", "Odiya-trans", "Kannada", "Kannada-trans",
        #              "Telugu", "Telugu-trans", "Bangala", "Bangala-trans"],
        #             value="Hindi", label="Select Language"),
        gr.Dropdown(["Hindi", "Hindi-trans", "Odiya", "Odiya-trans", "Assamese-LM", "Assamese-Model2"],
                    value="Hindi", label="Select Language"),
        gr.Audio(sources=["microphone", "upload"], type="filepath"),
        # gr.Audio(sources="upload", type="filepath"),
        # "state"
    ],
    outputs=[
        "textbox"
        # "state"
    ],
    allow_flagging="auto",
    # flagging_options=["Language error", "English transliteration error", "Other"],
    # flagging_callback=hf_writer,
    title="Automatic Speech Recognition",
    description="Demo for Automatic Speech Recognition. Use the microphone to record speech and press the Record button. The models may take some time to load initially. The recognized text will appear in the output textbox.",
).launch()
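
# Quick local check without launching the UI (illustrative only; the audio path below is hypothetical):
# print(sel_lng("Hindi", file="sample.wav"))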