Soma Dhavala committed
Commit 0c910e6 · 1 Parent(s): b2acaa4
add bhasini for hindi
app.py
CHANGED
@@ -34,8 +34,10 @@ def get_asr_model_processor(model_id):
         "model": model,
         "processor": processor
     }
-
-
+
+
+#model_proc_dict = get_asr_model_processor("vasista22/whisper-hindi-large-v2")
+#asr_pipe = pipeline("automatic-speech-recognition", model=model_proc_dict["model"], tokenizer=model_proc_dict["processor"].tokenizer, feature_extractor=model_proc_dict["processor"].feature_extractor,device=device)



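Note: the two commented-out lines above retire the app's local AI4Bharat Hindi ASR pipeline. A minimal sketch of what that path did, assuming the transformers and torch packages; the single-call pipeline(...) form and the device selection are simplifications of mine (the original built the pipeline from a separately loaded model and processor), and "temp.wav" is the temporary recording the app writes per the diff.

# Sketch only: local Hindi ASR via the AI4Bharat fine-tuned Whisper checkpoint
# named in the commented-out lines above.
import torch
from transformers import pipeline

device = 0 if torch.cuda.is_available() else -1  # assumption: use GPU if present

asr_pipe = pipeline(
    "automatic-speech-recognition",
    model="vasista22/whisper-hindi-large-v2",
    device=device,
)

op_text = asr_pipe("temp.wav")["text"]  # "temp.wav" comes from the diff
print("ai4b", op_text)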
@@ -49,10 +51,10 @@ def get_asr_output(audio_path,lang):
         transcription = openai.Audio.transcribe("whisper-1", file, language=lang)
         op_text = transcription.text
 
-    if lang == "hi":
+    """ if lang == "hi":
         op_text = asr_pipe("temp.wav")['text']
         print('whisper',transcription)
-    print('ai4b',op_text)
+    print('ai4b',op_text) """
 
     return op_text
 
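Note: with the Hindi-specific override commented out, the hunk above leaves only the OpenAI Whisper API call in get_asr_output; the Bhashini integration named in the commit message is not visible in this excerpt. A minimal sketch of that remaining path, using the pre-1.0 openai SDK call that appears in the diff (the wrapper function name and signature are mine):

# Sketch only: the surviving transcription path in get_asr_output.
import openai

def whisper_transcribe(audio_path, lang):
    # openai.Audio.transcribe is the pre-1.0 openai SDK call used in the diff.
    with open(audio_path, "rb") as file:
        transcription = openai.Audio.transcribe("whisper-1", file, language=lang)
    return transcription.text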