Pawan Kumar Pradhan committed on
Commit
3d13e83
1 Parent(s): f68990a

initial commit

Browse files
Files changed (2) hide show
  1. app.py +102 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import whisper
3
+ from translate import Translator
4
+ from TTS.api import TTS
5
+ import uuid
6
+ from pathlib import Path
7
+
8
+
9
# Whisper "base" speech-to-text model, loaded once at import time
# (weights are downloaded on first run).
model = whisper.load_model("base")
# Coqui XTTS v2 multilingual voice-cloning TTS.
# NOTE(review): gpu=True assumes a CUDA device is available — confirm for the
# deployment target, otherwise loading will fail.
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True)
12
def v2vtranslate(audiofile):
    """Transcribe a recording, translate it, and voice each translation.

    Pipeline: whisper transcription -> translation into Spanish, French,
    Hindi and Japanese -> XTTS voice-cloned synthesis of each translation.

    Args:
        audiofile: Filesystem path to the recorded audio (Gradio passes a
            filepath because the input component uses type="filepath").

    Returns:
        A 4-tuple of Path objects (Spanish, French, Hindi, Japanese wav
        files), matching the four gr.Audio outputs of the interface.

    Raises:
        gr.Error: If transcription fails.
    """
    print("Starting transcription...")
    transcription_result = transcribeaudio(audiofile)

    # transcribeaudio returns a dict: whisper's result (with "text") on
    # success, or {"status": "error", "error": ...} on failure.  The original
    # compared transcription_result.status against model.transcribe.error,
    # both of which raise AttributeError — use dict access instead.
    if transcription_result.get("status") == "error":
        raise gr.Error(transcription_result["error"])
    text = transcription_result["text"]
    print(f"Transcribed Text: {text}")

    print("Starting translation...")
    es_translation, fr_translation, hi_translation, ja_translation = translatetext(text)
    print(f"Translations:\nSpanish: {es_translation}\nFrench: {fr_translation}\nHindi: {hi_translation}\nJapanese: {ja_translation}")

    print("Generating TTS audio files(Outside Function)...")
    es_translation_path = readtranslation(es_translation, audiofile)
    fr_translation_path = readtranslation(fr_translation, audiofile)
    hi_translation_path = readtranslation(hi_translation, audiofile)
    ja_translation_path = readtranslation(ja_translation, audiofile)
    print(f"Generated audio paths:\nSpanish: {es_translation_path}\nFrench: {fr_translation_path}\nHindi: {hi_translation_path}\nJapanese: {ja_translation_path}")

    # Bug fix: the original built these paths but never returned them, so the
    # four gr.Audio output components always stayed empty.
    es_path = Path(es_translation_path)
    fr_path = Path(fr_translation_path)
    hi_path = Path(hi_translation_path)
    ja_path = Path(ja_translation_path)
    return es_path, fr_path, hi_path, ja_path
43
def transcribeaudio(audiofile):
    """Transcribe *audiofile* with whisper and log the detected language.

    Returns whisper's result dict on success; if the result carries no
    "text" key, returns {"status": "error", "error": "Transcription failed"}.
    """
    print("Transcribing audio...")
    result = model.transcribe(audiofile)

    # Guard clause: bail out with an error marker when whisper produced
    # no transcript.
    if "text" not in result:
        print("Transcription failed.")
        return {"status": "error", "error": "Transcription failed"}

    # Language detection is informational only — it runs a second pass over
    # a padded/trimmed mel spectrogram of the same file and just logs the
    # most probable language.
    samples = whisper.pad_or_trim(whisper.load_audio(audiofile))
    mel = whisper.log_mel_spectrogram(samples).to(model.device)
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    return result
61
def translatetext(text):
    """Translate English *text* into Spanish, French, Hindi and Japanese.

    Returns a 4-tuple (es_text, fr_text, hi_text, ja_text).
    """

    def _translate_to(lang):
        # One throwaway Translator per target language, as in the original.
        return Translator(from_lang="en", to_lang=lang).translate(text)

    es_text = _translate_to("es")
    fr_text = _translate_to("fr")
    hi_text = _translate_to("hi")
    ja_text = _translate_to("ja")
    print(f"Japanese Translation(Inside Function): {ja_text}")

    return es_text,fr_text,hi_text,ja_text
79
def readtranslation(text, audiofile, language="en"):
    """Synthesize *text* to a wav file, cloning the voice in *audiofile*.

    Args:
        text: Text to speak.
        audiofile: Path to reference audio whose voice XTTS clones.
        language: XTTS language code for *text* (e.g. "es", "fr", "hi",
            "ja").  Defaults to "en" for backward compatibility, but callers
            synthesizing non-English text should pass the matching code —
            the original hard-coded "en", which makes XTTS read every
            translation with English pronunciation.

    Returns:
        The path (str) of the generated wav file.
    """
    print(f"Generating TTS for text(Inside Function): {text}")
    # Random UUID filename avoids collisions across successive calls.
    output_path = f"{uuid.uuid4()}.wav"
    tts.tts_to_file(text=text,
                    file_path=output_path,
                    speaker_wav=audiofile,
                    language=language)
    print(f"Generated audio file at: {output_path}")
    return output_path
90
+
91
# Microphone-only input; type="filepath" makes Gradio hand the handler a
# path to the recorded file on disk.
audio_input = gr.Audio(
    sources=['microphone'],
    type="filepath"
)
# One output audio player per target language, in the same order the
# handler produces its translations.
demo = gr.Interface(
    fn=v2vtranslate,
    inputs=audio_input,
    outputs=[gr.Audio(label="Spanish"),gr.Audio(label="French"),gr.Audio(label="Hindi"),gr.Audio(label="Japanese")]
)
100
+
101
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ translate
3
+ TTS
4
+ openai-whisper