Madiharehan committed
Commit
691cb51
1 Parent(s): 0a7ae60

Update app.py

Files changed (1)
app.py +61 -31
app.py CHANGED
@@ -1,51 +1,81 @@
-# Import libraries
-import whisper
+# Import necessary libraries
 import os
-from gtts import gTTS
-import gradio as gr
-from groq import Groq
 
-# Load Whisper model for transcription
+# Import required libraries, installing them first if missing
+try:
+    import whisper
+    import gtts
+    import gradio as gr
+    from groq import Groq
+except ImportError:
+    os.system("pip install git+https://github.com/openai/whisper.git gtts gradio groq")
+    # Retry the imports now that the packages are installed
+    import whisper
+    import gtts
+    import gradio as gr
+    from groq import Groq
+
+# Load Whisper model
 model = whisper.load_model("base")
 
-Groq_api_key = "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"
+# Fetch the API key from the environment variable
+Groq_api_key = os.getenv("GROQ_API_KEY")
+
+# Check if the API key is set
+if Groq_api_key is None:
+    raise ValueError("API key for Groq not found. Please set the 'GROQ_API_KEY' environment variable.")
+
+# Initialize the Groq client
 client = Groq(api_key=Groq_api_key)
 
-# Function to get the LLM response from Groq
-def get_llm_response(user_input):
+# Function to transcribe audio with Whisper
+def transcribe_audio(audio_path):
+    result = model.transcribe(audio_path)
+    return result["text"]
+
+# Function to get a response from Groq's API
+def get_groq_response(transcribed_text):
     chat_completion = client.chat.completions.create(
-        messages=[{"role": "user", "content": user_input}],
-        model="llama3-8b-8192",  # Replace with your desired model
+        messages=[
+            {
+                "role": "user",
+                "content": transcribed_text,
+            }
+        ],
+        model="llama3-8b-8192",
     )
     return chat_completion.choices[0].message.content
 
-# Function to convert text to speech using gTTS
-def text_to_speech(text, output_audio="output_audio.mp3"):
-    tts = gTTS(text)
-    tts.save(output_audio)
-    return output_audio
+# Function to convert text to speech with gTTS
+def text_to_speech(text):
+    tts = gtts.gTTS(text=text, lang='en')
+    audio_path = "response.mp3"
+    tts.save(audio_path)
+    return audio_path
 
-# Main chatbot function to handle audio input and output
+# Gradio chatbot function
 def chatbot(audio):
-    # Step 1: Transcribe the audio using Whisper
-    result = model.transcribe(audio)
-    user_text = result["text"]
+    # Step 1: Transcribe the audio
+    transcribed_text = transcribe_audio(audio)
 
-    # Step 2: Get LLM response from Groq
-    response_text = get_llm_response(user_text)
+    # Step 2: Get LLM response from Groq API
+    response_text = get_groq_response(transcribed_text)
 
-    # Step 3: Convert the response text to speech
-    output_audio = text_to_speech(response_text)
+    # Step 3: Convert response text to speech
+    response_audio = text_to_speech(response_text)
 
-    return response_text, output_audio
+    # Return the response audio
+    return response_audio
 
-# Gradio interface for real-time interaction
+# Create a Gradio interface
 iface = gr.Interface(
     fn=chatbot,
-    inputs=gr.Audio(type="filepath"),  # Input from mic or file
-    outputs=[gr.Textbox(), gr.Audio(type="filepath")],  # Output: response text and audio
+    inputs=gr.Audio(source="microphone", type="filepath"),  # Gradio 3.x API; Gradio 4+ renamed this to sources=["microphone"]
+    outputs="audio",
+    title="Voice-to-Voice Chatbot",
+    description="Speak to the chatbot and listen to the response!",
    live=True
 )
 
-# Launch the Gradio app
+# Launch the interface
 iface.launch()
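
A quick way to sanity-check this change outside the Gradio UI is a minimal sketch like the one below. It mirrors the app's three steps (Whisper transcription, Groq chat completion, gTTS synthesis); it assumes GROQ_API_KEY is already exported, and "sample.wav" is a hypothetical test recording, not a file in this repo.

import os

import whisper
import gtts
from groq import Groq

model = whisper.load_model("base")
client = Groq(api_key=os.environ["GROQ_API_KEY"])

# Step 1: transcribe the input audio with Whisper
text = model.transcribe("sample.wav")["text"]

# Step 2: ask the Groq-hosted Llama 3 model for a reply
reply = client.chat.completions.create(
    messages=[{"role": "user", "content": text}],
    model="llama3-8b-8192",
).choices[0].message.content

# Step 3: synthesize the reply to an MP3 with gTTS
gtts.gTTS(text=reply, lang="en").save("response.mp3")
print(reply)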