FumesAI committed
Commit aa6e23f
1 Parent(s): 661e0c7

Update app.py

Files changed (1)
  app.py +19 -19
app.py CHANGED
@@ -1,12 +1,12 @@
 import gradio as gr
-import autocausalfrompretrained
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, WhisperProcessor, WhisperForConditionalGeneration
+from gtts import gTTS
+import os
 
 class InteractiveChat:
-
-    whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-large")
-    whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
-
     def __init__(self):
+        self.whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-large")
+        self.whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
         self.zephyr_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
         self.zephyr_model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-beta", device_map="auto")
 
@@ -26,23 +26,23 @@ class InteractiveChat:
         return response
 
     def speak(self, text):
-        speech_client = SpeechClient()
-        speech_client.synthesize(text)
+        tts = gTTS(text=text, lang='en')
+        tts.save("output.mp3")
+        os.system("mpg321 output.mp3")
 
-    def generate_response(self, input):
-
-        # get transcription from Whisper
-
-        response = self.get_zephyr_response(transcription)
-
-        self.speak(response)
-
-        return response
+# Create an instance of the InteractiveChat class
+chat = InteractiveChat()
 
+# Define a function that wraps the generate_response method
+def generate_response_fn(input_data):
+    return chat.generate_response(input_data)
+
+# Use the function in gr.Interface
 interface = gr.Interface(
-    gr.Audio(type="microphone"),
+    gr.Audio(type="filepath"),  # Accept audio files
     gr.Textbox(),
-    self.generate_response
+    generate_response_fn  # Pass the function here
 )
 
-interface.launch()
+interface.launch()
+
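Note: the hunks above skip over the unchanged get_zephyr_response and generate_response bodies (lines 13-25 of app.py), so for orientation only, here is a minimal hypothetical sketch of how that transcription step might use the Whisper processor and model now created in __init__ together with the file path that gr.Audio(type="filepath") passes in. The transcribe_and_reply name and the librosa audio loading are assumptions for illustration, not code from this commit.

import librosa

def transcribe_and_reply(chat, audio_path):
    # Load the recording at the 16 kHz sample rate Whisper expects (librosa is assumed here).
    audio, _ = librosa.load(audio_path, sr=16000)
    # Turn the waveform into log-mel input features and transcribe it with Whisper.
    inputs = chat.whisper_processor(audio, sampling_rate=16000, return_tensors="pt")
    predicted_ids = chat.whisper_model.generate(inputs.input_features)
    transcription = chat.whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    # Hand the transcription to Zephyr, speak the reply with gTTS, and return it.
    response = chat.get_zephyr_response(transcription)
    chat.speak(response)
    return response

With a helper like this, generate_response_fn could simply call transcribe_and_reply(chat, input_data) on the path that Gradio supplies.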