ahricat committed on
Commit 25d1ea3
1 Parent(s): f410f32

Update app.py

Files changed (1)
  1. app.py +14 -59
app.py CHANGED
@@ -1,65 +1,20 @@
 import gradio as gr
-from transformers import (
-    WhisperProcessor, WhisperForConditionalGeneration,
-    AutoModelForCausalLM, AutoTokenizer, pipeline,
-)
-from huggingface_hub import snapshot_download
-import sounddevice as sd
-import numpy as np
-import torch
-from gtts import gTTS
-import pygame
-
-class InteractiveChat:
-    def __init__(self, model_name="openai/whisper-large", tts_choice="OpenVoice"):
-        self.whisper_processor = WhisperProcessor.from_pretrained(model_name)
-        self.whisper_model = WhisperForConditionalGeneration.from_pretrained(model_name)
-        self.zephyr_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
-        self.zephyr_model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-beta", device_map="auto")
-        self.zephyr_pipeline = pipeline("text-generation", model=self.zephyr_model, tokenizer=self.zephyr_tokenizer)
-        self.tts_choice = tts_choice
-
-    def generate_response(self, input_data):
-        input_features = self.whisper_processor(input_data, sampling_rate=16_000, return_tensors="pt").input_features
-        predicted_ids = self.whisper_model.generate(input_features)
-        transcription = self.whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
-
-        # Use the transcription as input for Zephyr
-        response = self.zephyr_pipeline(transcription, max_length=1000)[0]["generated_text"]
-        return transcription, response
-
-    def speak(self, text):
-        try:
-            if self.tts_choice == "OpenVoice":
-                model_path = snapshot_download("facebook/mms-tts-eng")
-                pipe = pipeline("text-to-speech", model=model_path)
-                audio_array = pipe(text).audio
-                pygame.mixer.init()
-                sound = pygame.sndarray.make_sound(audio_array)
-                sound.play()
-                pygame.time.delay(int(sound.get_length() * 1000))
-            else:  # gTTS
-                tts = gTTS(text=text, lang='en')
-                tts.save("response.mp3")
-                pygame.mixer.init()
-                pygame.mixer.music.load("response.mp3")
-                pygame.mixer.music.play()
-                while pygame.mixer.music.get_busy():
-                    pygame.time.Clock().tick(10)
-        except Exception as e:
-            print("Error occurred during speech generation:", e)
-
-
 
 with gr.Blocks() as demo:
-    model_choice = gr.Dropdown(["openai/whisper-large"], label="Whisper Model", value="openai/whisper-large")
-    tts_choice = gr.Radio(["OpenVoice", "gTTS"], label="TTS Engine", value="OpenVoice")
-    input_data = gr.Audio(source="microphone", type="numpy", label="Speak Your Message")
+    model_choice = gr.Dropdown(["openai/whisper-large", "HuggingFaceH4/zephyr-7b-beta"],
+                               label="Choose Model", value="openai/whisper-large")
+    input_data = gr.Audio(source="microphone", type="filepath", label="Speak Your Message")  # Note: type is now "filepath"
     output_text = gr.Textbox(label="Transcription and Response")
 
-    model_choice.change(lambda x, y: InteractiveChat(x, y), inputs=[model_choice, tts_choice], outputs=None)
-    tts_choice.change(lambda x, y: InteractiveChat(y, x), inputs=[tts_choice, model_choice], outputs=None)
-    input_data.change(lambda x, model: model.generate_response(x), inputs=[input_data, model_choice], outputs=output_text)
-    input_data.change(lambda x, model: model.speak(x[1]), inputs=[output_text, model_choice], outputs=None)  # Speak the response
+    def generate_response(audio_path, model_name):
+        hf_interface = gr.Interface.load(model_name)  # Load the model directly from the Hub
+        if model_name == "openai/whisper-large":
+            transcription = hf_interface(audio_path)  # Handle transcription
+        else:  # Zephyr
+            transcription = hf_interface(audio_path)[0]["generated_text"]  # Extract transcription from zephyr
+        response = hf_interface(transcription)[0]["generated_text"]  # Get Zephyr's response
+        return transcription, response
+
+    input_data.change(generate_response, inputs=[input_data, model_choice], outputs=output_text)
 
-demo.launch(share=True)
+demo.launch()
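
Review note: as committed, generate_response reloads the selected model on every .change() event and reuses the same hf_interface for both steps, so one call always hits the wrong model (with "openai/whisper-large" selected, the response line feeds text back into Whisper; with Zephyr selected, an audio path goes to a text-generation model). It also returns a (transcription, response) tuple into a single Textbox. A minimal sketch of the intended flow, under two assumptions: the Gradio 3.x gr.Interface.load API the commit already uses, where Hub repos take a "models/" prefix when no src is given, and the fact that interfaces loaded this way are callable as plain functions:

import gradio as gr

# Sketch only, not the committed code: load each model once at startup,
# so .change() events do not trigger a reload.
whisper = gr.Interface.load("models/openai/whisper-large")          # speech -> text
zephyr = gr.Interface.load("models/HuggingFaceH4/zephyr-7b-beta")   # text -> text

def generate_response(audio_path):
    transcription = whisper(audio_path)  # Whisper handles transcription
    response = zephyr(transcription)     # Zephyr answers the transcribed text
    # output_text is a single Textbox, so return one string, not a tuple
    return f"{transcription}\n---\n{response}"

with gr.Blocks() as demo:
    input_data = gr.Audio(source="microphone", type="filepath", label="Speak Your Message")
    output_text = gr.Textbox(label="Transcription and Response")
    input_data.change(generate_response, inputs=input_data, outputs=output_text)

demo.launch()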