sagar007 committed
Commit 08a0b8f · verified · 1 Parent(s): 5fd8357

Update app.py

Files changed (1):
  app.py (+30 -2)
app.py CHANGED
@@ -47,6 +47,11 @@ vision_model = AutoModelForCausalLM.from_pretrained(
 
 vision_processor = AutoProcessor.from_pretrained(VISION_MODEL_ID, trust_remote_code=True)
 
+# Helper functions
+# Initialize Parler-TTS
+tts_model = ParlerTTSForConditionalGeneration.from_pretrained("parler-tts/parler-tts-mini-v1").to(device)
+tts_tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler-tts-mini-v1")
+
 # Helper functions
 @spaces.GPU
 def stream_text_chat(message, history, system_prompt, temperature=0.8, max_new_tokens=1024, top_p=1.0, top_k=20):
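
Note: the initialization added here (and the streaming changes in the next hunk) relies on names this commit does not import: ParlerTTSForConditionalGeneration, tempfile, sf, os, and torch. A minimal sketch of the imports app.py is assumed to already provide near the top of the file:

import os
import tempfile

import torch
import soundfile as sf
from transformers import AutoTokenizer
from parler_tts import ParlerTTSForConditionalGeneration

# `device` is assumed to be defined earlier in app.py alongside the Phi-3.5 models.
device = "cuda" if torch.cuda.is_available() else "cpu"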
@@ -77,9 +82,30 @@ def stream_text_chat(message, history, system_prompt, temperature=0.8, max_new_t
     thread.start()
 
     buffer = ""
+    audio_files = []
     for new_text in streamer:
         buffer += new_text
-        yield history + [[message, buffer]]
+
+        # Generate speech for the new text
+        tts_input_ids = tts_tokenizer(new_text, return_tensors="pt").input_ids.to(device)
+        tts_description = "A clear and natural voice reads the text with moderate speed and expression."
+        tts_description_ids = tts_tokenizer(tts_description, return_tensors="pt").input_ids.to(device)
+
+        with torch.no_grad():
+            audio_generation = tts_model.generate(input_ids=tts_description_ids, prompt_input_ids=tts_input_ids)
+
+        audio_arr = audio_generation.cpu().numpy().squeeze()
+
+        # Save the audio to a temporary file
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
+            sf.write(temp_audio.name, audio_arr, tts_model.config.sampling_rate)
+            audio_files.append(temp_audio.name)
+
+        yield history + [[message, buffer]], audio_files
+
+    # Clean up temporary audio files
+    for audio_file in audio_files:
+        os.remove(audio_file)
 
 @spaces.GPU
 def process_vision_query(image, text_input):
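
The per-chunk speech generation above follows the standard Parler-TTS API: the voice description is tokenized as input_ids, the text chunk as prompt_input_ids, and the model returns a waveform at tts_model.config.sampling_rate. A self-contained sketch of the same step outside the streaming loop, reusing the tts_model, tts_tokenizer, and device objects from the previous hunk:

import torch
import soundfile as sf

def chunk_to_wav(text, out_path="chunk.wav"):
    # The description conditions the voice; the chunk of chat text is the prompt.
    description = "A clear and natural voice reads the text with moderate speed and expression."
    description_ids = tts_tokenizer(description, return_tensors="pt").input_ids.to(device)
    prompt_ids = tts_tokenizer(text, return_tensors="pt").input_ids.to(device)
    with torch.no_grad():
        audio = tts_model.generate(input_ids=description_ids, prompt_input_ids=prompt_ids)
    # Write the generated waveform at the model's native sampling rate.
    sf.write(out_path, audio.cpu().numpy().squeeze(), tts_model.config.sampling_rate)
    return out_path

Since gr.Audio also accepts an (sampling_rate, numpy_array) tuple as its value, a variant of this handler could yield the newest chunk's array directly instead of accumulating temporary .wav files.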
@@ -106,6 +132,7 @@ def process_vision_query(image, text_input):
     response = vision_processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     return response
 
+
 # Custom CSS
 custom_css = """
 body { background-color: #0b0f19; color: #e2e8f0; font-family: 'Arial', sans-serif;}
@@ -169,6 +196,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Base().set(
     with gr.Tab("Text Model (Phi-3.5-mini)"):
         chatbot = gr.Chatbot(height=400)
         msg = gr.Textbox(label="Message", placeholder="Type your message here...")
+        audio_output = gr.Audio(label="Generated Speech", autoplay=True)
         with gr.Accordion("Advanced Options", open=False):
             system_prompt = gr.Textbox(value="You are a helpful assistant", label="System Prompt")
             temperature = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.8, label="Temperature")
@@ -179,7 +207,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Base().set(
         submit_btn = gr.Button("Submit", variant="primary")
         clear_btn = gr.Button("Clear Chat", variant="secondary")
 
-        submit_btn.click(stream_text_chat, [msg, chatbot, system_prompt, temperature, max_new_tokens, top_p, top_k], [chatbot])
+        submit_btn.click(stream_text_chat, [msg, chatbot, system_prompt, temperature, max_new_tokens, top_p, top_k], [chatbot, audio_output])
         clear_btn.click(lambda: None, None, chatbot, queue=False)
 
     with gr.Tab("Vision Model (Phi-3.5-vision)"):
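
Because stream_text_chat is a generator and two output components are registered in the submit_btn.click call above, every yield streams an update to both the Chatbot and the Audio component. A minimal, runnable sketch of that wiring pattern with hypothetical names (demo_stream stands in for the real handler):

import gradio as gr

def demo_stream(message, history):
    history = history or []
    buffer = ""
    for word in message.split():
        buffer += word + " "
        # First yielded value updates the Chatbot, second updates the Audio component.
        yield history + [[message, buffer]], None

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    audio_output = gr.Audio(label="Generated Speech", autoplay=True)
    msg = gr.Textbox(label="Message")
    submit_btn = gr.Button("Submit")
    submit_btn.click(demo_stream, [msg, chatbot], [chatbot, audio_output])

demo.launch()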