Update app.py
app.py
CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 from transformers import pipeline
 
 # Load pipelines for Canary ASR, LLama3 QA, and VITS TTS
-asr_pipeline = pipeline("automatic-speech-recognition", model="canary
+asr_pipeline = pipeline("automatic-speech-recognition", model="nvidia/canary-1b", device=0)
 qa_pipeline = pipeline("question-answering", model="LLAMA/llama3-base-qa", tokenizer="LLAMA/llama3-base-qa")
 tts_pipeline = pipeline("text-to-speech", model="patrickvonplaten/vits-large", device=0)
 
@@ -34,7 +34,7 @@ def ai_assistant(audio_input):
 if __name__ == "__main__":
     # Create a Gradio interface
     gr.Interface(ai_assistant,
-                 inputs=gr.inputs.Audio(capture=
+                 inputs=gr.inputs.Audio(capture=capture_audio, label="Speak Here"),
                  outputs=gr.outputs.Audio(type="audio", label="Assistant's Response"),
                  title="AI Assistant",
-                 description="An AI Assistant that answers questions based on your speech input.").launch(
+                 description="An AI Assistant that answers questions based on your speech input.").launch()
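For context, below is an untested sketch of how the full app touched by this diff might look end to end. The body of ai_assistant() is not shown in the commit, so the ASR -> QA -> TTS chaining and the fixed QA context string are assumptions; the model identifiers are copied verbatim from the diff and may need to be swapped for checkpoints that the respective transformers pipelines actually support. The interface uses the current gr.Audio component rather than the deprecated gr.inputs/gr.outputs namespace (and the undefined capture_audio name) that appears in the new lines above.

import gradio as gr
from transformers import pipeline

# Load pipelines for Canary ASR, LLama3 QA, and VITS TTS (names taken from the diff)
asr_pipeline = pipeline("automatic-speech-recognition", model="nvidia/canary-1b", device=0)
qa_pipeline = pipeline("question-answering", model="LLAMA/llama3-base-qa", tokenizer="LLAMA/llama3-base-qa")
tts_pipeline = pipeline("text-to-speech", model="patrickvonplaten/vits-large", device=0)

# Placeholder knowledge text for the extractive QA pipeline (assumption: the real
# app supplies its own context here).
CONTEXT = "Gradio lets you build machine learning demos with a few lines of Python."


def ai_assistant(audio_path):
    # 1. Transcribe the spoken question from the recorded audio file.
    question = asr_pipeline(audio_path)["text"]
    # 2. Answer the question against the context with the QA pipeline.
    answer = qa_pipeline(question=question, context=CONTEXT)["answer"]
    # 3. Synthesize the answer back to speech.
    speech = tts_pipeline(answer)
    # Gradio's Audio output accepts a (sample_rate, waveform) tuple.
    return speech["sampling_rate"], speech["audio"].squeeze()


if __name__ == "__main__":
    # Create a Gradio interface
    gr.Interface(ai_assistant,
                 inputs=gr.Audio(sources=["microphone"], type="filepath", label="Speak Here"),
                 outputs=gr.Audio(label="Assistant's Response"),
                 title="AI Assistant",
                 description="An AI Assistant that answers questions based on your speech input.").launch()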