import gradio as gr
from transformers import pipeline

# Build the three pipelines: speech recognition, question answering, and
# text-to-speech. device=0 places each model on the first GPU; drop it to run
# on CPU. If these checkpoints are unavailable to you, swap in any ASR, QA,
# and TTS checkpoints from the Hugging Face Hub.
asr_pipeline = pipeline("automatic-speech-recognition", model="nvidia/canary-1b", device=0)
qa_pipeline = pipeline("question-answering", model="LLAMA/llama3-base-qa", tokenizer="LLAMA/llama3-base-qa")
tts_pipeline = pipeline("text-to-speech", model="patrickvonplaten/vits-large", device=0)


def capture_audio(sample_rate=16_000, chunk_seconds=3):
    """Record microphone audio in short chunks until the cue phrase is heard."""
    # Standalone wake-word loop; it assumes the `sounddevice` package for
    # microphone access, since an ASR pipeline cannot record audio by itself.
    import sounddevice as sd

    print("Listening for cue words...")
    while True:
        # Grab a short mono chunk from the default microphone.
        chunk = sd.rec(int(chunk_seconds * sample_rate), samplerate=sample_rate, channels=1)
        sd.wait()
        audio_input = chunk.squeeze()
        # The ASR pipeline returns the recognized text under the "text" key.
        transcript = asr_pipeline({"raw": audio_input, "sampling_rate": sample_rate})["text"]
        if "hey canary" in transcript.lower():
            print("Cue word detected!")
            return audio_input
        print("Listening...")


def ai_assistant(audio_input):
    # Speech -> text: the ASR pipeline returns {"text": ...}.
    transcript = asr_pipeline(audio_input)["text"]
    # Answer the transcribed question against a fixed context document.
    qa_result = qa_pipeline(question=transcript, context="Insert your context here")
    # Text -> speech: the TTS pipeline returns {"audio": ..., "sampling_rate": ...}.
    tts_output = tts_pipeline(qa_result["answer"])
    # Gradio's Audio output accepts a (sample_rate, waveform) tuple.
    return tts_output["sampling_rate"], tts_output["audio"].squeeze()

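
# A minimal standalone sketch (assumption, not wired into the Gradio app below):
# run the wake-word loop locally and play the spoken answer with sounddevice
# instead of the browser UI.
#
#   import sounddevice as sd
#   audio = capture_audio()
#   rate, waveform = ai_assistant({"raw": audio, "sampling_rate": 16_000})
#   sd.play(waveform, rate)
#   sd.wait()
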

if __name__ == "__main__":
    # The microphone Audio component records in the browser and passes the
    # recording to ai_assistant as a file path.
    gr.Interface(
        fn=ai_assistant,
        inputs=gr.Audio(sources=["microphone"], type="filepath", label="Speak Here"),
        outputs=gr.Audio(label="Assistant's Response"),
        title="AI Assistant",
        description="An AI Assistant that answers questions based on your speech input.",
    ).launch()