import gradio as gr
from transformers import pipeline

# Load pipelines for Canary ASR, Llama 3 QA, and VITS TTS
# (device=0 assumes a CUDA GPU; pass device=-1 to run on CPU instead)
asr_pipeline = pipeline("automatic-speech-recognition", model="nvidia/canary-1b", device=0)
qa_pipeline = pipeline("question-answering", model="LLAMA/llama3-base-qa", tokenizer="LLAMA/llama3-base-qa")
tts_pipeline = pipeline("text-to-speech", model="patrickvonplaten/vits-large", device=0)

# Cue-phrase detection: Gradio records the audio itself, so rather than
# polling the microphone in a loop (the ASR pipeline cannot capture audio
# on its own), we check the transcript for the cue phrase before answering
CUE_PHRASE = "hey canary"

def has_cue_phrase(transcript):
    return CUE_PHRASE in transcript.lower()

# AI assistant function
def ai_assistant(audio_input):
    # Perform automatic speech recognition (ASR); the pipeline returns a
    # dict with the transcript under the "text" key
    transcript = asr_pipeline(audio_input)["text"]

    # Only answer once the cue phrase has been spoken
    if not has_cue_phrase(transcript):
        print("Listening for cue words...")
        return None
    print("Cue word detected!")

    # Perform question answering (QA) over the transcribed question
    qa_result = qa_pipeline(question=transcript, context="Insert your context here")

    # Convert the QA answer to speech using text-to-speech (TTS)
    tts_output = tts_pipeline(qa_result["answer"])

    # Gradio's Audio output expects a (sampling_rate, waveform) tuple
    return tts_output["sampling_rate"], tts_output["audio"].squeeze()
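
# Usage sketch (illustrative; "question.wav" is an assumed local file):
# besides the web UI, the assistant can be called directly on a clip,
#
#     sampling_rate, waveform = ai_assistant("question.wav")
#
# since the transformers ASR pipeline accepts file paths as well as raw
# numpy arrays.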

if __name__ == "__main__":
    # Create a Gradio interface; a microphone Audio input replaces the
    # unsupported capture= callback, and the deprecated gr.inputs/gr.outputs
    # modules are gone in current Gradio releases
    gr.Interface(ai_assistant,
                 inputs=gr.Audio(sources=["microphone"], type="filepath", label="Speak Here"),
                 outputs=gr.Audio(label="Assistant's Response"),
                 title="AI Assistant",
                 description="An AI Assistant that answers questions based on your speech input.").launch()