import gradio as gr
from transformers import pipeline

# Create pipelines for ASR, QA, and TTS
asr_pipeline = pipeline("automatic-speech-recognition",
                        model="canary/asr-small-librispeech",
                        device=0)  # Adjust device based on your hardware
qa_pipeline = pipeline("question-answering",
                       model="LLAMA/llama3-base-qa",
                       tokenizer="LLAMA/llama3-base-qa")
tts_pipeline = pipeline("text-to-speech",
                        model="patrickvonplaten/vits-large",
                        device=0)  # Adjust device based on your hardware


def ai_assistant(audio_input):
    # Perform automatic speech recognition (ASR); the pipeline returns a dict with a "text" key
    transcribed_text = asr_pipeline(audio_input)["text"]

    # Perform question answering (QA)
    question = transcribed_text
    context = "Insert your context here"  # Provide the context for the question answering model
    answer = qa_pipeline(question=question, context=context)

    # Convert the answer to speech using text-to-speech (TTS)
    tts_output = tts_pipeline(answer["answer"])

    # Return (sampling rate, waveform) so Gradio can play the audio
    return tts_output["sampling_rate"], tts_output["audio"].squeeze()


if __name__ == "__main__":
    # Create a Gradio interface that records from the microphone and plays back the answer
    gr.Interface(
        ai_assistant,
        inputs=gr.Audio(sources=["microphone"], type="filepath", label="Speak Here"),
        outputs=gr.Audio(label="Assistant's Response"),
        title="AI Assistant",
        description="An AI Assistant that answers questions based on your speech input.",
    ).launch()