import os
import shutil

import gradio as gr
import openai
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from dotenv import load_dotenv, find_dotenv

_ = load_dotenv(find_dotenv())
#openai.api_key = os.environ["OPENAI_API_KEY"]

def invoke(openai_api_key, youtube_url, prompt):
    openai.api_key = openai_api_key
    url = youtube_url
    save_dir = "docs/youtube/"
    # Download the video's audio track and transcribe it with the OpenAI
    # Whisper API (YoutubeAudioLoader relies on yt_dlp, pydub, and ffmpeg).
    loader = GenericLoader(
        YoutubeAudioLoader([url], save_dir),
        OpenAIWhisperParser()
    )
    docs = loader.load()
    # Remove the downloaded audio files; the transcript is already in memory.
    shutil.rmtree(save_dir)
    content = docs[0].page_content
    #####
    #TODO: apply the user prompt to the transcript with an LLM
    # (a hedged sketch of one possible completion is appended at the end of this file)
    #####
    return content

description = """The app demonstrates how to use a Large Language Model (LLM) with Retrieval-Augmented Generation (RAG) on external data.
                 Enter an OpenAI API key, a YouTube URL, and a prompt to search the video, analyze its sentiment, summarize it, translate it, etc.
                 Implementation: Gradio UI using the OpenAI API via the LangChain toolkit, with the foundation models Whisper (speech-to-text) and GPT-4 (LLM use cases)."""

gr.close_all()
demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", lines = 1),
                              gr.Textbox(label = "YouTube URL", lines = 1),
                              gr.Textbox(label = "Prompt", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - RAG",
                    description = description)
demo.launch()
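
# --- Sketch: one possible way to fill in the #TODO above (an assumption, not
# the original implementation). It grounds GPT-4 on the Whisper transcript and
# then asks the user's prompt, via LangChain's ChatOpenAI wrapper. The helper
# name answer_prompt is hypothetical; to use it, define it above invoke() and
# replace "return content" with:
#     return answer_prompt(openai_api_key, content, prompt)

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

def answer_prompt(openai_api_key, transcript, prompt):
    # Hypothetical helper: let GPT-4 answer the prompt using only the transcript.
    llm = ChatOpenAI(model_name = "gpt-4",
                     openai_api_key = openai_api_key,
                     temperature = 0)
    messages = [
        SystemMessage(content = "Answer the user's prompt using only this video transcript:\n\n" + transcript),
        HumanMessage(content = prompt),
    ]
    return llm(messages).content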