rag / app.py
import gradio as gr
import shutil, openai, os
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
#openai.api_key = os.environ["OPENAI_API_KEY"]
def invoke(openai_api_key, youtube_url, prompt):
    openai.api_key = openai_api_key
    url = youtube_url
    save_dir = "docs/youtube/"
    # Download the YouTube audio and transcribe it with OpenAI Whisper.
    loader = GenericLoader(
        YoutubeAudioLoader([url], save_dir),
        OpenAIWhisperParser()
    )
    docs = loader.load()
    # Remove the downloaded audio files once the transcript is in memory.
    shutil.rmtree(save_dir)
    content = docs[0].page_content
    #####
    #TODO
    #####
    return content
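# --- Hypothetical sketch (not part of the original app): one way the TODO in
# --- invoke() could be completed with a minimal RAG chain over the Whisper
# --- transcript. The name rag_answer, the chunk sizes, and the choice of a
# --- Chroma vector store with GPT-4 are illustrative assumptions, not the
# --- author's implementation.
def rag_answer(docs, prompt, openai_api_key):
    # Imports stay local so the sketch has no effect unless it is actually called.
    from langchain.chains import RetrievalQA
    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.vectorstores import Chroma
    # Split the transcript documents into overlapping chunks for retrieval.
    splits = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150).split_documents(docs)
    # Embed the chunks and index them in an in-memory Chroma vector store.
    vector_db = Chroma.from_documents(splits, OpenAIEmbeddings(openai_api_key = openai_api_key))
    # Answer the prompt with GPT-4, grounded in the retrieved chunks.
    qa_chain = RetrievalQA.from_chain_type(
        llm = ChatOpenAI(model_name = "gpt-4", temperature = 0, openai_api_key = openai_api_key),
        retriever = vector_db.as_retriever())
    return qa_chain.run(prompt)
# Example usage inside invoke(): return rag_answer(docs, prompt, openai_api_key)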
description = """The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data.
Enter an OpenAI API key, YouTube URL, and prompt to search the video, analyse its sentiment, summarize it, translate it, etc.
Implementation: <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API
via AI-first toolkit <a href='https://www.langchain.com/'>LangChain</a> with foundation models
<a href='https://openai.com/research/whisper'>Whisper</a> (speech to text) and <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM use cases)."""
gr.close_all()
demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", lines = 1),
                              gr.Textbox(label = "YouTube URL", lines = 1),
                              gr.Textbox(label = "Prompt", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - RAG",
                    description = description)
demo.launch()