import gradio as gr
import os
import shutil

import openai
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from dotenv import load_dotenv, find_dotenv
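
# Load environment variables (e.g. a default OPENAI_API_KEY) from a local .env file, if present.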
_ = load_dotenv(find_dotenv())
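
# Prompt template for the RetrievalQA chain: the retrieved transcript chunks are
# injected as {context} and the user's prompt as {question}.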
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know,
don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "Thanks, Bernd Straehle 🚀"
at the end of the answer. {context} Question: {question} Helpful Answer: """
QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)
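
# Run the full RAG pipeline for a single request: transcribe, split, embed, index, retrieve, answer.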
def invoke(openai_api_key, youtube_url, prompt):
    # LangChain's OpenAI integrations read the API key from the environment,
    # so propagate the per-request key both ways.
    openai.api_key = openai_api_key
    os.environ["OPENAI_API_KEY"] = openai_api_key
    youtube_dir = "docs/youtube/"
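
    # Download the video's audio track and transcribe it with OpenAI Whisper;
    # the transcript is returned as a list of LangChain Documents.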
    loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
    docs = loader.load()
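
    # Split the transcript into overlapping chunks so that each chunk fits
    # comfortably into the embedding and LLM context windows.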
    text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
    splits = text_splitter.split_documents(docs)
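
    # Embed the chunks with OpenAI embeddings and index them in a local Chroma vector store.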
    chroma_dir = "docs/chroma/"
    vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = chroma_dir)
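
    # Answer the prompt with GPT-4, retrieving the most relevant chunks as context.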
    llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
    qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
    result = qa_chain({"query": prompt})

    # Clean up the downloaded audio and the vector store between requests.
    shutil.rmtree(youtube_dir, ignore_errors = True)
    shutil.rmtree(chroma_dir, ignore_errors = True)
    return result["result"]
description = """This app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data.
Enter an OpenAI API key, a YouTube URL (the external data), and a prompt to search the video, analyze its sentiment, summarize it, translate it, etc.\n\n
Implementation: Gradio UI using the OpenAI API
via the AI-first toolkit LangChain with the foundation models
Whisper (speech-to-text) and GPT-4 (LLM use cases)."""
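
# Close any prior Gradio instances, then build and launch the UI:
# three text inputs (API key, YouTube URL, prompt) and one text output.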
gr.close_all()
demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", lines = 1), gr.Textbox(label = "YouTube URL", lines = 1), gr.Textbox(label = "Prompt", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.launch()