import gradio as gr
import openai, os

from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

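# Prompt templates: the plain template only receives the user's question; the RAG template
# additionally receives the retrieved context chunks.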
template = """Answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as |
|
possible. Always say "🚀 Thanks for using the app - Bernd Straehle." at the end of the answer. |
|
Question: {question} Helpful Answer: """ |
|
|
|
rag_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up |
|
an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer. |
|
{context} Question: {question} Helpful Answer: """ |
|
|
|
CHAIN_PROMPT = PromptTemplate(input_variables = ["question"], template = template) |
|
RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = rag_template) |
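
# Chroma persistence directory, YouTube audio download directory, source video, and chat model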
CHROMA_DIR = "docs/chroma"
YOUTUBE_DIR = "docs/youtube"

YOUTUBE_URL = "https://www.youtube.com/watch?v=--khbXchTeE"

MODEL_NAME = "gpt-4"

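# Answer the prompt either with a plain LLM chain or with a RetrievalQA chain over the
# transcribed video (RAG).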
def invoke(openai_api_key, use_rag, prompt):
    llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature = 0)
    if (use_rag):
        if (os.path.isdir(CHROMA_DIR)):
            # Reuse the persisted vector store if it already exists
            vector_db = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings())
        else:
            # Download the YouTube audio, transcribe it with Whisper, split the transcript, and embed it into Chroma
            loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL], YOUTUBE_DIR), OpenAIWhisperParser())
            docs = loader.load()
            text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
            splits = text_splitter.split_documents(docs)
            vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
        rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT})
        result = rag_chain({"query": prompt})
        result = result["result"]
    else:
        # Plain LLM chain; use the pre-built prompt template, not the raw user prompt
        chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
        result = chain.run({"question": prompt})
    return result
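
# Example call (hypothetical), assuming the OpenAI API key is available via .env or the UI field:
# invoke(os.environ["OPENAI_API_KEY"], use_rag = True, prompt = "what is gpt-4")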

description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data
(in this case a YouTube video, but it could be PDFs, URLs, or other structured/unstructured private/public
<a href='https://raw.githubusercontent.com/bstraehle/ai-ml-dl/c38b224c196fc984aab6b6cc6bdc666f8f4fbcff/langchain/document-loaders.png'>data sources</a>).\n\n
<strong>Instructions:</strong> Enter an OpenAI API key and perform LLM use cases on a <a href='https://www.youtube.com/watch?v=--khbXchTeE'>short video about GPT-4</a>
(semantic search, sentiment analysis, summarization, translation, etc.):
<ul style="list-style-type:square;">
<li>Set "Use RAG" to "False" and submit the prompt "what is gpt-4". The LLM <strong>without</strong> RAG does not know the answer.</li>
<li>Set "Use RAG" to "True" and submit the prompt "what is gpt-4". The LLM <strong>with</strong> RAG knows the answer.</li>
<li>Experiment with different prompts, for example "what is gpt-4, answer in german" or "write a poem about gpt-4".</li>
</ul>
In a production system, processing the external data would be done in a batch process. One idea for a production system would be to perform LLM use cases on the
<a href='https://www.youtube.com/playlist?list=PL2yQDdvlhXf_hIzmfHCdbcXj2hS52oP9r'>AWS re:Invent playlist</a>.\n\n
<strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using the <a href='https://platform.openai.com/'>OpenAI</a> API via the AI-first
<a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech-to-text) and
<a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM) foundation models, as well as the AI-native <a href='https://www.trychroma.com/'>Chroma</a>
embedding database."""

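# Gradio UI: OpenAI API key, RAG toggle, and prompt in; completion out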
gr.close_all()
demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
                              gr.Radio([True, False], label = "Use RAG", value = False),
                              gr.Textbox(label = "Prompt", value = "what is gpt-4", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.queue().launch()