bstraehle committed on
Commit 7e9c595
1 Parent(s): 0dc5da4

Update app.py

Files changed (1)
app.py +5 -1
app.py CHANGED
@@ -22,9 +22,12 @@ template = """Use the following pieces of context to answer the question at the
 
 QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)
 
+print(0)
+
 def invoke(openai_api_key, youtube_url, prompt):
     openai.api_key = openai_api_key
     if (os.path.isdir("docs/chroma/") == False):
+        print(1)
         youtube_dir = "docs/youtube/"
         loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
         docs = loader.load()
@@ -34,13 +37,14 @@ def invoke(openai_api_key, youtube_url, prompt):
         vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = chroma_dir)
     llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
     qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
+    print(2)
     result = qa_chain({"query": prompt})
     shutil.rmtree(youtube_dir)
     shutil.rmtree(chroma_dir)
     return result["result"]
 
 description = """The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation</strong> (RAG) on external data.
- Enter an OpenAI API key, YouTube URL (external data), and prompt to perform semantic search, sentiment analysis, summarization, and/or translation, etc.\n\n
+ Enter an OpenAI API key, YouTube URL (external data), and prompt to perform semantic search, sentiment analysis, summarization, translation, etc.\n\n
  Implementation: <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API
  via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech to text)
  and <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM use cases) foundation models as well as AI-native
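
For context, the hunks above show only part of invoke(). Below is a minimal standalone sketch of the pipeline these lines wire together, reconstructed from what is visible in this diff. The import paths (pre-0.1 langchain layout), the example URL and query, the text-splitter settings, and the exact prompt template are assumptions for illustration, not part of the commit.

import os

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

os.environ["OPENAI_API_KEY"] = "sk-..."  # app.py takes the key from the UI instead

# 1. Download the YouTube audio and transcribe it with Whisper.
docs = GenericLoader(
    YoutubeAudioLoader(["https://www.youtube.com/watch?v=..."], "docs/youtube/"),
    OpenAIWhisperParser(),
).load()

# 2. Chunk the transcript and index the chunks in Chroma
#    (splitter settings are not visible in this hunk; defaults are used here).
splits = RecursiveCharacterTextSplitter().split_documents(docs)
vectordb = Chroma.from_documents(
    documents=splits,
    embedding=OpenAIEmbeddings(),
    persist_directory="docs/chroma/",
)

# 3. Answer the prompt with GPT-4 over the retrieved chunks
#    (illustrative template; the real one is defined earlier in app.py).
template = """Use the following pieces of context to answer the question at the end.
{context}
Question: {question}
Helpful Answer:"""
qa_chain = RetrievalQA.from_chain_type(
    ChatOpenAI(model_name="gpt-4", temperature=0),
    retriever=vectordb.as_retriever(),
    return_source_documents=True,
    chain_type_kwargs={
        "prompt": PromptTemplate(input_variables=["context", "question"], template=template)
    },
)
print(qa_chain({"query": "Summarize the video."})["result"])

Note that invoke() removes both docs/youtube/ and docs/chroma/ after answering (the shutil.rmtree calls in the diff), so the next call rebuilds the index from scratch.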