bstraehle committed
Commit ac4cc68
1 Parent(s): 43377f5

Update app.py

Files changed (1): app.py (+6 −5)
app.py CHANGED
@@ -14,8 +14,6 @@ from langchain.vectorstores import Chroma
 from dotenv import load_dotenv, find_dotenv
 _ = load_dotenv(find_dotenv())
 
-global vectordb
-
 #openai.api_key = os.environ["OPENAI_API_KEY"]
 
 template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up
@@ -24,9 +22,12 @@ template = """Use the following pieces of context to answer the question at the
 
 QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)
 
+print(os.listdir("docs/chroma/"))
+print(os.listdir("docs/youtube/"))
+
 def invoke(openai_api_key, youtube_url, prompt):
     openai.api_key = openai_api_key
-    if vectordb is not None:
+    if (len(os.listdir("docs/chroma/")) == 0):
         youtube_dir = "docs/youtube/"
         loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
         docs = loader.load()
@@ -37,8 +38,8 @@ def invoke(openai_api_key, youtube_url, prompt):
     llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
     qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
     result = qa_chain({"query": prompt})
-    #shutil.rmtree(youtube_dir)
-    #shutil.rmtree(chroma_dir)
+    shutil.rmtree(youtube_dir)
+    shutil.rmtree(chroma_dir)
     return result["result"]
 
 description = """The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation</strong> (RAG) on external data.