bstraehle committed on
Commit
e38fd6d
1 Parent(s): 35d4315

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -16,11 +16,16 @@ _ = load_dotenv(find_dotenv())
16
 
17
  #openai.api_key = os.environ["OPENAI_API_KEY"]
18
 
19
- template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up
20
- an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer.
21
- {context} Question: {question} Helpful Answer: """
22
 
23
- CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)
 
 
 
 
 
24
 
25
  CHROMA_DIR = "docs/chroma"
26
  YOUTUBE_DIR = "docs/youtube"
@@ -40,12 +45,12 @@ def invoke(openai_api_key, use_rag, prompt):
40
  text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
41
  splits = text_splitter.split_documents(docs)
42
  vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
43
- rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": CHAIN_PROMPT})
44
  result = rag_chain({"query": prompt})
45
  result = result["result"]
46
  else:
47
  chain = LLMChain(llm = llm, prompt = prompt)
48
- result = chain.run({})
49
  #print(result)
50
  return result
51
 
 
16
 
17
  #openai.api_key = os.environ["OPENAI_API_KEY"]
18
 
19
+ template = """Answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as
20
+ possible. Always say "🚀 Thanks for using the app - Bernd Straehle." at the end of the answer.
21
+ Question: {question} Helpful Answer: """
22
 
23
+ rag_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up
24
+ an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer.
25
+ {context} Question: {question} Helpful Answer: """
26
+
27
+ CHAIN_PROMPT = PromptTemplate(input_variables = ["question"], template = template)
28
+ RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = rag_template)
29
 
30
  CHROMA_DIR = "docs/chroma"
31
  YOUTUBE_DIR = "docs/youtube"
 
45
  text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
46
  splits = text_splitter.split_documents(docs)
47
  vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
48
+ rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT})
49
  result = rag_chain({"query": prompt})
50
  result = result["result"]
51
  else:
52
  chain = LLMChain(llm = llm, prompt = prompt)
53
+ result = chain.run({"question": prompt})
54
  #print(result)
55
  return result
56