Update app.py
app.py CHANGED

@@ -1,5 +1,5 @@
 import gradio as gr
-import openai, os,
+import openai, os, shutil
 
 from langchain.chains import RetrievalQA
 from langchain.chat_models import ChatOpenAI
@@ -14,7 +14,7 @@ from langchain.vectorstores import Chroma
 from dotenv import load_dotenv, find_dotenv
 _ = load_dotenv(find_dotenv())
 
-openai.api_key =
+#openai.api_key = os.environ["OPENAI_API_KEY"]
 
 template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up
 an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer.
@@ -22,10 +22,8 @@ template = """Use the following pieces of context to answer the question at the
 
 QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)
 
-
-
-CHROMA_DIR = "docs/chroma" + rng
-YOUTUBE_DIR = "docs/youtube" + rng
+CHROMA_DIR = "docs/chroma"
+YOUTUBE_DIR = "docs/youtube"
 
 MODEL_NAME = "gpt-4"
 
@@ -47,6 +45,7 @@ def invoke(openai_api_key, youtube_url, process_video, prompt):
     qa_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
     result = qa_chain({"query": prompt})
     #print(result)
+    openai.api_key = ""
     return result["result"]
 
 description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data
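For context, a minimal sketch of how the pieces this commit touches fit together: the fixed CHROMA_DIR (no more random "rng" suffix), the commented-out module-level openai.api_key, and the key reset inside the request handler. It assumes the legacy langchain 0.0.x APIs implied by the imports in the diff; the answer() helper, the abbreviated template body, and the vector-store loading shown here are illustrative assumptions, not the app's exact code.

import openai
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma

CHROMA_DIR = "docs/chroma"   # fixed directory now that the random suffix is gone
MODEL_NAME = "gpt-4"

# Abbreviated version of the prompt from the diff; it must contain
# {context} and {question} to match input_variables.
template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know.
{context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

def answer(openai_api_key, prompt):
    # The user-supplied key replaces the commented-out module-level assignment.
    openai.api_key = openai_api_key
    llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature = 0)
    # Illustrative: open the persisted Chroma store under CHROMA_DIR.
    vector_db = Chroma(persist_directory = CHROMA_DIR,
                       embedding_function = OpenAIEmbeddings(openai_api_key = openai_api_key))
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever = vector_db.as_retriever(search_kwargs = {"k": 3}),
        return_source_documents = True,
        chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT},
    )
    result = qa_chain({"query": prompt})
    # The commit clears the global key once the request is answered.
    openai.api_key = ""
    return result["result"]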