Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ from langchain.chains import RetrievalQA # for conversing with chatGPT
 from langchain.chat_models import ChatOpenAI # the LLM model we'll use (ChatGPT)
 from langchain import PromptTemplate
 
-def load_pdf_and_generate_embeddings(pdf_doc, open_ai_key, relevant_pages='all'):
+def load_pdf_and_generate_embeddings(pdf_doc, open_ai_key, relevant_pages):
     if openai_key is not None:
         os.environ['OPENAI_API_KEY'] = open_ai_key
         #Load the pdf file
@@ -23,9 +23,7 @@ def load_pdf_and_generate_embeddings(pdf_doc, open_ai_key, relevant_pages='all')
 
         pages_to_be_loaded =[]
 
-        if relevant_pages
-            pages_to_be_loaded = pages.copy()
-        else:
+        if relevant_pages:
             page_numbers = relevant_pages.split(",")
             if len(page_numbers) != 0:
                 for page_number in page_numbers:
@@ -37,6 +35,8 @@ def load_pdf_and_generate_embeddings(pdf_doc, open_ai_key, relevant_pages='all')
                         pages_to_be_loaded = pages.copy()
             else:
                 pages_to_be_loaded = pages.copy()
+        else:
+            pages_to_be_loaded = pages.copy()
 
         #To create a vector store, we use the Chroma class, which takes the documents (pages in our case) and the embeddings instance
         vectordb = Chroma.from_documents(pages_to_be_loaded, embedding=embeddings)
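
For context, a minimal sketch of how the page-selection flow reads after this commit. The helper name build_vectordb, the use of PyPDFLoader and OpenAIEmbeddings, and the try/except inside the loop are assumptions for illustration; the hunks above only show that `pages` and `embeddings` already exist and that an empty `relevant_pages` (the old ='all' default is gone) now falls back to embedding every page via the added else branch.

import os
from langchain.document_loaders import PyPDFLoader          # assumed loader; not shown in the diff
from langchain.embeddings.openai import OpenAIEmbeddings    # assumed source of `embeddings`
from langchain.vectorstores import Chroma

def build_vectordb(pdf_path, open_ai_key, relevant_pages):
    # Hypothetical helper mirroring the selection logic of load_pdf_and_generate_embeddings.
    if open_ai_key is not None:
        os.environ['OPENAI_API_KEY'] = open_ai_key

    pages = PyPDFLoader(pdf_path).load()   # one Document per PDF page
    embeddings = OpenAIEmbeddings()
    pages_to_be_loaded = []

    if relevant_pages:
        # e.g. "2,5,7" keeps only those pages (1-based page numbers)
        page_numbers = [p.strip() for p in relevant_pages.split(",") if p.strip()]
        if page_numbers:
            for page_number in page_numbers:
                try:
                    pages_to_be_loaded.append(pages[int(page_number) - 1])
                except (ValueError, IndexError):
                    # non-numeric or out-of-range entry: fall back to the whole PDF
                    pages_to_be_loaded = pages.copy()
                    break
        else:
            pages_to_be_loaded = pages.copy()
    else:
        # empty selection: embed every page, matching the new trailing else branch
        pages_to_be_loaded = pages.copy()

    # Build the Chroma vector store from the selected pages, as in the diff
    return Chroma.from_documents(pages_to_be_loaded, embedding=embeddings)

Called as build_vectordb("report.pdf", key, "2,5,7") this embeds only pages 2, 5 and 7; called with an empty string it embeds the whole document, which is the fallback the added else branch provides now that the ='all' default has been removed from the signature.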