import os

import openai
from langchain import hub
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
# OpenAI API Key Setup
openai.api_key = os.environ["OPENAI_API_KEY"]
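# ChatOpenAI and OpenAIEmbeddings below also read OPENAI_API_KEY directly
# from the environment, so no key needs to be passed to them explicitly.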
# Load The Goal PDF
loader = UnstructuredPDFLoader("data/The Goal - A Process of Ongoing Improvement (Third Revised Edition).pdf")  # , mode="elements"
docs = loader.load()
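# In the default mode the loader returns a single Document holding the
# full PDF text; mode="elements" would instead split it by layout element.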
# Split Text Chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
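# The 200-character overlap keeps sentences that straddle a chunk boundary
# retrievable from at least one chunk.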
# Embed Chunks into Chroma Vector Store
vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
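# as_retriever() defaults to similarity search, returning the top-k
# (k=4 by default) most relevant chunks per query.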
# Use RAG Prompt Template
prompt = hub.pull("rlm/rag-prompt")
llm = ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0)  # or gpt-3.5-turbo
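# Join the retrieved Documents into one context string for the prompt.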
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
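# Stream the answer token by token as the model generates it.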
for chunk in rag_chain.stream("What is a Bottleneck Constraint?"):
    print(chunk, end="", flush=True)

# invoke() returns the complete answer in one call instead of streaming it.
print(rag_chain.invoke("What is a Bottleneck Constraint?"))