from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the user's question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0)
search = DuckDuckGoSearchAPIWrapper()
def retriever(query):
return search.run(query)
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
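# Hypothetical usage sketch for the chain above; the question is illustrative
chain.invoke("What is the capital of France?")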
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/cpi/"
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(path + "cpi.pdf")
pdf_pages = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits_pypdf = text_splitter.split_documents(pdf_pages)
all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf]
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "cpi.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
baseline = Chroma.from_texts(
texts=all_splits_pypdf_texts,
collection_name="baseline",
embedding=OpenAIEmbeddings(),
)
retriever_baseline = baseline.as_retriever()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
import base64
import io
import os
from io import BytesIO
from langchain_core.messages import HumanMessage
from PIL import Image
def encode_image(image_path):
    """Encode the image file at image_path as a base64 string."""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
    """Summarize an image given its base64 encoding and a prompt."""
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)
msg = chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
img_base64_list = []
image_summaries = []
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
import uuid
from base64 import b64decode
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_core.documents import Document
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
if text_summaries:
add_documents(retriever, text_summaries, texts)
if table_summaries:
add_documents(retriever, table_summaries, tables)
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
multi_vector_img = Chroma(
collection_name="multi_vector_img", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img = create_multi_vector_retriever(
multi_vector_img,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
img_base64_list,
)
query = "What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?"
suffix_for_images = " Include any pie charts, graphs, or tables."
docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images)
from IPython.display import HTML, display
def plt_img_base64(img_base64):
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
display(HTML(image_html))
plt_img_base64(docs[1])
multi_vector_text = Chroma(
collection_name="multi_vector_text", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img_summary = create_multi_vector_retriever(
multi_vector_text,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
    image_summaries,  # the summary itself is stored as the raw document here
)
from langchain_experimental.open_clip import OpenCLIPEmbeddings
multimodal_embd = Chroma(
collection_name="multimodal_embd", embedding_function=OpenCLIPEmbeddings()
)
image_uris = sorted(
[
os.path.join(path, image_name)
for image_name in os.listdir(path)
if image_name.endswith(".jpg")
]
)
if image_uris:
multimodal_embd.add_images(uris=image_uris)
if texts:
multimodal_embd.add_texts(texts=texts)
if tables:
multimodal_embd.add_texts(texts=tables)
retriever_multimodal_embd = multimodal_embd.as_retriever()
from operator import itemgetter
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
rag_prompt_text = ChatPromptTemplate.from_template(template)
def text_rag_chain(retriever):
"""RAG chain"""
model = ChatOpenAI(temperature=0, model="gpt-4")
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| rag_prompt_text
| model
| StrOutputParser()
)
return chain
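# Usage sketch mirroring the pattern above: a text-only RAG chain over the baseline retriever
chain_text_baseline = text_rag_chain(retriever_baseline)
chain_text_baseline.invoke(query)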
import re
from langchain_core.documents import Document
from langchain_core.runnables import RunnableLambda
def looks_like_base64(sb):
"""Check if the string looks like base64."""
return re.match("^[A-Za-z0-9+/]+[=]{0,2}$", sb) is not None
def is_image_data(b64data):
"""Check if the base64 data is an image by looking at the start of the data."""
image_signatures = {
b"\xFF\xD8\xFF": "jpg",
b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
b"\x47\x49\x46\x38": "gif",
b"\x52\x49\x46\x46": "webp",
}
try:
header = base64.b64decode(b64data)[:8] # Decode and get the first 8 bytes
for sig, format in image_signatures.items():
if header.startswith(sig):
return True
return False
except Exception:
return False
def split_image_text_types(docs):
"""Split base64-encoded images and texts."""
b64_images = []
texts = []
for doc in docs:
if isinstance(doc, Document):
doc = doc.page_content
if looks_like_base64(doc) and is_image_data(doc):
b64_images.append(doc)
else:
texts.append(doc)
return {"images": b64_images, "texts": texts}
def img_prompt_func(data_dict):
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
if data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{data_dict['context']['images'][0]}"
},
}
messages.append(image_message)
text_message = {
"type": "text",
"text": (
"Answer the question based only on the provided context, which can include text, tables, and image(s). "
"If an image is provided, analyze it carefully to help answer the question.\n"
f"User-provided question / keywords: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
return [HumanMessage(content=messages)]
def multi_modal_rag_chain(retriever):
"""Multi-modal RAG chain"""
model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024)
chain = (
        {
            "context": retriever | RunnableLambda(split_image_text_types),
            "question": RunnablePassthrough(),
        }
        | RunnableLambda(img_prompt_func)
        | model
        | StrOutputParser()
    )
    return chain
import re
from IPython.display import Image, display
from steamship import Block, Steamship
from langchain.agents import AgentType, initialize_agent
from langchain.tools import SteamshipImageGenerationTool
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
tools = [SteamshipImageGenerationTool(model_name="dall-e")]
mrkl = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
output = mrkl.run("How would you visualize a parrot playing soccer?")
def show_output(output):
"""Display the multi-modal output from the agent."""
UUID_PATTERN = re.compile(
r"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})"
)
outputs = UUID_PATTERN.split(output)
outputs = [
re.sub(r"^\W+", "", el) for el in outputs
] # Clean trailing and leading non-word characters
for output in outputs:
maybe_block_id = UUID_PATTERN.search(output)
if maybe_block_id:
display(Image(Block.get(Steamship(), _id=maybe_block_id.group()).raw()))
else:
print(output, end="\n\n")
tools = [SteamshipImageGenerationTool(model_name="stable-diffusion")]
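# Re-initialize the agent with the stable-diffusion tool, mirroring the dall-e setup above
mrkl = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
show_output(mrkl.run("How would you visualize a parrot playing soccer?"))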
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
from langchain_community.tools.google_finance import GoogleFinanceQueryRun
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
os.environ["SERPAPI_API_KEY"] = ""
tool = GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper())
tool.run("Google")
import os
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = ""
os.environ["SERP_API_KEY"] = ""
llm = OpenAI()
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import Tair
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = FakeEmbeddings(size=128)
tair_url = "redis://localhost:6379"
Tair.drop_index(tair_url=tair_url)
vector_store = Tair.from_documents(docs, embeddings, tair_url=tair_url)
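# Illustrative similarity search against the new Tair index
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_store.similarity_search(query)
docs[0]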
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-community')
import os
os.environ["YDC_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from langchain_community.tools.you import YouSearchTool
from langchain_community.utilities.you import YouSearchAPIWrapper
api_wrapper = YouSearchAPIWrapper(num_web_results=1)
tool = YouSearchTool(api_wrapper=api_wrapper)
tool
response = tool.invoke("What is the weather in NY")
print(len(response))
for item in response:
print(item)
get_ipython().system('pip install --upgrade --quiet langchain langchain-openai langchainhub langchain-community')
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an assistant."""
base_prompt = hub.pull("langchain-ai/openai-functions-template")
prompt = base_prompt.partial(instructions=instructions)
llm = ChatOpenAI(temperature=0)
from langchain.prompts import PromptTemplate
prompt = (
PromptTemplate.from_template("Tell me a joke about {topic}")
+ ", make it funny"
+ "\n\nand in {language}"
)
prompt
prompt.format(topic="sports", language="spanish")
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
chain = LLMChain(llm=model, prompt=prompt)
chain.run(topic="sports", language="spanish")
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
prompt = SystemMessage(content="You are a nice pirate")
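# A hedged sketch of chat-prompt composition, mirroring the string-prompt pipeline above
new_prompt = (
    prompt + HumanMessage(content="hi") + AIMessage(content="what?") + "{input}"
)
new_prompt.format_messages(input="i said hi")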
from langchain import hub
from langchain.agents import AgentExecutor, tool
from langchain.agents.output_parsers import XMLAgentOutputParser
from langchain_community.chat_models import ChatAnthropic
model = ChatAnthropic(model="claude-2")
from langchain_community.document_loaders import CoNLLULoader
loader = CoNLLULoader("example_data/conllu.conllu")
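# Load the CoNLL-U file into LangChain documents
document = loader.load()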
get_ipython().system('pip install databricks-sql-connector')
from langchain_community.utilities import SQLDatabase
db = SQLDatabase.from_databricks(catalog="samples", schema="nyctaxi")
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
from langchain_community.utilities import SQLDatabaseChain
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
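# Hypothetical query against the nyctaxi sample schema
db_chain.run("What is the average duration of taxi rides that start between midnight and 6am?")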
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she succeeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she succeeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_with_sources_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
streaming_llm = OpenAI(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
openai_api_key=openai_api_key,
)
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
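# A hedged sketch (assumed wiring) of the streaming LLM inside the conversational
# chain, following the question_generator / combine_docs_chain pattern used above
doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
bot = ConversationalRetrievalChain(
    retriever=vectara.as_retriever(),
    combine_docs_chain=doc_chain,
    question_generator=question_generator,
)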
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import ForefrontAI
from getpass import getpass
FOREFRONTAI_API_KEY = getpass()
os.environ["FOREFRONTAI_API_KEY"] = FOREFRONTAI_API_KEY
llm = ForefrontAI(endpoint_url="YOUR ENDPOINT URL HERE")
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
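# A minimal sketch wiring the ForefrontAI LLM into a chain; the question is illustrative
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)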
from langchain.output_parsers import XMLOutputParser
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
model = ChatAnthropic(model="claude-2", max_tokens_to_sample=512, temperature=0.1)
actor_query = "Generate the shortened filmography for Tom Hanks."
output = model.invoke(
f"""{actor_query}
Please enclose the movies in <movie></movie> tags"""
)
print(output.content)
parser = XMLOutputParser()
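# Parse the tagged output into a structured dict (a sketch; assumes well-formed tags)
parser.parse(output.content)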
from langchain.chains import LLMMathChain
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.tools import Tool
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
from langchain_openai import ChatOpenAI, OpenAI
search = DuckDuckGoSearchAPIWrapper()
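# A hedged sketch of the documented plan-and-execute setup (tool descriptions abridged)
llm = ChatOpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events",
    ),
    Tool(
        name="Calculator",
        func=llm_math_chain.run,
        description="useful for when you need to answer questions about math",
    ),
]
planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor)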
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.model_laboratory import ModelLaboratory
from langchain.prompts import PromptTemplate
from langchain_community.llms import Cohere, HuggingFaceHub
from langchain_openai import OpenAI
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:")
# Cohere and HuggingFaceHub entries are assumed from the imports above
llms = [
    OpenAI(temperature=0),
    Cohere(temperature=0),
    HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}),
]
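# Compare the configured models side by side; the prompt is illustrative
model_lab = ModelLaboratory.from_llms(llms)
model_lab.compare("What color is a flamingo?")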
get_ipython().run_line_magic('pip', 'install --upgrade --quiet unstructured')
from langchain_community.document_loaders import UnstructuredEmailLoader
loader = UnstructuredEmailLoader("example_data/fake-email.eml")
data = loader.load()
data
loader = UnstructuredEmailLoader("example_data/fake-email.eml", mode="elements")
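# Element mode returns one document per email element rather than a single blob
data = loader.load()
data[0]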
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn')
from langchain_community.retrievers import TFIDFRetriever
retriever = TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"])
from langchain_core.documents import Document
retriever = TFIDFRetriever.from_documents(
[
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="world"),
Document(page_content="hello"),
Document(page_content="foo bar"),
]
)
result = retriever.get_relevant_documents("foo")
result
retriever.save_local("testing.pkl")
retriever_copy = TFIDFRetriever.load_local("testing.pkl")
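# Sanity check: the reloaded retriever behaves like the original
retriever_copy.get_relevant_documents("foo")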
get_ipython().run_line_magic('pip', 'install --upgrade --quiet weaviate-client')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
WEAVIATE_URL = getpass.getpass("WEAVIATE_URL:")
os.environ["WEAVIATE_API_KEY"] = getpass.getpass("WEAVIATE_API_KEY:")
WEAVIATE_API_KEY = os.environ["WEAVIATE_API_KEY"]
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Weaviate
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = Weaviate.from_documents(docs, embeddings, weaviate_url=WEAVIATE_URL, by_text=False)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
import weaviate
client = weaviate.Client(
url=WEAVIATE_URL, auth_client_secret=weaviate.AuthApiKey(WEAVIATE_API_KEY)
)
vectorstore = Weaviate.from_documents(
documents, embeddings, client=client, by_text=False
)
docs = db.similarity_search_with_score(query, by_text=False)
docs[0]
retriever = db.as_retriever(search_type="mmr")
retriever.get_relevant_documents(query)[0]
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
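# A hedged sketch of the usual RAG assembly over the MMR retriever above
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

rag_prompt = hub.pull("rlm/rag-prompt")
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
rag_chain.invoke(query)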
from langchain.prompts import ChatMessagePromptTemplate
prompt = "May the {subject} be with you"
chat_message_prompt = ChatMessagePromptTemplate.from_template(
role="Jedi", template=prompt
)
chat_message_prompt.format(subject="force")
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
human_prompt = "Summarize our conversation so far in {word_count} words."
human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)
chat_prompt = ChatPromptTemplate.from_messages(
    [MessagesPlaceholder(variable_name="conversation"), human_message_template]
)
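# A hedged usage sketch: fill the placeholder with a short prior conversation
from langchain_core.messages import AIMessage, HumanMessage

conversation = [
    HumanMessage(content="What is the best way to learn programming?"),
    AIMessage(content="Practice regularly and build small projects."),
]
chat_prompt.format_prompt(conversation=conversation, word_count="10").to_messages()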
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu')
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(texts, embeddings)
retriever = db.as_retriever()
from langchain.tools.retriever import create_retriever_tool
tool = create_retriever_tool(
retriever,
"search_state_of_union",
"Searches and returns excerpts from the 2022 State of the Union.",
)
tools = [tool]
from langchain import hub
prompt = hub.pull("hwchase17/openai-tools-agent")
prompt.messages
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
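# A hedged sketch of the likely agent assembly for the pulled openai-tools prompt
from langchain.agents import create_openai_tools_agent

agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What did the president say about Ketanji Brown Jackson?"})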
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
YBUSER = "[SANDBOX USER]"
YBPASSWORD = "[SANDBOX PASSWORD]"
YBDATABASE = "[SANDBOX_DATABASE]"
YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com"
OPENAI_API_KEY = "[OPENAI API KEY]"
import os
import pathlib
import re
import sys
import urllib.parse as urlparse
from getpass import getpass
import psycopg2
from IPython.display import Markdown, display
from langchain.chains import LLMChain, RetrievalQAWithSourcesChain
from langchain.docstore.document import Document
from langchain_community.vectorstores import Yellowbrick
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
yellowbrick_connection_string = (
f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}"
)
YB_DOC_DATABASE = "sample_data"
YB_DOC_TABLE = "yellowbrick_documentation"
embedding_table = "my_embeddings"
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
system_template = """If you don't know the answer, make up your best guess."""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
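# A hedged sketch wiring the chat prompt into an LLMChain (model choice is an assumption)
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
llm_chain = LLMChain(llm=llm, prompt=prompt)
llm_chain.run("What is Yellowbrick?")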
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun
search = DuckDuckGoSearchRun()
search.run("Obama's first name?")
from langchain.tools import DuckDuckGoSearchResults
search = DuckDuckGoSearchResults()
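# Unlike DuckDuckGoSearchRun, this variant returns result metadata (title, link, snippet)
search.run("Obama")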
from getpass import getpass
WRITER_API_KEY = getpass()
import os
os.environ["WRITER_API_KEY"] = WRITER_API_KEY
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Writer
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = Writer()
llm_chain = LLMChain(prompt=prompt, llm=llm)
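# Illustrative question for the Writer chain
question = "Who was the first president of the United States?"
llm_chain.run(question)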
from typing import Callable, List
from langchain.memory import ConversationBufferMemory
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
from langchain.agents import AgentType, initialize_agent, load_tools
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
class DialogueAgentWithTools(DialogueAgent):
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
tool_names: List[str],
**tool_kwargs,
) -> None:
super().__init__(name, system_message, model)
self.tools = load_tools(tool_names, **tool_kwargs)
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
agent_chain = initialize_agent(
self.tools,
self.model,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=ConversationBufferMemory(
memory_key="chat_history", return_messages=True
),
)
message = AIMessage(
content=agent_chain.run(
input="\n".join(
[self.system_message.content] + self.message_history + [self.prefix]
)
)
)
return message.content
names = {
"AI accelerationist": ["arxiv", "ddg-search", "wikipedia"],
"AI alarmist": ["arxiv", "ddg-search", "wikipedia"],
}
topic = "The current impact of automation and artificial intelligence on employment"
word_limit = 50 # word limit for task brainstorming
conversation_description = f"""Here is the topic of conversation: {topic}
The participants are: {', '.join(names.keys())}"""
agent_descriptor_system_message = SystemMessage(
content="You can add detail to the description of the conversation participant."
)
def generate_agent_description(name):
agent_specifier_prompt = [
agent_descriptor_system_message,
HumanMessage(
content=f"""{conversation_description}
Please reply with a creative description of {name}, in {word_limit} words or less.
Speak directly to {name}.
Give them a point of view.
Do not add anything else."""
),
]
    agent_description = ChatOpenAI(temperature=1.0)(agent_specifier_prompt).content
    return agent_description
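# Build a description for each participant using the specifier prompt above
agent_descriptions = {name: generate_agent_description(name) for name in names}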
from langchain.memory.motorhead_memory import MotorheadMemory
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
AI:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"], template=template
)
memory = MotorheadMemory(
session_id="testing-1", url="http://localhost:8080", memory_key="chat_history"
)
await memory.init()
llm_chain = LLMChain(
    llm=OpenAI(),
    prompt=prompt,
    verbose=True,
    memory=memory,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 langchain-openai tiktoken python-dotenv')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "amazon-textract-caller>=0.2.0"')
from langchain_community.document_loaders import AmazonTextractPDFLoader
loader = AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg")
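# Load the document; this calls the Amazon Textract service on the sample image
documents = loader.load()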
get_ipython().run_line_magic('pip', 'install --upgrade --quiet arxiv')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent, load_tools
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = load_tools(
["arxiv"],
)
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
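# Run the ReAct agent over the arxiv tool; the paper ID below is illustrative
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What's the paper 1605.08386 about?"})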
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
splits = text_splitter.split_documents(data)
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=splits, embedding=embedding)
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-community')
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.messages import AIMessage
from langchain_community.llms.chatglm3 import ChatGLM3
template = """{question}"""
prompt = PromptTemplate.from_template(template)
endpoint_url = "http://127.0.0.1:8000/v1/chat/completions"
messages = [
    # "I'm traveling from the U.S. to China and would like to learn about Chinese cities before the trip."
    AIMessage(content="我将从美国到中国来旅游,出行前希望了解中国的城市"),
    # "Feel free to ask me any questions."
    AIMessage(content="欢迎问我任何问题。"),
]
llm = ChatGLM3(
endpoint_url=endpoint_url,
max_tokens=80000,
prefix_messages=messages,
top_p=0.9,
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "北京和上海两座城市有什么不同?"  # "What are the differences between Beijing and Shanghai?"
llm_chain.run(question)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import ChatGLM
template = """{question}"""
prompt = PromptTemplate.from_template(template)
from langchain_openai import OpenAI
llm = OpenAI(temperature=1, max_tokens=512, model="gpt-3.5-turbo-instruct")
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.tools import ShellTool
tool = ShellTool()
print(tool.run("echo Hello World!"))
tool = ShellTool(callbacks=[HumanApprovalCallbackHandler()])
print(tool.run("ls /usr"))
print(tool.run("ls /private"))
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import OpenAI
def _should_check(serialized_obj: dict) -> bool:
return serialized_obj.get("name") == "terminal"
def _approve(_input: str) -> bool:
if _input == "echo 'Hello World'":
return True
msg = (
"Do you approve of the following input? "
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no."
)
msg += "\n\n" + _input + "\n"
resp = input(msg)
return resp.lower() in ("yes", "y")
callbacks = [HumanApprovalCallbackHandler(should_check=_should_check, approve=_approve)]
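# A hedged sketch of routing the approval callbacks through an agent run;
# the tool list and command are illustrative
llm = OpenAI(temperature=0)
tools = load_tools(["terminal"])
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
agent.run("list all directories in /private", callbacks=callbacks)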
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
path = "/Users/rlm/Desktop/Papers/LLaVA/"
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOllama(model="llama2:13b-chat")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [i.text for i in text_elements if i.text != ""]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n')
import glob
import os
file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt")))
img_summaries = []
for file_path in file_paths:
with open(file_path, "r") as file:
img_summaries.append(file.read())
cleaned_img_summary = [
s.split("clip_model_load: total allocated memory: 201.27 MB\n\n", 1)[1].strip()
for s in img_summaries
]
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
vectorstore = Chroma(
collection_name="summaries", embedding_function=GPT4AllEmbeddings()
)
store = InMemoryStore() # <- Can we extend this to images
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]
summary_img = [
Document(page_content=s, metadata={id_key: img_ids[i]})
for i, s in enumerate(cleaned_img_summary)
]
retriever.vectorstore.add_documents(summary_img)
retriever.docstore.mset(
list(zip(img_ids, cleaned_img_summary))
) # Store the image summary as the raw document
retriever.get_relevant_documents("Images / figures with playful and creative examples")[
0
]
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOllama(model="llama2:13b-chat")
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
    | StrOutputParser()
)
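# Illustrative query against the LLaVA paper index built above
chain.invoke("What is the performance of LLaVA across benchmarks?")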
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_core.tools import tool
@tool
def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:
"""Do something complex with a complex tool."""
return int_arg * float_arg
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
model_with_tools = model.bind_tools(
[complex_tool],
tool_choice="complex_tool",
)
from operator import itemgetter
from langchain.output_parsers import JsonOutputKeyToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
chain.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
from typing import Any
from langchain_core.runnables import RunnableConfig
def try_except_tool(tool_args: dict, config: RunnableConfig) -> Runnable:
try:
complex_tool.invoke(tool_args, config=config)
except Exception as e:
return f"Calling tool with arguments:\n\n{tool_args}\n\nraised the following error:\n\n{type(e)}: {e}"
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| try_except_tool
)
print(
chain.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
)
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
better_model = ChatOpenAI(model="gpt-4-1106-preview", temperature=0)
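# A hedged fallback sketch: retry with the stronger model if the first chain raises
better_chain = (
    better_model.bind_tools([complex_tool], tool_choice="complex_tool")
    | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
    | complex_tool
)
chain_with_fallback = chain.with_fallbacks([better_chain])
chain_with_fallback.invoke(
    "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)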
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
def initialize_chain(instructions, memory=None):
if memory is None:
memory = ConversationBufferWindowMemory()
memory.ai_prefix = "Assistant"
template = f"""
Instructions: {instructions}
{{{memory.memory_key}}}
Human: {{human_input}}
Assistant:"""
prompt = PromptTemplate(
input_variables=["history", "human_input"], template=template
)
chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
        memory=ConversationBufferWindowMemory(),
    )
    return chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
splits = text_splitter.split_documents(data)
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=splits, embedding=embedding)
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_openai import ChatOpenAI
question = "What are the approaches to Task Decomposition?"
llm = ChatOpenAI(temperature=0)
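# A sketch of the standard continuation: let the LLM generate query variants for retrieval
retriever_from_llm = MultiQueryRetriever.from_llm(
    retriever=vectordb.as_retriever(), llm=llm
)
unique_docs = retriever_from_llm.get_relevant_documents(query=question)
len(unique_docs)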
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Vectara
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
vectara = Vectara.from_files(["state_of_the_union.txt"])
summary_config = {"is_enabled": True, "max_results": 5, "response_lang": "eng"}
retriever = vectara.as_retriever(
search_kwargs={"k": 3, "summary_config": summary_config}
)
def get_sources(documents):
return documents[:-1]
def get_summary(documents):
return documents[-1].page_content
query_str = "what did Biden say?"
(retriever | get_summary).invoke(query_str)
(retriever | get_sources).invoke(query_str)
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
mqr = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)
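# The multi-query retriever composes with the same summary/source helpers above
(mqr | get_summary).invoke(query_str)
(mqr | get_sources).invoke(query_str)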
import os
import pprint
os.environ["SERPER_API_KEY"] = ""
from langchain_community.utilities import GoogleSerperAPIWrapper
search = GoogleSerperAPIWrapper()
search.run("Obama's first name?")
os.environ["OPENAI_API_KEY"] = ""
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
search = GoogleSerperAPIWrapper()
tools = [
Tool(
name="Intermediate Answer",
func=search.run,
description="useful for when you need to ask with search",
)
]
self_ask_with_search = initialize_agent(
tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
)
self_ask_with_search.run(
"What is the hometown of the reigning men's U.S. Open champion?"
)
search = GoogleSerperAPIWrapper()
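# The wrapper also exposes structured results (knowledge graph, organic hits, etc.)
results = search.results("Apple Inc.")
pprint.pp(results)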
get_ipython().system(' pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet')
from pprint import pprint
from docugami import Docugami
from docugami.lib.upload import upload_to_named_docset, wait_for_dgml
DOCSET_NAME = "NTSB Aviation Incident Reports"
FILE_PATHS = [
"/Users/tjaffri/ntsb/Report_CEN23LA277_192541.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA338_192753.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA363_192876.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA394_192995.pdf",
"/Users/tjaffri/ntsb/Report_ERA23LA114_106615.pdf",
"/Users/tjaffri/ntsb/Report_WPR23LA254_192532.pdf",
]
assert len(FILE_PATHS) > 5, "Please provide at least 6 files"
dg_client = Docugami()
dg_docs = upload_to_named_docset(dg_client, FILE_PATHS, DOCSET_NAME)
dgml_paths = wait_for_dgml(dg_client, dg_docs)
pprint(dgml_paths)
from pathlib import Path
from dgml_utils.segmentation import get_chunks_str
dgml_path = dgml_paths[Path(FILE_PATHS[0]).name]
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=True, # Ensures Docugami XML semantic tags are included in the chunked output (set to False for text-only chunks and tables as Markdown)
max_text_length=1024 * 8, # 8k chars are ~2k tokens for OpenAI.
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=False, # text-only chunks and tables as Markdown
max_text_length=1024
* 8, # 8k chars are ~2k tokens for OpenAI. Ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
import requests
dgml = requests.get(
"https://raw.githubusercontent.com/docugami/dgml-utils/main/python/tests/test_data/article/Jane%20Doe.xml"
).text
chunks = get_chunks_str(dgml, include_xml_tags=True)
len(chunks)
category_counts = {}
for element in chunks:
category = element.structure
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
category_counts
table_elements = [c for c in chunks if "table" in c.structure.split()]
print(f"There are {len(table_elements)} tables")
text_elements = [c for c in chunks if "table" not in c.structure.split()]
print(f"There are {len(text_elements)} text elements")
for element in text_elements[:20]:
print(element.text)
print(table_elements[0].text)
chunks_as_text = get_chunks_str(dgml, include_xml_tags=False)
table_elements_as_text = [c for c in chunks_as_text if "table" in c.structure.split()]
print(table_elements_as_text[0].text)
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores.chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
def build_retriever(text_elements, tables, table_summaries):
vectorstore = Chroma(
collection_name="summaries", embedding_function=OpenAIEmbeddings()
)
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
texts = [i.text for i in text_elements]
doc_ids = [str(uuid.uuid4()) for _ in texts]
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
return retriever
retriever = build_retriever(text_elements, tables, table_summaries)
from langchain_core.runnables import RunnablePassthrough
system_prompt = SystemMessagePromptTemplate.from_template(
    "You are a helpful assistant that answers questions based on provided context. Your provided context can include text or tables, "
    "and may also contain semantic XML markup. Pay attention to the semantic XML markup to understand more about the context semantics as "
    "well as structure (e.g., lists and tabular layouts expressed with HTML-like tags)."
)
)
human_prompt = HumanMessagePromptTemplate.from_template(
"""Context:
{context}
Question: {question}"""
)
def build_chain(retriever, model):
    prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])
    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )
    return chain
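# Hypothetical usage; the question is illustrative
chain = build_chain(retriever, model)
chain.invoke("What types of aircraft were involved in these incident reports?")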
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
summary_chain = LLMChain(
llm=OpenAI(),
prompt=prompt,
verbose=True,
memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory
)
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="Summary",
func=summary_chain.run,
description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.",
),
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="What is ChatGPT?")
agent_chain.run(input="Who developed it?")
agent_chain.run(
    input="Thanks. Summarize the conversation, for my 5-year-old daughter."
)
print(agent_chain.memory.buffer)
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
from pathlib import Path
relevant_parts = []
for p in Path(".").absolute().parts:
relevant_parts.append(p)
if relevant_parts[-3:] == ["langchain", "docs", "modules"]:
break
doc_path = str(Path(*relevant_parts) / "state_of_the_union.txt")
from langchain_community.document_loaders import TextLoader
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
state_of_union = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/")
docs = loader.load()
ruff_texts = text_splitter.split_documents(docs)
ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff")
ruff = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever()
)
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_openai import OpenAI
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
),
    Tool(
        name="Ruff QA System",
        func=ruff.run,
        description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.",
    ),
]
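# Assemble a router agent over both QA tools; the question is illustrative
agent = initialize_agent(
    tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What did the president say about Ketanji Brown Jackson in the state of the union address?")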
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import OpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
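# A hedged sketch of the likely agent assembly, mirroring the arxiv example above
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(OpenAI(temperature=0), [tool], prompt)
agent_executor = AgentExecutor(agent=agent, tools=[tool], verbose=True)
agent_executor.invoke({"input": "What is Wikipedia?"})  # illustrative question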
get_ipython().run_line_magic('pip', 'install --upgrade --quiet usearch')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import USearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
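# Build the USearch index and run a similarity search, mirroring the other vectorstore examples
db = USearch.from_documents(docs, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
found_docs = db.similarity_search(query)
print(found_docs[0].page_content)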
import os
os.environ["OPENAI_API_KEY"] = "...input your openai api key here..."
from langchain_experimental.agents.agent_toolkits import create_spark_dataframe_agent
from langchain_openai import OpenAI
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
csv_file_path = "titanic.csv"
df = spark.read.csv(csv_file_path, header=True, inferSchema=True)
df.show()
agent = create_spark_dataframe_agent(llm=OpenAI(temperature=0), df=df, verbose=True)
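# Illustrative question over the Titanic dataframe
agent.run("how many rows are there?")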
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
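# A sketch of the standard continuation: a base retriever for pretty_print_docs to display
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
    "What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)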
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0, model="gpt-4-turbo-preview")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet predictionguard langchain')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import PredictionGuard
os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
pgllm("Tell me a joke")
template = """Respond to the following query based on the context.
Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! 🎉 We have officially added TWO new candle subscription box options! 📦
Exclusive Candle Box - $80
Monthly Candle Box - $45 (NEW!)
Scent of The Month Box - $28 (NEW!)
Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉
Query: {query}
Result: """
prompt = PromptTemplate.from_template(template)
pgllm(prompt.format(query="What kind of post is this?"))
pgllm = PredictionGuard(
model="OpenAI-text-davinci-003",
output={
"type": "categorical",
"categories": ["product announcement", "apology", "relational"],
},
)
pgllm(prompt.format(query="What kind of post is this?"))
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
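# Standard LLMChain wiring for the prompt above; the question is just an example.
llm_chain = LLMChain(prompt=prompt, llm=pgllm)
llm_chain.predict(
    question="What NFL team won the Super Bowl in the year Justin Bieber was born?"
)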
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Write out the following equation using algebraic symbols then solve it. Use the format\n\nEQUATION:...\nSOLUTION:...\n\n",
),
("human", "{equation_statement}"),
]
)
model = ChatOpenAI(temperature=0)
runnable = (
{"equation_statement": RunnablePassthrough()} | prompt | model | | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa')
from vespa.package import ApplicationPackage, Field, RankProfile
app_package = ApplicationPackage(name="testapp")
app_package.schema.add_fields(
Field(
name="text", type="string", indexing=["index", "summary"], index="enable-bm25"
),
Field(
name="embedding",
type="tensor<float>(x[384])",
indexing=["attribute", "summary"],
attribute=["distance-metric: angular"],
),
)
app_package.schema.add_rank_profile(
RankProfile(
name="default",
first_phase="closeness(field, embedding)",
inputs=[("query(query_embedding)", "tensor<float>(x[384])")],
)
)
from vespa.deployment import VespaDocker
vespa_docker = VespaDocker()
vespa_app = vespa_docker.deploy(application_package=app_package)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
from typing import List
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")
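# Build a parser and prompt from the Pydantic schema, as the imports above suggest:
parser = PydanticOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)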
get_ipython().system('pip install --upgrade volcengine')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.document_loaders import TextLoader
from langchain.vectorstores.vikingdb import VikingDB, VikingDBConfig
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("./test.txt")
get_ipython().run_line_magic('pip', 'install tika')
import os
from langchain_community.vectorstores import LLMRails
os.environ["LLM_RAILS_DATASTORE_ID"] = "Your datastore id "
os.environ["LLM_RAILS_API_KEY"] = "Your API Key"
llm_rails = LLMRails.from_texts(["Your text here"])
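# Query the store directly (example query):
llm_rails.similarity_search("Your text here")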
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rapidfuzz')
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("string_distance")
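# Compare a prediction to a reference; the score is a distance (lower = more similar).
evaluator.evaluate_strings(
    prediction="The job is completely done.",
    reference="The job is done",
)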
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python')
import os
from langchain.callbacks import ContextCallbackHandler
token = os.environ["CONTEXT_API_TOKEN"]
context_callback = ContextCallbackHandler(token)
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
chat = ChatOpenAI(
headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)]
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to French."
),
HumanMessage(content="I love programming."),
]
print(chat(messages))
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
human_message_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="What is a good name for a company that makes {product}?",
input_variables=["product"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
callback = ContextCallbackHandler(token)
chat = ChatOpenAI(temperature=0.9, callbacks=[callback])
chain = LLMChain(llm=chat, prompt=chat_prompt_template, callbacks=[callback])
print(chain.run("colorful socks"))
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import chain
from langchain_openai import ChatOpenAI
prompt1 = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
prompt2 = ChatPromptTemplate.from_template("What is the subject of this joke: {joke}")
@chain
def custom_chain(text):
prompt_val1 = prompt1.invoke({"topic": text})
output1 = ChatOpenAI().invoke(prompt_val1)
parsed_output1 = StrOutputParser().invoke(output1)
    chain2 = prompt2 | ChatOpenAI() | StrOutputParser()
    return chain2.invoke({"joke": parsed_output1})
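# The decorated function is itself a runnable:
custom_chain.invoke("bears")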
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/photos/"
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "photos.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
import os
import uuid
import chromadb
import numpy as np
from langchain_community.vectorstores import Chroma
from langchain_experimental.open_clip import OpenCLIPEmbeddings
from PIL import Image as _PILImage
vectorstore = Chroma(
collection_name="mm_rag_clip_photos", embedding_function=OpenCLIPEmbeddings()
)
image_uris = sorted(
[
os.path.join(path, image_name)
for image_name in os.listdir(path)
if image_name.endswith(".jpg")
]
)
vectorstore.add_images(uris=image_uris)
vectorstore.add_texts(texts=texts)
retriever = vectorstore.as_retriever()
import base64
import io
from io import BytesIO
import numpy as np
from PIL import Image
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string.
Args:
base64_string (str): Base64 string of the original image.
size (tuple): Desired size of the image as (width, height).
Returns:
str: Base64 string of the resized image.
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def is_base64(s):
"""Check if a string is Base64 encoded"""
try:
return base64.b64encode(base64.b64decode(s)) == s.encode()
except Exception:
return False
def split_image_text_types(docs):
"""Split numpy array images and texts"""
images = []
text = []
for doc in docs:
doc = doc.page_content # Extract Document contents
if is_base64(doc):
images.append(
resize_base64_image(doc, size=(250, 250))
) # base64 encoded str
else:
text.append(doc)
return {"images": images, "texts": text}
from operator import itemgetter
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
def prompt_func(data_dict):
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
if data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{data_dict['context']['images'][0]}"
},
}
messages.append(image_message)
text_message = {
"type": "text",
"text": (
"As an expert art critic and historian, your task is to analyze and interpret images, "
"considering their historical and cultural significance. Alongside the images, you will be "
"provided with related text to offer context. Both will be retrieved from a vectorstore based "
"on user-input keywords. Please use your extensive knowledge and analytical skills to provide a "
"comprehensive summary that includes:\n"
"- A detailed description of the visual elements in the image.\n"
"- The historical and cultural context of the image.\n"
"- An interpretation of the image's symbolism and meaning.\n"
"- Connections between the image and the related text.\n\n"
f"User-provided keywords: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
return [HumanMessage(content=messages)]
model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024)
chain = (
{
"context": retriever | RunnableLambda(split_image_text_types),
"question": | RunnablePassthrough() | langchain_core.runnables.RunnablePassthrough |
from langchain_community.document_loaders.blob_loaders.youtube_audio import (
YoutubeAudioLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import (
OpenAIWhisperParser,
OpenAIWhisperParserLocal,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yt_dlp')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pydub')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet librosa')
local = False
urls = ["https://youtu.be/kCc8FmEb1nY", "https://youtu.be/VMj-3S1tku0"]
save_dir = "~/Downloads/YouTube"
if local:
loader = GenericLoader(
YoutubeAudioLoader(urls, save_dir), OpenAIWhisperParserLocal()
)
else:
loader = GenericLoader(YoutubeAudioLoader(urls, save_dir), OpenAIWhisperParser())
docs = loader.load()
docs[0].page_content[0:500]
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
combined_docs = [doc.page_content for doc in docs]
text = " ".join(combined_docs)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)
splits = text_splitter.split_text(text)
embeddings = OpenAIEmbeddings()
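# Index the transcript splits and wire up RetrievalQA over them, following the
# imports above; the question is an example aimed at the lecture transcripts:
vectordb = FAISS.from_texts(splits, embeddings)
qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
)
qa_chain.run("Why do we need to zero out the gradient before backprop at each step?")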
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Vectara
from langchain_core.documents import Document
from langchain_openai import OpenAI
from langchain_text_splitters import CharacterTextSplitter
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"year": 1995, "genre": "animated"},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"year": 1979,
"rating": 9.9,
"director": "Andrei Tarkovsky",
"genre": "science fiction",
},
),
]
vectara = Vectara()
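# Load the example documents into Vectara, carrying metadata for self-query filtering:
for doc in docs:
    vectara.add_texts([doc.page_content], doc_metadata=doc.metadata)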
import re
from IPython.display import Image, display
from steamship import Block, Steamship
from langchain.agents import AgentType, initialize_agent
from langchain.tools import SteamshipImageGenerationTool
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet manifest-ml')
from langchain_community.llms.manifest import ManifestWrapper
from manifest import Manifest
manifest = Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5000"
)
print(manifest.client_pool.get_current_client().get_model_params())
llm = ManifestWrapper(
client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 256}
)
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
from langchain_text_splitters import CharacterTextSplitter
_prompt = """Write a concise summary of the following:
{text}
CONCISE SUMMARY:"""
prompt = PromptTemplate.from_template(_prompt)
text_splitter = CharacterTextSplitter()
mp_chain = MapReduceChain.from_params(llm, prompt, text_splitter)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pypdf pymongo langchain-openai tiktoken')
import getpass
MONGODB_ATLAS_CLUSTER_URI = getpass.getpass("MongoDB Atlas Cluster URI:")
from pymongo import MongoClient
client = MongoClient(MONGODB_ATLAS_CLUSTER_URI)
DB_NAME = "langchain_db"
COLLECTION_NAME = "test"
ATLAS_VECTOR_SEARCH_INDEX_NAME = "index_name"
MONGODB_COLLECTION = client[DB_NAME][COLLECTION_NAME]
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf")
data = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
docs = text_splitter.split_documents(data)
print(docs[0])
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_openai import OpenAIEmbeddings
vector_search = MongoDBAtlasVectorSearch.from_documents(
documents=docs,
    embedding=OpenAIEmbeddings(disallowed_special=()),
    collection=MONGODB_COLLECTION,
    index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tigrisdb openapi-schema-pydantic langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["TIGRIS_PROJECT"] = getpass.getpass("Tigris Project Name:")
os.environ["TIGRIS_CLIENT_ID"] = getpass.getpass("Tigris Client Id:")
os.environ["TIGRIS_CLIENT_SECRET"] = getpass.getpass("Tigris Client Secret:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Tigris
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_store = Tigris.from_documents(docs, embeddings, index_name="my_embeddings")
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma.from_documents(docs, embedding_function)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
db2 = Chroma.from_documents(docs, embedding_function, persist_directory="./chroma_db")
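# Reload the persisted collection from disk and query it again:
db3 = Chroma(persist_directory="./chroma_db", embedding_function=embedding_function)
found = db3.similarity_search(query)
print(found[0].page_content)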
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
import dspy
colbertv2 = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
from langchain_openai import OpenAI
set_llm_cache(SQLiteCache(database_path="cache.db"))
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
def retrieve(inputs):
return [doc["text"] for doc in colbertv2(inputs["question"], k=5)]
colbertv2("cycling")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
prompt = PromptTemplate.from_template(
"Given {context}, answer the question `{question}` as a tweet."
)
vanilla_chain = (
    RunnablePassthrough.assign(context=retrieve) | prompt | llm | StrOutputParser()
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
examples = [
{
"input": "Could the members of The Police perform lawful arrests?",
"output": "what can the members of The Police do?",
},
{
"input": "Jan Sindel’s was born in what country?",
"output": "what is Jan Sindel’s personal history?",
},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
),
few_shot_prompt,
("user", "{question}"),
]
)
question_gen = prompt | ChatOpenAI(temperature=0) | StrOutputParser()
question = "was chatgpt around while trump was president?"
question_gen.invoke({"question": question})
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
search = DuckDuckGoSearchAPIWrapper(max_results=4)
def retriever(query):
return search.run(query)
retriever(question)
retriever(question_gen.invoke({"question": question}))
from langchain import hub
response_prompt = hub.pull("langchain-ai/stepback-answer")
chain = (
{
"normal_context": RunnableLambda(lambda x: x["question"]) | retriever,
"step_back_context": question_gen | retriever,
"question": lambda x: x["question"],
}
| response_prompt
| ChatOpenAI(temperature=0)
| StrOutputParser()
)
chain.invoke({"question": question})
response_prompt_template = """You are an expert of world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.
{normal_context}
Original Question: {question}
Answer:"""
response_prompt = ChatPromptTemplate.from_template(response_prompt_template)
chain = (
{
"normal_context": RunnableLambda(lambda x: x["question"]) | retriever,
"question": lambda x: x["question"],
}
| response_prompt
| ChatOpenAI(temperature=0)
    | StrOutputParser()
)
chain.invoke({"question": question})
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai tiktoken')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet supabase')
import getpass
import os
os.environ["SUPABASE_URL"] = getpass.getpass("Supabase URL:")
os.environ["SUPABASE_SERVICE_KEY"] = getpass.getpass("Supabase Service Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet python-dotenv')
from dotenv import load_dotenv
load_dotenv()
import os
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
from supabase.client import Client, create_client
supabase_url = os.environ.get("SUPABASE_URL")
supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")
supabase: Client = create_client(supabase_url, supabase_key)
embeddings = OpenAIEmbeddings()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.model_laboratory import ModelLaboratory
from langchain.prompts import PromptTemplate
from langchain_community.llms import Cohere, HuggingFaceHub
from langchain_openai import OpenAI
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("Open API Key:")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:")
llms = [
OpenAI(temperature=0),
Cohere(temperature=0),
HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}),
]
model_lab = ModelLaboratory.from_llms(llms)
model_lab.compare("What color is a flamingo?")
prompt = PromptTemplate(
template="What is the capital of {state}?", input_variables=["state"]
)
model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)
model_lab_with_prompt.compare("New York")
from langchain.chains import SelfAskWithSearchChain
from langchain_community.utilities import SerpAPIWrapper
open_ai_llm = OpenAI(temperature=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet openlm')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
import os
from getpass import getpass
if "OPENAI_API_KEY" not in os.environ:
print("Enter your OpenAI API key:")
os.environ["OPENAI_API_KEY"] = getpass()
if "HF_API_TOKEN" not in os.environ:
print("Enter your HuggingFace Hub API key:")
os.environ["HF_API_TOKEN"] = getpass()
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpenLM
question = "What is the capital of France?"
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
from typing import Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_experimental.autonomous_agents import BabyAGI
from langchain_openai import OpenAI, OpenAIEmbeddings
get_ipython().run_line_magic('pip', 'install faiss-cpu > /dev/null')
get_ipython().run_line_magic('pip', 'install google-search-results > /dev/null')
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
embeddings_model = OpenAIEmbeddings()
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain_community.utilities import SerpAPIWrapper
from langchain_openai import OpenAI
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = SerpAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
),
]
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context", "agent_scratchpad"],
)
llm = OpenAI(temperature=0)
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
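# Pair the agent with an executor so BabyAGI can call the tools:
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True
)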
from langchain.chains import GraphCypherQAChain
from langchain_community.graphs import Neo4jGraph
from langchain_openai import ChatOpenAI
graph = Neo4jGraph(
url="bolt://localhost:7687", username="neo4j", password="pleaseletmein"
)
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
graph.refresh_schema()
print(graph.schema)
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True
)
chain.run("Who played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2
)
chain.run("Who played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True
)
result = chain("Who played in Top Gun?")
print(f"Intermediate steps: {result['intermediate_steps']}")
print(f"Final answer: {result['result']}")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, return_direct=True
)
chain.run("Who played in Top Gun?")
from langchain.prompts.prompt import PromptTemplate
CYPHER_GENERATION_TEMPLATE = """Task: Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
Examples: Here are a few examples of generated Cypher statements for particular questions:
# How many people played in Top Gun?
MATCH (m:Movie {{title:"Top Gun"}})<-[:ACTED_IN]-()
RETURN count(*) AS numberOfActors
The question is:
{question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0),
graph=graph,
verbose=True,
cypher_prompt=CYPHER_GENERATION_PROMPT,
)
chain.run("How many people played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
graph=graph,
cypher_llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo"),
qa_llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k"),
verbose=True,
)
chain.run("Who played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
graph=graph,
cypher_llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo"),
qa_llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k"),
verbose=True,
exclude_types=["Movie"],
)
print(chain.graph_schema)
chain = GraphCypherQAChain.from_llm(
    llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo"),
    graph=graph,
    verbose=True,
    validate_cypher=True,
)
chain.run("Who played in Top Gun?")
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
conversation = ConversationChain(
llm=llm, verbose=True, memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI Assistant:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
from langchain_community.document_loaders import HuggingFaceDatasetLoader
dataset_name = "imdb"
page_content_column = "text"
loader = HuggingFaceDatasetLoader(dataset_name, page_content_column)
from datetime import datetime, timedelta
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore, decay_rate=0.0000000000000000000000001, k=1
)
yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
[Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.add_documents([Document(page_content="hello foo")])
retriever.get_relevant_documents("hello world")
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore, decay_rate=0.999, k=1
)
yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
    [Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.add_documents([Document(page_content="hello foo")])
retriever.get_relevant_documents("hello world")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet aleph-alpha-client')
from getpass import getpass
ALEPH_ALPHA_API_KEY = getpass()
from langchain.prompts import PromptTemplate
from langchain_community.llms import AlephAlpha
template = """Q: {question}
A:"""
prompt = PromptTemplate.from_template(template)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opaqueprompts langchain')
import os
os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>"
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.globals import set_debug, set_verbose
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpaquePrompts
from langchain_openai import OpenAI
set_debug(True)
set_verbose(True)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark chromadb')
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"year": 1995, "genre": "animated"},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"year": 1979,
"director": "Andrei Tarkovsky",
"genre": "thriller",
"rating": 9.9,
},
),
]
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import ChatOpenAI
metadata_field_info = [
AttributeInfo(
name="genre",
description="The genre of the movie. One of ['science fiction', 'comedy', 'drama', 'thriller', 'romance', 'action', 'animated']",
type="string",
),
AttributeInfo(
name="year",
description="The year the movie was released",
type="integer",
),
AttributeInfo(
name="director",
description="The name of the movie director",
type="string",
),
AttributeInfo(
name="rating", description="A 1-10 rating for the movie", type="float"
),
]
document_content_description = "Brief summary of a movie"
llm = ChatOpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_info,
)
retriever.invoke("I want to watch a movie rated higher than 8.5")
retriever.invoke("Has Greta Gerwig directed any movies about women")
retriever.invoke("What's a highly rated (above 8.5) science fiction film?")
retriever.invoke(
"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated"
)
retriever = SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_info,
enable_limit=True,
)
retriever.invoke("What are two movies about dinosaurs")
from langchain.chains.query_constructor.base import (
StructuredQueryOutputParser,
get_query_constructor_prompt,
)
prompt = get_query_constructor_prompt(
document_content_description,
metadata_field_info,
)
output_parser = StructuredQueryOutputParser.from_components()
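# Compose the query constructor from the prompt, LLM, and parser defined above;
# the query is an example input:
query_constructor = prompt | llm | output_parser
query_constructor.invoke(
    {
        "query": "What are some sci-fi movies from the 90's directed by Luc Besson about taxi drivers"
    }
)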
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
{"question": "foo", "context": "bar"}
)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
llm = ChatAnthropic(temperature=0).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="anthropic",
openai=ChatOpenAI(),
gpt4=ChatOpenAI(model="gpt-4"),
)
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet ain-py')
import os
os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = ""
import os
from ain.account import Account
if os.environ.get("AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY", None):
account = Account(os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"])
else:
account = Account.create()
os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = account.private_key
print(
f"""
address: {account.address}
private_key: {account.private_key}
"""
)
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
toolkit = AINetworkToolkit()
from langchain_community.chat_models.llama_edge import LlamaEdgeChatService
from langchain_core.messages import HumanMessage, SystemMessage
service_url = "https://b008-54-186-154-209.ngrok-free.app"
chat = LlamaEdgeChatService(service_url=service_url)
system_message = SystemMessage(content="You are an AI assistant")
user_message = HumanMessage(content="What is the capital of France?")
messages = [system_message, user_message]
response = chat(messages)
print(f"[Bot] {response.content}")
service_url = "https://b008-54-186-154-209.ngrok-free.app"
chat = LlamaEdgeChatService(service_url=service_url, streaming=True)
system_message = SystemMessage(content="You are an AI assistant")
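# Streaming variant mirroring the non-streaming call above (example question):
user_message = HumanMessage(content="What is the capital of Norway?")
messages = [system_message, user_message]
response = chat(messages)
print(f"[Bot] {response.content}")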
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sentence-transformers > /dev/null')
from langchain.chains import LLMChain, StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_community.document_transformers import (
LongContextReorder,
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
texts = [
"Basquetball is a great sport.",
"Fly me to the moon is one of my favourite songs.",
"The Celtics are my favourite team.",
"This is a document about the Boston Celtics",
"I simply love going to the movies",
"The Boston Celtics won the game by 20 points",
"This is just a random text.",
"Elden Ring is one of the best games in the last 15 years.",
"L. Kornet is one of the best Celtics players.",
"Larry Bird was an iconic NBA player.",
]
retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever(
    search_kwargs={"k": 10}
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet flashrank')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss_cpu')
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader(
"../../modules/state_of_the_union.txt",
).load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={"k": 20})
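# Wire FlashRank in as a reranking compressor over the base retriever; the
# FlashrankRerank import path is the one current at the time of writing and
# may differ in your installed version:
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import FlashrankRerank
compressor = FlashrankRerank()
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(compressed_docs)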
from langchain_community.embeddings import TensorflowHubEmbeddings
embeddings = TensorflowHubEmbeddings()
from ragatouille import RAGPretrainedModel
RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
import requests
def get_wikipedia_page(title: str):
"""
Retrieve the full text content of a Wikipedia page.
:param title: str - Title of the Wikipedia page.
:return: str - Full text content of the page as raw string.
"""
URL = "https://en.wikipedia.org/w/api.php"
params = {
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
}
headers = {"User-Agent": "RAGatouille_tutorial/0.0.1 (ben@clavie.eu)"}
response = requests.get(URL, params=params, headers=headers)
data = response.json()
page = next(iter(data["query"]["pages"].values()))
return page["extract"] if "extract" in page else None
full_document = get_wikipedia_page("Hayao_Miyazaki")
RAG.index(
collection=[full_document],
index_name="Miyazaki-123",
max_document_length=180,
split_documents=True,
)
results = RAG.search(query="What animation studio did Miyazaki found?", k=3)
results
retriever = RAG.as_langchain_retriever(k=3)
retriever.invoke("What animation studio did Miyazaki found?")
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
"""Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}"""
)
llm = ChatOpenAI()
document_chain = create_stuff_documents_chain(llm, prompt)
retrieval_chain = create_retrieval_chain(retriever, document_chain)
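# Ask the chain the same question as the raw retriever above:
retrieval_chain.invoke({"input": "What animation studio did Miyazaki found?"})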
import nest_asyncio
nest_asyncio.apply()
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import SurrealDBStore
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
from langchain_community.utils.openai_functions import (
convert_pydantic_to_openai_function,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
openai_functions = [convert_pydantic_to_openai_function(Joke)]
model = ChatOpenAI(temperature=0)
prompt = ChatPromptTemplate.from_messages(
[("system", "You are helpful assistant"), ("user", "{input}")]
)
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
parser = JsonOutputFunctionsParser()
chain = prompt | model.bind(functions=openai_functions) | parser
chain.invoke({"input": "tell me a joke"})
for s in chain.stream({"input": "tell me a joke"}):
print(s)
from typing import List
from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
class Jokes(BaseModel):
"""Jokes to tell user."""
joke: List[Joke]
funniness_level: int
parser = JsonKeyOutputFunctionsParser(key_name="joke")
openai_functions = [convert_pydantic_to_openai_function(Jokes)]
chain = prompt | model.bind(functions=openai_functions) | parser
chain.invoke({"input": "tell me two jokes"})
for s in chain.stream({"input": "tell me two jokes"}):
print(s)
from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
@validator("setup")
def question_ends_with_question_mark(cls, field):
if field[-1] != "?":
raise ValueError("Badly formed question!")
return field
parser = PydanticOutputFunctionsParser(pydantic_schema=Joke)
openai_functions = [convert_pydantic_to_openai_function(Joke)]
chain = prompt | model.bind(functions=openai_functions) | parser
chain.invoke({"input": "tell me a joke"})
from langchain.memory import ConversationTokenBufferMemory
from langchain_openai import OpenAI
llm = OpenAI()
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
memory.load_memory_variables({})
memory = ConversationTokenBufferMemory(
llm=llm, max_token_limit=10, return_messages=True
)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
from langchain.chains import ConversationChain
conversation_with_summary = ConversationChain(
llm=llm,
    memory=ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60),
    verbose=True,
)
conversation_with_summary.predict(input="Hi, what's up?")
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, text in enumerate(texts):
text.metadata["source"] = f"{i}-pl"
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
from langchain.chains import create_qa_with_sources_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
qa_chain = create_qa_with_sources_chain(llm)
doc_prompt = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
llm_chain=qa_chain,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)
query = "What did the president say about russia"
retrieval_qa.run(query)
qa_chain_pydantic = create_qa_with_sources_chain(llm, output_parser="pydantic")
final_qa_chain_pydantic = StuffDocumentsChain(
llm_chain=qa_chain_pydantic,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa_pydantic = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic
)
retrieval_qa_pydantic.run(query)
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
get_ipython().system(' pip install langchain replicate')
from langchain_community.chat_models import ChatOllama
llama2_chat = ChatOllama(model="llama2:13b-chat")
llama2_code = ChatOllama(model="codellama:7b-instruct")
from langchain_community.llms import Replicate
replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llama2_chat_replicate = Replicate(
model=replicate_id, input={"temperature": 0.01, "max_length": 500, "top_p": 1}
)
llm = llama2_chat
from langchain_community.utilities import SQLDatabase
db = SQLDatabase.from_uri("sqlite:///nba_roster.db", sample_rows_in_table_info=0)
def get_schema(_):
return db.get_table_info()
def run_query(query):
return db.run(query)
from langchain_core.prompts import ChatPromptTemplate
template = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}
Question: {question}
SQL Query:"""
prompt = ChatPromptTemplate.from_messages(
[
("system", "Given an input question, convert it to a SQL query. No pre-amble."),
("human", template),
]
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
sql_response = (
RunnablePassthrough.assign(schema=get_schema)
| prompt
| llm.bind(stop=["\nSQLResult:"])
| StrOutputParser()
)
sql_response.invoke({"question": "What team is Klay Thompson on?"})
template = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}
Question: {question}
SQL Query: {query}
SQL Response: {response}"""
prompt_response = ChatPromptTemplate.from_messages(
[
(
"system",
"Given an input question and SQL response, convert it to a natural language answer. No pre-amble.",
),
("human", template),
]
)
full_chain = (
    RunnablePassthrough.assign(query=sql_response)
    | RunnablePassthrough.assign(
        schema=get_schema,
        response=lambda x: db.run(x["query"]),
    )
    | prompt_response
    | llm
)
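# Example end-to-end question against the roster database:
full_chain.invoke({"question": "How many unique teams are there?"})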
get_ipython().run_line_magic('pip', 'install --upgrade --quiet elasticsearch==7.11.0')
import getpass
import os
os.environ["QIANFAN_AK"] = getpass.getpass("Your Qianfan AK:")
os.environ["QIANFAN_SK"] = getpass.getpass("Your Qianfan SK:")
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../state_of_the_union.txt")
get_ipython().run_line_magic('pip', 'install -qU chromadb langchain langchain-community langchain-openai')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, doc in enumerate(texts):
doc.metadata["page_chunk"] = i
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
retriever = vectorstore.as_retriever()
from langchain.tools.retriever import create_retriever_tool
retriever_tool = create_retriever_tool(
retriever,
"state-of-union-retriever",
"Query a retriever to get information about state of the union address",
)
from typing import List
from langchain_core.pydantic_v1 import BaseModel, Field
class Response(BaseModel):
"""Final response to the question being asked"""
answer: str = Field(description="The final answer to respond to the user")
sources: List[int] = Field(
description="List of page chunks that contain answer to the question. Only include a page chunk if it contains relevant information"
)
import json
from langchain_core.agents import AgentActionMessageLog, AgentFinish
def parse(output):
if "function_call" not in output.additional_kwargs:
return AgentFinish(return_values={"output": output.content}, log=output.content)
function_call = output.additional_kwargs["function_call"]
name = function_call["name"]
inputs = json.loads(function_call["arguments"])
if name == "Response":
return AgentFinish(return_values=inputs, log=str(function_call))
else:
return AgentActionMessageLog(
tool=name, tool_input=inputs, log="", message_log=[output]
)
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
llm = ChatOpenAI(temperature=0)
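# Bind the retriever tool and the Response schema as OpenAI functions, then
# assemble the agent with the custom parser defined above:
llm_with_tools = llm.bind_functions([retriever_tool, Response])
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm_with_tools
    | parse
)
agent_executor = AgentExecutor(tools=[retriever_tool], agent=agent, verbose=True)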
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu')
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import CohereEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, CohereEmbeddings()).as_retriever(
search_kwargs={"k": 20}
)
query = "What did the president say about Ketanji Brown Jackson"
docs = retriever.get_relevant_documents(query)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CohereRerank
from langchain_community.llms import Cohere
llm = Cohere(temperature=0)
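# Rerank the base retriever's results with Cohere's reranker:
compressor = CohereRerank()
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(query)
pretty_print_docs(compressed_docs)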
from langchain.memory import ConversationSummaryBufferMemory
from langchain_openai import OpenAI
llm = OpenAI()
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
memory.load_memory_variables({})
memory = ConversationSummaryBufferMemory(
llm=llm, max_token_limit=10, return_messages=True
)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
messages = memory.chat_memory.messages
previous_summary = ""
memory.predict_new_summary(messages, previous_summary)
from langchain.chains import ConversationChain
conversation_with_summary = ConversationChain(
llm=llm,
    memory=ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40),
    verbose=True,
)
conversation_with_summary.predict(input="Hi, what's up?")
from typing import Callable, List
from langchain.memory import ConversationBufferMemory
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
from langchain.agents import AgentType, initialize_agent, load_tools
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
class DialogueAgentWithTools(DialogueAgent):
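    """A DialogueAgent that can use tools: tool_names are loaded with
    load_tools() and made available when the agent composes a reply."""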
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
tool_names: List[str],
**tool_kwargs,
) -> None:
super().__init__(name, system_message, model)
        self.tools = load_tools(tool_names, **tool_kwargs)  # api: langchain.agents.load_tools
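    # The source row ends here. A hedged sketch of the send() override this
    # class typically carries: answer via a conversational ReAct agent over
    # the tools instead of a bare chat-model call.
    def send(self) -> str:
        agent_chain = initialize_agent(
            self.tools,
            self.model,
            agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
            verbose=True,
            memory=ConversationBufferMemory(
                memory_key="chat_history", return_messages=True
            ),
        )
        message = AIMessage(
            content=agent_chain.run(
                input="\n".join(
                    [self.system_message.content]
                    + self.message_history
                    + [self.prefix]
                )
            )
        )
        return message.content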
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
YBUSER = "[SANDBOX USER]"
YBPASSWORD = "[SANDBOX PASSWORD]"
YBDATABASE = "[SANDBOX_DATABASE]"
YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com"
OPENAI_API_KEY = "[OPENAI API KEY]"
import os
import pathlib
import re
import sys
import urllib.parse as urlparse
from getpass import getpass
import psycopg2
from IPython.display import Markdown, display
from langchain.chains import LLMChain, RetrievalQAWithSourcesChain
from langchain.docstore.document import Document
from langchain_community.vectorstores import Yellowbrick
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
yellowbrick_connection_string = (  # quote the password too, in case it contains URL-special characters
    f"postgres://{urlparse.quote(YBUSER)}:{urlparse.quote(YBPASSWORD)}@{YBHOST}:5432/{YBDATABASE}"
)
YB_DOC_DATABASE = "sample_data"
YB_DOC_TABLE = "yellowbrick_documentation"
embedding_table = "my_embeddings"
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
system_template = """If you don't know the answer, make up your best guess."""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(
model_name="gpt-3.5-turbo", # Modify model_name if you have access to GPT-4
temperature=0,
max_tokens=256,
)
chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=False,
)
def print_result_simple(query):
result = chain(query)
    output_text = f"""### Question:
{query}
### Answer:
{result['text']}
"""
display(Markdown(output_text))
print_result_simple("How many databases can be in a Yellowbrick Instance?")
print_result_simple("What's an easy way to add users in bulk to Yellowbrick?")
try:
conn = psycopg2.connect(yellowbrick_connection_string)
except psycopg2.Error as e:
print(f"Error connecting to the database: {e}")
exit(1)
cursor = conn.cursor()
create_table_query = f"""
CREATE TABLE if not exists {embedding_table} (
id uuid,
embedding_id integer,
text character varying(60000),
metadata character varying(1024),
embedding double precision
)
DISTRIBUTE ON (id);
truncate table {embedding_table};
"""
try:
cursor.execute(create_table_query)
print(f"Table '{embedding_table}' created successfully!")
except psycopg2.Error as e:
print(f"Error creating table: {e}")
conn.rollback()
conn.commit()
cursor.close()
conn.close()
yellowbrick_doc_connection_string = (  # quote the password here as well
    f"postgres://{urlparse.quote(YBUSER)}:{urlparse.quote(YBPASSWORD)}@{YBHOST}:5432/{YB_DOC_DATABASE}"
)
conn = psycopg2.connect(yellowbrick_doc_connection_string)
cursor = conn.cursor()
query = f"SELECT path, document FROM {YB_DOC_TABLE}"
cursor.execute(query)
yellowbrick_documents = cursor.fetchall()
print(f"Extracted {len(yellowbrick_documents)} documents successfully!")
cursor.close()
conn.close()
DOCUMENT_BASE_URL = "https://docs.yellowbrick.com/6.7.1/" # Actual URL
separator = "\n## "  # This separator assumes the Markdown docs use "## " as the logical main header most of the time
chunk_size_limit = 2000
max_chunk_overlap = 200
documents = [
Document(
page_content=document[1],
metadata={"source": DOCUMENT_BASE_URL + document[0].replace(".md", ".html")},
)
for document in yellowbrick_documents
]
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size_limit,
chunk_overlap=max_chunk_overlap,
    separators=[separator, "\n\n", "\n", ",", " ", ""],
)
split_docs = text_splitter.split_documents(documents)
docs_text = [doc.page_content for doc in split_docs]
embeddings = OpenAIEmbeddings()  # api: langchain_openai.OpenAIEmbeddings
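# The source row ends at the completion above. A hedged sketch of the usual
# next step: index the split docs into the Yellowbrick table created earlier
# (the connection_string/table keyword names are assumptions, not taken from
# the source row):
vector_store = Yellowbrick.from_documents(
    documents=split_docs,
    embedding=embeddings,
    connection_string=yellowbrick_connection_string,
    table=embedding_table,
)
print(f"Indexed {len(split_docs)} chunks into '{embedding_table}'")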
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from datetime import datetime
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper
from langchain_openai import OpenAI
oauth_client_id = "ABC..."
oauth_client_secret = "123..."
redirect_uri = "https://google.com"
print("Click this link, select your workspace, click `Connect Workspace`")
print(ClickupAPIWrapper.get_access_code_url(oauth_client_id, redirect_uri))  # api: langchain_community.utilities.clickup.ClickupAPIWrapper.get_access_code_url
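# Hedged sketch of the documented OAuth flow for this toolkit (the code value
# is a placeholder you paste from the redirect URL after authorizing):
code = "THISISMYCODERIGHTHERE"  # placeholder, not a real access code
access_token = ClickupAPIWrapper.get_access_token(
    oauth_client_id, oauth_client_secret, code
)
clickup_api_wrapper = ClickupAPIWrapper(access_token=access_token)
toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup_api_wrapper)
llm = OpenAI(temperature=0)
agent = initialize_agent(
    toolkit.get_tools(),
    llm=llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)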
import functools
import random
from collections import OrderedDict
from typing import Callable, List
import tenacity
from langchain.output_parsers import RegexParser
from langchain.prompts import (
PromptTemplate,
)
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
class IntegerOutputParser(RegexParser):
def get_format_instructions(self) -> str:
return "Your response should be an integer delimited by angled brackets, like this: <int>."
class DirectorDialogueAgent(DialogueAgent):
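    """A dialogue agent that directs the conversation: after each of its turns
    it decides whether to stop and, if not, which speaker goes next."""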
def __init__(
self,
name,
system_message: SystemMessage,
model: ChatOpenAI,
speakers: List[DialogueAgent],
stopping_probability: float,
) -> None:
super().__init__(name, system_message, model)
self.speakers = speakers
self.next_speaker = ""
self.stop = False
self.stopping_probability = stopping_probability
self.termination_clause = "Finish the conversation by stating a concluding message and thanking everyone."
self.continuation_clause = "Do not end the conversation. Keep the conversation going by adding your own ideas."
self.response_prompt_template = PromptTemplate(
input_variables=["message_history", "termination_clause"],
template=f"""{{message_history}}
Follow up with an insightful comment.
{{termination_clause}}
{self.prefix}
""",
)
self.choice_parser = IntegerOutputParser(
regex=r"<(\d+)>", output_keys=["choice"], default_output_key="choice"
)
self.choose_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "speaker_names"],
template=f"""{{message_history}}
Given the above conversation, select the next speaker by choosing index next to their name:
{{speaker_names}}
{self.choice_parser.get_format_instructions()}
Do nothing else.
""",
)
self.prompt_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "next_speaker"],
template=f"""{{message_history}}
The next speaker is {{next_speaker}}.
Prompt the next speaker to speak with an insightful question.
{self.prefix}
""",
)
def _generate_response(self):
sample = random.uniform(0, 1)
self.stop = sample < self.stopping_probability
print(f"\tStop? {self.stop}\n")
response_prompt = self.response_prompt_template.format(
message_history="\n".join(self.message_history),
termination_clause=self.termination_clause if self.stop else "",
)
self.response = self.model(
[
self.system_message,
HumanMessage(content=response_prompt),
]
).content
return self.response
    @tenacity.retry(
        stop=tenacity.stop_after_attempt(2),
        wait=tenacity.wait_none(),  # No waiting time between retries
        retry=tenacity.retry_if_exception_type(ValueError),
        before_sleep=lambda retry_state: print(
            f"ValueError occurred: {retry_state.outcome.exception()}, retrying..."
        ),
        retry_error_callback=lambda retry_state: 0,  # Default value when all retries are exhausted
    )
def _choose_next_speaker(self) -> str:
speaker_names = "\n".join(
[f"{idx}: {name}" for idx, name in enumerate(self.speakers)]
)
choice_prompt = self.choose_next_speaker_prompt_template.format(
message_history="\n".join(
self.message_history + [self.prefix] + [self.response]
),
speaker_names=speaker_names,
)
choice_string = self.model(
[
self.system_message,
HumanMessage(content=choice_prompt),
]
).content
choice = int(self.choice_parser.parse(choice_string)["choice"])
return choice
def select_next_speaker(self):
return self.chosen_speaker_id
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
self.response = self._generate_response()
if self.stop:
message = self.response
else:
self.chosen_speaker_id = self._choose_next_speaker()
self.next_speaker = self.speakers[self.chosen_speaker_id]
print(f"\tNext speaker: {self.next_speaker}\n")
next_prompt = self.prompt_next_speaker_prompt_template.format(
message_history="\n".join(
self.message_history + [self.prefix] + [self.response]
),
next_speaker=self.next_speaker,
)
message = self.model(
[
self.system_message,
HumanMessage(content=next_prompt),
]
).content
message = " ".join([self.response, message])
return message
topic = "The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze"
director_name = "Jon Stewart"
agent_summaries = OrderedDict(
{
"Jon Stewart": ("Host of the Daily Show", "New York"),
"Samantha Bee": ("Hollywood Correspondent", "Los Angeles"),
"Aasif Mandvi": ("CIA Correspondent", "Washington D.C."),
"Ronny Chieng": ("Average American Correspondent", "Cleveland, Ohio"),
}
)
word_limit = 50
agent_summary_string = "\n- ".join(
[""]
+ [
f"{name}: {role}, located in {location}"
for name, (role, location) in agent_summaries.items()
]
)
conversation_description = f"""This is a Daily Show episode discussing the following topic: {topic}.
The episode features {agent_summary_string}."""
agent_descriptor_system_message = SystemMessage(
content="You can add detail to the description of each person."
)
def generate_agent_description(agent_name, agent_role, agent_location):
agent_specifier_prompt = [
agent_descriptor_system_message,
HumanMessage(
content=f"""{conversation_description}
Please reply with a creative description of {agent_name}, who is a {agent_role} in {agent_location}, that emphasizes their particular role and location.
Speak directly to {agent_name} in {word_limit} words or less.
Do not add anything else."""
),
]
agent_description = ChatOpenAI(temperature=1.0)(agent_specifier_prompt).content
return agent_description
def generate_agent_header(agent_name, agent_role, agent_location, agent_description):
return f"""{conversation_description}
Your name is {agent_name}, your role is {agent_role}, and you are located in {agent_location}.
Your description is as follows: {agent_description}
You are discussing the topic: {topic}.
Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.
"""
def generate_agent_system_message(agent_name, agent_header):
return SystemMessage(
content=(
f"""{agent_header}
You will speak in the style of {agent_name}, and exaggerate your personality.
Do not say the same things over and over again.
Speak in the first person from the perspective of {agent_name}
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Speak only from the perspective of {agent_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
agent_descriptions = [
generate_agent_description(name, role, location)
for name, (role, location) in agent_summaries.items()
]
agent_headers = [
generate_agent_header(name, role, location, description)
for (name, (role, location)), description in zip(
agent_summaries.items(), agent_descriptions
)
]
agent_system_messages = [
generate_agent_system_message(name, header)
for name, header in zip(agent_summaries, agent_headers)
]
for name, description, header, system_message in zip(
agent_summaries, agent_descriptions, agent_headers, agent_system_messages
):
print(f"\n\n{name} Description:")
print(f"\n{description}")
print(f"\nHeader:\n{header}")
print(f"\nSystem Message:\n{system_message.content}")
topic_specifier_prompt = [
    SystemMessage(content="You can make a task more specific."),  # api: langchain.schema.SystemMessage
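    # truncated in the source row; a hedged continuation that completes the
    # specifier prompt and narrows the topic (exact wording is assumed):
    HumanMessage(
        content=f"""{topic}

        You are the moderator. Please make the topic more specific.
        Please reply with the specified topic in {word_limit} words or less.
        Do not add anything else."""
    ),
]
specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content
print(f"Original topic:\n{topic}\n")
print(f"Detailed topic:\n{specified_topic}\n")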
get_ipython().system(' pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet')
from pprint import pprint
from docugami import Docugami
from docugami.lib.upload import upload_to_named_docset, wait_for_dgml
DOCSET_NAME = "NTSB Aviation Incident Reports"
FILE_PATHS = [
"/Users/tjaffri/ntsb/Report_CEN23LA277_192541.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA338_192753.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA363_192876.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA394_192995.pdf",
"/Users/tjaffri/ntsb/Report_ERA23LA114_106615.pdf",
"/Users/tjaffri/ntsb/Report_WPR23LA254_192532.pdf",
]
assert len(FILE_PATHS) > 5, "Please provide at least 6 files"
dg_client = Docugami()
dg_docs = upload_to_named_docset(dg_client, FILE_PATHS, DOCSET_NAME)
dgml_paths = wait_for_dgml(dg_client, dg_docs)
pprint(dgml_paths)
from pathlib import Path
from dgml_utils.segmentation import get_chunks_str
dgml_path = dgml_paths[Path(FILE_PATHS[0]).name]
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=True, # Ensures Docugami XML semantic tags are included in the chunked output (set to False for text-only chunks and tables as Markdown)
max_text_length=1024 * 8, # 8k chars are ~2k tokens for OpenAI.
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=False, # text-only chunks and tables as Markdown
    max_text_length=1024 * 8,  # 8k chars are ~2k tokens for OpenAI. Ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
import requests
dgml = requests.get(
"https://raw.githubusercontent.com/docugami/dgml-utils/main/python/tests/test_data/article/Jane%20Doe.xml"
).text
chunks = get_chunks_str(dgml, include_xml_tags=True)
len(chunks)
category_counts = {}
for element in chunks:
category = element.structure
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
category_counts
table_elements = [c for c in chunks if "table" in c.structure.split()]
print(f"There are {len(table_elements)} tables")
text_elements = [c for c in chunks if "table" not in c.structure.split()]
print(f"There are {len(text_elements)} text elements")
for element in text_elements[:20]:
print(element.text)
print(table_elements[0].text)
chunks_as_text = get_chunks_str(dgml, include_xml_tags=False)
table_elements_as_text = [c for c in chunks_as_text if "table" in c.structure.split()]
print(table_elements_as_text[0].text)
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores.chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
def build_retriever(text_elements, tables, table_summaries):
vectorstore = Chroma(
collection_name="summaries", embedding_function=OpenAIEmbeddings()
)
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
texts = [i.text for i in text_elements]
doc_ids = [str(uuid.uuid4()) for _ in texts]
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
        Document(page_content=s, metadata={id_key: table_ids[i]})  # api: langchain_core.documents.Document
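        # truncated in the source row; the for-clause is implied by the i/s
        # usage above, and the rest is a hedged sketch of the usual
        # MultiVectorRetriever wiring:
        for i, s in enumerate(table_summaries)
    ]
    retriever.vectorstore.add_documents(summary_tables)
    retriever.docstore.mset(list(zip(table_ids, tables)))
    return retriever

retriever = build_retriever(text_elements, tables, table_summaries)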
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "unstructured[all-docs]"')
from langchain_community.document_loaders import UnstructuredFileLoader
loader = UnstructuredFileLoader("./example_data/state_of_the_union.txt")
docs = loader.load()
docs[0].page_content[:400]
files = ["./example_data/whatsapp_chat.txt", "./example_data/layout-parser-paper.pdf"]
loader = UnstructuredFileLoader(files)  # api: langchain_community.document_loaders.UnstructuredFileLoader
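# Hedged sketch of the usual follow-up: load both files at once and peek at
# the first document's text (one document per file in the default "single" mode).
docs = loader.load()
print(docs[0].page_content[:400])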
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sentence-transformers > /dev/null')
from langchain.chains import LLMChain, StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_community.document_transformers import (
LongContextReorder,
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
texts = [
"Basquetball is a great sport.",
"Fly me to the moon is one of my favourite songs.",
"The Celtics are my favourite team.",
"This is a document about the Boston Celtics",
"I simply love going to the movies",
"The Boston Celtics won the game by 20 points",
"This is just a random text.",
"Elden Ring is one of the best games in the last 15 years.",
"L. Kornet is one of the best Celtics players.",
"Larry Bird was an iconic NBA player.",
]
retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever(
search_kwargs={"k": 10}
)
query = "What can you tell me about the Celtics?"
docs = retriever.get_relevant_documents(query)
docs
reordering = LongContextReorder()
reordered_docs = reordering.transform_documents(docs)
reordered_docs
document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
stuff_prompt_override = """Given these text extracts:
-----
{context}
-----
Please answer the following question:
{query}"""
prompt = PromptTemplate(
template=stuff_prompt_override, input_variables=["context", "query"]
)
llm_chain = LLMChain(llm=llm, prompt=prompt)  # api: langchain.chains.LLMChain
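# Hedged sketch of the standard continuation (the StuffDocumentsChain pattern
# from the long-context-reorder docs): stuff the reordered documents into the
# prompt and answer the query with them.
chain = StuffDocumentsChain(
    llm_chain=llm_chain,
    document_prompt=document_prompt,
    document_variable_name=document_variable_name,
)
chain.run(input_documents=reordered_docs, query=query)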